diff --git a/CHANGELOG.md b/CHANGELOG.md index 0fa7f36226e23..60a83ad790bcb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,9 +3,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) ## [Unreleased] - ### Added - - Add support for s390x architecture ([#4001](https://github.com/opensearch-project/OpenSearch/pull/4001)) - Github workflow for changelog verification ([#4085](https://github.com/opensearch-project/OpenSearch/pull/4085)) - Point in time rest layer changes for create and delete PIT API ([#4064](https://github.com/opensearch-project/OpenSearch/pull/4064)) @@ -29,15 +27,21 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Added release notes for 1.3.6 ([#4681](https://github.com/opensearch-project/OpenSearch/pull/4681)) - Added precommit support for MacOS ([#4682](https://github.com/opensearch-project/OpenSearch/pull/4682)) - Recommission API changes for service layer ([#4320](https://github.com/opensearch-project/OpenSearch/pull/4320)) +- Update GeoGrid base class access modifier to support extensibility ([#4572](https://github.com/opensearch-project/OpenSearch/pull/4572)) +- Add a new node role 'search' which is dedicated to providing search capability ([#4689](https://github.com/opensearch-project/OpenSearch/pull/4689)) +- Introduce experimental searchable snapshot API ([#4680](https://github.com/opensearch-project/OpenSearch/pull/4680)) +- REST layer support for zone recommissioning ([#4624](https://github.com/opensearch-project/OpenSearch/pull/4604)) ### Dependencies - Bumps `log4j-core` from 2.18.0 to 2.19.0 - Bumps `reactor-netty-http` from 1.0.18 to 1.0.23 - Bumps `jettison` from 1.5.0 to 1.5.1 - Bumps `azure-storage-common` from 12.18.0 to 12.18.1 - Bumps `forbiddenapis` from 3.3 to 3.4 - - -### Dependencies +- Bumps `gson` from 2.9.0 to 2.9.1 +- Bumps `protobuf-java` from 3.21.2 to 3.21.7 +- Bumps `azure-core` from 1.31.0 to 1.33.0 +- Bumps `avro` from 1.11.0 to 1.11.1 +- Bumps `woodstox-core` from 6.3.0 to 6.3.1 - Bumps `xmlbeans` from 5.1.0 to 5.1.1 ([#4354](https://github.com/opensearch-project/OpenSearch/pull/4354)) - Bumps `azure-core-http-netty` from 1.12.0 to 1.12.4 ([#4160](https://github.com/opensearch-project/OpenSearch/pull/4160)) - Bumps `azure-core` from 1.27.0 to 1.31.0 ([#4160](https://github.com/opensearch-project/OpenSearch/pull/4160)) @@ -50,9 +54,7 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Bumps `jempbox` from 1.8.16 to 1.8.17 ([#4550](https://github.com/opensearch-project/OpenSearch/pull/4550)) - Bumps `hadoop-hdfs` from 3.3.3 to 3.3.4 ([#4644](https://github.com/opensearch-project/OpenSearch/pull/4644)) - Bumps `jna` from 5.11.0 to 5.12.1 ([#4656](https://github.com/opensearch-project/OpenSearch/pull/4656)) - ### Changed - - Dependency updates (httpcore, mockito, slf4j, httpasyncclient, commons-codec) ([#4308](https://github.com/opensearch-project/OpenSearch/pull/4308)) - Use RemoteSegmentStoreDirectory instead of RemoteDirectory ([#4240](https://github.com/opensearch-project/OpenSearch/pull/4240)) - Plugin ZIP publication groupId value is configurable ([#4156](https://github.com/opensearch-project/OpenSearch/pull/4156)) @@ -73,16 +75,20 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Add APIs (GET/PUT) to decommission awareness attribute ([#4261](https://github.com/opensearch-project/OpenSearch/pull/4261)) - Improve Gradle pre-commit checks to pre-empt Jenkins build ([#4660](https://github.com/opensearch-project/OpenSearch/pull/4660)) - 
Update to Apache Lucene 9.4.0 ([#4661](https://github.com/opensearch-project/OpenSearch/pull/4661)) +- Controlling discovery for decommissioned nodes ([#4590](https://github.com/opensearch-project/OpenSearch/pull/4590)) - Backport Apache Lucene version change for 2.4.0 ([#4677](https://github.com/opensearch-project/OpenSearch/pull/4677)) - +- Fix weighted routing metadata deserialization error on process restart ([#4691](https://github.com/opensearch-project/OpenSearch/pull/4691)) +- Refactor Base Action class javadocs to OpenSearch.API ([#4732](https://github.com/opensearch-project/OpenSearch/pull/4732)) +- Migrate client transports to Apache HttpClient / Core 5.x ([#4459](https://github.com/opensearch-project/OpenSearch/pull/4459)) ### Deprecated - ### Removed - Remove deprecated code to add node name into log pattern of log4j property file ([#4568](https://github.com/opensearch-project/OpenSearch/pull/4568)) - Unused object and import within TransportClusterAllocationExplainAction ([#4639](https://github.com/opensearch-project/OpenSearch/pull/4639)) - +- Remove LegacyESVersion.V_7_0_* and V_7_1_* Constants ([#2768](https://github.com/opensearch-project/OpenSearch/pull/2768)) +- Remove LegacyESVersion.V_7_2_* and V_7_3_* Constants ([#4702](https://github.com/opensearch-project/OpenSearch/pull/4702)) +- Always auto release the flood stage block ([#4703](https://github.com/opensearch-project/OpenSearch/pull/4703)) +- Remove LegacyESVersion.V_7_4_* and V_7_5_* Constants ([#4704](https://github.com/opensearch-project/OpenSearch/pull/4704)) ### Fixed - - `opensearch-service.bat start` and `opensearch-service.bat manager` failing to run ([#4289](https://github.com/opensearch-project/OpenSearch/pull/4289)) - PR reference to checkout code for changelog verifier ([#4296](https://github.com/opensearch-project/OpenSearch/pull/4296)) - `opensearch.bat` and `opensearch-service.bat install` failing to run, missing logs directory ([#4305](https://github.com/opensearch-project/OpenSearch/pull/4305)) @@ -122,15 +128,12 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Fix flaky DecommissionControllerTests.testTimesOut ([4683](https://github.com/opensearch-project/OpenSearch/pull/4683)) - Fix new race condition in DecommissionControllerTests ([4688](https://github.com/opensearch-project/OpenSearch/pull/4688)) - Fix SearchStats (de)serialization (caused by https://github.com/opensearch-project/OpenSearch/pull/4616) ([#4697](https://github.com/opensearch-project/OpenSearch/pull/4697)) - +- Fixing Gradle warnings associated with publishPluginZipPublicationToXxx tasks ([#4696](https://github.com/opensearch-project/OpenSearch/pull/4696)) ### Security - - CVE-2022-25857 org.yaml:snakeyaml DOS vulnerability ([#4341](https://github.com/opensearch-project/OpenSearch/pull/4341)) ## [2.x] - ### Added - - Github workflow for changelog verification ([#4085](https://github.com/opensearch-project/OpenSearch/pull/4085)) - Label configuration for dependabot PRs ([#4348](https://github.com/opensearch-project/OpenSearch/pull/4348)) - Added RestLayer Changes for PIT stats ([#4217](https://github.com/opensearch-project/OpenSearch/pull/4217)) @@ -138,19 +141,15 @@ Inspired from [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) - Addition of Doc values on the GeoShape Field - Addition of GeoShape ValueSource level code interfaces for accessing the DocValues. - Addition of Missing Value feature in the GeoShape Aggregations. 
- +- Install and configure Log4j JUL Adapter for Lucene 9.4 ([#4754](https://github.com/opensearch-project/OpenSearch/pull/4754)) ### Changed - ### Deprecated - ### Removed - ### Fixed - - PR reference to checkout code for changelog verifier ([#4296](https://github.com/opensearch-project/OpenSearch/pull/4296)) - Commit workflow for dependabot changelog helper ([#4331](https://github.com/opensearch-project/OpenSearch/pull/4331)) +- Better plural stemmer than minimal_english ([#4738](https://github.com/opensearch-project/OpenSearch/pull/4738)) ### Security - [Unreleased]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...HEAD [2.x]: https://github.com/opensearch-project/OpenSearch/compare/2.2.0...2.x diff --git a/buildSrc/src/main/java/org/opensearch/gradle/PublishPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/PublishPlugin.java index 2bdef8e4cd244..be12fdd99c1df 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/PublishPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/PublishPlugin.java @@ -92,7 +92,7 @@ public String call() throws Exception { return String.format( "%s/distributions/%s-%s.pom", project.getBuildDir(), - getArchivesBaseName(project), + pomTask.getName().toLowerCase().contains("zip") ? project.getName() : getArchivesBaseName(project), project.getVersion() ); } @@ -130,7 +130,6 @@ public String call() throws Exception { publication.getPom().withXml(PublishPlugin::addScmInfo); if (!publication.getName().toLowerCase().contains("zip")) { - // have to defer this until archivesBaseName is set project.afterEvaluate(p -> publication.setArtifactId(getArchivesBaseName(project))); @@ -139,6 +138,8 @@ public String call() throws Exception { publication.artifact(project.getTasks().getByName("sourcesJar")); publication.artifact(project.getTasks().getByName("javadocJar")); } + } else { + project.afterEvaluate(p -> publication.setArtifactId(project.getName())); } generatePomTask.configure( diff --git a/buildSrc/src/main/java/org/opensearch/gradle/pluginzip/Publish.java b/buildSrc/src/main/java/org/opensearch/gradle/pluginzip/Publish.java index 6dc7d660922b2..6b581fcaa7774 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/pluginzip/Publish.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/pluginzip/Publish.java @@ -13,12 +13,14 @@ import org.gradle.api.publish.maven.MavenPublication; import java.nio.file.Path; +import java.util.Set; +import java.util.stream.Collectors; + import org.gradle.api.Task; import org.gradle.api.publish.maven.plugins.MavenPublishPlugin; public class Publish implements Plugin<Project> { - // public final static String PLUGIN_ZIP_PUBLISH_POM_TASK = "generatePomFileForPluginZipPublication"; public final static String PUBLICATION_NAME = "pluginZip"; public final static String STAGING_REPO = "zipStaging"; public final static String LOCAL_STAGING_REPO_PATH = "/build/local-staging-repo"; @@ -67,10 +69,15 @@ public void apply(Project project) { if (validatePluginZipPom != null) { validatePluginZipPom.dependsOn("generatePomFileForNebulaPublication"); } - Task publishPluginZipPublicationToZipStagingRepository = project.getTasks() .findByName("publishPluginZipPublicationToZipStagingRepository"); - if (publishPluginZipPublicationToZipStagingRepository != null) { - publishPluginZipPublicationToZipStagingRepository.dependsOn("generatePomFileForNebulaPublication"); + + // There are a number of tasks prefixed with 'publishPluginZipPublication', e.g.: 
// publishPluginZipPublicationToZipStagingRepository, 
publishPluginZipPublicationToMavenLocal + final Set<Task> publishPluginZipPublicationToTasks = project.getTasks() .stream() .filter(t -> t.getName().startsWith("publishPluginZipPublicationTo")) .collect(Collectors.toSet()); + if (!publishPluginZipPublicationToTasks.isEmpty()) { + publishPluginZipPublicationToTasks.forEach(t -> t.dependsOn("generatePomFileForNebulaPublication")); } } else { project.getLogger() diff --git a/buildSrc/src/main/resources/forbidden/http-signatures.txt b/buildSrc/src/main/resources/forbidden/http-signatures.txt index dcf20bbb09387..bfd81b3521a40 100644 --- a/buildSrc/src/main/resources/forbidden/http-signatures.txt +++ b/buildSrc/src/main/resources/forbidden/http-signatures.txt @@ -15,31 +15,14 @@ # language governing permissions and limitations under the License. @defaultMessage Explicitly specify the ContentType of HTTP entities when creating -org.apache.http.entity.StringEntity#<init>(java.lang.String) -org.apache.http.entity.StringEntity#<init>(java.lang.String,java.lang.String) -org.apache.http.entity.StringEntity#<init>(java.lang.String,java.nio.charset.Charset) -org.apache.http.entity.ByteArrayEntity#<init>(byte[]) -org.apache.http.entity.ByteArrayEntity#<init>(byte[],int,int) -org.apache.http.entity.FileEntity#<init>(java.io.File) -org.apache.http.entity.InputStreamEntity#<init>(java.io.InputStream) -org.apache.http.entity.InputStreamEntity#<init>(java.io.InputStream,long) -org.apache.http.nio.entity.NByteArrayEntity#<init>(byte[]) -org.apache.http.nio.entity.NByteArrayEntity#<init>(byte[],int,int) -org.apache.http.nio.entity.NFileEntity#<init>(java.io.File) -org.apache.http.nio.entity.NStringEntity#<init>(java.lang.String) -org.apache.http.nio.entity.NStringEntity#<init>(java.lang.String,java.lang.String) +org.apache.hc.core5.http.io.entity.StringEntity#<init>(java.lang.String) +org.apache.hc.core5.http.io.entity.StringEntity#<init>(java.lang.String,java.nio.charset.Charset) @defaultMessage Use non-deprecated constructors -org.apache.http.nio.entity.NFileEntity#<init>(java.io.File,java.lang.String) -org.apache.http.nio.entity.NFileEntity#<init>(java.io.File,java.lang.String,boolean) -org.apache.http.entity.FileEntity#<init>(java.io.File,java.lang.String) -org.apache.http.entity.StringEntity#<init>(java.lang.String,java.lang.String,java.lang.String) +org.apache.hc.core5.http.io.entity.FileEntity#<init>(java.io.File,org.apache.hc.core5.http.ContentType) @defaultMessage BasicEntity is easy to mess up and forget to set content type -org.apache.http.entity.BasicHttpEntity#<init>() - -@defaultMessage EntityTemplate is easy to mess up and forget to set content type -org.apache.http.entity.EntityTemplate#<init>(org.apache.http.entity.ContentProducer) +org.apache.hc.core5.http.io.entity.BasicHttpEntity#<init>(java.io.InputStream,org.apache.hc.core5.http.ContentType) @defaultMessage SerializableEntity uses java serialization and makes it easy to forget to set content type -org.apache.http.entity.SerializableEntity#<init>(java.io.Serializable) +org.apache.hc.core5.http.io.entity.SerializableEntity#<init>(java.io.Serializable,org.apache.hc.core5.http.ContentType) diff --git a/buildSrc/version.properties b/buildSrc/version.properties index bf72245c63918..a779389b3ca82 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -25,6 +25,8 @@ netty = 4.1.79.Final joda = 2.10.13 # client dependencies +httpclient5 = 5.1.3 +httpcore5 = 5.1.4 httpclient = 4.5.13 httpcore = 4.4.15 httpasyncclient = 4.1.5 diff --git a/client/benchmark/src/main/java/org/opensearch/client/benchmark/rest/RestClientBenchmark.java b/client/benchmark/src/main/java/org/opensearch/client/benchmark/rest/RestClientBenchmark.java 
index d2d7163b8dee2..e8dcff814603d 100644 --- a/client/benchmark/src/main/java/org/opensearch/client/benchmark/rest/RestClientBenchmark.java +++ b/client/benchmark/src/main/java/org/opensearch/client/benchmark/rest/RestClientBenchmark.java @@ -31,10 +31,10 @@ package org.opensearch.client.benchmark.rest; -import org.apache.http.HttpHeaders; -import org.apache.http.HttpHost; -import org.apache.http.HttpStatus; -import org.apache.http.message.BasicHeader; +import org.apache.hc.core5.http.HttpHeaders; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.HttpStatus; +import org.apache.hc.core5.http.message.BasicHeader; import org.opensearch.OpenSearchException; import org.opensearch.client.Request; import org.opensearch.client.Response; diff --git a/client/rest-high-level/build.gradle b/client/rest-high-level/build.gradle index 07147ce81b72e..7fa2855d85487 100644 --- a/client/rest-high-level/build.gradle +++ b/client/rest-high-level/build.gradle @@ -104,3 +104,9 @@ testClusters.all { extraConfigFile nodeTrustStore.name, nodeTrustStore extraConfigFile pkiTrustCert.name, pkiTrustCert } + +thirdPartyAudit.ignoreMissingClasses( + 'org.conscrypt.Conscrypt', + 'org.slf4j.Logger', + 'org.slf4j.LoggerFactory' +) diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/ClusterRequestConverters.java b/client/rest-high-level/src/main/java/org/opensearch/client/ClusterRequestConverters.java index 37a1ab8812845..4ff8e75b521b6 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/ClusterRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/ClusterRequestConverters.java @@ -32,10 +32,10 @@ package org.opensearch.client; -import org.apache.http.client.methods.HttpDelete; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.client.methods.HttpHead; -import org.apache.http.client.methods.HttpPut; +import org.apache.hc.client5.http.classic.methods.HttpDelete; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.client5.http.classic.methods.HttpHead; +import org.apache.hc.client5.http.classic.methods.HttpPut; import org.opensearch.action.admin.cluster.health.ClusterHealthRequest; import org.opensearch.action.admin.cluster.settings.ClusterGetSettingsRequest; import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/IndicesRequestConverters.java b/client/rest-high-level/src/main/java/org/opensearch/client/IndicesRequestConverters.java index 3a5384f23b90e..ca9154340a660 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/IndicesRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/IndicesRequestConverters.java @@ -32,11 +32,11 @@ package org.opensearch.client; -import org.apache.http.client.methods.HttpDelete; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.client.methods.HttpHead; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.client.methods.HttpPut; +import org.apache.hc.client5.http.classic.methods.HttpDelete; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.client5.http.classic.methods.HttpHead; +import org.apache.hc.client5.http.classic.methods.HttpPost; +import org.apache.hc.client5.http.classic.methods.HttpPut; import org.opensearch.action.admin.indices.alias.IndicesAliasesRequest; import 
org.opensearch.action.admin.indices.alias.get.GetAliasesRequest; import org.opensearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest; diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/IngestRequestConverters.java b/client/rest-high-level/src/main/java/org/opensearch/client/IngestRequestConverters.java index 2504dec3af36e..4c044413642ac 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/IngestRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/IngestRequestConverters.java @@ -32,10 +32,10 @@ package org.opensearch.client; -import org.apache.http.client.methods.HttpDelete; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.client.methods.HttpPut; +import org.apache.hc.client5.http.classic.methods.HttpDelete; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.client5.http.classic.methods.HttpPost; +import org.apache.hc.client5.http.classic.methods.HttpPut; import org.opensearch.action.ingest.DeletePipelineRequest; import org.opensearch.action.ingest.GetPipelineRequest; import org.opensearch.action.ingest.PutPipelineRequest; diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java index 91c339cc92c1b..88e3a3a904830 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/RequestConverters.java @@ -32,14 +32,14 @@ package org.opensearch.client; -import org.apache.http.HttpEntity; -import org.apache.http.client.methods.HttpDelete; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.client.methods.HttpHead; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.client.methods.HttpPut; -import org.apache.http.entity.ContentType; -import org.apache.http.nio.entity.NByteArrayEntity; +import org.apache.hc.client5.http.classic.methods.HttpDelete; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.client5.http.classic.methods.HttpHead; +import org.apache.hc.client5.http.classic.methods.HttpPost; +import org.apache.hc.client5.http.classic.methods.HttpPut; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.io.entity.ByteArrayEntity; import org.apache.lucene.util.BytesRef; import org.opensearch.action.DocWriteRequest; import org.opensearch.action.admin.cluster.health.ClusterHealthRequest; @@ -269,7 +269,7 @@ static Request bulk(BulkRequest bulkRequest) throws IOException { } } request.addParameters(parameters.asMap()); - request.setEntity(new NByteArrayEntity(content.toByteArray(), 0, content.size(), requestContentType)); + request.setEntity(new ByteArrayEntity(content.toByteArray(), 0, content.size(), requestContentType)); return request; } @@ -358,7 +358,7 @@ static Request index(IndexRequest indexRequest) { BytesRef source = indexRequest.source().toBytesRef(); ContentType contentType = createContentType(indexRequest.getContentType()); request.addParameters(parameters.asMap()); - request.setEntity(new NByteArrayEntity(source.bytes, source.offset, source.length, contentType)); + request.setEntity(new ByteArrayEntity(source.bytes, source.offset, source.length, contentType)); return request; } @@ -514,7 +514,7 @@ static Request multiSearch(MultiSearchRequest 
multiSearchRequest) throws IOExcep XContent xContent = REQUEST_BODY_CONTENT_TYPE.xContent(); byte[] source = MultiSearchRequest.writeMultiLineFormat(multiSearchRequest, xContent); request.addParameters(params.asMap()); - request.setEntity(new NByteArrayEntity(source, createContentType(xContent.type()))); + request.setEntity(new ByteArrayEntity(source, createContentType(xContent.type()))); return request; } @@ -549,7 +549,7 @@ static Request multiSearchTemplate(MultiSearchTemplateRequest multiSearchTemplat XContent xContent = REQUEST_BODY_CONTENT_TYPE.xContent(); byte[] source = MultiSearchTemplateRequest.writeMultiLineFormat(multiSearchTemplateRequest, xContent); - request.setEntity(new NByteArrayEntity(source, createContentType(xContent.type()))); + request.setEntity(new ByteArrayEntity(source, createContentType(xContent.type()))); return request; } @@ -817,7 +817,7 @@ static HttpEntity createEntity(ToXContent toXContent, XContentType xContentType) static HttpEntity createEntity(ToXContent toXContent, XContentType xContentType, ToXContent.Params toXContentParams) throws IOException { BytesRef source = XContentHelper.toXContent(toXContent, xContentType, toXContentParams, false).toBytesRef(); - return new NByteArrayEntity(source.bytes, source.offset, source.length, createContentType(xContentType)); + return new ByteArrayEntity(source.bytes, source.offset, source.length, createContentType(xContentType)); } static String endpoint(String index, String id) { diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java index 0a5880b778942..27f13fc3c00c4 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/RestHighLevelClient.java @@ -32,7 +32,7 @@ package org.opensearch.client; -import org.apache.http.HttpEntity; +import org.apache.hc.core5.http.HttpEntity; import org.opensearch.OpenSearchException; import org.opensearch.OpenSearchStatusException; import org.opensearch.action.ActionListener; @@ -2220,9 +2220,9 @@ protected final Resp parseEntity(final HttpEntity entity, final CheckedFu if (entity.getContentType() == null) { throw new IllegalStateException("OpenSearch didn't return the [Content-Type] header, unable to parse response body"); } - XContentType xContentType = XContentType.fromMediaType(entity.getContentType().getValue()); + XContentType xContentType = XContentType.fromMediaType(entity.getContentType()); if (xContentType == null) { - throw new IllegalStateException("Unsupported Content-Type: " + entity.getContentType().getValue()); + throw new IllegalStateException("Unsupported Content-Type: " + entity.getContentType()); } try (XContentParser parser = xContentType.xContent().createParser(registry, DEPRECATION_HANDLER, entity.getContent())) { return entityParser.apply(parser); diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/SnapshotRequestConverters.java b/client/rest-high-level/src/main/java/org/opensearch/client/SnapshotRequestConverters.java index 3d44820966608..263d7db82ba08 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/SnapshotRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/SnapshotRequestConverters.java @@ -32,10 +32,10 @@ package org.opensearch.client; -import org.apache.http.client.methods.HttpDelete; -import org.apache.http.client.methods.HttpGet; -import 
org.apache.http.client.methods.HttpPost; -import org.apache.http.client.methods.HttpPut; +import org.apache.hc.client5.http.classic.methods.HttpDelete; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.client5.http.classic.methods.HttpPost; +import org.apache.hc.client5.http.classic.methods.HttpPut; import org.opensearch.action.admin.cluster.repositories.cleanup.CleanupRepositoryRequest; import org.opensearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest; import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/TasksRequestConverters.java b/client/rest-high-level/src/main/java/org/opensearch/client/TasksRequestConverters.java index ff89950f37cb9..78a74ca04ff9b 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/TasksRequestConverters.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/TasksRequestConverters.java @@ -32,8 +32,8 @@ package org.opensearch.client; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.client.methods.HttpPost; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.client5.http.classic.methods.HttpPost; import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksRequest; import org.opensearch.client.RequestConverters.EndpointBuilder; import org.opensearch.client.tasks.CancelTasksRequest; diff --git a/client/rest-high-level/src/main/resources/forbidden/rest-high-level-signatures.txt b/client/rest-high-level/src/main/resources/forbidden/rest-high-level-signatures.txt index 68dc509e5ff27..0d7749b39fb91 100644 --- a/client/rest-high-level/src/main/resources/forbidden/rest-high-level-signatures.txt +++ b/client/rest-high-level/src/main/resources/forbidden/rest-high-level-signatures.txt @@ -15,10 +15,9 @@ # language governing permissions and limitations under the License. @defaultMessage Use Request#createContentType(XContentType) to be sure to pass the right MIME type -org.apache.http.entity.ContentType#create(java.lang.String) -org.apache.http.entity.ContentType#create(java.lang.String,java.lang.String) -org.apache.http.entity.ContentType#create(java.lang.String,java.nio.charset.Charset) -org.apache.http.entity.ContentType#create(java.lang.String,org.apache.http.NameValuePair[]) +org.apache.hc.core5.http.ContentType#create(java.lang.String) +org.apache.hc.core5.http.ContentType#create(java.lang.String,java.lang.String) +org.apache.hc.core5.http.ContentType#create(java.lang.String,java.nio.charset.Charset) @defaultMessage ES's logging infrastructure uses log4j2 which we don't want to force on high level rest client users org.opensearch.common.logging.DeprecationLogger @@ -30,7 +29,3 @@ org.opensearch.common.logging.PrefixLogger @defaultMessage We can't rely on log4j2 being on the classpath so don't log deprecations! 
org.opensearch.common.xcontent.LoggingDeprecationHandler - -@defaultMessage Use Nonblocking org.apache.http.nio.entity.NByteArrayEntity -org.apache.http.entity.ByteArrayEntity -org.apache.http.entity.StringEntity diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/ClusterClientIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/ClusterClientIT.java index 71b869fb59e7b..82d2cbe9149ca 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/ClusterClientIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/ClusterClientIT.java @@ -32,7 +32,8 @@ package org.opensearch.client; -import org.apache.http.util.EntityUtils; +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.io.entity.EntityUtils; import org.opensearch.OpenSearchException; import org.opensearch.OpenSearchStatusException; import org.opensearch.action.admin.cluster.health.ClusterHealthRequest; @@ -220,7 +221,7 @@ public void testClusterHealthGreen() throws IOException { assertThat(response.getStatus(), equalTo(ClusterHealthStatus.GREEN)); } - public void testClusterHealthYellowClusterLevel() throws IOException { + public void testClusterHealthYellowClusterLevel() throws IOException, ParseException { createIndex("index", Settings.EMPTY); createIndex("index2", Settings.EMPTY); ClusterHealthRequest request = new ClusterHealthRequest(); @@ -231,7 +232,7 @@ public void testClusterHealthYellowClusterLevel() throws IOException { assertThat(response.getIndices().size(), equalTo(0)); } - public void testClusterHealthYellowIndicesLevel() throws IOException { + public void testClusterHealthYellowIndicesLevel() throws IOException, ParseException { String firstIndex = "index"; String secondIndex = "index2"; // including another index that we do not assert on, to ensure that we are not diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/ClusterRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/ClusterRequestConvertersTests.java index 27adc18fd37b8..f201599632969 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/ClusterRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/ClusterRequestConvertersTests.java @@ -32,8 +32,6 @@ package org.opensearch.client; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.client.methods.HttpPut; import org.opensearch.action.admin.cluster.health.ClusterHealthRequest; import org.opensearch.action.admin.cluster.settings.ClusterGetSettingsRequest; import org.opensearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; @@ -44,6 +42,8 @@ import org.opensearch.common.Priority; import org.opensearch.common.util.CollectionUtils; import org.opensearch.test.OpenSearchTestCase; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.client5.http.classic.methods.HttpPut; import org.hamcrest.CoreMatchers; import org.junit.Assert; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/CustomRestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/CustomRestHighLevelClientTests.java index 1d94f190c611c..972c96999945f 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/CustomRestHighLevelClientTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/CustomRestHighLevelClientTests.java @@ -32,15 +32,14 @@ package org.opensearch.client; -import org.apache.http.Header; -import 
org.apache.http.HttpHost; -import org.apache.http.ProtocolVersion; -import org.apache.http.RequestLine; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.entity.ContentType; -import org.apache.http.message.BasicRequestLine; -import org.apache.http.message.BasicStatusLine; -import org.apache.http.nio.entity.NByteArrayEntity; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.ProtocolVersion; +import org.apache.hc.core5.http.io.entity.ByteArrayEntity; +import org.apache.hc.core5.http.message.RequestLine; +import org.apache.hc.core5.http.message.StatusLine; import org.apache.lucene.util.BytesRef; import org.opensearch.Build; import org.opensearch.Version; @@ -172,13 +171,13 @@ private Response mockPerformRequest(Request request) throws IOException { when(mockResponse.getHost()).thenReturn(new HttpHost("localhost", 9200)); ProtocolVersion protocol = new ProtocolVersion("HTTP", 1, 1); - when(mockResponse.getStatusLine()).thenReturn(new BasicStatusLine(protocol, 200, "OK")); + when(mockResponse.getStatusLine()).thenReturn(new StatusLine(protocol, 200, "OK")); MainResponse response = new MainResponse(httpHeader.getValue(), Version.CURRENT, ClusterName.DEFAULT, "_na", Build.CURRENT); BytesRef bytesRef = XContentHelper.toXContent(response, XContentType.JSON, false).toBytesRef(); - when(mockResponse.getEntity()).thenReturn(new NByteArrayEntity(bytesRef.bytes, ContentType.APPLICATION_JSON)); + when(mockResponse.getEntity()).thenReturn(new ByteArrayEntity(bytesRef.bytes, ContentType.APPLICATION_JSON)); - RequestLine requestLine = new BasicRequestLine(HttpGet.METHOD_NAME, ENDPOINT, protocol); + RequestLine requestLine = new RequestLine(HttpGet.METHOD_NAME, ENDPOINT, protocol); when(mockResponse.getRequestLine()).thenReturn(requestLine); return mockResponse; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/HighLevelRestClientCompressionIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/HighLevelRestClientCompressionIT.java index c0c03ed1d0e7c..054d0ae8670b5 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/HighLevelRestClientCompressionIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/HighLevelRestClientCompressionIT.java @@ -31,9 +31,9 @@ package org.opensearch.client; -import org.apache.http.HttpHeaders; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.client.methods.HttpPut; +import org.apache.hc.client5.http.classic.methods.HttpPost; +import org.apache.hc.client5.http.classic.methods.HttpPut; +import org.apache.hc.core5.http.HttpHeaders; import org.opensearch.action.search.SearchRequest; import org.opensearch.action.search.SearchResponse; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/IndicesClientIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/IndicesClientIT.java index f9c8851f8839e..750b0c15e9c14 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/IndicesClientIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/IndicesClientIT.java @@ -32,9 +32,9 @@ package org.opensearch.client; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.client.methods.HttpPut; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import 
org.apache.hc.client5.http.classic.methods.HttpPost; +import org.apache.hc.client5.http.classic.methods.HttpPut; import org.opensearch.OpenSearchException; import org.opensearch.OpenSearchStatusException; import org.opensearch.action.admin.indices.alias.Alias; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/IndicesRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/IndicesRequestConvertersTests.java index fdb5f2843b44d..512cc058a64a7 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/IndicesRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/IndicesRequestConvertersTests.java @@ -32,11 +32,11 @@ package org.opensearch.client; -import org.apache.http.client.methods.HttpDelete; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.client.methods.HttpHead; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.client.methods.HttpPut; +import org.apache.hc.client5.http.classic.methods.HttpDelete; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.client5.http.classic.methods.HttpHead; +import org.apache.hc.client5.http.classic.methods.HttpPost; +import org.apache.hc.client5.http.classic.methods.HttpPut; import org.apache.lucene.tests.util.LuceneTestCase; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.admin.indices.alias.Alias; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/IngestRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/IngestRequestConvertersTests.java index 200069ade1ea2..8aae33307279b 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/IngestRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/IngestRequestConvertersTests.java @@ -32,10 +32,6 @@ package org.opensearch.client; -import org.apache.http.client.methods.HttpDelete; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.client.methods.HttpPut; import org.opensearch.action.ingest.DeletePipelineRequest; import org.opensearch.action.ingest.GetPipelineRequest; import org.opensearch.action.ingest.PutPipelineRequest; @@ -44,6 +40,10 @@ import org.opensearch.common.bytes.BytesArray; import org.opensearch.common.xcontent.XContentType; import org.opensearch.test.OpenSearchTestCase; +import org.apache.hc.client5.http.classic.methods.HttpDelete; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.client5.http.classic.methods.HttpPost; +import org.apache.hc.client5.http.classic.methods.HttpPut; import org.junit.Assert; import java.io.IOException; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/MockRestHighLevelTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/MockRestHighLevelTests.java index bd57c5c9e53f6..e1179c0f24cb8 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/MockRestHighLevelTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/MockRestHighLevelTests.java @@ -32,13 +32,12 @@ package org.opensearch.client; -import org.apache.http.HttpHost; -import org.apache.http.ProtocolVersion; -import org.apache.http.RequestLine; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.message.BasicRequestLine; -import org.apache.http.message.BasicStatusLine; import 
org.opensearch.test.OpenSearchTestCase; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.ProtocolVersion; +import org.apache.hc.core5.http.message.RequestLine; +import org.apache.hc.core5.http.message.StatusLine; import org.junit.Before; import java.io.IOException; @@ -64,9 +63,9 @@ private void setupClient() throws IOException { when(mockResponse.getWarnings()).thenReturn(WARNINGS); ProtocolVersion protocol = new ProtocolVersion("HTTP", 1, 1); - when(mockResponse.getStatusLine()).thenReturn(new BasicStatusLine(protocol, 200, "OK")); + when(mockResponse.getStatusLine()).thenReturn(new StatusLine(protocol, 200, "OK")); - RequestLine requestLine = new BasicRequestLine(HttpGet.METHOD_NAME, "/_blah", protocol); + RequestLine requestLine = new RequestLine(HttpGet.METHOD_NAME, "/_blah", protocol); when(mockResponse.getRequestLine()).thenReturn(requestLine); WarningFailureException expectedException = new WarningFailureException(mockResponse); diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/OpenSearchRestHighLevelClientTestCase.java b/client/rest-high-level/src/test/java/org/opensearch/client/OpenSearchRestHighLevelClientTestCase.java index efac508cf6814..a8c73393f54ce 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/OpenSearchRestHighLevelClientTestCase.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/OpenSearchRestHighLevelClientTestCase.java @@ -32,7 +32,7 @@ package org.opensearch.client; -import org.apache.http.util.EntityUtils; +import org.opensearch.OpenSearchParseException; import org.opensearch.action.ActionListener; import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksRequest; import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksResponse; @@ -61,6 +61,8 @@ import org.opensearch.search.SearchModule; import org.opensearch.tasks.TaskId; import org.opensearch.test.rest.OpenSearchRestTestCase; +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.io.entity.EntityUtils; import org.junit.AfterClass; import org.junit.Before; @@ -324,7 +326,7 @@ protected static void setupRemoteClusterConfig(String remoteClusterName) throws }); } - protected static Map<String, Object> toMap(Response response) throws IOException { + protected static Map<String, Object> toMap(Response response) throws IOException, OpenSearchParseException, ParseException { return XContentHelper.convertToMap(JsonXContent.jsonXContent, EntityUtils.toString(response.getEntity()), false); } diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/PingAndInfoIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/PingAndInfoIT.java index 09ef90cef144d..6f66a5279afa3 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/PingAndInfoIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/PingAndInfoIT.java @@ -32,7 +32,7 @@ package org.opensearch.client; -import org.apache.http.client.methods.HttpGet; +import org.apache.hc.client5.http.classic.methods.HttpGet; import org.opensearch.client.core.MainResponse; import java.io.IOException; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/PitIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/PitIT.java index be9b614a8720f..1f10deb400ecc 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/PitIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/PitIT.java @@ -8,8 +8,8 @@ 
package org.opensearch.client; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.client.methods.HttpPut; +import org.apache.hc.client5.http.classic.methods.HttpPost; +import org.apache.hc.client5.http.classic.methods.HttpPut; import org.junit.Before; import org.opensearch.OpenSearchStatusException; import org.opensearch.action.ActionListener; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java index ee5795deb165d..576fe02718ba3 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/RequestConvertersTests.java @@ -32,14 +32,6 @@ package org.opensearch.client; -import org.apache.http.HttpEntity; -import org.apache.http.client.methods.HttpDelete; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.client.methods.HttpHead; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.client.methods.HttpPut; -import org.apache.http.nio.entity.NByteArrayEntity; -import org.apache.http.util.EntityUtils; import org.opensearch.action.DocWriteRequest; import org.opensearch.action.admin.cluster.storedscripts.DeleteStoredScriptRequest; import org.opensearch.action.admin.cluster.storedscripts.GetStoredScriptRequest; @@ -120,6 +112,14 @@ import org.opensearch.tasks.TaskId; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.RandomObjects; +import org.apache.hc.client5.http.classic.methods.HttpDelete; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.client5.http.classic.methods.HttpHead; +import org.apache.hc.client5.http.classic.methods.HttpPost; +import org.apache.hc.client5.http.classic.methods.HttpPut; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.io.entity.ByteArrayEntity; +import org.apache.hc.core5.http.io.entity.EntityUtils; import org.hamcrest.Matchers; import java.io.IOException; @@ -733,8 +733,8 @@ public void testIndex() throws IOException { assertEquals(method, request.getMethod()); HttpEntity entity = request.getEntity(); - assertTrue(entity instanceof NByteArrayEntity); - assertEquals(indexRequest.getContentType().mediaTypeWithoutParameters(), entity.getContentType().getValue()); + assertTrue(entity instanceof ByteArrayEntity); + assertEquals(indexRequest.getContentType().mediaTypeWithoutParameters(), entity.getContentType()); try (XContentParser parser = createParser(xContentType.xContent(), entity.getContent())) { assertEquals(nbFields, parser.map().size()); } @@ -805,11 +805,11 @@ public void testUpdate() throws IOException { assertEquals(HttpPost.METHOD_NAME, request.getMethod()); HttpEntity entity = request.getEntity(); - assertTrue(entity instanceof NByteArrayEntity); + assertTrue(entity instanceof ByteArrayEntity); UpdateRequest parsedUpdateRequest = new UpdateRequest(); - XContentType entityContentType = XContentType.fromMediaType(entity.getContentType().getValue()); + XContentType entityContentType = XContentType.fromMediaType(entity.getContentType()); try (XContentParser parser = createParser(entityContentType.xContent(), entity.getContent())) { parsedUpdateRequest.fromXContent(parser); } @@ -926,7 +926,7 @@ public void testBulk() throws IOException { assertEquals("/_bulk", request.getEndpoint()); assertEquals(expectedParams, request.getParameters()); assertEquals(HttpPost.METHOD_NAME, 
request.getMethod()); - assertEquals(xContentType.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue()); + assertEquals(xContentType.mediaTypeWithoutParameters(), request.getEntity().getContentType()); byte[] content = new byte[(int) request.getEntity().getContentLength()]; try (InputStream inputStream = request.getEntity().getContent()) { Streams.readFully(inputStream, content); @@ -979,7 +979,7 @@ public void testBulkWithDifferentContentTypes() throws IOException { bulkRequest.add(new DeleteRequest("index", "2")); Request request = RequestConverters.bulk(bulkRequest); - assertEquals(XContentType.JSON.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue()); + assertEquals(XContentType.JSON.mediaTypeWithoutParameters(), request.getEntity().getContentType()); } { XContentType xContentType = randomFrom(XContentType.JSON, XContentType.SMILE); @@ -989,7 +989,7 @@ public void testBulkWithDifferentContentTypes() throws IOException { bulkRequest.add(new DeleteRequest("index", "2")); Request request = RequestConverters.bulk(bulkRequest); - assertEquals(xContentType.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue()); + assertEquals(xContentType.mediaTypeWithoutParameters(), request.getEntity().getContentType()); } { XContentType xContentType = randomFrom(XContentType.JSON, XContentType.SMILE); @@ -1001,7 +1001,7 @@ public void testBulkWithDifferentContentTypes() throws IOException { } Request request = RequestConverters.bulk(new BulkRequest().add(updateRequest)); - assertEquals(xContentType.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue()); + assertEquals(xContentType.mediaTypeWithoutParameters(), request.getEntity().getContentType()); } { BulkRequest bulkRequest = new BulkRequest(); @@ -1289,7 +1289,7 @@ public void testSearchScroll() throws IOException { assertEquals("/_search/scroll", request.getEndpoint()); assertEquals(0, request.getParameters().size()); assertToXContentBody(searchScrollRequest, request.getEntity()); - assertEquals(REQUEST_BODY_CONTENT_TYPE.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue()); + assertEquals(REQUEST_BODY_CONTENT_TYPE.mediaTypeWithoutParameters(), request.getEntity().getContentType()); } public void testClearScroll() throws IOException { @@ -1303,7 +1303,7 @@ public void testClearScroll() throws IOException { assertEquals("/_search/scroll", request.getEndpoint()); assertEquals(0, request.getParameters().size()); assertToXContentBody(clearScrollRequest, request.getEntity()); - assertEquals(REQUEST_BODY_CONTENT_TYPE.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue()); + assertEquals(REQUEST_BODY_CONTENT_TYPE.mediaTypeWithoutParameters(), request.getEntity().getContentType()); } public void testCreatePit() throws IOException { @@ -1324,7 +1324,7 @@ public void testCreatePit() throws IOException { assertEquals(endpoint.toString(), request.getEndpoint()); assertEquals(expectedParams, request.getParameters()); assertToXContentBody(createPitRequest, request.getEntity()); - assertEquals(REQUEST_BODY_CONTENT_TYPE.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue()); + assertEquals(REQUEST_BODY_CONTENT_TYPE.mediaTypeWithoutParameters(), request.getEntity().getContentType()); } public void testDeletePit() throws IOException { @@ -1337,7 +1337,7 @@ public void testDeletePit() throws IOException { assertEquals(HttpDelete.METHOD_NAME, request.getMethod()); assertEquals(endpoint, 
request.getEndpoint()); assertToXContentBody(deletePitRequest, request.getEntity()); - assertEquals(REQUEST_BODY_CONTENT_TYPE.mediaTypeWithoutParameters(), request.getEntity().getContentType().getValue()); + assertEquals(REQUEST_BODY_CONTENT_TYPE.mediaTypeWithoutParameters(), request.getEntity().getContentType()); } public void testDeleteAllPits() { @@ -1456,7 +1456,7 @@ public void testMultiSearchTemplate() throws Exception { HttpEntity actualEntity = multiRequest.getEntity(); byte[] expectedBytes = MultiSearchTemplateRequest.writeMultiLineFormat(multiSearchTemplateRequest, XContentType.JSON.xContent()); - assertEquals(XContentType.JSON.mediaTypeWithoutParameters(), actualEntity.getContentType().getValue()); + assertEquals(XContentType.JSON.mediaTypeWithoutParameters(), actualEntity.getContentType()); assertEquals(new BytesArray(expectedBytes), new BytesArray(EntityUtils.toByteArray(actualEntity))); } @@ -1763,7 +1763,7 @@ public void testDeleteScriptRequest() { static void assertToXContentBody(ToXContent expectedBody, HttpEntity actualEntity) throws IOException { BytesReference expectedBytes = XContentHelper.toXContent(expectedBody, REQUEST_BODY_CONTENT_TYPE, false); - assertEquals(XContentType.JSON.mediaTypeWithoutParameters(), actualEntity.getContentType().getValue()); + assertEquals(XContentType.JSON.mediaTypeWithoutParameters(), actualEntity.getContentType()); assertEquals(expectedBytes, new BytesArray(EntityUtils.toByteArray(actualEntity))); } diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientExtTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientExtTests.java index dbdf7eba3dca4..5743820ff0175 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientExtTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientExtTests.java @@ -32,9 +32,9 @@ package org.opensearch.client; -import org.apache.http.HttpEntity; -import org.apache.http.entity.ContentType; -import org.apache.http.nio.entity.NStringEntity; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.io.entity.StringEntity; import org.junit.Before; import org.opensearch.common.ParseField; import org.opensearch.common.xcontent.NamedXContentRegistry; @@ -64,14 +64,14 @@ public void initClient() { public void testParseEntityCustomResponseSection() throws IOException { { - HttpEntity jsonEntity = new NStringEntity("{\"custom1\":{ \"field\":\"value\"}}", ContentType.APPLICATION_JSON); + HttpEntity jsonEntity = new StringEntity("{\"custom1\":{ \"field\":\"value\"}}", ContentType.APPLICATION_JSON); BaseCustomResponseSection customSection = restHighLevelClient.parseEntity(jsonEntity, BaseCustomResponseSection::fromXContent); assertThat(customSection, instanceOf(CustomResponseSection1.class)); CustomResponseSection1 customResponseSection1 = (CustomResponseSection1) customSection; assertEquals("value", customResponseSection1.value); } { - HttpEntity jsonEntity = new NStringEntity("{\"custom2\":{ \"array\": [\"item1\", \"item2\"]}}", ContentType.APPLICATION_JSON); + HttpEntity jsonEntity = new StringEntity("{\"custom2\":{ \"array\": [\"item1\", \"item2\"]}}", ContentType.APPLICATION_JSON); BaseCustomResponseSection customSection = restHighLevelClient.parseEntity(jsonEntity, BaseCustomResponseSection::fromXContent); assertThat(customSection, instanceOf(CustomResponseSection2.class)); CustomResponseSection2 
customResponseSection2 = (CustomResponseSection2) customSection; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java index 4d989ff53df35..d522bf8c4d005 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/RestHighLevelClientTests.java @@ -33,19 +33,6 @@ package org.opensearch.client; import com.fasterxml.jackson.core.JsonParseException; -import org.apache.http.HttpEntity; -import org.apache.http.HttpHost; -import org.apache.http.HttpResponse; -import org.apache.http.ProtocolVersion; -import org.apache.http.RequestLine; -import org.apache.http.StatusLine; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.entity.ContentType; -import org.apache.http.message.BasicHttpResponse; -import org.apache.http.message.BasicRequestLine; -import org.apache.http.message.BasicStatusLine; -import org.apache.http.nio.entity.NByteArrayEntity; -import org.apache.http.nio.entity.NStringEntity; import org.opensearch.OpenSearchException; import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRequest; @@ -87,6 +74,17 @@ import org.opensearch.test.InternalAggregationTestCase; import org.opensearch.test.rest.yaml.restspec.ClientYamlSuiteRestApi; import org.opensearch.test.rest.yaml.restspec.ClientYamlSuiteRestSpec; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.core5.http.ClassicHttpResponse; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.ProtocolVersion; +import org.apache.hc.core5.http.io.entity.ByteArrayEntity; +import org.apache.hc.core5.http.io.entity.StringEntity; +import org.apache.hc.core5.http.message.BasicClassicHttpResponse; +import org.apache.hc.core5.http.message.RequestLine; +import org.apache.hc.core5.http.message.StatusLine; import org.hamcrest.Matchers; import org.junit.Before; @@ -123,7 +121,7 @@ public class RestHighLevelClientTests extends OpenSearchTestCase { private static final String SUBMIT_TASK_PREFIX = "submit_"; private static final String SUBMIT_TASK_SUFFIX = "_task"; private static final ProtocolVersion HTTP_PROTOCOL = new ProtocolVersion("http", 1, 1); - private static final RequestLine REQUEST_LINE = new BasicRequestLine(HttpGet.METHOD_NAME, "/", HTTP_PROTOCOL); + private static final RequestLine REQUEST_LINE = new RequestLine(HttpGet.METHOD_NAME, "/", HTTP_PROTOCOL); /** * These APIs do not use a Request object (because they don't have a body, or any request parameters). 
@@ -258,7 +256,7 @@ private void mockResponse(ToXContent toXContent) throws IOException { Response response = mock(Response.class); ContentType contentType = ContentType.parse(RequestConverters.REQUEST_BODY_CONTENT_TYPE.mediaType()); String requestBody = toXContent(toXContent, RequestConverters.REQUEST_BODY_CONTENT_TYPE, false).utf8ToString(); - when(response.getEntity()).thenReturn(new NStringEntity(requestBody, contentType)); + when(response.getEntity()).thenReturn(new StringEntity(requestBody, contentType)); when(restClient.performRequest(any(Request.class))).thenReturn(response); } @@ -308,14 +306,14 @@ public void testParseEntity() throws IOException { { IllegalStateException ise = expectThrows( IllegalStateException.class, - () -> restHighLevelClient.parseEntity(new NStringEntity("", (ContentType) null), null) + () -> restHighLevelClient.parseEntity(new StringEntity("", (ContentType) null), null) ); assertEquals("OpenSearch didn't return the [Content-Type] header, unable to parse response body", ise.getMessage()); } { - NStringEntity entity = new NStringEntity("", ContentType.APPLICATION_SVG_XML); + StringEntity entity = new StringEntity("", ContentType.APPLICATION_SVG_XML); IllegalStateException ise = expectThrows(IllegalStateException.class, () -> restHighLevelClient.parseEntity(entity, null)); - assertEquals("Unsupported Content-Type: " + entity.getContentType().getValue(), ise.getMessage()); + assertEquals("Unsupported Content-Type: " + entity.getContentType(), ise.getMessage()); } { CheckedFunction entityParser = parser -> { @@ -326,9 +324,9 @@ public void testParseEntity() throws IOException { assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken()); return value; }; - HttpEntity jsonEntity = new NStringEntity("{\"field\":\"value\"}", ContentType.APPLICATION_JSON); + HttpEntity jsonEntity = new StringEntity("{\"field\":\"value\"}", ContentType.APPLICATION_JSON); assertEquals("value", restHighLevelClient.parseEntity(jsonEntity, entityParser)); - HttpEntity yamlEntity = new NStringEntity("---\nfield: value\n", ContentType.create("application/yaml")); + HttpEntity yamlEntity = new StringEntity("---\nfield: value\n", ContentType.create("application/yaml")); assertEquals("value", restHighLevelClient.parseEntity(yamlEntity, entityParser)); HttpEntity smileEntity = createBinaryEntity(SmileXContent.contentBuilder(), ContentType.create("application/smile")); assertEquals("value", restHighLevelClient.parseEntity(smileEntity, entityParser)); @@ -342,13 +340,13 @@ private static HttpEntity createBinaryEntity(XContentBuilder xContentBuilder, Co builder.startObject(); builder.field("field", "value"); builder.endObject(); - return new NByteArrayEntity(BytesReference.bytes(builder).toBytesRef().bytes, contentType); + return new ByteArrayEntity(BytesReference.bytes(builder).toBytesRef().bytes, contentType); } } public void testConvertExistsResponse() { RestStatus restStatus = randomBoolean() ? 
RestStatus.OK : randomFrom(RestStatus.values()); - HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(restStatus.getStatus(), restStatus.name()); Response response = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); boolean result = RestHighLevelClient.convertExistsResponse(response); assertEquals(restStatus == RestStatus.OK, result); @@ -357,7 +355,7 @@ public void testConvertExistsResponse() { public void testParseResponseException() throws IOException { { RestStatus restStatus = randomFrom(RestStatus.values()); - HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(restStatus.getStatus(), restStatus.name()); Response response = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(response); OpenSearchException openSearchException = restHighLevelClient.parseResponseException(responseException); @@ -367,9 +365,9 @@ public void testParseResponseException() throws IOException { } { RestStatus restStatus = randomFrom(RestStatus.values()); - HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(restStatus.getStatus(), restStatus.name()); httpResponse.setEntity( - new NStringEntity( + new StringEntity( "{\"error\":\"test error message\",\"status\":" + restStatus.getStatus() + "}", ContentType.APPLICATION_JSON ) @@ -383,8 +381,8 @@ public void testParseResponseException() throws IOException { } { RestStatus restStatus = randomFrom(RestStatus.values()); - HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus)); - httpResponse.setEntity(new NStringEntity("{\"error\":", ContentType.APPLICATION_JSON)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(restStatus.getStatus(), restStatus.name()); + httpResponse.setEntity(new StringEntity("{\"error\":", ContentType.APPLICATION_JSON)); Response response = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(response); OpenSearchException openSearchException = restHighLevelClient.parseResponseException(responseException); @@ -395,8 +393,8 @@ public void testParseResponseException() throws IOException { } { RestStatus restStatus = randomFrom(RestStatus.values()); - HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus)); - httpResponse.setEntity(new NStringEntity("{\"status\":" + restStatus.getStatus() + "}", ContentType.APPLICATION_JSON)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(restStatus.getStatus(), restStatus.name()); + httpResponse.setEntity(new StringEntity("{\"status\":" + restStatus.getStatus() + "}", ContentType.APPLICATION_JSON)); Response response = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(response); OpenSearchException openSearchException = restHighLevelClient.parseResponseException(responseException); @@ -411,7 +409,7 @@ public void testPerformRequestOnSuccess() throws IOException { MainRequest mainRequest = new MainRequest(); CheckedFunction requestConverter = request -> new Request(HttpGet.METHOD_NAME, "/"); RestStatus restStatus = randomFrom(RestStatus.values()); - HttpResponse httpResponse = new 
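The repeated substitution in these test methods is the same one each time: 4.x responses were built around a `StatusLine` object (`new BasicHttpResponse(newStatusLine(restStatus))`), while 5.x's `BasicClassicHttpResponse` takes the status code and reason phrase straight in its constructor, so the helper is no longer needed at construction time. A minimal sketch of the pattern the tests now use (status values illustrative):

```java
import org.apache.hc.core5.http.ClassicHttpResponse;
import org.apache.hc.core5.http.ContentType;
import org.apache.hc.core5.http.io.entity.StringEntity;
import org.apache.hc.core5.http.message.BasicClassicHttpResponse;

public class ResponseSketch {
    public static void main(String[] args) {
        // 4.x: new BasicHttpResponse(new BasicStatusLine(HTTP_PROTOCOL, 200, "OK"))
        // 5.x: code and reason phrase go straight into the constructor
        ClassicHttpResponse response = new BasicClassicHttpResponse(200, "OK");
        response.setEntity(new StringEntity("{\"status\":200}", ContentType.APPLICATION_JSON));
        System.out.println(response.getCode() + " " + response.getReasonPhrase()); // 200 OK
    }
}
```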
BasicHttpResponse(newStatusLine(restStatus)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(restStatus.getStatus(), restStatus.name()); Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); when(restClient.performRequest(any(Request.class))).thenReturn(mockResponse); { @@ -437,7 +435,7 @@ public void testPerformRequestOnSuccess() throws IOException { ); assertEquals( "Unable to parse response body for Response{requestLine=GET / http/1.1, host=http://localhost:9200, " - + "response=http/1.1 " + + "response=HTTP/1.1 " + restStatus.getStatus() + " " + restStatus.name() @@ -451,7 +449,7 @@ public void testPerformRequestOnResponseExceptionWithoutEntity() throws IOExcept MainRequest mainRequest = new MainRequest(); CheckedFunction requestConverter = request -> new Request(HttpGet.METHOD_NAME, "/"); RestStatus restStatus = randomFrom(RestStatus.values()); - HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(restStatus.getStatus(), restStatus.name()); Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(mockResponse); when(restClient.performRequest(any(Request.class))).thenThrow(responseException); @@ -474,9 +472,9 @@ public void testPerformRequestOnResponseExceptionWithEntity() throws IOException MainRequest mainRequest = new MainRequest(); CheckedFunction requestConverter = request -> new Request(HttpGet.METHOD_NAME, "/"); RestStatus restStatus = randomFrom(RestStatus.values()); - HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(restStatus.getStatus(), restStatus.name()); httpResponse.setEntity( - new NStringEntity("{\"error\":\"test error message\",\"status\":" + restStatus.getStatus() + "}", ContentType.APPLICATION_JSON) + new StringEntity("{\"error\":\"test error message\",\"status\":" + restStatus.getStatus() + "}", ContentType.APPLICATION_JSON) ); Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(mockResponse); @@ -500,8 +498,8 @@ public void testPerformRequestOnResponseExceptionWithBrokenEntity() throws IOExc MainRequest mainRequest = new MainRequest(); CheckedFunction requestConverter = request -> new Request(HttpGet.METHOD_NAME, "/"); RestStatus restStatus = randomFrom(RestStatus.values()); - HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus)); - httpResponse.setEntity(new NStringEntity("{\"error\":", ContentType.APPLICATION_JSON)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(restStatus.getStatus(), restStatus.name()); + httpResponse.setEntity(new StringEntity("{\"error\":", ContentType.APPLICATION_JSON)); Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(mockResponse); when(restClient.performRequest(any(Request.class))).thenThrow(responseException); @@ -525,8 +523,8 @@ public void testPerformRequestOnResponseExceptionWithBrokenEntity2() throws IOEx MainRequest mainRequest = new MainRequest(); CheckedFunction requestConverter = request -> new Request(HttpGet.METHOD_NAME, "/"); RestStatus restStatus = randomFrom(RestStatus.values()); - HttpResponse httpResponse = new 
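The expected error strings flip from "http/1.1" to "HTTP/1.1" in this hunk. A plausible reading (inferred from the core5 defaults, not stated in the diff) is that the responses built above carry no explicit protocol version, and core5's `StatusLine(HttpResponse)` constructor falls back to `HttpVersion.HTTP_1_1`, whose protocol name is upper-case, whereas the 4.x tests supplied their own lower-case `ProtocolVersion("http", 1, 1)`. A small sketch of that default:

```java
import org.apache.hc.core5.http.HttpVersion;
import org.apache.hc.core5.http.message.BasicClassicHttpResponse;
import org.apache.hc.core5.http.message.StatusLine;

public class StatusLineDefaultSketch {
    public static void main(String[] args) {
        // No version set on the response; StatusLine falls back to HttpVersion.HTTP_1_1
        StatusLine line = new StatusLine(new BasicClassicHttpResponse(404, "NOT_FOUND"));
        System.out.println(line);                 // e.g. HTTP/1.1 404 NOT_FOUND
        System.out.println(HttpVersion.HTTP_1_1); // HTTP/1.1
    }
}
```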
BasicHttpResponse(newStatusLine(restStatus)); - httpResponse.setEntity(new NStringEntity("{\"status\":" + restStatus.getStatus() + "}", ContentType.APPLICATION_JSON)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(restStatus.getStatus(), restStatus.name()); + httpResponse.setEntity(new StringEntity("{\"status\":" + restStatus.getStatus() + "}", ContentType.APPLICATION_JSON)); Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(mockResponse); when(restClient.performRequest(any(Request.class))).thenThrow(responseException); @@ -549,7 +547,7 @@ public void testPerformRequestOnResponseExceptionWithBrokenEntity2() throws IOEx public void testPerformRequestOnResponseExceptionWithIgnores() throws IOException { MainRequest mainRequest = new MainRequest(); CheckedFunction requestConverter = request -> new Request(HttpGet.METHOD_NAME, "/"); - HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(RestStatus.NOT_FOUND)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(RestStatus.NOT_FOUND.getStatus(), RestStatus.NOT_FOUND.name()); Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(mockResponse); when(restClient.performRequest(any(Request.class))).thenThrow(responseException); @@ -569,7 +567,7 @@ public void testPerformRequestOnResponseExceptionWithIgnores() throws IOExceptio public void testPerformRequestOnResponseExceptionWithIgnoresErrorNoBody() throws IOException { MainRequest mainRequest = new MainRequest(); CheckedFunction requestConverter = request -> new Request(HttpGet.METHOD_NAME, "/"); - HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(RestStatus.NOT_FOUND)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(RestStatus.NOT_FOUND.getStatus(), RestStatus.NOT_FOUND.name()); Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(mockResponse); when(restClient.performRequest(any(Request.class))).thenThrow(responseException); @@ -591,8 +589,8 @@ public void testPerformRequestOnResponseExceptionWithIgnoresErrorNoBody() throws public void testPerformRequestOnResponseExceptionWithIgnoresErrorValidBody() throws IOException { MainRequest mainRequest = new MainRequest(); CheckedFunction requestConverter = request -> new Request(HttpGet.METHOD_NAME, "/"); - HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(RestStatus.NOT_FOUND)); - httpResponse.setEntity(new NStringEntity("{\"error\":\"test error message\",\"status\":404}", ContentType.APPLICATION_JSON)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(RestStatus.NOT_FOUND.getStatus(), RestStatus.NOT_FOUND.name()); + httpResponse.setEntity(new StringEntity("{\"error\":\"test error message\",\"status\":404}", ContentType.APPLICATION_JSON)); Response mockResponse = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(mockResponse); when(restClient.performRequest(any(Request.class))).thenThrow(responseException); @@ -620,7 +618,7 @@ public void testWrapResponseListenerOnSuccess() { Collections.emptySet() ); RestStatus restStatus = randomFrom(RestStatus.values()); - HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus)); + 
ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(restStatus.getStatus(), restStatus.name()); responseListener.onSuccess(new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse)); assertNull(trackingActionListener.exception.get()); assertEquals(restStatus.getStatus(), trackingActionListener.statusCode.get()); @@ -633,13 +631,13 @@ public void testWrapResponseListenerOnSuccess() { Collections.emptySet() ); RestStatus restStatus = randomFrom(RestStatus.values()); - HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(restStatus.getStatus(), restStatus.name()); responseListener.onSuccess(new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse)); assertThat(trackingActionListener.exception.get(), instanceOf(IOException.class)); IOException ioe = (IOException) trackingActionListener.exception.get(); assertEquals( "Unable to parse response body for Response{requestLine=GET / http/1.1, host=http://localhost:9200, " - + "response=http/1.1 " + + "response=HTTP/1.1 " + restStatus.getStatus() + " " + restStatus.name() @@ -670,7 +668,7 @@ public void testWrapResponseListenerOnResponseExceptionWithoutEntity() throws IO Collections.emptySet() ); RestStatus restStatus = randomFrom(RestStatus.values()); - HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(restStatus.getStatus(), restStatus.name()); Response response = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(response); responseListener.onFailure(responseException); @@ -689,9 +687,9 @@ public void testWrapResponseListenerOnResponseExceptionWithEntity() throws IOExc Collections.emptySet() ); RestStatus restStatus = randomFrom(RestStatus.values()); - HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(restStatus.getStatus(), restStatus.name()); httpResponse.setEntity( - new NStringEntity("{\"error\":\"test error message\",\"status\":" + restStatus.getStatus() + "}", ContentType.APPLICATION_JSON) + new StringEntity("{\"error\":\"test error message\",\"status\":" + restStatus.getStatus() + "}", ContentType.APPLICATION_JSON) ); Response response = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(response); @@ -712,8 +710,8 @@ public void testWrapResponseListenerOnResponseExceptionWithBrokenEntity() throws Collections.emptySet() ); RestStatus restStatus = randomFrom(RestStatus.values()); - HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(restStatus)); - httpResponse.setEntity(new NStringEntity("{\"error\":", ContentType.APPLICATION_JSON)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(restStatus.getStatus(), restStatus.name()); + httpResponse.setEntity(new StringEntity("{\"error\":", ContentType.APPLICATION_JSON)); Response response = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(response); responseListener.onFailure(responseException); @@ -732,8 +730,8 @@ public void testWrapResponseListenerOnResponseExceptionWithBrokenEntity() throws Collections.emptySet() ); RestStatus restStatus = randomFrom(RestStatus.values()); - HttpResponse httpResponse = 
new BasicHttpResponse(newStatusLine(restStatus)); - httpResponse.setEntity(new NStringEntity("{\"status\":" + restStatus.getStatus() + "}", ContentType.APPLICATION_JSON)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(restStatus.getStatus(), restStatus.name()); + httpResponse.setEntity(new StringEntity("{\"status\":" + restStatus.getStatus() + "}", ContentType.APPLICATION_JSON)); Response response = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(response); responseListener.onFailure(responseException); @@ -753,7 +751,7 @@ public void testWrapResponseListenerOnResponseExceptionWithIgnores() throws IOEx trackingActionListener, Collections.singleton(404) ); - HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(RestStatus.NOT_FOUND)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(RestStatus.NOT_FOUND.getStatus(), RestStatus.NOT_FOUND.name()); Response response = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(response); responseListener.onFailure(responseException); @@ -771,7 +769,7 @@ public void testWrapResponseListenerOnResponseExceptionWithIgnoresErrorNoBody() trackingActionListener, Collections.singleton(404) ); - HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(RestStatus.NOT_FOUND)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(RestStatus.NOT_FOUND.getStatus(), RestStatus.NOT_FOUND.name()); Response response = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(response); responseListener.onFailure(responseException); @@ -791,8 +789,8 @@ public void testWrapResponseListenerOnResponseExceptionWithIgnoresErrorValidBody trackingActionListener, Collections.singleton(404) ); - HttpResponse httpResponse = new BasicHttpResponse(newStatusLine(RestStatus.NOT_FOUND)); - httpResponse.setEntity(new NStringEntity("{\"error\":\"test error message\",\"status\":404}", ContentType.APPLICATION_JSON)); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(RestStatus.NOT_FOUND.getStatus(), RestStatus.NOT_FOUND.name()); + httpResponse.setEntity(new StringEntity("{\"error\":\"test error message\",\"status\":404}", ContentType.APPLICATION_JSON)); Response response = new Response(REQUEST_LINE, new HttpHost("localhost", 9200), httpResponse); ResponseException responseException = new ResponseException(response); responseListener.onFailure(responseException); @@ -893,7 +891,8 @@ public void testApiNamingConventions() throws Exception { "cluster.get_weighted_routing", "cluster.delete_weighted_routing", "cluster.put_decommission_awareness", - "cluster.get_decommission_awareness", }; + "cluster.get_decommission_awareness", + "cluster.delete_decommission_awareness", }; List booleanReturnMethods = Arrays.asList("security.enable_user", "security.disable_user", "security.change_password"); Set deprecatedMethods = new HashSet<>(); deprecatedMethods.add("indices.force_merge"); @@ -1163,6 +1162,6 @@ public void onFailure(Exception e) { } private static StatusLine newStatusLine(RestStatus restStatus) { - return new BasicStatusLine(HTTP_PROTOCOL, restStatus.getStatus(), restStatus.name()); + return new StatusLine(HTTP_PROTOCOL, restStatus.getStatus(), restStatus.name()); } } diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/SearchIT.java 
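The final hunk in this file shows that `BasicStatusLine` likewise disappears: in 5.x, `StatusLine` itself is a concrete immutable class in `org.apache.hc.core5.http.message`, so the test's `newStatusLine` helper simply calls its three-argument constructor. An illustrative sketch of the retained helper (the wrapper class is hypothetical):

```java
import org.apache.hc.core5.http.ProtocolVersion;
import org.apache.hc.core5.http.message.StatusLine;

public class NewStatusLineSketch {
    private static final ProtocolVersion HTTP_PROTOCOL = new ProtocolVersion("http", 1, 1);

    // Mirrors the helper kept in the test class: build a StatusLine from a code and reason
    static StatusLine newStatusLine(int status, String reason) {
        return new StatusLine(HTTP_PROTOCOL, status, reason);
    }

    public static void main(String[] args) {
        System.out.println(newStatusLine(200, "OK")); // e.g. http/1.1 200 OK
    }
}
```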
b/client/rest-high-level/src/test/java/org/opensearch/client/SearchIT.java index 8b509e5d19e92..cc6f08217d057 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/SearchIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/SearchIT.java @@ -32,8 +32,6 @@ package org.opensearch.client; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.client.methods.HttpPut; import org.opensearch.OpenSearchException; import org.opensearch.OpenSearchStatusException; import org.opensearch.action.explain.ExplainRequest; @@ -101,6 +99,8 @@ import org.opensearch.search.suggest.Suggest; import org.opensearch.search.suggest.SuggestBuilder; import org.opensearch.search.suggest.phrase.PhraseSuggestionBuilder; +import org.apache.hc.client5.http.classic.methods.HttpPost; +import org.apache.hc.client5.http.classic.methods.HttpPut; import org.hamcrest.Matchers; import org.junit.Before; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/SnapshotRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/SnapshotRequestConvertersTests.java index 10baaa2e53dd4..e86de6ba718f9 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/SnapshotRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/SnapshotRequestConvertersTests.java @@ -32,10 +32,10 @@ package org.opensearch.client; -import org.apache.http.client.methods.HttpDelete; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.client.methods.HttpPut; +import org.apache.hc.client5.http.classic.methods.HttpDelete; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.client5.http.classic.methods.HttpPost; +import org.apache.hc.client5.http.classic.methods.HttpPut; import org.opensearch.action.admin.cluster.repositories.delete.DeleteRepositoryRequest; import org.opensearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; import org.opensearch.action.admin.cluster.repositories.put.PutRepositoryRequest; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/TasksRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/TasksRequestConvertersTests.java index 64fec3c8fb810..a777bbc5d1868 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/TasksRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/TasksRequestConvertersTests.java @@ -32,8 +32,8 @@ package org.opensearch.client; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.client.methods.HttpPost; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.client5.http.classic.methods.HttpPost; import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksRequest; import org.opensearch.client.tasks.CancelTasksRequest; import org.opensearch.tasks.TaskId; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/CRUDDocumentationIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/CRUDDocumentationIT.java index 959c5a827f143..c63b311feebc7 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/CRUDDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/CRUDDocumentationIT.java @@ -32,7 +32,7 @@ package org.opensearch.client.documentation; -import org.apache.http.HttpHost; 
+import org.apache.hc.core5.http.HttpHost; import org.opensearch.OpenSearchException; import org.opensearch.action.ActionListener; import org.opensearch.action.DocWriteRequest; diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/MiscellaneousDocumentationIT.java b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/MiscellaneousDocumentationIT.java index 0213441a0b6a7..3edf639da8867 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/documentation/MiscellaneousDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/documentation/MiscellaneousDocumentationIT.java @@ -32,7 +32,7 @@ package org.opensearch.client.documentation; -import org.apache.http.HttpHost; +import org.apache.hc.core5.http.HttpHost; import org.opensearch.client.OpenSearchRestHighLevelClientTestCase; import org.opensearch.client.RequestOptions; import org.opensearch.client.RestClient; @@ -92,8 +92,8 @@ public void testInitializationFromClientBuilder() throws IOException { //tag::rest-high-level-client-init RestHighLevelClient client = new RestHighLevelClient( RestClient.builder( - new HttpHost("localhost", 9200, "http"), - new HttpHost("localhost", 9201, "http"))); + new HttpHost("http", "localhost", 9200), + new HttpHost("http", "localhost", 9201))); //end::rest-high-level-client-init //tag::rest-high-level-client-close diff --git a/client/rest/build.gradle b/client/rest/build.gradle index 01c186ed83fc2..eacef14d17ce2 100644 --- a/client/rest/build.gradle +++ b/client/rest/build.gradle @@ -40,12 +40,12 @@ group = 'org.opensearch.client' archivesBaseName = 'opensearch-rest-client' dependencies { - api "org.apache.httpcomponents:httpclient:${versions.httpclient}" - api "org.apache.httpcomponents:httpcore:${versions.httpcore}" - api "org.apache.httpcomponents:httpasyncclient:${versions.httpasyncclient}" - api "org.apache.httpcomponents:httpcore-nio:${versions.httpcore}" + api "org.apache.httpcomponents.client5:httpclient5:${versions.httpclient5}" + api "org.apache.httpcomponents.core5:httpcore5:${versions.httpcore5}" + api "org.apache.httpcomponents.core5:httpcore5-h2:${versions.httpcore5}" api "commons-codec:commons-codec:${versions.commonscodec}" api "commons-logging:commons-logging:${versions.commonslogging}" + api "org.slf4j:slf4j-api:${versions.slf4j}" testImplementation project(":client:test") testImplementation "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" @@ -54,6 +54,10 @@ dependencies { testImplementation "org.mockito:mockito-core:${versions.mockito}" testImplementation "org.objenesis:objenesis:${versions.objenesis}" testImplementation "net.bytebuddy:byte-buddy:${versions.bytebuddy}" + testImplementation "org.apache.logging.log4j:log4j-api:${versions.log4j}" + testImplementation "org.apache.logging.log4j:log4j-core:${versions.log4j}" + testImplementation "org.apache.logging.log4j:log4j-jul:${versions.log4j}" + testImplementation "org.apache.logging.log4j:log4j-slf4j-impl:${versions.log4j}" } tasks.withType(CheckForbiddenApis).configureEach { @@ -85,6 +89,10 @@ testingConventions { } thirdPartyAudit.ignoreMissingClasses( + 'org.conscrypt.Conscrypt', + 'org.slf4j.impl.StaticLoggerBinder', + 'org.slf4j.impl.StaticMDCBinder', + 'org.slf4j.impl.StaticMarkerBinder', //commons-logging optional dependencies 'org.apache.avalon.framework.logger.Logger', 'org.apache.log.Hierarchy', diff --git a/client/rest/licenses/httpasyncclient-4.1.5.jar.sha1 
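The `MiscellaneousDocumentationIT` hunk above captures the one constructor whose argument order actually changes: `HttpHost` in 5.x takes the scheme first (`new HttpHost(scheme, hostname, port)`) instead of last, which is why the documentation snippet is rewritten rather than just re-imported. The `client/rest/build.gradle` hunk tracks the same move at the dependency level: `httpclient5`, `httpcore5`, and `httpcore5-h2` replace the 4.x client, core, async-client, and NIO artifacts, and `slf4j-api` is added because HttpClient 5 logs through SLF4J (the new log4j test dependencies and `thirdPartyAudit` ignores support that at test time). A sketch of the constructor change (output illustrative):

```java
import org.apache.hc.core5.http.HttpHost;

public class HttpHostSketch {
    public static void main(String[] args) {
        // 4.x: new HttpHost("localhost", 9200, "http")  -- scheme last
        // 5.x: scheme comes first
        HttpHost host = new HttpHost("http", "localhost", 9200);
        System.out.println(host.toURI()); // http://localhost:9200
    }
}
```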
b/client/rest/licenses/httpasyncclient-4.1.5.jar.sha1 deleted file mode 100644 index 366a9e31069a6..0000000000000 --- a/client/rest/licenses/httpasyncclient-4.1.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -cd18227f1eb8e9a263286c1d7362ceb24f6f9b32 \ No newline at end of file diff --git a/client/rest/licenses/httpasyncclient-LICENSE.txt b/client/rest/licenses/httpasyncclient-LICENSE.txt deleted file mode 100644 index 2c41ec88f61cf..0000000000000 --- a/client/rest/licenses/httpasyncclient-LICENSE.txt +++ /dev/null @@ -1,182 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - -This project contains annotations derived from JCIP-ANNOTATIONS -Copyright (c) 2005 Brian Goetz and Tim Peierls. -See http://www.jcip.net and the Creative Commons Attribution License -(http://creativecommons.org/licenses/by/2.5) - diff --git a/client/rest/licenses/httpasyncclient-NOTICE.txt b/client/rest/licenses/httpasyncclient-NOTICE.txt deleted file mode 100644 index b45be98d168a4..0000000000000 --- a/client/rest/licenses/httpasyncclient-NOTICE.txt +++ /dev/null @@ -1,5 +0,0 @@ -Apache HttpComponents AsyncClient -Copyright 2010-2016 The Apache Software Foundation - -This product includes software developed at -The Apache Software Foundation (http://www.apache.org/). diff --git a/client/rest/licenses/httpclient-4.5.13.jar.sha1 b/client/rest/licenses/httpclient-4.5.13.jar.sha1 deleted file mode 100644 index 3281e21595b39..0000000000000 --- a/client/rest/licenses/httpclient-4.5.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e5f6cae5ca7ecaac1ec2827a9e2d65ae2869cada \ No newline at end of file diff --git a/client/rest/licenses/httpclient5-5.1.3.jar.sha1 b/client/rest/licenses/httpclient5-5.1.3.jar.sha1 new file mode 100644 index 0000000000000..b18cf050ac8df --- /dev/null +++ b/client/rest/licenses/httpclient5-5.1.3.jar.sha1 @@ -0,0 +1 @@ +13c984b7b881afcff3a7f0bb95878724a48a4b66 \ No newline at end of file diff --git a/client/rest/licenses/httpclient-LICENSE.txt b/client/rest/licenses/httpclient5-LICENSE.txt similarity index 100% rename from client/rest/licenses/httpclient-LICENSE.txt rename to client/rest/licenses/httpclient5-LICENSE.txt diff --git a/client/rest/licenses/httpclient-NOTICE.txt b/client/rest/licenses/httpclient5-NOTICE.txt similarity index 72% rename from client/rest/licenses/httpclient-NOTICE.txt rename to client/rest/licenses/httpclient5-NOTICE.txt index 91e5c40c4c6d3..afee7c6e6880b 100644 --- a/client/rest/licenses/httpclient-NOTICE.txt +++ b/client/rest/licenses/httpclient5-NOTICE.txt @@ -1,5 +1,5 @@ Apache HttpComponents Client -Copyright 1999-2016 The Apache Software Foundation +Copyright 1999-2022 The Apache Software Foundation This product includes software developed at The Apache Software Foundation (http://www.apache.org/). diff --git a/client/rest/licenses/httpcore-4.4.15.jar.sha1 b/client/rest/licenses/httpcore-4.4.15.jar.sha1 deleted file mode 100644 index 42a03b5d7a376..0000000000000 --- a/client/rest/licenses/httpcore-4.4.15.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7f2e0c573eaa7a74bac2e89b359e1f73d92a0a1d \ No newline at end of file diff --git a/client/rest/licenses/httpcore-LICENSE.txt b/client/rest/licenses/httpcore-LICENSE.txt deleted file mode 100644 index e454a52586f29..0000000000000 --- a/client/rest/licenses/httpcore-LICENSE.txt +++ /dev/null @@ -1,178 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - diff --git a/client/rest/licenses/httpcore-nio-4.4.15.jar.sha1 b/client/rest/licenses/httpcore-nio-4.4.15.jar.sha1 deleted file mode 100644 index 251b35ab6a1a5..0000000000000 --- a/client/rest/licenses/httpcore-nio-4.4.15.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -85d2b6825d42db909a1474f0ffbd6328429b7a32 \ No newline at end of file diff --git a/client/rest/licenses/httpcore5-5.1.4.jar.sha1 b/client/rest/licenses/httpcore5-5.1.4.jar.sha1 new file mode 100644 index 0000000000000..c8981fb933736 --- /dev/null +++ b/client/rest/licenses/httpcore5-5.1.4.jar.sha1 @@ -0,0 +1 @@ +92538a62a4aacf96c9ea8992346a453e83da85fc \ No newline at end of file diff --git a/client/rest/licenses/httpcore5-LICENSE.txt b/client/rest/licenses/httpcore5-LICENSE.txt new file mode 100644 index 0000000000000..32f01eda18fe9 --- /dev/null +++ b/client/rest/licenses/httpcore5-LICENSE.txt @@ -0,0 +1,558 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + +========================================================================= + +This project includes Public Suffix List copied from + +licensed under the terms of the Mozilla Public License, v. 2.0 + +Full license text: + +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. 
"Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. 
+Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. 
Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. 
Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. 
+ +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/client/rest/licenses/httpcore-nio-NOTICE.txt b/client/rest/licenses/httpcore5-NOTICE.txt similarity index 56% rename from client/rest/licenses/httpcore-nio-NOTICE.txt rename to client/rest/licenses/httpcore5-NOTICE.txt index a2e17bb60009f..afee7c6e6880b 100644 --- a/client/rest/licenses/httpcore-nio-NOTICE.txt +++ b/client/rest/licenses/httpcore5-NOTICE.txt @@ -1,8 +1,6 @@ - -Apache HttpCore NIO -Copyright 2005-2016 The Apache Software Foundation +Apache HttpComponents Client +Copyright 1999-2022 The Apache Software Foundation This product includes software developed at The Apache Software Foundation (http://www.apache.org/). - diff --git a/client/rest/licenses/httpcore5-h2-5.1.4.jar.sha1 b/client/rest/licenses/httpcore5-h2-5.1.4.jar.sha1 new file mode 100644 index 0000000000000..2369ee9dfb7e1 --- /dev/null +++ b/client/rest/licenses/httpcore5-h2-5.1.4.jar.sha1 @@ -0,0 +1 @@ +04de79e0bb34d65c86e4d163ae2f45d53746b70d \ No newline at end of file diff --git a/client/rest/licenses/httpcore5-h2-LICENSE.txt b/client/rest/licenses/httpcore5-h2-LICENSE.txt new file mode 100644 index 0000000000000..32f01eda18fe9 --- /dev/null +++ b/client/rest/licenses/httpcore5-h2-LICENSE.txt @@ -0,0 +1,558 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + +========================================================================= + +This project includes Public Suffix List copied from + +licensed under the terms of the Mozilla Public License, v. 2.0 + +Full license text: + +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. 
"Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. 
+Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. 
Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. 
Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. * +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. 
+ +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/client/rest/licenses/httpcore-NOTICE.txt b/client/rest/licenses/httpcore5-h2-NOTICE.txt similarity index 55% rename from client/rest/licenses/httpcore-NOTICE.txt rename to client/rest/licenses/httpcore5-h2-NOTICE.txt index 013448d3e9561..afee7c6e6880b 100644 --- a/client/rest/licenses/httpcore-NOTICE.txt +++ b/client/rest/licenses/httpcore5-h2-NOTICE.txt @@ -1,5 +1,6 @@ -Apache HttpComponents Core -Copyright 2005-2016 The Apache Software Foundation +Apache HttpComponents Client +Copyright 1999-2022 The Apache Software Foundation This product includes software developed at The Apache Software Foundation (http://www.apache.org/). + diff --git a/client/rest/licenses/slf4j-api-1.7.36.jar.sha1 b/client/rest/licenses/slf4j-api-1.7.36.jar.sha1 new file mode 100644 index 0000000000000..77b9917528382 --- /dev/null +++ b/client/rest/licenses/slf4j-api-1.7.36.jar.sha1 @@ -0,0 +1 @@ +6c62681a2f655b49963a5983b8b0950a6120ae14 \ No newline at end of file diff --git a/client/rest/licenses/slf4j-api-LICENSE.txt b/client/rest/licenses/slf4j-api-LICENSE.txt new file mode 100644 index 0000000000000..8fda22f4d72f6 --- /dev/null +++ b/client/rest/licenses/slf4j-api-LICENSE.txt @@ -0,0 +1,21 @@ +Copyright (c) 2004-2014 QOS.ch +All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/client/rest/licenses/slf4j-api-NOTICE.txt b/client/rest/licenses/slf4j-api-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/client/rest/src/main/java/org/opensearch/client/Cancellable.java b/client/rest/src/main/java/org/opensearch/client/Cancellable.java index 4bfc0704227aa..56e31a3742f35 100644 --- a/client/rest/src/main/java/org/opensearch/client/Cancellable.java +++ b/client/rest/src/main/java/org/opensearch/client/Cancellable.java @@ -31,24 +31,26 @@ package org.opensearch.client; -import org.apache.http.client.methods.AbstractExecutionAwareRequest; -import org.apache.http.client.methods.HttpRequestBase; +import org.apache.hc.client5.http.classic.methods.HttpUriRequestBase; +import org.apache.hc.core5.concurrent.CancellableDependency; import java.util.concurrent.CancellationException; /** * Represents an operation that can be cancelled. * Returned when executing async requests through {@link RestClient#performRequestAsync(Request, ResponseListener)}, so that the request - * can be cancelled if needed. Cancelling a request will result in calling {@link AbstractExecutionAwareRequest#abort()} on the underlying + * can be cancelled if needed. Cancelling a request will result in calling {@link CancellableDependency#cancel()} on the underlying * request object, which will in turn cancel its corresponding {@link java.util.concurrent.Future}. * Note that cancelling a request does not automatically translate to aborting its execution on the server side, which needs to be * specifically implemented in each API. */ -public class Cancellable { +public class Cancellable implements org.apache.hc.core5.concurrent.Cancellable { static final Cancellable NO_OP = new Cancellable(null) { @Override - public void cancel() {} + public boolean cancel() { + throw new UnsupportedOperationException(); + } @Override void runIfNotCancelled(Runnable runnable) { @@ -56,13 +58,13 @@ void runIfNotCancelled(Runnable runnable) { } }; - static Cancellable fromRequest(HttpRequestBase httpRequest) { + static Cancellable fromRequest(CancellableDependency httpRequest) { return new Cancellable(httpRequest); } - private final HttpRequestBase httpRequest; + private final CancellableDependency httpRequest; - private Cancellable(HttpRequestBase httpRequest) { + private Cancellable(CancellableDependency httpRequest) { this.httpRequest = httpRequest; } @@ -70,15 +72,15 @@ private Cancellable(HttpRequestBase httpRequest) { * Cancels the on-going request that is associated with the current instance of {@link Cancellable}. * */ - public synchronized void cancel() { - this.httpRequest.abort(); + public synchronized boolean cancel() { + return this.httpRequest.cancel(); } /** * Executes some arbitrary code iff the on-going request has not been cancelled, otherwise throws {@link CancellationException}. * This is needed to guarantee that cancelling a request works correctly even in case {@link #cancel()} is called between different - * attempts of the same request. The low-level client reuses the same instance of the {@link AbstractExecutionAwareRequest} by calling - * {@link AbstractExecutionAwareRequest#reset()} between subsequent retries. The {@link #cancel()} method can be called at anytime, + * attempts of the same request. 
The low-level client reuses the same instance of the {@link CancellableDependency} by calling + * {@link HttpUriRequestBase#reset()} between subsequent retries. The {@link #cancel()} method can be called at anytime, + * and we need to handle the case where it gets called while there is no request being executed as one attempt may have failed and + * the subsequent attempt has not been started yet. + * If the request has already been cancelled we don't go ahead with the next attempt, and artificially raise the @@ -87,7 +89,7 @@ public synchronized void cancel() { * when there is no future to cancel, which would make cancelling the request a no-op. */ synchronized void runIfNotCancelled(Runnable runnable) { - if (this.httpRequest.isAborted()) { + if (this.httpRequest.isCancelled()) { throw newCancellationException(); } runnable.run(); diff --git a/client/rest/src/main/java/org/opensearch/client/HasAttributeNodeSelector.java b/client/rest/src/main/java/org/opensearch/client/HasAttributeNodeSelector.java index e6005c207ec93..0a54dbaf30364 100644 --- a/client/rest/src/main/java/org/opensearch/client/HasAttributeNodeSelector.java +++ b/client/rest/src/main/java/org/opensearch/client/HasAttributeNodeSelector.java @@ -57,6 +57,10 @@ public HasAttributeNodeSelector(String key, String value) { this.value = value; } + /** + * Select the {@link Node}s to which to send requests. + * @param nodes the {@link Node}s targeted for sending requests + */ @Override public void select(Iterable<Node> nodes) { Iterator<Node> itr = nodes.iterator(); @@ -70,6 +74,10 @@ public void select(Iterable<Node> nodes) { } } + /** + * Compare two node selectors for equality + * @param o node selector instance to compare with + */ @Override public boolean equals(Object o) { if (this == o) { @@ -82,11 +90,17 @@ public boolean equals(Object o) { return Objects.equals(key, that.key) && Objects.equals(value, that.value); } + /** + * Calculate the hash code of the node selector + */ @Override public int hashCode() { return Objects.hash(key, value); } + /** + * Convert this node selector to string representation + */ @Override public String toString() { return key + "=" + value; diff --git a/client/rest/src/main/java/org/opensearch/client/HeapBufferedAsyncResponseConsumer.java b/client/rest/src/main/java/org/opensearch/client/HeapBufferedAsyncResponseConsumer.java deleted file mode 100644 index e2993e48a5a05..0000000000000 --- a/client/rest/src/main/java/org/opensearch/client/HeapBufferedAsyncResponseConsumer.java +++ /dev/null @@ -1,125 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License.
- */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.client; - -import org.apache.http.ContentTooLongException; -import org.apache.http.HttpEntity; -import org.apache.http.HttpException; -import org.apache.http.HttpResponse; -import org.apache.http.entity.ContentType; -import org.apache.http.nio.ContentDecoder; -import org.apache.http.nio.IOControl; -import org.apache.http.nio.entity.ContentBufferEntity; -import org.apache.http.nio.protocol.AbstractAsyncResponseConsumer; -import org.apache.http.nio.util.ByteBufferAllocator; -import org.apache.http.nio.util.HeapByteBufferAllocator; -import org.apache.http.nio.util.SimpleInputBuffer; -import org.apache.http.protocol.HttpContext; - -import java.io.IOException; - -/** - * Default implementation of {@link org.apache.http.nio.protocol.HttpAsyncResponseConsumer}. Buffers the whole - * response content in heap memory, meaning that the size of the buffer is equal to the content-length of the response. - * Limits the size of responses that can be read based on a configurable argument. Throws an exception in case the entity is longer - * than the configured buffer limit. - */ -public class HeapBufferedAsyncResponseConsumer extends AbstractAsyncResponseConsumer<HttpResponse> { - - private final int bufferLimitBytes; - private volatile HttpResponse response; - private volatile SimpleInputBuffer buf; - - /** - * Creates a new instance of this consumer with the provided buffer limit. - * - * @param bufferLimit the buffer limit. Must be greater than 0. - * @throws IllegalArgumentException if {@code bufferLimit} is less than or equal to 0. - */ - public HeapBufferedAsyncResponseConsumer(int bufferLimit) { - if (bufferLimit <= 0) { - throw new IllegalArgumentException("bufferLimit must be greater than 0"); - } - this.bufferLimitBytes = bufferLimit; - } - - /** - * Get the limit of the buffer. - */ - public int getBufferLimit() { - return bufferLimitBytes; - } - - @Override - protected void onResponseReceived(HttpResponse response) throws HttpException, IOException { - this.response = response; - } - - @Override - protected void onEntityEnclosed(HttpEntity entity, ContentType contentType) throws IOException { - long len = entity.getContentLength(); - if (len > bufferLimitBytes) { - throw new ContentTooLongException( - "entity content is too long [" + len + "] for the configured buffer limit [" + bufferLimitBytes + "]" - ); - } - if (len < 0) { - len = 4096; - } - this.buf = new SimpleInputBuffer((int) len, getByteBufferAllocator()); - this.response.setEntity(new ContentBufferEntity(entity, this.buf)); - } - - /** - * Returns the instance of {@link ByteBufferAllocator} to use for content buffering. - * Allows to plug in any {@link ByteBufferAllocator} implementation.
- */ - protected ByteBufferAllocator getByteBufferAllocator() { - return HeapByteBufferAllocator.INSTANCE; - } - - @Override - protected void onContentReceived(ContentDecoder decoder, IOControl ioctrl) throws IOException { - this.buf.consumeContent(decoder); - } - - @Override - protected HttpResponse buildResult(HttpContext context) throws Exception { - return response; - } - - @Override - protected void releaseResources() { - response = null; - } -} diff --git a/client/rest/src/main/java/org/opensearch/client/HttpAsyncResponseConsumerFactory.java b/client/rest/src/main/java/org/opensearch/client/HttpAsyncResponseConsumerFactory.java index 7a56e03a1162c..6420a615484d0 100644 --- a/client/rest/src/main/java/org/opensearch/client/HttpAsyncResponseConsumerFactory.java +++ b/client/rest/src/main/java/org/opensearch/client/HttpAsyncResponseConsumerFactory.java @@ -32,30 +32,31 @@ package org.opensearch.client; -import org.apache.http.HttpResponse; -import org.apache.http.nio.protocol.HttpAsyncResponseConsumer; +import org.apache.hc.core5.http.ClassicHttpResponse; +import org.apache.hc.core5.http.nio.AsyncResponseConsumer; +import org.opensearch.client.nio.HeapBufferedAsyncResponseConsumer; import static org.opensearch.client.HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory.DEFAULT_BUFFER_LIMIT; /** - * Factory used to create instances of {@link HttpAsyncResponseConsumer}. Each request retry needs its own instance of the + * Factory used to create instances of {@link AsyncResponseConsumer}. Each request retry needs its own instance of the * consumer object. Users can implement this interface and pass their own instance to the specialized * performRequest methods that accept an {@link HttpAsyncResponseConsumerFactory} instance as argument. */ public interface HttpAsyncResponseConsumerFactory { /** - * Creates the default type of {@link HttpAsyncResponseConsumer}, based on heap buffering with a buffer limit of 100MB. + * Creates the default type of {@link AsyncResponseConsumer}, based on heap buffering with a buffer limit of 100MB. */ HttpAsyncResponseConsumerFactory DEFAULT = new HeapBufferedResponseConsumerFactory(DEFAULT_BUFFER_LIMIT); /** - * Creates the {@link HttpAsyncResponseConsumer}, called once per request attempt. + * Creates the {@link AsyncResponseConsumer}, called once per request attempt. */ - HttpAsyncResponseConsumer<HttpResponse> createHttpAsyncResponseConsumer(); + AsyncResponseConsumer<ClassicHttpResponse> createHttpAsyncResponseConsumer(); /** - * Default factory used to create instances of {@link HttpAsyncResponseConsumer}. + * Default factory used to create instances of {@link AsyncResponseConsumer}. * Creates one instance of {@link HeapBufferedAsyncResponseConsumer} for each request attempt, with a configurable * buffer limit which defaults to 100MB. */ @@ -75,8 +76,11 @@ public HeapBufferedResponseConsumerFactory(int bufferLimitBytes) { this.bufferLimit = bufferLimitBytes; } + /** + * Creates the {@link AsyncResponseConsumer}, called once per request attempt.
+ */ @Override - public HttpAsyncResponseConsumer createHttpAsyncResponseConsumer() { + public AsyncResponseConsumer createHttpAsyncResponseConsumer() { return new HeapBufferedAsyncResponseConsumer(bufferLimit); } } diff --git a/client/rest/src/main/java/org/opensearch/client/HttpDeleteWithEntity.java b/client/rest/src/main/java/org/opensearch/client/HttpDeleteWithEntity.java deleted file mode 100644 index 52618cd7edc75..0000000000000 --- a/client/rest/src/main/java/org/opensearch/client/HttpDeleteWithEntity.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.client; - -import org.apache.http.client.methods.HttpDelete; -import org.apache.http.client.methods.HttpEntityEnclosingRequestBase; - -import java.net.URI; - -/** - * Allows to send DELETE requests providing a body (not supported out of the box) - */ -final class HttpDeleteWithEntity extends HttpEntityEnclosingRequestBase { - - static final String METHOD_NAME = HttpDelete.METHOD_NAME; - - HttpDeleteWithEntity(final URI uri) { - setURI(uri); - } - - @Override - public String getMethod() { - return METHOD_NAME; - } -} diff --git a/client/rest/src/main/java/org/opensearch/client/HttpGetWithEntity.java b/client/rest/src/main/java/org/opensearch/client/HttpGetWithEntity.java deleted file mode 100644 index 8ab639433f6be..0000000000000 --- a/client/rest/src/main/java/org/opensearch/client/HttpGetWithEntity.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.client; - -import org.apache.http.client.methods.HttpEntityEnclosingRequestBase; -import org.apache.http.client.methods.HttpGet; - -import java.net.URI; - -/** - * Allows to send GET requests providing a body (not supported out of the box) - */ -final class HttpGetWithEntity extends HttpEntityEnclosingRequestBase { - - static final String METHOD_NAME = HttpGet.METHOD_NAME; - - HttpGetWithEntity(final URI uri) { - setURI(uri); - } - - @Override - public String getMethod() { - return METHOD_NAME; - } -} diff --git a/client/rest/src/main/java/org/opensearch/client/Node.java b/client/rest/src/main/java/org/opensearch/client/Node.java index c02ac6c68718f..8fe5dcfa00db0 100644 --- a/client/rest/src/main/java/org/opensearch/client/Node.java +++ b/client/rest/src/main/java/org/opensearch/client/Node.java @@ -32,7 +32,7 @@ package org.opensearch.client; -import org.apache.http.HttpHost; +import org.apache.hc.core5.http.HttpHost; import java.util.List; import java.util.Map; @@ -152,6 +152,9 @@ public Map> getAttributes() { return attributes; } + /** + * Convert node to string representation + */ @Override public String toString() { StringBuilder b = new StringBuilder(); @@ -174,6 +177,10 @@ public String toString() { return b.append(']').toString(); } + /** + * Compare two nodes for equality + * @param obj node instance to compare with + */ @Override public boolean equals(Object obj) { if (obj == null || obj.getClass() != getClass()) { @@ -188,6 +195,9 @@ public boolean equals(Object obj) { && Objects.equals(attributes, other.attributes); } + /** + * Calculate the hash code of the node + */ @Override public int hashCode() { return Objects.hash(host, boundHosts, name, version, roles, attributes); @@ -239,11 +249,25 @@ public boolean isIngest() { return roles.contains("ingest"); } + /** + * Returns whether the node is dedicated to provide search capability. + */ + public boolean isSearch() { + return roles.contains("search"); + } + + /** + * Convert roles to string representation + */ @Override public String toString() { return String.join(",", roles); } + /** + * Compare two roles for equality + * @param obj roles instance to compare with + */ @Override public boolean equals(Object obj) { if (obj == null || obj.getClass() != getClass()) { @@ -253,6 +277,9 @@ public boolean equals(Object obj) { return roles.equals(other.roles); } + /** + * Calculate the hash code of the roles + */ @Override public int hashCode() { return roles.hashCode(); diff --git a/client/rest/src/main/java/org/opensearch/client/PersistentCredentialsAuthenticationStrategy.java b/client/rest/src/main/java/org/opensearch/client/PersistentCredentialsAuthenticationStrategy.java deleted file mode 100644 index 8a35d6eb607ca..0000000000000 --- a/client/rest/src/main/java/org/opensearch/client/PersistentCredentialsAuthenticationStrategy.java +++ /dev/null @@ -1,77 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. 
Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - * - * - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.client; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.http.HttpHost; -import org.apache.http.auth.AuthScheme; -import org.apache.http.impl.client.TargetAuthenticationStrategy; -import org.apache.http.protocol.HttpContext; - -/** - * An {@link org.apache.http.client.AuthenticationStrategy} implementation that does not perform - * any special handling if authentication fails. - * The default handler in Apache HTTP client mimics standard browser behaviour of clearing authentication - * credentials if it receives a 401 response from the server. While this can be useful for browser, it is - * rarely the desired behaviour with the OpenSearch REST API. - * If the code using the REST client has configured credentials for the REST API, then we can and should - * assume that this is intentional, and those credentials represent the best possible authentication - * mechanism to the OpenSearch node. - * If we receive a 401 status, a probably cause is that the authentication mechanism in place was unable - * to perform the requisite password checks (the node has not yet recovered its state, or an external - * authentication provider was unavailable). - * If this occurs, then the desired behaviour is for the Rest client to retry with the same credentials - * (rather than trying with no credentials, or expecting the calling code to provide alternate credentials). - */ -final class PersistentCredentialsAuthenticationStrategy extends TargetAuthenticationStrategy { - - private final Log logger = LogFactory.getLog(PersistentCredentialsAuthenticationStrategy.class); - - @Override - public void authFailed(HttpHost host, AuthScheme authScheme, HttpContext context) { - if (logger.isDebugEnabled()) { - logger.debug( - "Authentication to " - + host - + " failed (scheme: " - + authScheme.getSchemeName() - + "). Preserving credentials for next request" - ); - } - // Do nothing. - // The superclass implementation of method will clear the credentials from the cache, but we don't - } -} diff --git a/client/rest/src/main/java/org/opensearch/client/PreferHasAttributeNodeSelector.java b/client/rest/src/main/java/org/opensearch/client/PreferHasAttributeNodeSelector.java index ddec1da068bf0..7cf7490692650 100644 --- a/client/rest/src/main/java/org/opensearch/client/PreferHasAttributeNodeSelector.java +++ b/client/rest/src/main/java/org/opensearch/client/PreferHasAttributeNodeSelector.java @@ -58,6 +58,10 @@ public PreferHasAttributeNodeSelector(String key, String value) { this.value = value; } + /** + * Select the {@link Node}s to which to send requests. 
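Combined with the new Roles#isSearch accessor added above, a caller can route requests to dedicated search nodes with a selector written in the same style as the class below. A sketch, assuming the existing NodeSelector functional interface and Node#getRoles (java.util.Iterator import implied):

    // Keep only nodes that carry the new 'search' role.
    NodeSelector searchNodesOnly = nodes -> {
        Iterator<Node> itr = nodes.iterator();
        while (itr.hasNext()) {
            Node.Roles roles = itr.next().getRoles();
            if (roles == null || roles.isSearch() == false) {
                itr.remove();
            }
        }
    };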
+ * @param nodes the {@link Node}s targeted for the sending requests + */ @Override public void select(Iterable nodes) { boolean foundAtLeastOne = false; @@ -99,6 +103,10 @@ public void select(Iterable nodes) { } } + /** + * Compare two node selectors for equality + * @param o node selector instance to compare with + */ @Override public boolean equals(Object o) { if (this == o) { @@ -111,11 +119,17 @@ public boolean equals(Object o) { return Objects.equals(key, that.key) && Objects.equals(value, that.value); } + /** + * Calculate the hash code of the node selector + */ @Override public int hashCode() { return Objects.hash(key, value); } + /** + * Convert this node selector to string representation + */ @Override public String toString() { return key + "=" + value; diff --git a/client/rest/src/main/java/org/opensearch/client/Request.java b/client/rest/src/main/java/org/opensearch/client/Request.java index df81ca7f717ae..441b01b0891ad 100644 --- a/client/rest/src/main/java/org/opensearch/client/Request.java +++ b/client/rest/src/main/java/org/opensearch/client/Request.java @@ -32,9 +32,9 @@ package org.opensearch.client; -import org.apache.http.HttpEntity; -import org.apache.http.entity.ContentType; -import org.apache.http.nio.entity.NStringEntity; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.io.entity.StringEntity; import java.util.HashMap; import java.util.Map; @@ -133,7 +133,7 @@ public void setEntity(HttpEntity entity) { * @param entity JSON string to be set as the entity body of the request. */ public void setJsonEntity(String entity) { - setEntity(entity == null ? null : new NStringEntity(entity, ContentType.APPLICATION_JSON)); + setEntity(entity == null ? null : new StringEntity(entity, ContentType.APPLICATION_JSON)); } /** @@ -176,6 +176,9 @@ public RequestOptions getOptions() { return options; } + /** + * Convert request to string representation + */ @Override public String toString() { StringBuilder b = new StringBuilder(); @@ -192,6 +195,10 @@ public String toString() { return b.append('}').toString(); } + /** + * Compare two requests for equality + * @param obj request instance to compare with + */ @Override public boolean equals(Object obj) { if (obj == null || (obj.getClass() != getClass())) { @@ -209,6 +216,9 @@ public boolean equals(Object obj) { && options.equals(other.options); } + /** + * Calculate the hash code of the request + */ @Override public int hashCode() { return Objects.hash(method, endpoint, parameters, entity, options); diff --git a/client/rest/src/main/java/org/opensearch/client/RequestLogger.java b/client/rest/src/main/java/org/opensearch/client/RequestLogger.java index 297885fa3131b..0f2e0e6da834d 100644 --- a/client/rest/src/main/java/org/opensearch/client/RequestLogger.java +++ b/client/rest/src/main/java/org/opensearch/client/RequestLogger.java @@ -34,16 +34,16 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.http.Header; -import org.apache.http.HttpEntity; -import org.apache.http.HttpEntityEnclosingRequest; -import org.apache.http.HttpHost; -import org.apache.http.HttpResponse; -import org.apache.http.RequestLine; -import org.apache.http.client.methods.HttpUriRequest; -import org.apache.http.entity.BufferedHttpEntity; -import org.apache.http.entity.ContentType; -import org.apache.http.util.EntityUtils; +import org.apache.hc.client5.http.classic.methods.HttpUriRequest; +import 
org.apache.hc.core5.http.ClassicHttpResponse; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.io.entity.BufferedHttpEntity; +import org.apache.hc.core5.http.io.entity.EntityUtils; +import org.apache.hc.core5.http.message.StatusLine; import java.io.BufferedReader; import java.io.IOException; @@ -66,17 +66,10 @@ private RequestLogger() {} /** * Logs a request that yielded a response */ - static void logResponse(Log logger, HttpUriRequest request, HttpHost host, HttpResponse httpResponse) { + static void logResponse(Log logger, HttpUriRequest request, HttpHost host, ClassicHttpResponse httpResponse) { if (logger.isDebugEnabled()) { logger.debug( - "request [" - + request.getMethod() - + " " - + host - + getUri(request.getRequestLine()) - + "] returned [" - + httpResponse.getStatusLine() - + "]" + "request [" + request.getMethod() + " " + host + getUri(request) + "] returned [" + new StatusLine(httpResponse) + "]" ); } if (logger.isWarnEnabled()) { @@ -109,7 +102,7 @@ static void logResponse(Log logger, HttpUriRequest request, HttpHost host, HttpR */ static void logFailedRequest(Log logger, HttpUriRequest request, Node node, Exception e) { if (logger.isDebugEnabled()) { - logger.debug("request [" + request.getMethod() + " " + node.getHost() + getUri(request.getRequestLine()) + "] failed", e); + logger.debug("request [" + request.getMethod() + " " + node.getHost() + getUri(request) + "] failed", e); } if (tracer.isTraceEnabled()) { String traceRequest; @@ -127,7 +120,7 @@ static String buildWarningMessage(HttpUriRequest request, HttpHost host, Header[ StringBuilder message = new StringBuilder("request [").append(request.getMethod()) .append(" ") .append(host) - .append(getUri(request.getRequestLine())) + .append(getUri(request)) .append("] returned ") .append(warnings.length) .append(" warnings: "); @@ -144,17 +137,18 @@ static String buildWarningMessage(HttpUriRequest request, HttpHost host, Header[ * Creates curl output for given request */ static String buildTraceRequest(HttpUriRequest request, HttpHost host) throws IOException { - String requestLine = "curl -iX " + request.getMethod() + " '" + host + getUri(request.getRequestLine()) + "'"; - if (request instanceof HttpEntityEnclosingRequest) { - HttpEntityEnclosingRequest enclosingRequest = (HttpEntityEnclosingRequest) request; - if (enclosingRequest.getEntity() != null) { - requestLine += " -d '"; - HttpEntity entity = enclosingRequest.getEntity(); - if (entity.isRepeatable() == false) { - entity = new BufferedHttpEntity(enclosingRequest.getEntity()); - enclosingRequest.setEntity(entity); - } + String requestLine = "curl -iX " + request.getMethod() + " '" + host + getUri(request) + "'"; + if (request.getEntity() != null) { + requestLine += " -d '"; + HttpEntity entity = request.getEntity(); + if (entity.isRepeatable() == false) { + entity = new BufferedHttpEntity(request.getEntity()); + request.setEntity(entity); + } + try { requestLine += EntityUtils.toString(entity, StandardCharsets.UTF_8) + "'"; + } catch (final ParseException ex) { + throw new IOException(ex); } } return requestLine; @@ -163,10 +157,10 @@ static String buildTraceRequest(HttpUriRequest request, HttpHost host) throws IO /** * Creates curl output for given response */ - static String buildTraceResponse(HttpResponse httpResponse) throws IOException { + static String 
buildTraceResponse(ClassicHttpResponse httpResponse) throws IOException { StringBuilder responseLine = new StringBuilder(); - responseLine.append("# ").append(httpResponse.getStatusLine()); - for (Header header : httpResponse.getAllHeaders()) { + responseLine.append("# ").append(new StatusLine(httpResponse)); + for (Header header : httpResponse.getHeaders()) { responseLine.append("\n# ").append(header.getName()).append(": ").append(header.getValue()); } responseLine.append("\n#"); @@ -176,7 +170,7 @@ static String buildTraceResponse(HttpResponse httpResponse) throws IOException { entity = new BufferedHttpEntity(entity); } httpResponse.setEntity(entity); - ContentType contentType = ContentType.get(entity); + ContentType contentType = ContentType.parse(entity.getContentType()); Charset charset = StandardCharsets.UTF_8; if (contentType != null && contentType.getCharset() != null) { charset = contentType.getCharset(); @@ -191,10 +185,14 @@ static String buildTraceResponse(HttpResponse httpResponse) throws IOException { return responseLine.toString(); } - private static String getUri(RequestLine requestLine) { - if (requestLine.getUri().charAt(0) != '/') { - return "/" + requestLine.getUri(); + private static String getUri(HttpUriRequest request) { + final String uri = request.getRequestUri(); + if (uri == null) { + return "/"; + } else if (!uri.startsWith("/")) { + return "/" + uri; + } else { + return uri; } - return requestLine.getUri(); } } diff --git a/client/rest/src/main/java/org/opensearch/client/RequestOptions.java b/client/rest/src/main/java/org/opensearch/client/RequestOptions.java index 5390e303ff499..189d785faaf45 100644 --- a/client/rest/src/main/java/org/opensearch/client/RequestOptions.java +++ b/client/rest/src/main/java/org/opensearch/client/RequestOptions.java @@ -32,10 +32,10 @@ package org.opensearch.client; -import org.apache.http.Header; -import org.apache.http.client.config.RequestConfig; -import org.apache.http.message.BasicHeader; -import org.apache.http.nio.protocol.HttpAsyncResponseConsumer; +import org.apache.hc.client5.http.config.RequestConfig; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.message.BasicHeader; +import org.apache.hc.core5.http.nio.AsyncResponseConsumer; import org.opensearch.client.HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory; import java.util.ArrayList; @@ -86,7 +86,7 @@ public List
getHeaders() { /** * The {@link HttpAsyncResponseConsumerFactory} used to create one - * {@link HttpAsyncResponseConsumer} callback per retry. Controls how the + * {@link AsyncResponseConsumer} callback per retry. Controls how the * response body gets streamed from a non-blocking HTTP connection on the * client side. */ @@ -124,6 +124,9 @@ public RequestConfig getRequestConfig() { return requestConfig; } + /** + * Convert request options to string representation + */ @Override public String toString() { StringBuilder b = new StringBuilder(); @@ -152,6 +155,10 @@ public String toString() { return b.append('}').toString(); } + /** + * Compare two request options for equality + * @param obj request options instance to compare with + */ @Override public boolean equals(Object obj) { if (obj == null || (obj.getClass() != getClass())) { @@ -167,6 +174,9 @@ public boolean equals(Object obj) { && Objects.equals(warningsHandler, other.warningsHandler); } + /** + * Calculate the hash code of the request options + */ @Override public int hashCode() { return Objects.hash(headers, httpAsyncResponseConsumerFactory, warningsHandler); @@ -218,11 +228,11 @@ public Builder addHeader(String name, String value) { /** * Set the {@link HttpAsyncResponseConsumerFactory} used to create one - * {@link HttpAsyncResponseConsumer} callback per retry. Controls how the + * {@link AsyncResponseConsumer} callback per retry. Controls how the * response body gets streamed from a non-blocking HTTP connection on the * client side. * - * @param httpAsyncResponseConsumerFactory factory for creating {@link HttpAsyncResponseConsumer}. + * @param httpAsyncResponseConsumerFactory factory for creating {@link AsyncResponseConsumer}. * @throws NullPointerException if {@code httpAsyncResponseConsumerFactory} is null. 
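End to end, none of the entity changes above leak into calling code; the NStringEntity to StringEntity swap and the core5 StatusLine are absorbed by Request and Response. A sketch, assuming an already built RestClient named restClient:

    // Request bodies set via setJsonEntity are now backed by a core5 StringEntity,
    // but the calling pattern is unchanged.
    Request req = new Request("POST", "/my-index/_doc");
    req.setJsonEntity("{\"field\":\"value\"}");
    Response resp = restClient.performRequest(req);
    int status = resp.getStatusLine().getStatusCode();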
*/ public void setHttpAsyncResponseConsumerFactory(HttpAsyncResponseConsumerFactory httpAsyncResponseConsumerFactory) { diff --git a/client/rest/src/main/java/org/opensearch/client/Response.java b/client/rest/src/main/java/org/opensearch/client/Response.java index d380607b7df9e..c758826b776ba 100644 --- a/client/rest/src/main/java/org/opensearch/client/Response.java +++ b/client/rest/src/main/java/org/opensearch/client/Response.java @@ -32,12 +32,13 @@ package org.opensearch.client; -import org.apache.http.Header; -import org.apache.http.HttpEntity; -import org.apache.http.HttpHost; -import org.apache.http.HttpResponse; -import org.apache.http.RequestLine; -import org.apache.http.StatusLine; +import org.apache.hc.core5.http.ClassicHttpResponse; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.HttpResponse; +import org.apache.hc.core5.http.message.RequestLine; +import org.apache.hc.core5.http.message.StatusLine; import java.util.ArrayList; import java.util.List; @@ -53,9 +54,9 @@ public class Response { private final RequestLine requestLine; private final HttpHost host; - private final HttpResponse response; + private final ClassicHttpResponse response; - Response(RequestLine requestLine, HttpHost host, HttpResponse response) { + Response(RequestLine requestLine, HttpHost host, ClassicHttpResponse response) { Objects.requireNonNull(requestLine, "requestLine cannot be null"); Objects.requireNonNull(host, "host cannot be null"); Objects.requireNonNull(response, "response cannot be null"); @@ -82,14 +83,14 @@ public HttpHost getHost() { * Returns the status line of the current response */ public StatusLine getStatusLine() { - return response.getStatusLine(); + return new StatusLine(response); } /** * Returns all the response headers */ public Header[] getHeaders() { - return response.getAllHeaders(); + return response.getHeaders(); } /** @@ -199,12 +200,15 @@ public boolean hasWarnings() { return warnings != null && warnings.length > 0; } - HttpResponse getHttpResponse() { + ClassicHttpResponse getHttpResponse() { return response; } + /** + * Convert response to string representation + */ @Override public String toString() { - return "Response{" + "requestLine=" + requestLine + ", host=" + host + ", response=" + response.getStatusLine() + '}'; + return "Response{" + "requestLine=" + requestLine + ", host=" + host + ", response=" + getStatusLine() + '}'; } } diff --git a/client/rest/src/main/java/org/opensearch/client/ResponseException.java b/client/rest/src/main/java/org/opensearch/client/ResponseException.java index 8104c32c422e5..ed816c7e1177e 100644 --- a/client/rest/src/main/java/org/opensearch/client/ResponseException.java +++ b/client/rest/src/main/java/org/opensearch/client/ResponseException.java @@ -32,9 +32,10 @@ package org.opensearch.client; -import org.apache.http.HttpEntity; -import org.apache.http.entity.BufferedHttpEntity; -import org.apache.http.util.EntityUtils; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.io.entity.BufferedHttpEntity; +import org.apache.hc.core5.http.io.entity.EntityUtils; import java.io.IOException; import java.util.Locale; @@ -77,7 +78,11 @@ static String buildMessage(Response response) throws IOException { entity = new BufferedHttpEntity(entity); response.getHttpResponse().setEntity(entity); } - message += "\n" + EntityUtils.toString(entity); + try { + message += 
"\n" + EntityUtils.toString(entity); + } catch (final ParseException ex) { + throw new IOException(ex); + } } return message; } diff --git a/client/rest/src/main/java/org/opensearch/client/RestClient.java b/client/rest/src/main/java/org/opensearch/client/RestClient.java index 92aed2c8fb179..9d140a145b004 100644 --- a/client/rest/src/main/java/org/opensearch/client/RestClient.java +++ b/client/rest/src/main/java/org/opensearch/client/RestClient.java @@ -33,36 +33,43 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.http.ConnectionClosedException; -import org.apache.http.Header; -import org.apache.http.HttpEntity; -import org.apache.http.entity.HttpEntityWrapper; -import org.apache.http.HttpHost; -import org.apache.http.HttpRequest; -import org.apache.http.HttpResponse; -import org.apache.http.client.AuthCache; -import org.apache.http.client.ClientProtocolException; -import org.apache.http.client.entity.GzipCompressingEntity; -import org.apache.http.client.entity.GzipDecompressingEntity; -import org.apache.http.client.config.RequestConfig; -import org.apache.http.client.methods.HttpEntityEnclosingRequestBase; -import org.apache.http.client.methods.HttpHead; -import org.apache.http.client.methods.HttpOptions; -import org.apache.http.client.methods.HttpPatch; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.client.methods.HttpPut; -import org.apache.http.client.methods.HttpRequestBase; -import org.apache.http.client.methods.HttpTrace; -import org.apache.http.client.protocol.HttpClientContext; -import org.apache.http.client.utils.URIBuilder; -import org.apache.http.concurrent.FutureCallback; -import org.apache.http.conn.ConnectTimeoutException; -import org.apache.http.impl.auth.BasicScheme; -import org.apache.http.impl.client.BasicAuthCache; -import org.apache.http.impl.nio.client.CloseableHttpAsyncClient; -import org.apache.http.nio.client.methods.HttpAsyncMethods; -import org.apache.http.nio.protocol.HttpAsyncRequestProducer; -import org.apache.http.nio.protocol.HttpAsyncResponseConsumer; +import org.apache.hc.core5.http.ClassicHttpResponse; +import org.apache.hc.core5.http.ConnectionClosedException; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.io.entity.HttpEntityWrapper; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.HttpRequest; +import org.apache.hc.client5.http.auth.AuthCache; +import org.apache.hc.client5.http.auth.AuthScheme; +import org.apache.hc.client5.http.auth.AuthScope; +import org.apache.hc.client5.http.auth.Credentials; +import org.apache.hc.client5.http.auth.CredentialsProvider; +import org.apache.hc.client5.http.ConnectTimeoutException; +import org.apache.hc.client5.http.ClientProtocolException; +import org.apache.hc.client5.http.entity.GzipDecompressingEntity; +import org.apache.hc.client5.http.config.RequestConfig; +import org.apache.hc.client5.http.classic.methods.HttpDelete; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.client5.http.classic.methods.HttpHead; +import org.apache.hc.client5.http.classic.methods.HttpOptions; +import org.apache.hc.client5.http.classic.methods.HttpPatch; +import org.apache.hc.client5.http.classic.methods.HttpPost; +import org.apache.hc.client5.http.classic.methods.HttpPut; +import org.apache.hc.client5.http.classic.methods.HttpUriRequestBase; +import org.apache.hc.client5.http.classic.methods.HttpTrace; +import 
org.apache.hc.client5.http.protocol.HttpClientContext; +import org.apache.hc.core5.http.message.RequestLine; +import org.apache.hc.core5.http.nio.AsyncRequestProducer; +import org.apache.hc.core5.http.nio.AsyncResponseConsumer; +import org.apache.hc.core5.net.URIBuilder; +import org.apache.hc.core5.reactor.IOReactorStatus; +import org.apache.hc.core5.util.Args; +import org.opensearch.client.http.HttpUriRequestProducer; +import org.apache.hc.core5.concurrent.FutureCallback; +import org.apache.hc.client5.http.impl.auth.BasicScheme; +import org.apache.hc.client5.http.impl.auth.BasicAuthCache; +import org.apache.hc.client5.http.impl.async.CloseableHttpAsyncClient; import javax.net.ssl.SSLHandshakeException; import java.io.ByteArrayInputStream; @@ -70,6 +77,7 @@ import java.io.Closeable; import java.io.IOException; import java.io.InputStream; +import java.io.OutputStream; import java.net.ConnectException; import java.net.SocketTimeoutException; import java.net.URI; @@ -92,6 +100,7 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; import java.util.zip.GZIPOutputStream; @@ -218,7 +227,7 @@ public static RestClientBuilder builder(String cloudId) { } String url = decodedParts[1] + "." + domain; - return builder(new HttpHost(url, port, "https")); + return builder(new HttpHost("https", url, port)); } /** @@ -287,7 +296,7 @@ public List getNodes() { * @return client running status */ public boolean isRunning() { - return client.isRunning(); + return client.getStatus() == IOReactorStatus.ACTIVE; } /** @@ -323,7 +332,7 @@ public Response performRequest(Request request) throws IOException { private Response performRequest(final NodeTuple> nodeTuple, final InternalRequest request, Exception previousException) throws IOException { RequestContext context = request.createContextForNextAttempt(nodeTuple.nodes.next(), nodeTuple.authCache); - HttpResponse httpResponse; + ClassicHttpResponse httpResponse; try { httpResponse = client.execute(context.requestProducer, context.asyncResponseConsumer, context.context, null).get(); } catch (Exception e) { @@ -353,18 +362,18 @@ private Response performRequest(final NodeTuple> nodeTuple, final throw responseOrResponseException.responseException; } - private ResponseOrResponseException convertResponse(InternalRequest request, Node node, HttpResponse httpResponse) throws IOException { + private ResponseOrResponseException convertResponse(InternalRequest request, Node node, ClassicHttpResponse httpResponse) + throws IOException { RequestLogger.logResponse(logger, request.httpRequest, node.getHost(), httpResponse); - int statusCode = httpResponse.getStatusLine().getStatusCode(); + int statusCode = httpResponse.getCode(); Optional.ofNullable(httpResponse.getEntity()) .map(HttpEntity::getContentEncoding) - .map(Header::getValue) .filter("gzip"::equalsIgnoreCase) .map(gzipHeaderValue -> new GzipDecompressingEntity(httpResponse.getEntity())) .ifPresent(httpResponse::setEntity); - Response response = new Response(request.httpRequest.getRequestLine(), node.getHost(), httpResponse); + Response response = new Response(new RequestLine(request.httpRequest), node.getHost(), httpResponse); if (isSuccessfulResponse(statusCode) || request.ignoreErrorCodes.contains(response.getStatusLine().getStatusCode())) { onResponse(node); if 
(request.warningsHandler.warningsShouldFailRequest(response.getWarnings())) { @@ -418,47 +427,56 @@ private void performRequestAsync( ) { request.cancellable.runIfNotCancelled(() -> { final RequestContext context = request.createContextForNextAttempt(nodeTuple.nodes.next(), nodeTuple.authCache); - client.execute(context.requestProducer, context.asyncResponseConsumer, context.context, new FutureCallback() { - @Override - public void completed(HttpResponse httpResponse) { - try { - ResponseOrResponseException responseOrResponseException = convertResponse(request, context.node, httpResponse); - if (responseOrResponseException.responseException == null) { - listener.onSuccess(responseOrResponseException.response); - } else { + Future future = client.execute( + context.requestProducer, + context.asyncResponseConsumer, + context.context, + new FutureCallback() { + @Override + public void completed(ClassicHttpResponse httpResponse) { + try { + ResponseOrResponseException responseOrResponseException = convertResponse(request, context.node, httpResponse); + if (responseOrResponseException.responseException == null) { + listener.onSuccess(responseOrResponseException.response); + } else { + if (nodeTuple.nodes.hasNext()) { + listener.trackFailure(responseOrResponseException.responseException); + performRequestAsync(nodeTuple, request, listener); + } else { + listener.onDefinitiveFailure(responseOrResponseException.responseException); + } + } + } catch (Exception e) { + listener.onDefinitiveFailure(e); + } + } + + @Override + public void failed(Exception failure) { + try { + RequestLogger.logFailedRequest(logger, request.httpRequest, context.node, failure); + onFailure(context.node); if (nodeTuple.nodes.hasNext()) { - listener.trackFailure(responseOrResponseException.responseException); + listener.trackFailure(failure); performRequestAsync(nodeTuple, request, listener); } else { - listener.onDefinitiveFailure(responseOrResponseException.responseException); + listener.onDefinitiveFailure(failure); } + } catch (Exception e) { + listener.onDefinitiveFailure(e); } - } catch (Exception e) { - listener.onDefinitiveFailure(e); } - } - @Override - public void failed(Exception failure) { - try { - RequestLogger.logFailedRequest(logger, request.httpRequest, context.node, failure); - onFailure(context.node); - if (nodeTuple.nodes.hasNext()) { - listener.trackFailure(failure); - performRequestAsync(nodeTuple, request, listener); - } else { - listener.onDefinitiveFailure(failure); - } - } catch (Exception e) { - listener.onDefinitiveFailure(e); + @Override + public void cancelled() { + listener.onDefinitiveFailure(Cancellable.newCancellationException()); } } + ); - @Override - public void cancelled() { - listener.onDefinitiveFailure(Cancellable.newCancellationException()); - } - }); + if (future instanceof org.apache.hc.core5.concurrent.Cancellable) { + request.httpRequest.setDependency((org.apache.hc.core5.concurrent.Cancellable) future); + } }); } @@ -583,6 +601,9 @@ private void onFailure(Node node) { failureListener.onFailure(node); } + /** + * Close the underlying {@link CloseableHttpAsyncClient} instance + */ @Override public void close() throws IOException { client.close(); @@ -608,12 +629,12 @@ private static void addSuppressedException(Exception suppressedException, Except } } - private HttpRequestBase createHttpRequest(String method, URI uri, HttpEntity entity) { + private HttpUriRequestBase createHttpRequest(String method, URI uri, HttpEntity entity) { switch (method.toUpperCase(Locale.ROOT)) { - case 
HttpDeleteWithEntity.METHOD_NAME: - return addRequestBody(new HttpDeleteWithEntity(uri), entity); - case HttpGetWithEntity.METHOD_NAME: - return addRequestBody(new HttpGetWithEntity(uri), entity); + case HttpDelete.METHOD_NAME: + return addRequestBody(new HttpDelete(uri), entity); + case HttpGet.METHOD_NAME: + return addRequestBody(new HttpGet(uri), entity); case HttpHead.METHOD_NAME: return addRequestBody(new HttpHead(uri), entity); case HttpOptions.METHOD_NAME: @@ -633,22 +654,18 @@ private HttpRequestBase createHttpRequest(String method, URI uri, HttpEntity ent } } - private HttpRequestBase addRequestBody(HttpRequestBase httpRequest, HttpEntity entity) { + private HttpUriRequestBase addRequestBody(HttpUriRequestBase httpRequest, HttpEntity entity) { if (entity != null) { - if (httpRequest instanceof HttpEntityEnclosingRequestBase) { - if (compressionEnabled) { - if (chunkedEnabled.isPresent()) { - entity = new ContentCompressingEntity(entity, chunkedEnabled.get()); - } else { - entity = new ContentCompressingEntity(entity); - } - } else if (chunkedEnabled.isPresent()) { - entity = new ContentHttpEntity(entity, chunkedEnabled.get()); + if (compressionEnabled) { + if (chunkedEnabled.isPresent()) { + entity = new ContentCompressingEntity(entity, chunkedEnabled.get()); + } else { + entity = new ContentCompressingEntity(entity); } - ((HttpEntityEnclosingRequestBase) httpRequest).setEntity(entity); - } else { - throw new UnsupportedOperationException(httpRequest.getMethod() + " with body is not supported"); + } else if (chunkedEnabled.isPresent()) { + entity = new ContentHttpEntity(entity, chunkedEnabled.get()); } + httpRequest.setEntity(entity); } return httpRequest; } @@ -673,7 +690,12 @@ static URI buildUri(String pathPrefix, String path, Map params) for (Map.Entry param : params.entrySet()) { uriBuilder.addParameter(param.getKey(), param.getValue()); } - return uriBuilder.build(); + + // The Apache HttpClient 5.x **does not** encode URIs but Apache HttpClient 4.x does. It leads + // to the issues with Unicode characters (f.e. document IDs could contain Unicode characters) and + // weird characters are being passed instead. By using `toASCIIString()`, the URI is already created + // with proper encoding. + return new URI(uriBuilder.build().toASCIIString()); } catch (URISyntaxException e) { throw new IllegalArgumentException(e.getMessage(), e); } @@ -802,7 +824,7 @@ public void remove() { private class InternalRequest { private final Request request; private final Set ignoreErrorCodes; - private final HttpRequestBase httpRequest; + private final HttpUriRequestBase httpRequest; private final Cancellable cancellable; private final WarningsHandler warningsHandler; @@ -839,7 +861,7 @@ private void setHeaders(HttpRequest httpRequest, Collection
requestHeade } } - private void setRequestConfig(HttpRequestBase httpRequest, RequestConfig requestConfig) { + private void setRequestConfig(HttpUriRequestBase httpRequest, RequestConfig requestConfig) { if (requestConfig != null) { httpRequest.setConfig(requestConfig); } @@ -851,21 +873,81 @@ RequestContext createContextForNextAttempt(Node node, AuthCache authCache) { } } + /** + * The Apache HttpClient 5 adds "Authorization" header even if the credentials for basic authentication are not provided + * (effectively, username and password are 'null'). To workaround that, wrapping the AuthCache around current HttpClientContext + * and ensuring that the credentials are indeed provided for particular HttpHost, otherwise returning no authentication scheme + * even if it is present in the cache. + */ + private static class WrappingAuthCache implements AuthCache { + private final HttpClientContext context; + private final AuthCache delegate; + private final boolean usePersistentCredentials = true; + + public WrappingAuthCache(HttpClientContext context, AuthCache delegate) { + this.context = context; + this.delegate = delegate; + } + + @Override + public void put(HttpHost host, AuthScheme authScheme) { + delegate.put(host, authScheme); + } + + @Override + public AuthScheme get(HttpHost host) { + AuthScheme authScheme = delegate.get(host); + + if (authScheme != null) { + final CredentialsProvider credsProvider = context.getCredentialsProvider(); + if (credsProvider != null) { + final String schemeName = authScheme.getName(); + final AuthScope authScope = new AuthScope(host, null, schemeName); + final Credentials creds = credsProvider.getCredentials(authScope, context); + + // See please https://issues.apache.org/jira/browse/HTTPCLIENT-2203 + if (authScheme instanceof BasicScheme) { + ((BasicScheme) authScheme).initPreemptive(creds); + } + + if (creds == null) { + return null; + } + } + } + + return authScheme; + } + + @Override + public void remove(HttpHost host) { + if (!usePersistentCredentials) { + delegate.remove(host); + } + } + + @Override + public void clear() { + delegate.clear(); + } + + } + private static class RequestContext { private final Node node; - private final HttpAsyncRequestProducer requestProducer; - private final HttpAsyncResponseConsumer asyncResponseConsumer; + private final AsyncRequestProducer requestProducer; + private final AsyncResponseConsumer asyncResponseConsumer; private final HttpClientContext context; RequestContext(InternalRequest request, Node node, AuthCache authCache) { this.node = node; // we stream the request body if the entity allows for it - this.requestProducer = HttpAsyncMethods.create(node.getHost(), request.httpRequest); + this.requestProducer = HttpUriRequestProducer.create(request.httpRequest, node.getHost()); this.asyncResponseConsumer = request.request.getOptions() .getHttpAsyncResponseConsumerFactory() .createHttpAsyncResponseConsumer(); this.context = HttpClientContext.create(); - context.setAuthCache(authCache); + context.setAuthCache(new WrappingAuthCache(context, authCache)); } } @@ -966,7 +1048,9 @@ private static Exception extractAndWrapCause(Exception exception) { /** * A gzip compressing entity that also implements {@code getContent()}. 
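The compressing wrapper introduced below relies only on plain JDK gzip, so its on-wire contract can be checked in isolation. A sketch (payload illustrative; assumes a method that may throw IOException and Java 9+ for readAllBytes):

    // Bytes written through the wrapper's writeTo(...) are standard GZIP
    // and can be restored with GZIPInputStream.
    byte[] json = "{\"query\":{\"match_all\":{}}}".getBytes(StandardCharsets.UTF_8);
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    try (GZIPOutputStream gzip = new GZIPOutputStream(out)) {
        gzip.write(json);
    }
    byte[] restored = new GZIPInputStream(new ByteArrayInputStream(out.toByteArray())).readAllBytes();
    assert Arrays.equals(json, restored);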
*/ - public static class ContentCompressingEntity extends GzipCompressingEntity { + public static class ContentCompressingEntity extends HttpEntityWrapper { + private static final String GZIP_CODEC = "gzip"; + private Optional chunkedEnabled; /** @@ -979,6 +1063,14 @@ public ContentCompressingEntity(HttpEntity entity) { this.chunkedEnabled = Optional.empty(); } + /** + * Returns content encoding of the entity, if known. + */ + @Override + public String getContentEncoding() { + return GZIP_CODEC; + } + /** * Creates a {@link ContentCompressingEntity} instance with the provided HTTP entity. * @@ -990,11 +1082,14 @@ public ContentCompressingEntity(HttpEntity entity, boolean chunkedEnabled) { this.chunkedEnabled = Optional.of(chunkedEnabled); } + /** + * Returns a content stream of the entity. + */ @Override public InputStream getContent() throws IOException { ByteArrayInputOutputStream out = new ByteArrayInputOutputStream(1024); try (GZIPOutputStream gzipOut = new GZIPOutputStream(out)) { - wrappedEntity.writeTo(gzipOut); + super.writeTo(gzipOut); } return out.asInput(); } @@ -1030,9 +1125,24 @@ public long getContentLength() { return size; } } else { - return super.getContentLength(); + return -1; } } + + /** + * Writes the entity content out to the output stream. + * @param outStream the output stream to write entity content to + * @throws IOException if an I/O error occurs + */ + @Override + public void writeTo(final OutputStream outStream) throws IOException { + Args.notNull(outStream, "Output stream"); + final GZIPOutputStream gzip = new GZIPOutputStream(outStream); + super.writeTo(gzip); + // Only close output stream if the wrapped entity has been + // successfully written out + gzip.close(); + } } /** diff --git a/client/rest/src/main/java/org/opensearch/client/RestClientBuilder.java b/client/rest/src/main/java/org/opensearch/client/RestClientBuilder.java index 8841d371754c3..679a7ccb17d49 100644 --- a/client/rest/src/main/java/org/opensearch/client/RestClientBuilder.java +++ b/client/rest/src/main/java/org/opensearch/client/RestClientBuilder.java @@ -32,15 +32,23 @@ package org.opensearch.client; -import org.apache.http.Header; -import org.apache.http.client.config.RequestConfig; -import org.apache.http.impl.client.CloseableHttpClient; -import org.apache.http.impl.client.HttpClientBuilder; -import org.apache.http.impl.nio.client.CloseableHttpAsyncClient; -import org.apache.http.impl.nio.client.HttpAsyncClientBuilder; -import org.apache.http.nio.conn.SchemeIOSessionStrategy; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.nio.ssl.TlsStrategy; +import org.apache.hc.core5.util.Timeout; +import org.apache.hc.client5.http.async.HttpAsyncClient; +import org.apache.hc.client5.http.auth.CredentialsProvider; +import org.apache.hc.client5.http.config.RequestConfig; +import org.apache.hc.client5.http.impl.classic.CloseableHttpClient; +import org.apache.hc.client5.http.impl.classic.HttpClientBuilder; +import org.apache.hc.client5.http.impl.nio.PoolingAsyncClientConnectionManager; +import org.apache.hc.client5.http.impl.nio.PoolingAsyncClientConnectionManagerBuilder; +import org.apache.hc.client5.http.ssl.ClientTlsStrategyBuilder; +import org.apache.hc.client5.http.impl.DefaultAuthenticationStrategy; +import org.apache.hc.client5.http.impl.async.CloseableHttpAsyncClient; +import org.apache.hc.client5.http.impl.async.HttpAsyncClientBuilder; import javax.net.ssl.SSLContext; + import java.security.AccessController; import java.security.NoSuchAlgorithmException; import 
java.security.PrivilegedAction; @@ -50,19 +58,19 @@ /** * Helps creating a new {@link RestClient}. Allows to set the most common http client configuration options when internally - * creating the underlying {@link org.apache.http.nio.client.HttpAsyncClient}. Also allows to provide an externally created - * {@link org.apache.http.nio.client.HttpAsyncClient} in case additional customization is needed. + * creating the underlying {@link HttpAsyncClient}. Also allows to provide an externally created + * {@link HttpAsyncClient} in case additional customization is needed. */ public final class RestClientBuilder { /** - * The default connection timout in milliseconds. + * The default connection timeout in milliseconds. */ public static final int DEFAULT_CONNECT_TIMEOUT_MILLIS = 1000; /** - * The default socket timeout in milliseconds. + * The default response timeout in milliseconds. */ - public static final int DEFAULT_SOCKET_TIMEOUT_MILLIS = 30000; + public static final int DEFAULT_RESPONSE_TIMEOUT_MILLIS = 30000; /** * The default maximum of connections per route. @@ -296,20 +304,26 @@ public RestClient build() { private CloseableHttpAsyncClient createHttpClient() { // default timeouts are all infinite RequestConfig.Builder requestConfigBuilder = RequestConfig.custom() - .setConnectTimeout(DEFAULT_CONNECT_TIMEOUT_MILLIS) - .setSocketTimeout(DEFAULT_SOCKET_TIMEOUT_MILLIS); + .setConnectTimeout(Timeout.ofMilliseconds(DEFAULT_CONNECT_TIMEOUT_MILLIS)) + .setResponseTimeout(Timeout.ofMilliseconds(DEFAULT_RESPONSE_TIMEOUT_MILLIS)); if (requestConfigCallback != null) { requestConfigBuilder = requestConfigCallback.customizeRequestConfig(requestConfigBuilder); } try { - HttpAsyncClientBuilder httpClientBuilder = HttpAsyncClientBuilder.create() - .setDefaultRequestConfig(requestConfigBuilder.build()) - // default settings for connection pooling may be too constraining + final TlsStrategy tlsStrategy = ClientTlsStrategyBuilder.create().setSslContext(SSLContext.getDefault()).build(); + + final PoolingAsyncClientConnectionManager connectionManager = PoolingAsyncClientConnectionManagerBuilder.create() .setMaxConnPerRoute(DEFAULT_MAX_CONN_PER_ROUTE) .setMaxConnTotal(DEFAULT_MAX_CONN_TOTAL) - .setSSLContext(SSLContext.getDefault()) - .setTargetAuthenticationStrategy(new PersistentCredentialsAuthenticationStrategy()); + .setTlsStrategy(tlsStrategy) + .build(); + + HttpAsyncClientBuilder httpClientBuilder = HttpAsyncClientBuilder.create() + .setDefaultRequestConfig(requestConfigBuilder.build()) + .setConnectionManager(connectionManager) + .setTargetAuthenticationStrategy(DefaultAuthenticationStrategy.INSTANCE) + .disableAutomaticRetries(); if (httpClientConfigCallback != null) { httpClientBuilder = httpClientConfigCallback.customizeHttpClient(httpClientBuilder); } @@ -344,9 +358,9 @@ public interface RequestConfigCallback { public interface HttpClientConfigCallback { /** * Allows to customize the {@link CloseableHttpAsyncClient} being created and used by the {@link RestClient}. - * Commonly used to customize the default {@link org.apache.http.client.CredentialsProvider} for authentication - * or the {@link SchemeIOSessionStrategy} for communication through ssl without losing any other useful default - * value that the {@link RestClientBuilder} internally sets, like connection pooling. 
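Under HttpClient 5.x the credentials classes move to org.apache.hc.client5 packages, but the callback wiring is otherwise unchanged. A sketch of basic authentication setup (host, username and password are illustrative):

    // The WrappingAuthCache added earlier in this change keeps the client from
    // sending an Authorization header when no credentials are configured.
    final BasicCredentialsProvider credentialsProvider = new BasicCredentialsProvider();
    credentialsProvider.setCredentials(
        new AuthScope(new HttpHost("https", "localhost", 9200)),
        new UsernamePasswordCredentials("admin", "admin".toCharArray())
    );
    RestClient client = RestClient.builder(new HttpHost("https", "localhost", 9200))
        .setHttpClientConfigCallback(b -> b.setDefaultCredentialsProvider(credentialsProvider))
        .build();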
+ * Commonly used to customize the default {@link CredentialsProvider} for authentication for communication + * through TLS/SSL without losing any other useful default value that the {@link RestClientBuilder} internally + * sets, like connection pooling. * * @param httpClientBuilder the {@link HttpClientBuilder} for customizing the client instance. */ diff --git a/client/rest/src/main/java/org/opensearch/client/http/HttpUriRequestProducer.java b/client/rest/src/main/java/org/opensearch/client/http/HttpUriRequestProducer.java new file mode 100644 index 0000000000000..a65427cd0b032 --- /dev/null +++ b/client/rest/src/main/java/org/opensearch/client/http/HttpUriRequestProducer.java @@ -0,0 +1,63 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.client.http; + +import org.apache.hc.client5.http.classic.methods.HttpUriRequestBase; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.nio.AsyncEntityProducer; +import org.apache.hc.core5.http.nio.support.BasicRequestProducer; +import org.apache.hc.core5.net.URIAuthority; +import org.apache.hc.core5.util.Args; +import org.opensearch.client.nio.HttpEntityAsyncEntityProducer; + +/** + * The producer of the {@link HttpUriRequestBase} instances associated with a particular {@link HttpHost} + */ +public class HttpUriRequestProducer extends BasicRequestProducer { + private final HttpUriRequestBase request; + + HttpUriRequestProducer(final HttpUriRequestBase request, final AsyncEntityProducer entityProducer) { + super(request, entityProducer); + this.request = request; + } + + /** + * Get the produced {@link HttpUriRequestBase} instance + * @return produced {@link HttpUriRequestBase} instance + */ + public HttpUriRequestBase getRequest() { + return request; + } + + /** + * Create new request producer for {@link HttpUriRequestBase} instance and {@link HttpHost} + * @param request {@link HttpUriRequestBase} instance + * @param host {@link HttpHost} instance + * @return new request producer + */ + public static HttpUriRequestProducer create(final HttpUriRequestBase request, final HttpHost host) { + Args.notNull(request, "Request"); + Args.notNull(host, "HttpHost"); + + // TODO: Should we copy request here instead of modifying in place? + request.setAuthority(new URIAuthority(host)); + request.setScheme(host.getSchemeName()); + + final HttpEntity entity = request.getEntity(); + AsyncEntityProducer entityProducer = null; + + if (entity != null) { + entityProducer = new HttpEntityAsyncEntityProducer(entity); + } + + return new HttpUriRequestProducer(request, entityProducer); + } + +} diff --git a/client/rest/src/main/java/org/opensearch/client/http/package-info.java b/client/rest/src/main/java/org/opensearch/client/http/package-info.java new file mode 100644 index 0000000000000..32e0aa2016d53 --- /dev/null +++ b/client/rest/src/main/java/org/opensearch/client/http/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * HTTP support classes for REST client. 
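The producer above is what RestClient now hands to the async client in place of HttpAsyncMethods.create. A usage sketch under the same pairing RequestContext uses internally (host and path illustrative):

    HttpHost host = new HttpHost("http", "localhost", 9200);
    HttpUriRequestBase get = new HttpGet("/_cluster/health");
    AsyncRequestProducer producer = HttpUriRequestProducer.create(get, host);
    AsyncResponseConsumer<ClassicHttpResponse> consumer = new HeapBufferedAsyncResponseConsumer(100 * 1024 * 1024);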
+ */
+package org.opensearch.client.http;
diff --git a/client/rest/src/main/java/org/opensearch/client/nio/HeapBufferedAsyncEntityConsumer.java b/client/rest/src/main/java/org/opensearch/client/nio/HeapBufferedAsyncEntityConsumer.java
new file mode 100644
index 0000000000000..9bd17d1c24c7e
--- /dev/null
+++ b/client/rest/src/main/java/org/opensearch/client/nio/HeapBufferedAsyncEntityConsumer.java
@@ -0,0 +1,139 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/*
+ * Modifications Copyright OpenSearch Contributors. See
+ * GitHub history for details.
+ */
+
+package org.opensearch.client.nio;
+
+import org.apache.hc.core5.http.ContentTooLongException;
+import org.apache.hc.core5.http.ContentType;
+import org.apache.hc.core5.http.HttpException;
+import org.apache.hc.core5.http.nio.AsyncEntityConsumer;
+import org.apache.hc.core5.http.nio.entity.AbstractBinAsyncEntityConsumer;
+import org.apache.hc.core5.util.ByteArrayBuffer;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.concurrent.atomic.AtomicReference;
+
+/**
+ * Default implementation of {@link AsyncEntityConsumer}. Buffers the whole
+ * response content in heap memory, meaning that the size of the buffer is equal to the content-length of the response.
+ * Limits the size of responses that can be read based on a configurable argument. Throws an exception in case the entity is longer
+ * than the configured buffer limit.
+ */
+public class HeapBufferedAsyncEntityConsumer extends AbstractBinAsyncEntityConsumer<byte[]> {
+
+    private final int bufferLimitBytes;
+    private AtomicReference<ByteArrayBuffer> bufferRef = new AtomicReference<>();
+
+    /**
+     * Creates a new instance of this consumer with the provided buffer limit.
+     *
+     * @param bufferLimit the buffer limit. Must be greater than 0.
+     * @throws IllegalArgumentException if {@code bufferLimit} is less than or equal to 0.
+     */
+    public HeapBufferedAsyncEntityConsumer(int bufferLimit) {
+        if (bufferLimit <= 0) {
+            throw new IllegalArgumentException("bufferLimit must be greater than 0");
+        }
+        this.bufferLimitBytes = bufferLimit;
+    }
+
+    /**
+     * Get the limit of the buffer.
+     */
+    public int getBufferLimit() {
+        return bufferLimitBytes;
+    }
+
+    /**
+     * Triggered to signal beginning of entity content stream.
+     *
+     * @param contentType the entity content type
+     */
+    @Override
+    protected void streamStart(final ContentType contentType) throws HttpException, IOException {}
+
+    /**
+     * Triggered to obtain the capacity increment.
+     *
+     * @return the number of bytes this consumer is prepared to process.
+     */
+    @Override
+    protected int capacityIncrement() {
+        return Integer.MAX_VALUE;
+    }
+
+    /**
+     * Triggered to pass incoming data packet to the data consumer.
+     *
+     * @param src the data packet.
+     * @param endOfStream flag indicating whether this data packet is the last in the data stream.
+ * + */ + @Override + protected void data(final ByteBuffer src, final boolean endOfStream) throws IOException { + if (src == null) { + return; + } + + ByteArrayBuffer buffer = bufferRef.get(); + if (buffer == null) { + buffer = new ByteArrayBuffer(bufferLimitBytes); + if (bufferRef.compareAndSet(null, buffer) == false) { + buffer = bufferRef.get(); + } + } + + int len = src.limit(); + if (buffer.length() + len > bufferLimitBytes) { + throw new ContentTooLongException( + "entity content is too long [" + len + "] for the configured buffer limit [" + bufferLimitBytes + "]" + ); + } + + if (len < 0) { + len = 4096; + } + + if (src.hasArray()) { + buffer.append(src.array(), src.arrayOffset() + src.position(), src.remaining()); + } else { + while (src.hasRemaining()) { + buffer.append(src.get()); + } + } + } + + /** + * Triggered to generate entity representation. + * + * @return the entity content + */ + @Override + protected byte[] generateContent() throws IOException { + final ByteArrayBuffer buffer = bufferRef.get(); + return buffer == null ? new byte[0] : buffer.toByteArray(); + } + + /** + * Release resources being held + */ + @Override + public void releaseResources() { + ByteArrayBuffer buffer = bufferRef.getAndSet(null); + if (buffer != null) { + buffer.clear(); + buffer = null; + } + } +} diff --git a/client/rest/src/main/java/org/opensearch/client/nio/HeapBufferedAsyncResponseConsumer.java b/client/rest/src/main/java/org/opensearch/client/nio/HeapBufferedAsyncResponseConsumer.java new file mode 100644 index 0000000000000..3d93478f49f99 --- /dev/null +++ b/client/rest/src/main/java/org/opensearch/client/nio/HeapBufferedAsyncResponseConsumer.java @@ -0,0 +1,123 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */
+
+package org.opensearch.client.nio;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hc.core5.http.ClassicHttpResponse;
+import org.apache.hc.core5.http.ContentType;
+import org.apache.hc.core5.http.Header;
+import org.apache.hc.core5.http.HttpException;
+import org.apache.hc.core5.http.HttpHeaders;
+import org.apache.hc.core5.http.HttpResponse;
+import org.apache.hc.core5.http.io.entity.ByteArrayEntity;
+import org.apache.hc.core5.http.message.BasicClassicHttpResponse;
+import org.apache.hc.core5.http.nio.AsyncResponseConsumer;
+import org.apache.hc.core5.http.nio.support.AbstractAsyncResponseConsumer;
+import org.apache.hc.core5.http.protocol.HttpContext;
+
+import java.io.IOException;
+
+/**
+ * Default implementation of {@link AsyncResponseConsumer}. Buffers the whole
+ * response content in heap memory, meaning that the size of the buffer is equal to the content-length of the response.
+ * Limits the size of responses that can be read based on a configurable argument. Throws an exception in case the entity is longer
+ * than the configured buffer limit.
+ */
+public class HeapBufferedAsyncResponseConsumer extends AbstractAsyncResponseConsumer<ClassicHttpResponse, byte[]> {
+    private static final Log LOGGER = LogFactory.getLog(HeapBufferedAsyncResponseConsumer.class);
+    private final int bufferLimit;
+
+    /**
+     * Creates a new instance of this consumer with the provided buffer limit.
+     *
+     * @param bufferLimit the buffer limit. Must be greater than 0.
+     * @throws IllegalArgumentException if {@code bufferLimit} is less than or equal to 0.
+     */
+    public HeapBufferedAsyncResponseConsumer(int bufferLimit) {
+        super(new HeapBufferedAsyncEntityConsumer(bufferLimit));
+        this.bufferLimit = bufferLimit;
+    }
+
+    /**
+     * Get the limit of the buffer.
+     */
+    public int getBufferLimit() {
+        return bufferLimit;
+    }
+
+    /**
+     * Triggered to signal receipt of an intermediate (1xx) HTTP response.
+     *
+     * @param response the intermediate (1xx) HTTP response.
+     * @param context the actual execution context.
+     */
+    @Override
+    public void informationResponse(final HttpResponse response, final HttpContext context) throws HttpException, IOException {}
+
+    /**
+     * Triggered to generate object that represents a result of response message processing.
+     * @param response the response message.
+     * @param entity the response entity.
+     * @param contentType the response content type.
+     * @return the result of response processing.
+ */ + @Override + protected ClassicHttpResponse buildResult(final HttpResponse response, final byte[] entity, final ContentType contentType) { + final ClassicHttpResponse classicResponse = new BasicClassicHttpResponse(response.getCode()); + classicResponse.setVersion(response.getVersion()); + classicResponse.setHeaders(response.getHeaders()); + classicResponse.setReasonPhrase(response.getReasonPhrase()); + if (response.getLocale() != null) { + classicResponse.setLocale(response.getLocale()); + } + + if (entity != null) { + String encoding = null; + + try { + final Header contentEncoding = response.getHeader(HttpHeaders.CONTENT_ENCODING); + if (contentEncoding != null) { + encoding = contentEncoding.getValue(); + } + } catch (final HttpException ex) { + LOGGER.debug("Unable to detect content encoding", ex); + } + + final ByteArrayEntity httpEntity = new ByteArrayEntity(entity, contentType, encoding); + classicResponse.setEntity(httpEntity); + } + + return classicResponse; + } +} diff --git a/client/rest/src/main/java/org/opensearch/client/nio/HttpEntityAsyncEntityProducer.java b/client/rest/src/main/java/org/opensearch/client/nio/HttpEntityAsyncEntityProducer.java new file mode 100644 index 0000000000000..81fe77ddcfbed --- /dev/null +++ b/client/rest/src/main/java/org/opensearch/client/nio/HttpEntityAsyncEntityProducer.java @@ -0,0 +1,182 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.client.nio; + +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.nio.AsyncEntityProducer; +import org.apache.hc.core5.http.nio.DataStreamChannel; +import org.apache.hc.core5.http.nio.ResourceHolder; +import org.apache.hc.core5.util.Args; +import org.apache.hc.core5.util.Asserts; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.Channels; +import java.nio.channels.ReadableByteChannel; +import java.util.Set; +import java.util.concurrent.atomic.AtomicReference; + +/** + * The {@link AsyncEntityProducer} implementation for {@link HttpEntity} + */ +public class HttpEntityAsyncEntityProducer implements AsyncEntityProducer { + + private final HttpEntity entity; + private final ByteBuffer byteBuffer; + private final boolean chunked; + private final AtomicReference exception; + private final AtomicReference channelRef; + private boolean eof; + + /** + * Create new async HTTP entity producer + * @param entity HTTP entity + * @param bufferSize buffer size + */ + public HttpEntityAsyncEntityProducer(final HttpEntity entity, final int bufferSize) { + this.entity = Args.notNull(entity, "Http Entity"); + this.byteBuffer = ByteBuffer.allocate(bufferSize); + this.chunked = entity.isChunked(); + this.exception = new AtomicReference<>(); + this.channelRef = new AtomicReference<>(); + } + + /** + * Create new async HTTP entity producer with default buffer size (8192 bytes) + * @param entity HTTP entity + */ + public HttpEntityAsyncEntityProducer(final HttpEntity entity) { + this(entity, 8192); + } + + /** + * Determines whether the producer can consistently produce the same content + * after invocation of {@link ResourceHolder#releaseResources()}. + */ + @Override + public boolean isRepeatable() { + return entity.isRepeatable(); + } + + /** + * Returns content type of the entity, if known. 
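
The `buildResult` hook above is what turns the streamed status line, headers, and buffered bytes back into a `ClassicHttpResponse`. As a hedged end-to-end sketch (host, port, and path are illustrative; `HttpUriRequestProducer` is the request-side counterpart added elsewhere in this patch):

```java
import org.apache.hc.client5.http.classic.methods.HttpGet;
import org.apache.hc.client5.http.impl.async.CloseableHttpAsyncClient;
import org.apache.hc.client5.http.impl.async.HttpAsyncClientBuilder;
import org.apache.hc.core5.http.ClassicHttpResponse;
import org.apache.hc.core5.http.HttpHost;
import org.apache.hc.core5.http.io.entity.EntityUtils;
import org.opensearch.client.http.HttpUriRequestProducer;
import org.opensearch.client.nio.HeapBufferedAsyncResponseConsumer;

import java.util.concurrent.Future;

public class BufferedConsumerExample {
    public static void main(String[] args) throws Exception {
        try (CloseableHttpAsyncClient client = HttpAsyncClientBuilder.create().build()) {
            client.start();
            HttpHost host = new HttpHost("http", "localhost", 9200); // illustrative endpoint
            Future<ClassicHttpResponse> future = client.execute(
                HttpUriRequestProducer.create(new HttpGet("/"), host),
                new HeapBufferedAsyncResponseConsumer(1024 * 1024), // fail past 1MB of body
                null
            );
            // The consumer buffers the body on the heap and hands back a classic response.
            ClassicHttpResponse response = future.get();
            System.out.println(response.getCode() + " " + EntityUtils.toString(response.getEntity()));
        }
    }
}
```
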
+ */ + @Override + public String getContentType() { + return entity.getContentType(); + } + + /** + * Returns length of the entity, if known. + */ + @Override + public long getContentLength() { + return entity.getContentLength(); + } + + /** + * Returns the number of bytes immediately available for output. + * This method can be used as a hint to control output events + * of the underlying I/O session. + * + * @return the number of bytes immediately available for output + */ + @Override + public int available() { + return Integer.MAX_VALUE; + } + + /** + * Returns content encoding of the entity, if known. + */ + @Override + public String getContentEncoding() { + return entity.getContentEncoding(); + } + + /** + * Returns chunked transfer hint for this entity. + *

+ * <p>
+ * The behavior of wrapping entities is implementation dependent,
+ * but should respect the primary purpose.
+ * </p>
+ */ + @Override + public boolean isChunked() { + return chunked; + } + + /** + * Preliminary declaration of trailing headers. + */ + @Override + public Set getTrailerNames() { + return entity.getTrailerNames(); + } + + /** + * Triggered to signal the ability of the underlying data channel + * to accept more data. The data producer can choose to write data + * immediately inside the call or asynchronously at some later point. + * + * @param channel the data channel capable to accepting more data. + */ + @Override + public void produce(final DataStreamChannel channel) throws IOException { + ReadableByteChannel stream = channelRef.get(); + if (stream == null) { + stream = Channels.newChannel(entity.getContent()); + Asserts.check(channelRef.getAndSet(stream) == null, "Illegal producer state"); + } + if (!eof) { + final int bytesRead = stream.read(byteBuffer); + if (bytesRead < 0) { + eof = true; + } + } + if (byteBuffer.position() > 0) { + byteBuffer.flip(); + channel.write(byteBuffer); + byteBuffer.compact(); + } + if (eof && byteBuffer.position() == 0) { + channel.endStream(); + releaseResources(); + } + } + + /** + * Triggered to signal a failure in data generation. + * + * @param cause the cause of the failure. + */ + @Override + public void failed(final Exception cause) { + if (exception.compareAndSet(null, cause)) { + releaseResources(); + } + } + + /** + * Release resources being held + */ + @Override + public void releaseResources() { + eof = false; + final ReadableByteChannel stream = channelRef.getAndSet(null); + if (stream != null) { + try { + stream.close(); + } catch (final IOException ex) { + /* Close quietly */ + } + } + } + +} diff --git a/client/rest/src/main/java/org/opensearch/client/nio/package-info.java b/client/rest/src/main/java/org/opensearch/client/nio/package-info.java new file mode 100644 index 0000000000000..ce4961ed21f7c --- /dev/null +++ b/client/rest/src/main/java/org/opensearch/client/nio/package-info.java @@ -0,0 +1,12 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** + * NIO support classes for REST client. 
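
`HttpEntityAsyncEntityProducer` bridges a classic, blocking-style `HttpEntity` onto the non-blocking transport. A sketch of one plausible wiring, assuming HttpCore 5's stock `BasicRequestProducer` as the carrier (target host and path are made up):

```java
import org.apache.hc.core5.http.ContentType;
import org.apache.hc.core5.http.HttpHost;
import org.apache.hc.core5.http.Method;
import org.apache.hc.core5.http.io.entity.StringEntity;
import org.apache.hc.core5.http.message.BasicHttpRequest;
import org.apache.hc.core5.http.nio.AsyncRequestProducer;
import org.apache.hc.core5.http.nio.support.BasicRequestProducer;
import org.opensearch.client.nio.HttpEntityAsyncEntityProducer;

public class EntityProducerExample {
    public static AsyncRequestProducer jsonPost() {
        // A classic blocking entity, staged onto the non-blocking transport:
        StringEntity entity = new StringEntity("{\"field\":\"value\"}", ContentType.APPLICATION_JSON);
        return new BasicRequestProducer(
            new BasicHttpRequest(Method.POST, new HttpHost("http", "localhost", 9200), "/my-index/_doc"),
            new HttpEntityAsyncEntityProducer(entity) // default 8192-byte staging buffer
        );
    }
}
```

The reactor then calls `produce(channel)` repeatedly; each call copies through the staging `ByteBuffer` until the wrapped stream signals EOF and `endStream()` ends the message.
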
+ */ +package org.opensearch.client.nio; diff --git a/client/rest/src/test/java/org/opensearch/client/FailureTrackingResponseListenerTests.java b/client/rest/src/test/java/org/opensearch/client/FailureTrackingResponseListenerTests.java index 0a997a586acc9..9722ec867a376 100644 --- a/client/rest/src/test/java/org/opensearch/client/FailureTrackingResponseListenerTests.java +++ b/client/rest/src/test/java/org/opensearch/client/FailureTrackingResponseListenerTests.java @@ -32,14 +32,11 @@ package org.opensearch.client; -import org.apache.http.HttpHost; -import org.apache.http.HttpResponse; -import org.apache.http.ProtocolVersion; -import org.apache.http.RequestLine; -import org.apache.http.StatusLine; -import org.apache.http.message.BasicHttpResponse; -import org.apache.http.message.BasicRequestLine; -import org.apache.http.message.BasicStatusLine; +import org.apache.hc.core5.http.ClassicHttpResponse; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.ProtocolVersion; +import org.apache.hc.core5.http.message.RequestLine; +import org.apache.hc.core5.http.message.BasicClassicHttpResponse; import java.util.concurrent.atomic.AtomicReference; @@ -116,9 +113,8 @@ public void onFailure(Exception exception) { private static Response mockResponse() { ProtocolVersion protocolVersion = new ProtocolVersion("HTTP", 1, 1); - RequestLine requestLine = new BasicRequestLine("GET", "/", protocolVersion); - StatusLine statusLine = new BasicStatusLine(protocolVersion, 200, "OK"); - HttpResponse httpResponse = new BasicHttpResponse(statusLine); + RequestLine requestLine = new RequestLine("GET", "/", protocolVersion); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(200, "OK"); return new Response(requestLine, new HttpHost("localhost", 9200), httpResponse); } } diff --git a/client/rest/src/test/java/org/opensearch/client/HasAttributeNodeSelectorTests.java b/client/rest/src/test/java/org/opensearch/client/HasAttributeNodeSelectorTests.java index fd18bba6ee548..b5aca86e95d6c 100644 --- a/client/rest/src/test/java/org/opensearch/client/HasAttributeNodeSelectorTests.java +++ b/client/rest/src/test/java/org/opensearch/client/HasAttributeNodeSelectorTests.java @@ -32,7 +32,7 @@ package org.opensearch.client; -import org.apache.http.HttpHost; +import org.apache.hc.core5.http.HttpHost; import org.opensearch.client.Node.Roles; import java.util.ArrayList; diff --git a/client/rest/src/test/java/org/opensearch/client/HeapBufferedAsyncResponseConsumerTests.java b/client/rest/src/test/java/org/opensearch/client/HeapBufferedAsyncResponseConsumerTests.java index 22852fe4cb793..ed329d973eb78 100644 --- a/client/rest/src/test/java/org/opensearch/client/HeapBufferedAsyncResponseConsumerTests.java +++ b/client/rest/src/test/java/org/opensearch/client/HeapBufferedAsyncResponseConsumerTests.java @@ -32,34 +32,31 @@ package org.opensearch.client; -import org.apache.http.ContentTooLongException; -import org.apache.http.HttpEntity; -import org.apache.http.HttpResponse; -import org.apache.http.ProtocolVersion; -import org.apache.http.StatusLine; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; -import org.apache.http.message.BasicHttpResponse; -import org.apache.http.message.BasicStatusLine; -import org.apache.http.nio.ContentDecoder; -import org.apache.http.nio.IOControl; -import org.apache.http.nio.protocol.HttpAsyncResponseConsumer; -import org.apache.http.protocol.HttpContext; - +import org.apache.hc.core5.http.ClassicHttpResponse; +import 
org.apache.hc.core5.http.ContentTooLongException; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.EntityDetails; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.impl.BasicEntityDetails; +import org.apache.hc.core5.http.io.entity.AbstractHttpEntity; +import org.apache.hc.core5.http.message.BasicClassicHttpResponse; +import org.apache.hc.core5.http.nio.AsyncResponseConsumer; +import org.apache.hc.core5.http.protocol.HttpContext; +import org.opensearch.client.nio.HeapBufferedAsyncResponseConsumer; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStream; import java.lang.reflect.Constructor; import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Modifier; +import java.nio.charset.StandardCharsets; import java.util.concurrent.atomic.AtomicReference; import static org.hamcrest.CoreMatchers.instanceOf; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertSame; import static org.junit.Assert.assertThat; -import static org.junit.Assert.assertTrue; import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; public class HeapBufferedAsyncResponseConsumerTests extends RestClientTestCase { @@ -67,33 +64,6 @@ public class HeapBufferedAsyncResponseConsumerTests extends RestClientTestCase { private static final int MAX_TEST_BUFFER_SIZE = 50 * 1024 * 1024; private static final int TEST_BUFFER_LIMIT = 10 * 1024 * 1024; - public void testResponseProcessing() throws Exception { - ContentDecoder contentDecoder = mock(ContentDecoder.class); - IOControl ioControl = mock(IOControl.class); - HttpContext httpContext = mock(HttpContext.class); - - HeapBufferedAsyncResponseConsumer consumer = spy(new HeapBufferedAsyncResponseConsumer(TEST_BUFFER_LIMIT)); - - ProtocolVersion protocolVersion = new ProtocolVersion("HTTP", 1, 1); - StatusLine statusLine = new BasicStatusLine(protocolVersion, 200, "OK"); - HttpResponse httpResponse = new BasicHttpResponse(statusLine); - httpResponse.setEntity(new StringEntity("test", ContentType.TEXT_PLAIN)); - - // everything goes well - consumer.responseReceived(httpResponse); - consumer.consumeContent(contentDecoder, ioControl); - consumer.responseCompleted(httpContext); - - verify(consumer).releaseResources(); - verify(consumer).buildResult(httpContext); - assertTrue(consumer.isDone()); - assertSame(httpResponse, consumer.getResult()); - - consumer.responseCompleted(httpContext); - verify(consumer, times(1)).releaseResources(); - verify(consumer, times(1)).buildResult(httpContext); - } - public void testDefaultBufferLimit() throws Exception { HeapBufferedAsyncResponseConsumer consumer = new HeapBufferedAsyncResponseConsumer(TEST_BUFFER_LIMIT); bufferLimitTest(consumer, TEST_BUFFER_LIMIT); @@ -127,7 +97,7 @@ public void testCanConfigureHeapBufferLimitFromOutsidePackage() throws ClassNotF assertThat(object, instanceOf(HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory.class)); HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory consumerFactory = (HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory) object; - HttpAsyncResponseConsumer consumer = consumerFactory.createHttpAsyncResponseConsumer(); + AsyncResponseConsumer consumer = consumerFactory.createHttpAsyncResponseConsumer(); assertThat(consumer, instanceOf(HeapBufferedAsyncResponseConsumer.class)); 
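
The factory type exercised by this test is also the public knob for callers. A hedged sketch of raising the per-request buffer ceiling through `RequestOptions` (the 200MB figure is arbitrary; the stock default is 100MB):

```java
import org.opensearch.client.HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory;
import org.opensearch.client.Request;
import org.opensearch.client.RequestOptions;

public class BufferLimitOptionsExample {
    public static Request searchWithLargeBuffer() {
        // Allow responses up to 200MB for this request only.
        RequestOptions options = RequestOptions.DEFAULT.toBuilder()
            .setHttpAsyncResponseConsumerFactory(new HeapBufferedResponseConsumerFactory(200 * 1024 * 1024))
            .build();
        Request request = new Request("GET", "/_search");
        request.setOptions(options);
        return request;
    }
}
```
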
HeapBufferedAsyncResponseConsumer bufferedAsyncResponseConsumer = (HeapBufferedAsyncResponseConsumer) consumer; assertEquals(bufferLimit, bufferedAsyncResponseConsumer.getBufferLimit()); @@ -138,23 +108,40 @@ public void testHttpAsyncResponseConsumerFactoryVisibility() throws ClassNotFoun } private static void bufferLimitTest(HeapBufferedAsyncResponseConsumer consumer, int bufferLimit) throws Exception { - ProtocolVersion protocolVersion = new ProtocolVersion("HTTP", 1, 1); - StatusLine statusLine = new BasicStatusLine(protocolVersion, 200, "OK"); - consumer.onResponseReceived(new BasicHttpResponse(statusLine)); + HttpContext httpContext = mock(HttpContext.class); + + BasicClassicHttpResponse response = new BasicClassicHttpResponse(200, "OK"); + consumer.consumeResponse(response, null, httpContext, null); final AtomicReference contentLength = new AtomicReference<>(); - HttpEntity entity = new StringEntity("", ContentType.APPLICATION_JSON) { + HttpEntity entity = new AbstractHttpEntity(ContentType.APPLICATION_JSON, null, false) { @Override public long getContentLength() { return contentLength.get(); } + + @Override + public InputStream getContent() throws IOException, UnsupportedOperationException { + return new ByteArrayInputStream("".getBytes(StandardCharsets.UTF_8)); + } + + @Override + public boolean isStreaming() { + return false; + } + + @Override + public void close() throws IOException {} }; contentLength.set(randomLongBetween(0L, bufferLimit)); - consumer.onEntityEnclosed(entity, ContentType.APPLICATION_JSON); + response.setEntity(entity); + + final EntityDetails details = new BasicEntityDetails(4096, ContentType.APPLICATION_JSON); + consumer.consumeResponse(response, details, httpContext, null); contentLength.set(randomLongBetween(bufferLimit + 1, MAX_TEST_BUFFER_SIZE)); try { - consumer.onEntityEnclosed(entity, ContentType.APPLICATION_JSON); + consumer.consumeResponse(response, details, httpContext, null); } catch (ContentTooLongException e) { assertEquals( "entity content is too long [" + entity.getContentLength() + "] for the configured buffer limit [" + bufferLimit + "]", diff --git a/client/rest/src/test/java/org/opensearch/client/HostsTrackingFailureListener.java b/client/rest/src/test/java/org/opensearch/client/HostsTrackingFailureListener.java index 2b256e7205397..0e454c6f919f5 100644 --- a/client/rest/src/test/java/org/opensearch/client/HostsTrackingFailureListener.java +++ b/client/rest/src/test/java/org/opensearch/client/HostsTrackingFailureListener.java @@ -32,7 +32,7 @@ package org.opensearch.client; -import org.apache.http.HttpHost; +import org.apache.hc.core5.http.HttpHost; import java.util.HashSet; import java.util.List; diff --git a/client/rest/src/test/java/org/opensearch/client/NodeSelectorTests.java b/client/rest/src/test/java/org/opensearch/client/NodeSelectorTests.java index 65a831e59bfb0..cfc95f0281bcc 100644 --- a/client/rest/src/test/java/org/opensearch/client/NodeSelectorTests.java +++ b/client/rest/src/test/java/org/opensearch/client/NodeSelectorTests.java @@ -32,7 +32,7 @@ package org.opensearch.client; -import org.apache.http.HttpHost; +import org.apache.hc.core5.http.HttpHost; import org.opensearch.client.Node.Roles; import java.util.ArrayList; diff --git a/client/rest/src/test/java/org/opensearch/client/NodeTests.java b/client/rest/src/test/java/org/opensearch/client/NodeTests.java index 352296fa3024a..748bec5fb7de5 100644 --- a/client/rest/src/test/java/org/opensearch/client/NodeTests.java +++ 
b/client/rest/src/test/java/org/opensearch/client/NodeTests.java @@ -32,7 +32,7 @@ package org.opensearch.client; -import org.apache.http.HttpHost; +import org.apache.hc.core5.http.HttpHost; import org.opensearch.client.Node.Roles; import java.util.Arrays; @@ -48,7 +48,9 @@ import static java.util.Collections.singletonMap; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; +import static org.hamcrest.CoreMatchers.equalTo; public class NodeTests extends RestClientTestCase { public void testToString() { @@ -161,4 +163,9 @@ public void testEqualsAndHashCode() { ) ); } + + public void testIsSearchNode() { + Roles searchRole = new Roles(Collections.singleton("search")); + assertThat(searchRole.isSearch(), equalTo(true)); + } } diff --git a/client/rest/src/test/java/org/opensearch/client/PreferHasAttributeNodeSelectorTests.java b/client/rest/src/test/java/org/opensearch/client/PreferHasAttributeNodeSelectorTests.java index 0135cde573743..7dde1b96b3b45 100644 --- a/client/rest/src/test/java/org/opensearch/client/PreferHasAttributeNodeSelectorTests.java +++ b/client/rest/src/test/java/org/opensearch/client/PreferHasAttributeNodeSelectorTests.java @@ -32,7 +32,7 @@ package org.opensearch.client; -import org.apache.http.HttpHost; +import org.apache.hc.core5.http.HttpHost; import org.opensearch.client.Node.Roles; import java.util.ArrayList; diff --git a/client/rest/src/test/java/org/opensearch/client/RequestLoggerTests.java b/client/rest/src/test/java/org/opensearch/client/RequestLoggerTests.java index 3c317db1b72d9..8dea2ad922bd6 100644 --- a/client/rest/src/test/java/org/opensearch/client/RequestLoggerTests.java +++ b/client/rest/src/test/java/org/opensearch/client/RequestLoggerTests.java @@ -32,27 +32,29 @@ package org.opensearch.client; -import org.apache.http.Header; -import org.apache.http.HttpEntity; -import org.apache.http.HttpEntityEnclosingRequest; -import org.apache.http.HttpHost; -import org.apache.http.ProtocolVersion; -import org.apache.http.client.methods.HttpHead; -import org.apache.http.client.methods.HttpOptions; -import org.apache.http.client.methods.HttpPatch; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.client.methods.HttpPut; -import org.apache.http.client.methods.HttpTrace; -import org.apache.http.client.methods.HttpUriRequest; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.InputStreamEntity; -import org.apache.http.entity.StringEntity; -import org.apache.http.message.BasicHeader; -import org.apache.http.message.BasicHttpResponse; -import org.apache.http.message.BasicStatusLine; -import org.apache.http.nio.entity.NByteArrayEntity; -import org.apache.http.nio.entity.NStringEntity; -import org.apache.http.util.EntityUtils; +import org.apache.hc.client5.http.classic.methods.HttpDelete; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.client5.http.classic.methods.HttpHead; +import org.apache.hc.client5.http.classic.methods.HttpOptions; +import org.apache.hc.client5.http.classic.methods.HttpPatch; +import org.apache.hc.client5.http.classic.methods.HttpPost; +import org.apache.hc.client5.http.classic.methods.HttpPut; +import org.apache.hc.client5.http.classic.methods.HttpTrace; +import org.apache.hc.client5.http.classic.methods.HttpUriRequest; +import org.apache.hc.core5.http.ClassicHttpResponse; +import org.apache.hc.core5.http.ContentType; +import 
org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.ProtocolVersion; +import org.apache.hc.core5.http.io.entity.ByteArrayEntity; +import org.apache.hc.core5.http.io.entity.EntityUtils; +import org.apache.hc.core5.http.io.entity.InputStreamEntity; +import org.apache.hc.core5.http.io.entity.StringEntity; +import org.apache.hc.core5.http.message.BasicClassicHttpResponse; +import org.apache.hc.core5.http.message.BasicHeader; +import org.apache.hc.core5.http.message.StatusLine; import java.io.ByteArrayInputStream; import java.io.IOException; @@ -66,8 +68,8 @@ import static org.junit.Assert.assertThat; public class RequestLoggerTests extends RestClientTestCase { - public void testTraceRequest() throws IOException, URISyntaxException { - HttpHost host = new HttpHost("localhost", 9200, randomBoolean() ? "http" : "https"); + public void testTraceRequest() throws IOException, URISyntaxException, ParseException { + HttpHost host = new HttpHost(randomBoolean() ? "http" : "https", "localhost", 9200); String expectedEndpoint = "/index/type/_api"; URI uri; if (randomBoolean()) { @@ -77,11 +79,10 @@ public void testTraceRequest() throws IOException, URISyntaxException { } HttpUriRequest request = randomHttpRequest(uri); String expected = "curl -iX " + request.getMethod() + " '" + host + expectedEndpoint + "'"; - boolean hasBody = request instanceof HttpEntityEnclosingRequest && randomBoolean(); + boolean hasBody = !(request instanceof HttpTrace) && randomBoolean(); String requestBody = "{ \"field\": \"value\" }"; if (hasBody) { expected += " -d '" + requestBody + "'"; - HttpEntityEnclosingRequest enclosingRequest = (HttpEntityEnclosingRequest) request; HttpEntity entity; switch (randomIntBetween(0, 4)) { case 0: @@ -94,10 +95,10 @@ public void testTraceRequest() throws IOException, URISyntaxException { ); break; case 2: - entity = new NStringEntity(requestBody, ContentType.APPLICATION_JSON); + entity = new StringEntity(requestBody, ContentType.APPLICATION_JSON); break; case 3: - entity = new NByteArrayEntity(requestBody.getBytes(StandardCharsets.UTF_8), ContentType.APPLICATION_JSON); + entity = new ByteArrayEntity(requestBody.getBytes(StandardCharsets.UTF_8), ContentType.APPLICATION_JSON); break; case 4: // Evil entity without a charset @@ -106,24 +107,24 @@ public void testTraceRequest() throws IOException, URISyntaxException { default: throw new UnsupportedOperationException(); } - enclosingRequest.setEntity(entity); + request.setEntity(entity); } String traceRequest = RequestLogger.buildTraceRequest(request, host); assertThat(traceRequest, equalTo(expected)); if (hasBody) { // check that the body is still readable as most entities are not repeatable - String body = EntityUtils.toString(((HttpEntityEnclosingRequest) request).getEntity(), StandardCharsets.UTF_8); + String body = EntityUtils.toString(request.getEntity(), StandardCharsets.UTF_8); assertThat(body, equalTo(requestBody)); } } - public void testTraceResponse() throws IOException { + public void testTraceResponse() throws IOException, ParseException { ProtocolVersion protocolVersion = new ProtocolVersion("HTTP", 1, 1); int statusCode = randomIntBetween(200, 599); String reasonPhrase = "REASON"; - BasicStatusLine statusLine = new BasicStatusLine(protocolVersion, statusCode, reasonPhrase); + StatusLine statusLine = new StatusLine(protocolVersion, statusCode, reasonPhrase); String expected = "# " + 
statusLine.toString(); - BasicHttpResponse httpResponse = new BasicHttpResponse(statusLine); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(statusCode, reasonPhrase); int numHeaders = randomIntBetween(0, 3); for (int i = 0; i < numHeaders; i++) { httpResponse.setHeader("header" + i, "value"); @@ -192,13 +193,13 @@ private static HttpUriRequest randomHttpRequest(URI uri) { int requestType = randomIntBetween(0, 7); switch (requestType) { case 0: - return new HttpGetWithEntity(uri); + return new HttpGet(uri); case 1: return new HttpPost(uri); case 2: return new HttpPut(uri); case 3: - return new HttpDeleteWithEntity(uri); + return new HttpDelete(uri); case 4: return new HttpHead(uri); case 5: diff --git a/client/rest/src/test/java/org/opensearch/client/RequestOptionsTests.java b/client/rest/src/test/java/org/opensearch/client/RequestOptionsTests.java index aaa40db1442ee..a7f9a48c73393 100644 --- a/client/rest/src/test/java/org/opensearch/client/RequestOptionsTests.java +++ b/client/rest/src/test/java/org/opensearch/client/RequestOptionsTests.java @@ -32,8 +32,9 @@ package org.opensearch.client; -import org.apache.http.Header; -import org.apache.http.client.config.RequestConfig; +import org.apache.hc.client5.http.config.RequestConfig; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.util.Timeout; import org.opensearch.client.HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory; import java.util.ArrayList; @@ -108,15 +109,15 @@ public void testSetRequestBuilder() { RequestOptions.Builder builder = RequestOptions.DEFAULT.toBuilder(); RequestConfig.Builder requestConfigBuilder = RequestConfig.custom(); - int socketTimeout = 10000; - int connectTimeout = 100; - requestConfigBuilder.setSocketTimeout(socketTimeout).setConnectTimeout(connectTimeout); + Timeout responseTimeout = Timeout.ofMilliseconds(10000); + Timeout connectTimeout = Timeout.ofMilliseconds(100); + requestConfigBuilder.setResponseTimeout(responseTimeout).setConnectTimeout(connectTimeout); RequestConfig requestConfig = requestConfigBuilder.build(); builder.setRequestConfig(requestConfig); RequestOptions options = builder.build(); assertSame(options.getRequestConfig(), requestConfig); - assertEquals(options.getRequestConfig().getSocketTimeout(), socketTimeout); + assertEquals(options.getRequestConfig().getResponseTimeout(), responseTimeout); assertEquals(options.getRequestConfig().getConnectTimeout(), connectTimeout); } diff --git a/client/rest/src/test/java/org/opensearch/client/RequestTests.java b/client/rest/src/test/java/org/opensearch/client/RequestTests.java index ba15c0d0b733c..d11982e9f9642 100644 --- a/client/rest/src/test/java/org/opensearch/client/RequestTests.java +++ b/client/rest/src/test/java/org/opensearch/client/RequestTests.java @@ -32,15 +32,17 @@ package org.opensearch.client; -import org.apache.http.HttpEntity; -import org.apache.http.entity.ByteArrayEntity; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; -import org.apache.http.nio.entity.NStringEntity; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.io.entity.ByteArrayEntity; +import org.apache.hc.core5.http.io.entity.InputStreamEntity; +import org.apache.hc.core5.http.io.entity.StringEntity; import org.opensearch.client.HttpAsyncResponseConsumerFactory.HeapBufferedResponseConsumerFactory; +import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.IOException; 
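
For callers migrating request configuration, HttpClient 4's `setSocketTimeout` becomes `setResponseTimeout` in HttpClient 5, which is what the assertions above track. A sketch with illustrative endpoint and values:

```java
import org.apache.hc.client5.http.config.RequestConfig;
import org.apache.hc.core5.util.Timeout;
import org.opensearch.client.Request;
import org.opensearch.client.RequestOptions;

public class TimeoutOptionsExample {
    public static Request healthWithTimeouts() {
        RequestConfig requestConfig = RequestConfig.custom()
            .setConnectTimeout(Timeout.ofSeconds(5))   // connection establishment
            .setResponseTimeout(Timeout.ofSeconds(30)) // was setSocketTimeout in HttpClient 4
            .build();
        Request request = new Request("GET", "/_cluster/health");
        request.setOptions(RequestOptions.DEFAULT.toBuilder().setRequestConfig(requestConfig).build());
        return request;
    }
}
```
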
+import java.nio.charset.StandardCharsets; import java.util.HashMap; import java.util.Map; @@ -133,7 +135,7 @@ public void testSetJsonEntity() throws IOException { final String json = randomAsciiLettersOfLengthBetween(1, 100); request.setJsonEntity(json); - assertEquals(ContentType.APPLICATION_JSON.toString(), request.getEntity().getContentType().getValue()); + assertEquals(ContentType.APPLICATION_JSON.toString(), request.getEntity().getContentType()); ByteArrayOutputStream os = new ByteArrayOutputStream(); request.getEntity().writeTo(os); assertEquals(json, new String(os.toByteArray(), ContentType.APPLICATION_JSON.getCharset())); @@ -201,7 +203,10 @@ private static Request randomRequest() { randomFrom( new HttpEntity[] { new StringEntity(randomAsciiAlphanumOfLength(10), ContentType.APPLICATION_JSON), - new NStringEntity(randomAsciiAlphanumOfLength(10), ContentType.APPLICATION_JSON), + new InputStreamEntity( + new ByteArrayInputStream(randomAsciiAlphanumOfLength(10).getBytes(StandardCharsets.UTF_8)), + ContentType.APPLICATION_JSON + ), new ByteArrayEntity(randomBytesOfLength(40), ContentType.APPLICATION_JSON) } ) ); diff --git a/client/rest/src/test/java/org/opensearch/client/ResponseExceptionTests.java b/client/rest/src/test/java/org/opensearch/client/ResponseExceptionTests.java index 8ecd3e1a29c99..dfbf105637962 100644 --- a/client/rest/src/test/java/org/opensearch/client/ResponseExceptionTests.java +++ b/client/rest/src/test/java/org/opensearch/client/ResponseExceptionTests.java @@ -32,19 +32,17 @@ package org.opensearch.client; -import org.apache.http.HttpEntity; -import org.apache.http.HttpHost; -import org.apache.http.HttpResponse; -import org.apache.http.ProtocolVersion; -import org.apache.http.RequestLine; -import org.apache.http.StatusLine; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.InputStreamEntity; -import org.apache.http.entity.StringEntity; -import org.apache.http.message.BasicHttpResponse; -import org.apache.http.message.BasicRequestLine; -import org.apache.http.message.BasicStatusLine; -import org.apache.http.util.EntityUtils; +import org.apache.hc.core5.http.ClassicHttpResponse; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.ProtocolVersion; +import org.apache.hc.core5.http.io.entity.EntityUtils; +import org.apache.hc.core5.http.io.entity.InputStreamEntity; +import org.apache.hc.core5.http.io.entity.StringEntity; +import org.apache.hc.core5.http.message.BasicClassicHttpResponse; +import org.apache.hc.core5.http.message.RequestLine; import java.io.ByteArrayInputStream; import java.io.IOException; @@ -57,10 +55,9 @@ public class ResponseExceptionTests extends RestClientTestCase { - public void testResponseException() throws IOException { + public void testResponseException() throws IOException, ParseException { ProtocolVersion protocolVersion = new ProtocolVersion("http", 1, 1); - StatusLine statusLine = new BasicStatusLine(protocolVersion, 500, "Internal Server Error"); - HttpResponse httpResponse = new BasicHttpResponse(statusLine); + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(500, "Internal Server Error"); String responseBody = "{\"error\":{\"root_cause\": {}}}"; boolean hasBody = getRandom().nextBoolean(); @@ -78,7 +75,7 @@ public void testResponseException() throws IOException { httpResponse.setEntity(entity); } - RequestLine requestLine = new 
BasicRequestLine("GET", "/", protocolVersion); + RequestLine requestLine = new RequestLine("GET", "/", protocolVersion); HttpHost httpHost = new HttpHost("localhost", 9200); Response response = new Response(requestLine, httpHost, httpResponse); ResponseException responseException = new ResponseException(response); diff --git a/client/rest/src/test/java/org/opensearch/client/RestClientBuilderIntegTests.java b/client/rest/src/test/java/org/opensearch/client/RestClientBuilderIntegTests.java index 10bf9568c8798..f5e1735042e66 100644 --- a/client/rest/src/test/java/org/opensearch/client/RestClientBuilderIntegTests.java +++ b/client/rest/src/test/java/org/opensearch/client/RestClientBuilderIntegTests.java @@ -36,7 +36,8 @@ import com.sun.net.httpserver.HttpHandler; import com.sun.net.httpserver.HttpsConfigurator; import com.sun.net.httpserver.HttpsServer; -import org.apache.http.HttpHost; + +import org.apache.hc.core5.http.HttpHost; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -117,7 +118,7 @@ public void testBuilderUsesDefaultSSLContext() throws Exception { private RestClient buildRestClient() { InetSocketAddress address = httpsServer.getAddress(); - return RestClient.builder(new HttpHost(address.getHostString(), address.getPort(), "https")).build(); + return RestClient.builder(new HttpHost("https", address.getHostString(), address.getPort())).build(); } private static SSLContext getSslContext() throws Exception { diff --git a/client/rest/src/test/java/org/opensearch/client/RestClientBuilderTests.java b/client/rest/src/test/java/org/opensearch/client/RestClientBuilderTests.java index ac81cd1132a2f..7165174e688e1 100644 --- a/client/rest/src/test/java/org/opensearch/client/RestClientBuilderTests.java +++ b/client/rest/src/test/java/org/opensearch/client/RestClientBuilderTests.java @@ -32,11 +32,12 @@ package org.opensearch.client; -import org.apache.http.Header; -import org.apache.http.HttpHost; -import org.apache.http.client.config.RequestConfig; -import org.apache.http.impl.nio.client.HttpAsyncClientBuilder; -import org.apache.http.message.BasicHeader; +import org.apache.hc.client5.http.config.RequestConfig; +import org.apache.hc.client5.http.impl.async.HttpAsyncClientBuilder; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.message.BasicHeader; +import org.apache.hc.core5.util.Timeout; import java.io.IOException; import java.util.Base64; @@ -271,7 +272,7 @@ public RequestConfig.Builder customizeRequestConfig(RequestConfig.Builder reques RequestConfig requestConfig = requestConfigBuilder.build(); assertEquals(RequestConfig.DEFAULT.getConnectionRequestTimeout(), requestConfig.getConnectionRequestTimeout()); // this way we get notified if the default ever changes - assertEquals(-1, requestConfig.getConnectionRequestTimeout()); + assertEquals(Timeout.ofMinutes(3), requestConfig.getConnectionRequestTimeout()); return requestConfigBuilder; } }); diff --git a/client/rest/src/test/java/org/opensearch/client/RestClientCompressionTests.java b/client/rest/src/test/java/org/opensearch/client/RestClientCompressionTests.java index e8b7742044f67..bf2c19b8127a1 100644 --- a/client/rest/src/test/java/org/opensearch/client/RestClientCompressionTests.java +++ b/client/rest/src/test/java/org/opensearch/client/RestClientCompressionTests.java @@ -11,10 +11,11 @@ import com.sun.net.httpserver.HttpExchange; import com.sun.net.httpserver.HttpHandler; import com.sun.net.httpserver.HttpServer; -import org.apache.http.HttpEntity; 
-import org.apache.http.HttpHost; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; + +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.io.entity.StringEntity; import org.junit.AfterClass; import org.junit.Assert; import org.junit.BeforeClass; @@ -108,7 +109,7 @@ private static byte[] readAll(InputStream in) throws IOException { private RestClient createClient(boolean enableCompression, boolean chunkedEnabled) { InetSocketAddress address = httpServer.getAddress(); - return RestClient.builder(new HttpHost(address.getHostString(), address.getPort(), "http")) + return RestClient.builder(new HttpHost("http", address.getHostString(), address.getPort())) .setCompressionEnabled(enableCompression) .setChunkedEnabled(chunkedEnabled) .build(); diff --git a/client/rest/src/test/java/org/opensearch/client/RestClientGzipCompressionTests.java b/client/rest/src/test/java/org/opensearch/client/RestClientGzipCompressionTests.java index 8c4d993517fee..fdcb65ff101c9 100644 --- a/client/rest/src/test/java/org/opensearch/client/RestClientGzipCompressionTests.java +++ b/client/rest/src/test/java/org/opensearch/client/RestClientGzipCompressionTests.java @@ -35,10 +35,11 @@ import com.sun.net.httpserver.HttpExchange; import com.sun.net.httpserver.HttpHandler; import com.sun.net.httpserver.HttpServer; -import org.apache.http.HttpEntity; -import org.apache.http.HttpHost; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; + +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.io.entity.StringEntity; import org.junit.AfterClass; import org.junit.Assert; import org.junit.BeforeClass; @@ -126,7 +127,7 @@ private static byte[] readAll(InputStream in) throws IOException { private RestClient createClient(boolean enableCompression) { InetSocketAddress address = httpServer.getAddress(); - return RestClient.builder(new HttpHost(address.getHostString(), address.getPort(), "http")) + return RestClient.builder(new HttpHost("http", address.getHostString(), address.getPort())) .setCompressionEnabled(enableCompression) .build(); } @@ -184,7 +185,7 @@ public void testCompressingClientSync() throws Exception { public void testCompressingClientAsync() throws Exception { InetSocketAddress address = httpServer.getAddress(); - RestClient restClient = RestClient.builder(new HttpHost(address.getHostString(), address.getPort(), "http")) + RestClient restClient = RestClient.builder(new HttpHost("http", address.getHostString(), address.getPort())) .setCompressionEnabled(true) .build(); diff --git a/client/rest/src/test/java/org/opensearch/client/RestClientMultipleHostsIntegTests.java b/client/rest/src/test/java/org/opensearch/client/RestClientMultipleHostsIntegTests.java index 277446191a36e..8c62533072c70 100644 --- a/client/rest/src/test/java/org/opensearch/client/RestClientMultipleHostsIntegTests.java +++ b/client/rest/src/test/java/org/opensearch/client/RestClientMultipleHostsIntegTests.java @@ -35,7 +35,8 @@ import com.sun.net.httpserver.HttpExchange; import com.sun.net.httpserver.HttpHandler; import com.sun.net.httpserver.HttpServer; -import org.apache.http.HttpHost; + +import org.apache.hc.core5.http.HttpHost; import org.junit.AfterClass; import org.junit.Before; import org.junit.BeforeClass; @@ -56,6 +57,7 @@ import static 
org.opensearch.client.RestClientTestUtil.getAllStatusCodes; import static org.opensearch.client.RestClientTestUtil.randomErrorNoRetryStatusCode; import static org.opensearch.client.RestClientTestUtil.randomOkStatusCode; +import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.instanceOf; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertThat; @@ -63,7 +65,7 @@ import static org.junit.Assert.fail; /** - * Integration test to check interaction between {@link RestClient} and {@link org.apache.http.client.HttpClient}. + * Integration test to check interaction between {@link RestClient} and {@link org.apache.hc.client5.http.classic.HttpClient}. * Works against real http servers, multiple hosts. Also tests failover by randomly shutting down hosts. */ public class RestClientMultipleHostsIntegTests extends RestClientTestCase { @@ -299,7 +301,7 @@ public void testNodeSelector() throws Exception { } catch (ConnectException e) { // Windows isn't consistent here. Sometimes the message is even null! if (false == System.getProperty("os.name").startsWith("Windows")) { - assertEquals("Connection refused", e.getMessage()); + assertThat(e.getMessage(), containsString("Connection refused")); } } } else { diff --git a/client/rest/src/test/java/org/opensearch/client/RestClientMultipleHostsTests.java b/client/rest/src/test/java/org/opensearch/client/RestClientMultipleHostsTests.java index d88d4f4afd9b1..62574e5ed6d5a 100644 --- a/client/rest/src/test/java/org/opensearch/client/RestClientMultipleHostsTests.java +++ b/client/rest/src/test/java/org/opensearch/client/RestClientMultipleHostsTests.java @@ -33,9 +33,10 @@ package org.opensearch.client; import com.carrotsearch.randomizedtesting.generators.RandomNumbers; -import org.apache.http.Header; -import org.apache.http.HttpHost; -import org.apache.http.impl.nio.client.CloseableHttpAsyncClient; + +import org.apache.hc.client5.http.impl.async.CloseableHttpAsyncClient; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpHost; import org.junit.After; import java.io.IOException; diff --git a/client/rest/src/test/java/org/opensearch/client/RestClientSingleHostIntegTests.java b/client/rest/src/test/java/org/opensearch/client/RestClientSingleHostIntegTests.java index 0500d282a506d..beee1c5ca21a0 100644 --- a/client/rest/src/test/java/org/opensearch/client/RestClientSingleHostIntegTests.java +++ b/client/rest/src/test/java/org/opensearch/client/RestClientSingleHostIntegTests.java @@ -36,30 +36,34 @@ import com.sun.net.httpserver.HttpExchange; import com.sun.net.httpserver.HttpHandler; import com.sun.net.httpserver.HttpServer; -import org.apache.http.Consts; -import org.apache.http.Header; -import org.apache.http.HttpHost; -import org.apache.http.HttpResponse; -import org.apache.http.auth.AuthScope; -import org.apache.http.auth.UsernamePasswordCredentials; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.client.methods.HttpRequestBase; -import org.apache.http.entity.ContentType; -import org.apache.http.impl.client.BasicCredentialsProvider; -import org.apache.http.impl.client.TargetAuthenticationStrategy; -import org.apache.http.impl.nio.client.CloseableHttpAsyncClient; -import org.apache.http.impl.nio.client.HttpAsyncClientBuilder; -import org.apache.http.message.BasicHeader; -import org.apache.http.nio.entity.NStringEntity; -import org.apache.http.util.EntityUtils; + +import org.apache.hc.client5.http.auth.AuthScope; +import 
org.apache.hc.client5.http.auth.UsernamePasswordCredentials; +import org.apache.hc.client5.http.classic.methods.HttpUriRequestBase; +import org.apache.hc.client5.http.impl.DefaultAuthenticationStrategy; +import org.apache.hc.client5.http.impl.async.CloseableHttpAsyncClient; +import org.apache.hc.client5.http.impl.async.HttpAsyncClientBuilder; +import org.apache.hc.client5.http.impl.auth.BasicCredentialsProvider; +import org.apache.hc.core5.http.ClassicHttpResponse; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.io.entity.EntityUtils; +import org.apache.hc.core5.http.io.entity.StringEntity; +import org.apache.hc.core5.http.message.BasicHeader; +import org.apache.hc.core5.http.nio.AsyncResponseConsumer; +import org.apache.hc.core5.net.URIBuilder; import org.junit.After; import org.junit.Before; +import org.opensearch.client.http.HttpUriRequestProducer; +import org.opensearch.client.nio.HeapBufferedAsyncResponseConsumer; import java.io.IOException; import java.io.InputStreamReader; import java.io.OutputStream; import java.net.InetAddress; import java.net.InetSocketAddress; +import java.nio.charset.StandardCharsets; import java.util.Arrays; import java.util.HashSet; import java.util.List; @@ -86,7 +90,7 @@ import static org.junit.Assert.fail; /** - * Integration test to check interaction between {@link RestClient} and {@link org.apache.http.client.HttpClient}. + * Integration test to check interaction between {@link RestClient} and {@link org.apache.hc.client5.http.classic.HttpClient}. * Works against a real http server, one single host. */ public class RestClientSingleHostIntegTests extends RestClientTestCase { @@ -147,7 +151,7 @@ private static class ResponseHandler implements HttpHandler { public void handle(HttpExchange httpExchange) throws IOException { // copy request body to response body so we can verify it was sent StringBuilder body = new StringBuilder(); - try (InputStreamReader reader = new InputStreamReader(httpExchange.getRequestBody(), Consts.UTF_8)) { + try (InputStreamReader reader = new InputStreamReader(httpExchange.getRequestBody(), StandardCharsets.UTF_8)) { char[] buffer = new char[256]; int read; while ((read = reader.read(buffer)) != -1) { @@ -164,7 +168,7 @@ public void handle(HttpExchange httpExchange) throws IOException { httpExchange.sendResponseHeaders(statusCode, body.length() == 0 ? 
-1 : body.length()); if (body.length() > 0) { try (OutputStream out = httpExchange.getResponseBody()) { - out.write(body.toString().getBytes(Consts.UTF_8)); + out.write(body.toString().getBytes(StandardCharsets.UTF_8)); } } httpExchange.close(); @@ -172,18 +176,20 @@ public void handle(HttpExchange httpExchange) throws IOException { } private RestClient createRestClient(final boolean useAuth, final boolean usePreemptiveAuth) { - // provide the username/password for every request - final BasicCredentialsProvider credentialsProvider = new BasicCredentialsProvider(); - credentialsProvider.setCredentials(AuthScope.ANY, new UsernamePasswordCredentials("user", "pass")); - - final RestClientBuilder restClientBuilder = RestClient.builder( - new HttpHost(httpServer.getAddress().getHostString(), httpServer.getAddress().getPort()) - ).setDefaultHeaders(defaultHeaders); + final HttpHost httpHost = new HttpHost(httpServer.getAddress().getHostString(), httpServer.getAddress().getPort()); + final RestClientBuilder restClientBuilder = RestClient.builder(httpHost).setDefaultHeaders(defaultHeaders); if (pathPrefix.length() > 0) { restClientBuilder.setPathPrefix(pathPrefix); } if (useAuth) { + // provide the username/password for every request + final BasicCredentialsProvider credentialsProvider = new BasicCredentialsProvider(); + credentialsProvider.setCredentials( + new AuthScope(httpHost, null, "Basic"), + new UsernamePasswordCredentials("user", "pass".toCharArray()) + ); + restClientBuilder.setHttpClientConfigCallback(new RestClientBuilder.HttpClientConfigCallback() { @Override public HttpAsyncClientBuilder customizeHttpClient(final HttpAsyncClientBuilder httpClientBuilder) { @@ -191,7 +197,7 @@ public HttpAsyncClientBuilder customizeHttpClient(final HttpAsyncClientBuilder h // disable preemptive auth by ignoring any authcache httpClientBuilder.disableAuthCaching(); // don't use the "persistent credentials strategy" - httpClientBuilder.setTargetAuthenticationStrategy(new TargetAuthenticationStrategy()); + httpClientBuilder.setTargetAuthenticationStrategy(DefaultAuthenticationStrategy.INSTANCE); } return httpClientBuilder.setDefaultCredentialsProvider(credentialsProvider); @@ -220,7 +226,7 @@ public void testManyAsyncRequests() throws Exception { final List exceptions = new CopyOnWriteArrayList<>(); for (int i = 0; i < iters; i++) { Request request = new Request("PUT", "/200"); - request.setEntity(new NStringEntity("{}", ContentType.APPLICATION_JSON)); + request.setEntity(new StringEntity("{}", ContentType.APPLICATION_JSON)); restClient.performRequestAsync(request, new ResponseListener() { @Override public void onSuccess(Response response) { @@ -271,7 +277,7 @@ public void onFailure(Exception exception) { /** * This test verifies some assumptions that we rely upon around the way the async http client works when reusing the same request - * throughout multiple retries, and the use of the {@link HttpRequestBase#abort()} method. + * throughout multiple retries, and the use of the {@link HttpUriRequestBase#abort()} method. * In fact the low-level REST client reuses the same request instance throughout multiple retries, and relies on the http client * to set the future ref to the request properly so that when abort is called, the proper future gets cancelled. 
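
A condensed sketch of that wiring under HttpClient 5, assuming a started `client` and a reachable `host` (both placeholders). Unlike HttpClient 4, the request is tied to its in-flight future explicitly, which the hunk below does via `setDependency`:

```java
import org.apache.hc.client5.http.classic.methods.HttpUriRequestBase;
import org.apache.hc.client5.http.impl.async.CloseableHttpAsyncClient;
import org.apache.hc.core5.concurrent.Cancellable;
import org.apache.hc.core5.http.ClassicHttpResponse;
import org.apache.hc.core5.http.HttpHost;
import org.opensearch.client.http.HttpUriRequestProducer;
import org.opensearch.client.nio.HeapBufferedAsyncResponseConsumer;

import java.net.URI;
import java.util.concurrent.Future;

public class AbortWiringExample {
    static void abortInFlight(CloseableHttpAsyncClient client, HttpHost host) throws Exception {
        HttpUriRequestBase httpGet = new HttpUriRequestBase("GET", new URI("/200"));
        Future<ClassicHttpResponse> future = client.execute(
            HttpUriRequestProducer.create(httpGet, host),
            new HeapBufferedAsyncResponseConsumer(1024),
            null
        );
        httpGet.setDependency((Cancellable) future); // link the request to its in-flight future
        httpGet.abort();                             // now also cancels the future
        // future.get() is then expected to throw a CancellationException
    }
}
```
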
*/ @@ -279,7 +285,10 @@ public void testRequestResetAndAbort() throws Exception { try (CloseableHttpAsyncClient client = HttpAsyncClientBuilder.create().build()) { client.start(); HttpHost httpHost = new HttpHost(httpServer.getAddress().getHostString(), httpServer.getAddress().getPort()); - HttpGet httpGet = new HttpGet(pathPrefix + "/200"); + HttpUriRequestBase httpGet = new HttpUriRequestBase( + "GET", + new URIBuilder().setHttpHost(httpHost).setPath(pathPrefix + "/200").build() + ); // calling abort before the request is sent is a no-op httpGet.abort(); @@ -288,8 +297,11 @@ public void testRequestResetAndAbort() throws Exception { { httpGet.reset(); assertFalse(httpGet.isAborted()); + + Future future = client.execute(getRequestProducer(httpGet, httpHost), getResponseConsumer(), null); + httpGet.setDependency((org.apache.hc.core5.concurrent.Cancellable) future); httpGet.abort(); - Future future = client.execute(httpHost, httpGet, null); + try { future.get(); fail("expected cancellation exception"); @@ -300,8 +312,9 @@ public void testRequestResetAndAbort() throws Exception { } { httpGet.reset(); - Future future = client.execute(httpHost, httpGet, null); + Future future = client.execute(getRequestProducer(httpGet, httpHost), getResponseConsumer(), null); assertFalse(httpGet.isAborted()); + httpGet.setDependency((org.apache.hc.core5.concurrent.Cancellable) future); httpGet.abort(); assertTrue(httpGet.isAborted()); try { @@ -315,9 +328,9 @@ public void testRequestResetAndAbort() throws Exception { { httpGet.reset(); assertFalse(httpGet.isAborted()); - Future future = client.execute(httpHost, httpGet, null); + Future future = client.execute(getRequestProducer(httpGet, httpHost), getResponseConsumer(), null); assertFalse(httpGet.isAborted()); - assertEquals(200, future.get().getStatusLine().getStatusCode()); + assertEquals(200, future.get().getCode()); assertFalse(future.isCancelled()); } } @@ -325,7 +338,7 @@ public void testRequestResetAndAbort() throws Exception { /** * End to end test for headers. We test it explicitly against a real http client as there are different ways - * to set/add headers to the {@link org.apache.http.client.HttpClient}. + * to set/add headers to the {@link org.apache.hc.client5.http.classic.HttpClient}. * Exercises the test http server ability to send back whatever headers it received. */ public void testHeaders() throws Exception { @@ -365,7 +378,7 @@ public void testHeaders() throws Exception { /** * End to end test for delete with body. We test it explicitly as it is not supported - * out of the box by {@link org.apache.http.client.HttpClient}. + * out of the box by {@link org.apache.hc.client5.http.classic.HttpClient}. * Exercises the test http server ability to send back whatever body it received. */ public void testDeleteWithBody() throws Exception { @@ -374,7 +387,7 @@ public void testDeleteWithBody() throws Exception { /** * End to end test for get with body. We test it explicitly as it is not supported - * out of the box by {@link org.apache.http.client.HttpClient}. + * out of the box by {@link org.apache.hc.client5.http.classic.HttpClient}. * Exercises the test http server ability to send back whatever body it received. 
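
A hedged sketch of issuing such a request through the high-level `Request` API (index name and payload are made up):

```java
import org.opensearch.client.Request;
import org.opensearch.client.Response;
import org.opensearch.client.RestClient;

import java.io.IOException;

public class BodyOnGetExample {
    static Response searchViaGetWithBody(RestClient restClient) throws IOException {
        // The low-level client carries the entity even on GET/DELETE.
        Request request = new Request("GET", "/my-index/_search"); // hypothetical index
        request.setJsonEntity("{\"query\":{\"match_all\":{}}}");
        return restClient.performRequest(request);
    }
}
```
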
*/ public void testGetWithBody() throws Exception { @@ -410,7 +423,7 @@ public void testEncodeParams() throws Exception { Request request = new Request("PUT", "/200"); request.addParameter("routing", "foo bar"); Response response = RestClientSingleHostTests.performRequestSyncOrAsync(restClient, request); - assertEquals(pathPrefix + "/200?routing=foo+bar", response.getRequestLine().getUri()); + assertEquals(pathPrefix + "/200?routing=foo%20bar", response.getRequestLine().getUri()); } { Request request = new Request("PUT", "/200"); @@ -540,4 +553,13 @@ private Response bodyTest(RestClient restClient, String method, int statusCode, return esResponse; } + + private AsyncResponseConsumer getResponseConsumer() { + return new HeapBufferedAsyncResponseConsumer(1024); + } + + private HttpUriRequestProducer getRequestProducer(HttpUriRequestBase request, HttpHost host) { + return HttpUriRequestProducer.create(request, host); + + } } diff --git a/client/rest/src/test/java/org/opensearch/client/RestClientSingleHostTests.java b/client/rest/src/test/java/org/opensearch/client/RestClientSingleHostTests.java index e5ce5eb91ad5a..f46a91aa910f8 100644 --- a/client/rest/src/test/java/org/opensearch/client/RestClientSingleHostTests.java +++ b/client/rest/src/test/java/org/opensearch/client/RestClientSingleHostTests.java @@ -34,38 +34,42 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.http.ConnectionClosedException; -import org.apache.http.Header; -import org.apache.http.HttpEntity; -import org.apache.http.HttpEntityEnclosingRequest; -import org.apache.http.HttpHost; -import org.apache.http.HttpRequest; -import org.apache.http.HttpResponse; -import org.apache.http.ProtocolVersion; -import org.apache.http.StatusLine; -import org.apache.http.client.methods.HttpHead; -import org.apache.http.client.methods.HttpOptions; -import org.apache.http.client.methods.HttpPatch; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.client.methods.HttpPut; -import org.apache.http.client.methods.HttpTrace; -import org.apache.http.client.methods.HttpUriRequest; -import org.apache.http.client.protocol.HttpClientContext; -import org.apache.http.client.utils.URIBuilder; -import org.apache.http.concurrent.FutureCallback; -import org.apache.http.conn.ConnectTimeoutException; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; -import org.apache.http.impl.nio.client.CloseableHttpAsyncClient; -import org.apache.http.message.BasicHttpResponse; -import org.apache.http.message.BasicStatusLine; -import org.apache.http.nio.protocol.HttpAsyncRequestProducer; -import org.apache.http.nio.protocol.HttpAsyncResponseConsumer; -import org.apache.http.util.EntityUtils; +import org.apache.hc.client5.http.ConnectTimeoutException; +import org.apache.hc.client5.http.classic.methods.HttpDelete; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.client5.http.classic.methods.HttpHead; +import org.apache.hc.client5.http.classic.methods.HttpOptions; +import org.apache.hc.client5.http.classic.methods.HttpPatch; +import org.apache.hc.client5.http.classic.methods.HttpPost; +import org.apache.hc.client5.http.classic.methods.HttpPut; +import org.apache.hc.client5.http.classic.methods.HttpTrace; +import org.apache.hc.client5.http.classic.methods.HttpUriRequest; +import org.apache.hc.client5.http.impl.async.CloseableHttpAsyncClient; +import org.apache.hc.core5.concurrent.FutureCallback; +import 
org.apache.hc.core5.function.Supplier; +import org.apache.hc.core5.http.ClassicHttpRequest; +import org.apache.hc.core5.http.ClassicHttpResponse; +import org.apache.hc.core5.http.ConnectionClosedException; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.HttpRequest; +import org.apache.hc.core5.http.io.entity.EntityUtils; +import org.apache.hc.core5.http.io.entity.StringEntity; +import org.apache.hc.core5.http.message.BasicClassicHttpResponse; +import org.apache.hc.core5.http.nio.AsyncPushConsumer; +import org.apache.hc.core5.http.nio.AsyncRequestProducer; +import org.apache.hc.core5.http.nio.AsyncResponseConsumer; +import org.apache.hc.core5.http.nio.HandlerFactory; +import org.apache.hc.core5.http.protocol.HttpContext; +import org.apache.hc.core5.io.CloseMode; +import org.apache.hc.core5.net.URIBuilder; +import org.apache.hc.core5.reactor.IOReactorStatus; +import org.apache.hc.core5.util.TimeValue; import org.junit.After; import org.junit.Before; -import org.mockito.ArgumentCaptor; -import org.mockito.stubbing.Answer; +import org.opensearch.client.http.HttpUriRequestProducer; import javax.net.ssl.SSLHandshakeException; import java.io.IOException; @@ -85,6 +89,7 @@ import java.util.concurrent.Executors; import java.util.concurrent.Future; import java.util.concurrent.atomic.AtomicReference; +import java.util.concurrent.atomic.LongAdder; import static java.util.Collections.singletonList; import static org.opensearch.client.RestClientTestUtil.getAllErrorStatusCodes; @@ -100,12 +105,6 @@ import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; -import static org.mockito.Mockito.any; -import static org.mockito.Mockito.nullable; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; /** * Tests for basic functionality of {@link RestClient} against one single host: tests http requests being sent, headers, @@ -122,10 +121,17 @@ public class RestClientSingleHostTests extends RestClientTestCase { private CloseableHttpAsyncClient httpClient; private HostsTrackingFailureListener failureListener; private boolean strictDeprecationMode; + private LongAdder requests; + private AtomicReference requestProducerCapture; @Before public void createRestClient() { - httpClient = mockHttpClient(exec); + requests = new LongAdder(); + requestProducerCapture = new AtomicReference<>(); + httpClient = mockHttpClient(exec, (target, requestProducer, responseConsumer, pushHandlerFactory, context, callback) -> { + requests.increment(); + requestProducerCapture.set(requestProducer); + }); defaultHeaders = RestClientTestUtil.randomHeaders(getRandom(), "Header-default"); node = new Node(new HttpHost("localhost", 9200)); failureListener = new HostsTrackingFailureListener(); @@ -143,41 +149,78 @@ public void createRestClient() { ); } + interface CloseableHttpAsyncClientListener { + void onExecute( + HttpHost target, + AsyncRequestProducer requestProducer, + AsyncResponseConsumer responseConsumer, + HandlerFactory pushHandlerFactory, + HttpContext context, + FutureCallback callback + ); + } + @SuppressWarnings("unchecked") - static CloseableHttpAsyncClient mockHttpClient(final ExecutorService exec) { - CloseableHttpAsyncClient httpClient = mock(CloseableHttpAsyncClient.class); - when( - 
httpClient.execute( - any(HttpAsyncRequestProducer.class), - any(HttpAsyncResponseConsumer.class), - any(HttpClientContext.class), - nullable(FutureCallback.class) - ) - ).thenAnswer((Answer>) invocationOnMock -> { - final HttpAsyncRequestProducer requestProducer = (HttpAsyncRequestProducer) invocationOnMock.getArguments()[0]; - final FutureCallback futureCallback = (FutureCallback) invocationOnMock.getArguments()[3]; - // Call the callback asynchronous to better simulate how async http client works - return exec.submit(() -> { - if (futureCallback != null) { - try { - HttpResponse httpResponse = responseOrException(requestProducer); - futureCallback.completed(httpResponse); - } catch (Exception e) { - futureCallback.failed(e); + static CloseableHttpAsyncClient mockHttpClient(final ExecutorService exec, final CloseableHttpAsyncClientListener... listeners) { + CloseableHttpAsyncClient httpClient = new CloseableHttpAsyncClient() { + @Override + public void close() throws IOException {} + + @Override + public void close(CloseMode closeMode) {} + + @Override + public void start() {} + + @Override + public void register(String hostname, String uriPattern, Supplier supplier) {} + + @Override + public void initiateShutdown() {} + + @Override + public IOReactorStatus getStatus() { + return null; + } + + @Override + protected Future doExecute( + HttpHost target, + AsyncRequestProducer requestProducer, + AsyncResponseConsumer responseConsumer, + HandlerFactory pushHandlerFactory, + HttpContext context, + FutureCallback callback + ) { + Arrays.stream(listeners) + .forEach(l -> l.onExecute(target, requestProducer, responseConsumer, pushHandlerFactory, context, callback)); + // Call the callback asynchronous to better simulate how async http client works + return exec.submit(() -> { + if (callback != null) { + try { + ClassicHttpResponse httpResponse = responseOrException(requestProducer); + callback.completed((T) httpResponse); + } catch (Exception e) { + callback.failed(e); + } + return null; } - return null; - } - return responseOrException(requestProducer); - }); - }); + return (T) responseOrException(requestProducer); + }); + } + + @Override + public void awaitShutdown(TimeValue waitTime) throws InterruptedException {} + }; + return httpClient; } - private static HttpResponse responseOrException(HttpAsyncRequestProducer requestProducer) throws Exception { - final HttpUriRequest request = (HttpUriRequest) requestProducer.generateRequest(); - final HttpHost httpHost = requestProducer.getTarget(); + private static ClassicHttpResponse responseOrException(AsyncRequestProducer requestProducer) throws Exception { + final ClassicHttpRequest request = getRequest(requestProducer); + final HttpHost httpHost = new HttpHost(request.getAuthority()); // return the desired status code or exception depending on the path - switch (request.getURI().getPath()) { + switch (request.getRequestUri()) { case "/soe": throw new SocketTimeoutException(httpHost.toString()); case "/coe": @@ -193,20 +236,17 @@ private static HttpResponse responseOrException(HttpAsyncRequestProducer request case "/runtime": throw new RuntimeException(); default: - int statusCode = Integer.parseInt(request.getURI().getPath().substring(1)); - StatusLine statusLine = new BasicStatusLine(new ProtocolVersion("http", 1, 1), statusCode, ""); + int statusCode = Integer.parseInt(request.getRequestUri().substring(1)); - final HttpResponse httpResponse = new BasicHttpResponse(statusLine); + final ClassicHttpResponse httpResponse = new 
BasicClassicHttpResponse(statusCode, ""); // return the same body that was sent - if (request instanceof HttpEntityEnclosingRequest) { - HttpEntity entity = ((HttpEntityEnclosingRequest) request).getEntity(); - if (entity != null) { - assertTrue("the entity is not repeatable, cannot set it to the response directly", entity.isRepeatable()); - httpResponse.setEntity(entity); - } + HttpEntity entity = request.getEntity(); + if (entity != null) { + assertTrue("the entity is not repeatable, cannot set it to the response directly", entity.isRepeatable()); + httpResponse.setEntity(entity); } // return the same headers that were sent - httpResponse.setHeaders(request.getAllHeaders()); + httpResponse.setHeaders(request.getHeaders()); return httpResponse; } } @@ -224,26 +264,20 @@ public void shutdownExec() { */ @SuppressWarnings("unchecked") public void testInternalHttpRequest() throws Exception { - ArgumentCaptor requestArgumentCaptor = ArgumentCaptor.forClass(HttpAsyncRequestProducer.class); int times = 0; for (String httpMethod : getHttpMethods()) { - HttpUriRequest expectedRequest = performRandomRequest(httpMethod); - verify(httpClient, times(++times)).execute( - requestArgumentCaptor.capture(), - any(HttpAsyncResponseConsumer.class), - any(HttpClientContext.class), - nullable(FutureCallback.class) - ); - HttpUriRequest actualRequest = (HttpUriRequest) requestArgumentCaptor.getValue().generateRequest(); - assertEquals(expectedRequest.getURI(), actualRequest.getURI()); - assertEquals(expectedRequest.getClass(), actualRequest.getClass()); - assertArrayEquals(expectedRequest.getAllHeaders(), actualRequest.getAllHeaders()); - if (expectedRequest instanceof HttpEntityEnclosingRequest) { - HttpEntity expectedEntity = ((HttpEntityEnclosingRequest) expectedRequest).getEntity(); - if (expectedEntity != null) { - HttpEntity actualEntity = ((HttpEntityEnclosingRequest) actualRequest).getEntity(); - assertEquals(EntityUtils.toString(expectedEntity), EntityUtils.toString(actualEntity)); - } + ClassicHttpRequest expectedRequest = performRandomRequest(httpMethod); + assertThat(requests.intValue(), equalTo(++times)); + + ClassicHttpRequest actualRequest = getRequest(requestProducerCapture.get()); + assertEquals(expectedRequest.getRequestUri(), actualRequest.getRequestUri()); + assertEquals(expectedRequest.getMethod(), actualRequest.getMethod()); + assertArrayEquals(expectedRequest.getHeaders(), actualRequest.getHeaders()); + + HttpEntity expectedEntity = expectedRequest.getEntity(); + if (expectedEntity != null) { + HttpEntity actualEntity = actualRequest.getEntity(); + assertEquals(EntityUtils.toString(expectedEntity), EntityUtils.toString(actualEntity)); } } } @@ -414,14 +448,14 @@ public void testBody() throws Exception { } } } - for (String method : Arrays.asList("HEAD", "OPTIONS", "TRACE")) { + for (String method : Arrays.asList("TRACE")) { Request request = new Request(method, "/" + randomStatusCode(getRandom())); request.setEntity(entity); try { performRequestSyncOrAsync(restClient, request); fail("request should have failed"); - } catch (UnsupportedOperationException e) { - assertThat(e.getMessage(), equalTo(method + " with body is not supported")); + } catch (IllegalStateException e) { + assertThat(e.getMessage(), equalTo(method + " requests may not include an entity.")); } } } @@ -587,10 +621,10 @@ private HttpUriRequest performRandomRequest(String method) throws Exception { HttpUriRequest expectedRequest; switch (method) { case "DELETE": - expectedRequest = new HttpDeleteWithEntity(uri); + 
expectedRequest = new HttpDelete(uri); break; case "GET": - expectedRequest = new HttpGetWithEntity(uri); + expectedRequest = new HttpGet(uri); break; case "HEAD": expectedRequest = new HttpHead(uri); @@ -614,14 +648,14 @@ private HttpUriRequest performRandomRequest(String method) throws Exception { throw new UnsupportedOperationException("method not supported: " + method); } - if (expectedRequest instanceof HttpEntityEnclosingRequest && getRandom().nextBoolean()) { + if (getRandom().nextBoolean() && !(expectedRequest instanceof HttpTrace /* no entity */)) { HttpEntity entity = new StringEntity(randomAsciiAlphanumOfLengthBetween(10, 100), ContentType.APPLICATION_JSON); - ((HttpEntityEnclosingRequest) expectedRequest).setEntity(entity); + expectedRequest.setEntity(entity); request.setEntity(entity); } final Set uniqueNames = new HashSet<>(); - if (randomBoolean()) { + if (randomBoolean() && !(expectedRequest instanceof HttpTrace /* no entity */)) { Header[] headers = RestClientTestUtil.randomHeaders(getRandom(), "Header"); RequestOptions.Builder options = request.getOptions().toBuilder(); for (Header header : headers) { @@ -698,4 +732,9 @@ private static void assertExceptionStackContainsCallingMethod(Throwable t) { t.printStackTrace(new PrintWriter(stack)); fail("didn't find the calling method (looks like " + myMethod + ") in:\n" + stack); } + + private static ClassicHttpRequest getRequest(AsyncRequestProducer requestProducer) throws NoSuchFieldException, IllegalAccessException { + assertThat(requestProducer, instanceOf(HttpUriRequestProducer.class)); + return ((HttpUriRequestProducer) requestProducer).getRequest(); + } } diff --git a/client/rest/src/test/java/org/opensearch/client/RestClientTests.java b/client/rest/src/test/java/org/opensearch/client/RestClientTests.java index ca761dcb6b9b6..dd51da3a30d8c 100644 --- a/client/rest/src/test/java/org/opensearch/client/RestClientTests.java +++ b/client/rest/src/test/java/org/opensearch/client/RestClientTests.java @@ -32,12 +32,13 @@ package org.opensearch.client; -import org.apache.http.Header; -import org.apache.http.HttpHost; -import org.apache.http.client.AuthCache; -import org.apache.http.impl.auth.BasicScheme; -import org.apache.http.impl.client.BasicAuthCache; -import org.apache.http.impl.nio.client.CloseableHttpAsyncClient; +import org.apache.hc.client5.http.auth.AuthCache; +import org.apache.hc.client5.http.impl.async.CloseableHttpAsyncClient; +import org.apache.hc.client5.http.impl.auth.BasicAuthCache; +import org.apache.hc.client5.http.impl.auth.BasicScheme; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.reactor.IOReactorStatus; import org.opensearch.client.RestClient.NodeTuple; import java.io.IOException; @@ -410,10 +411,10 @@ public void testIsRunning() { CloseableHttpAsyncClient client = mock(CloseableHttpAsyncClient.class); RestClient restClient = new RestClient(client, new Header[] {}, nodes, null, null, null, false, false); - when(client.isRunning()).thenReturn(true); + when(client.getStatus()).thenReturn(IOReactorStatus.ACTIVE); assertTrue(restClient.isRunning()); - when(client.isRunning()).thenReturn(false); + when(client.getStatus()).thenReturn(IOReactorStatus.INACTIVE); assertFalse(restClient.isRunning()); } diff --git a/client/rest/src/test/java/org/opensearch/client/documentation/RestClientDocumentation.java b/client/rest/src/test/java/org/opensearch/client/documentation/RestClientDocumentation.java index 066419844f048..f4c1c98dd4ce9 100644 --- 
a/client/rest/src/test/java/org/opensearch/client/documentation/RestClientDocumentation.java +++ b/client/rest/src/test/java/org/opensearch/client/documentation/RestClientDocumentation.java @@ -32,23 +32,28 @@ package org.opensearch.client.documentation; -import org.apache.http.Header; -import org.apache.http.HttpEntity; -import org.apache.http.HttpHost; -import org.apache.http.RequestLine; -import org.apache.http.auth.AuthScope; -import org.apache.http.auth.UsernamePasswordCredentials; -import org.apache.http.client.CredentialsProvider; -import org.apache.http.client.config.RequestConfig; -import org.apache.http.entity.ContentType; -import org.apache.http.impl.client.BasicCredentialsProvider; -import org.apache.http.impl.nio.client.HttpAsyncClientBuilder; -import org.apache.http.impl.nio.reactor.IOReactorConfig; -import org.apache.http.message.BasicHeader; -import org.apache.http.nio.entity.NStringEntity; -import org.apache.http.ssl.SSLContextBuilder; -import org.apache.http.ssl.SSLContexts; -import org.apache.http.util.EntityUtils; +import org.apache.hc.client5.http.auth.AuthScope; +import org.apache.hc.client5.http.auth.UsernamePasswordCredentials; +import org.apache.hc.client5.http.config.RequestConfig; +import org.apache.hc.client5.http.impl.async.HttpAsyncClientBuilder; +import org.apache.hc.client5.http.impl.auth.BasicCredentialsProvider; +import org.apache.hc.client5.http.impl.nio.PoolingAsyncClientConnectionManager; +import org.apache.hc.client5.http.impl.nio.PoolingAsyncClientConnectionManagerBuilder; +import org.apache.hc.client5.http.ssl.ClientTlsStrategyBuilder; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.io.entity.EntityUtils; +import org.apache.hc.core5.http.io.entity.StringEntity; +import org.apache.hc.core5.http.message.BasicHeader; +import org.apache.hc.core5.http.message.RequestLine; +import org.apache.hc.core5.http.nio.ssl.TlsStrategy; +import org.apache.hc.core5.reactor.IOReactorConfig; +import org.apache.hc.core5.ssl.SSLContextBuilder; +import org.apache.hc.core5.ssl.SSLContexts; +import org.apache.hc.core5.util.Timeout; import org.opensearch.client.Cancellable; import org.opensearch.client.HttpAsyncResponseConsumerFactory; import org.opensearch.client.Node; @@ -109,12 +114,12 @@ public class RestClientDocumentation { // end::rest-client-options-singleton @SuppressWarnings("unused") - public void usage() throws IOException, InterruptedException { + public void usage() throws IOException, InterruptedException, ParseException { //tag::rest-client-init RestClient restClient = RestClient.builder( - new HttpHost("localhost", 9200, "http"), - new HttpHost("localhost", 9201, "http")).build(); + new HttpHost("http", "localhost", 9200), + new HttpHost("http", "localhost", 9201)).build(); //end::rest-client-init //tag::rest-client-close @@ -124,7 +129,7 @@ public void usage() throws IOException, InterruptedException { { //tag::rest-client-init-default-headers RestClientBuilder builder = RestClient.builder( - new HttpHost("localhost", 9200, "http")); + new HttpHost("http", "localhost", 9200)); Header[] defaultHeaders = new Header[]{new BasicHeader("header", "value")}; builder.setDefaultHeaders(defaultHeaders); // <1> //end::rest-client-init-default-headers @@ -132,14 +137,14 @@ public void usage() throws IOException, InterruptedException { { 
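// Note the HttpHost constructor order throughout these snippets: HttpCore 5.x takes
// the scheme first, where 4.x took it last. A minimal before/after sketch of the
// reordering applied in every builder call below:
//   4.x: new HttpHost("localhost", 9200, "http")
//   5.x: new HttpHost("http", "localhost", 9200)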
//tag::rest-client-init-node-selector RestClientBuilder builder = RestClient.builder( - new HttpHost("localhost", 9200, "http")); + new HttpHost("http", "localhost", 9200)); builder.setNodeSelector(NodeSelector.SKIP_DEDICATED_CLUSTER_MANAGERS); // <1> //end::rest-client-init-node-selector } { //tag::rest-client-init-allocation-aware-selector RestClientBuilder builder = RestClient.builder( - new HttpHost("localhost", 9200, "http")); + new HttpHost("http", "localhost", 9200)); builder.setNodeSelector(new NodeSelector() { // <1> @Override public void select(Iterable nodes) { @@ -173,7 +178,7 @@ public void select(Iterable nodes) { { //tag::rest-client-init-failure-listener RestClientBuilder builder = RestClient.builder( - new HttpHost("localhost", 9200, "http")); + new HttpHost("http", "localhost", 9200)); builder.setFailureListener(new RestClient.FailureListener() { @Override public void onFailure(Node node) { @@ -185,13 +190,13 @@ public void onFailure(Node node) { { //tag::rest-client-init-request-config-callback RestClientBuilder builder = RestClient.builder( - new HttpHost("localhost", 9200, "http")); + new HttpHost("http", "localhost", 9200)); builder.setRequestConfigCallback( new RestClientBuilder.RequestConfigCallback() { @Override public RequestConfig.Builder customizeRequestConfig( RequestConfig.Builder requestConfigBuilder) { - return requestConfigBuilder.setSocketTimeout(10000); // <1> + return requestConfigBuilder.setResponseTimeout(Timeout.ofMilliseconds(10000)); // <1> } }); //end::rest-client-init-request-config-callback @@ -199,13 +204,13 @@ public RequestConfig.Builder customizeRequestConfig( { //tag::rest-client-init-client-config-callback RestClientBuilder builder = RestClient.builder( - new HttpHost("localhost", 9200, "http")); + new HttpHost("http", "localhost", 9200)); builder.setHttpClientConfigCallback(new HttpClientConfigCallback() { @Override public HttpAsyncClientBuilder customizeHttpClient( HttpAsyncClientBuilder httpClientBuilder) { return httpClientBuilder.setProxy( - new HttpHost("proxy", 9000, "http")); // <1> + new HttpHost("http", "proxy", 9000)); // <1> } }); //end::rest-client-init-client-config-callback @@ -244,7 +249,7 @@ public void onFailure(Exception exception) { request.addParameter("pretty", "true"); //end::rest-client-parameters //tag::rest-client-body - request.setEntity(new NStringEntity( + request.setEntity(new StringEntity( "{\"json\":\"text\"}", ContentType.APPLICATION_JSON)); //end::rest-client-body @@ -334,8 +339,8 @@ public void commonConfiguration() throws Exception { public RequestConfig.Builder customizeRequestConfig( RequestConfig.Builder requestConfigBuilder) { return requestConfigBuilder - .setConnectTimeout(5000) - .setSocketTimeout(60000); + .setConnectTimeout(Timeout.ofMilliseconds(5000)) + .setResponseTimeout(Timeout.ofMilliseconds(60000)); } }); //end::rest-client-config-timeouts @@ -343,8 +348,8 @@ public RequestConfig.Builder customizeRequestConfig( { //tag::rest-client-config-request-options-timeouts RequestConfig requestConfig = RequestConfig.custom() - .setConnectTimeout(5000) - .setSocketTimeout(60000) + .setConnectTimeout(Timeout.ofMilliseconds(5000)) + .setResponseTimeout(Timeout.ofMilliseconds(60000)) .build(); RequestOptions options = RequestOptions.DEFAULT.toBuilder() .setRequestConfig(requestConfig) @@ -359,7 +364,7 @@ public RequestConfig.Builder customizeRequestConfig( @Override public HttpAsyncClientBuilder customizeHttpClient( HttpAsyncClientBuilder httpClientBuilder) { - return 
httpClientBuilder.setDefaultIOReactorConfig( + return httpClientBuilder.setIOReactorConfig( IOReactorConfig.custom() .setIoThreadCount(1) .build()); @@ -369,10 +374,9 @@ public HttpAsyncClientBuilder customizeHttpClient( } { //tag::rest-client-config-basic-auth - final CredentialsProvider credentialsProvider = - new BasicCredentialsProvider(); - credentialsProvider.setCredentials(AuthScope.ANY, - new UsernamePasswordCredentials("user", "password")); + final BasicCredentialsProvider credentialsProvider = new BasicCredentialsProvider(); + credentialsProvider.setCredentials(new AuthScope(new HttpHost("localhost", 9200)), + new UsernamePasswordCredentials("user", "password".toCharArray())); RestClientBuilder builder = RestClient.builder( new HttpHost("localhost", 9200)) @@ -388,10 +392,10 @@ public HttpAsyncClientBuilder customizeHttpClient( } { //tag::rest-client-config-disable-preemptive-auth - final CredentialsProvider credentialsProvider = + final BasicCredentialsProvider credentialsProvider = new BasicCredentialsProvider(); - credentialsProvider.setCredentials(AuthScope.ANY, - new UsernamePasswordCredentials("user", "password")); + credentialsProvider.setCredentials(new AuthScope(new HttpHost("localhost", 9200)), + new UsernamePasswordCredentials("user", "password".toCharArray())); RestClientBuilder builder = RestClient.builder( new HttpHost("localhost", 9200)) @@ -418,12 +422,20 @@ public HttpAsyncClientBuilder customizeHttpClient( .loadTrustMaterial(truststore, null); final SSLContext sslContext = sslBuilder.build(); RestClientBuilder builder = RestClient.builder( - new HttpHost("localhost", 9200, "https")) + new HttpHost("https", "localhost", 9200)) .setHttpClientConfigCallback(new HttpClientConfigCallback() { @Override public HttpAsyncClientBuilder customizeHttpClient( HttpAsyncClientBuilder httpClientBuilder) { - return httpClientBuilder.setSSLContext(sslContext); + final TlsStrategy tlsStrategy = ClientTlsStrategyBuilder.create() + .setSslContext(sslContext) + .build(); + + final PoolingAsyncClientConnectionManager connectionManager = PoolingAsyncClientConnectionManagerBuilder.create() + .setTlsStrategy(tlsStrategy) + .build(); + + return httpClientBuilder.setConnectionManager(connectionManager); } }); //end::rest-client-config-encrypted-communication @@ -444,12 +456,20 @@ public HttpAsyncClientBuilder customizeHttpClient( .loadTrustMaterial(trustStore, null); final SSLContext sslContext = sslContextBuilder.build(); RestClient.builder( - new HttpHost("localhost", 9200, "https")) + new HttpHost("https", "localhost", 9200)) .setHttpClientConfigCallback(new HttpClientConfigCallback() { @Override public HttpAsyncClientBuilder customizeHttpClient( HttpAsyncClientBuilder httpClientBuilder) { - return httpClientBuilder.setSSLContext(sslContext); + final TlsStrategy tlsStrategy = ClientTlsStrategyBuilder.create() + .setSslContext(sslContext) + .build(); + + final PoolingAsyncClientConnectionManager connectionManager = PoolingAsyncClientConnectionManagerBuilder.create() + .setTlsStrategy(tlsStrategy) + .build(); + + return httpClientBuilder.setConnectionManager(connectionManager); } }); //end::rest-client-config-trust-ca-pem @@ -473,12 +493,20 @@ public HttpAsyncClientBuilder customizeHttpClient( .loadKeyMaterial(keyStore, keyStorePass.toCharArray()); final SSLContext sslContext = sslBuilder.build(); RestClientBuilder builder = RestClient.builder( - new HttpHost("localhost", 9200, "https")) + new HttpHost("https", "localhost", 9200)) .setHttpClientConfigCallback(new 
HttpClientConfigCallback() { @Override public HttpAsyncClientBuilder customizeHttpClient( HttpAsyncClientBuilder httpClientBuilder) { - return httpClientBuilder.setSSLContext(sslContext); + final TlsStrategy tlsStrategy = ClientTlsStrategyBuilder.create() + .setSslContext(sslContext) + .build(); + + final PoolingAsyncClientConnectionManager connectionManager = PoolingAsyncClientConnectionManagerBuilder.create() + .setTlsStrategy(tlsStrategy) + .build(); + + return httpClientBuilder.setConnectionManager(connectionManager); } }); //end::rest-client-config-mutual-tls-authentication @@ -486,7 +514,7 @@ public HttpAsyncClientBuilder customizeHttpClient( { //tag::rest-client-auth-bearer-token RestClientBuilder builder = RestClient.builder( - new HttpHost("localhost", 9200, "http")); + new HttpHost("http", "localhost", 9200)); Header[] defaultHeaders = new Header[]{new BasicHeader("Authorization", "Bearer u6iuAxZ0RG1Kcm5jVFI4eU4tZU9aVFEwT2F3")}; @@ -502,7 +530,7 @@ public HttpAsyncClientBuilder customizeHttpClient( (apiKeyId + ":" + apiKeySecret) .getBytes(StandardCharsets.UTF_8)); RestClientBuilder builder = RestClient.builder( - new HttpHost("localhost", 9200, "http")); + new HttpHost("http", "localhost", 9200)); Header[] defaultHeaders = new Header[]{new BasicHeader("Authorization", "ApiKey " + apiKeyAuth)}; diff --git a/client/sniffer/build.gradle b/client/sniffer/build.gradle index b7cb0d87c02d9..eb3306cf2cea2 100644 --- a/client/sniffer/build.gradle +++ b/client/sniffer/build.gradle @@ -38,8 +38,8 @@ archivesBaseName = 'opensearch-rest-client-sniffer' dependencies { api project(":client:rest") - api "org.apache.httpcomponents:httpclient:${versions.httpclient}" - api "org.apache.httpcomponents:httpcore:${versions.httpcore}" + api "org.apache.httpcomponents.client5:httpclient5:${versions.httpclient5}" + api "org.apache.httpcomponents.core5:httpcore5:${versions.httpcore5}" api "commons-codec:commons-codec:${versions.commonscodec}" api "commons-logging:commons-logging:${versions.commonslogging}" api "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" @@ -84,6 +84,7 @@ testingConventions { } thirdPartyAudit.ignoreMissingClasses( + 'org.conscrypt.Conscrypt', //commons-logging optional dependencies 'org.apache.avalon.framework.logger.Logger', 'org.apache.log.Hierarchy', diff --git a/client/sniffer/licenses/httpclient-4.5.13.jar.sha1 b/client/sniffer/licenses/httpclient-4.5.13.jar.sha1 deleted file mode 100644 index 3281e21595b39..0000000000000 --- a/client/sniffer/licenses/httpclient-4.5.13.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e5f6cae5ca7ecaac1ec2827a9e2d65ae2869cada \ No newline at end of file diff --git a/client/sniffer/licenses/httpclient5-5.1.3.jar.sha1 b/client/sniffer/licenses/httpclient5-5.1.3.jar.sha1 new file mode 100644 index 0000000000000..b18cf050ac8df --- /dev/null +++ b/client/sniffer/licenses/httpclient5-5.1.3.jar.sha1 @@ -0,0 +1 @@ +13c984b7b881afcff3a7f0bb95878724a48a4b66 \ No newline at end of file diff --git a/client/sniffer/licenses/httpcore-4.4.15.jar.sha1 b/client/sniffer/licenses/httpcore-4.4.15.jar.sha1 deleted file mode 100644 index 42a03b5d7a376..0000000000000 --- a/client/sniffer/licenses/httpcore-4.4.15.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7f2e0c573eaa7a74bac2e89b359e1f73d92a0a1d \ No newline at end of file diff --git a/client/sniffer/licenses/httpcore5-5.1.4.jar.sha1 b/client/sniffer/licenses/httpcore5-5.1.4.jar.sha1 new file mode 100644 index 0000000000000..c8981fb933736 --- /dev/null +++ b/client/sniffer/licenses/httpcore5-5.1.4.jar.sha1 @@ -0,0 +1 @@ 
+92538a62a4aacf96c9ea8992346a453e83da85fc \ No newline at end of file diff --git a/client/sniffer/src/main/java/org/opensearch/client/sniff/OpenSearchNodesSniffer.java b/client/sniffer/src/main/java/org/opensearch/client/sniff/OpenSearchNodesSniffer.java index c1a0fcf9a8acf..e6696c1fc4039 100644 --- a/client/sniffer/src/main/java/org/opensearch/client/sniff/OpenSearchNodesSniffer.java +++ b/client/sniffer/src/main/java/org/opensearch/client/sniff/OpenSearchNodesSniffer.java @@ -37,8 +37,8 @@ import com.fasterxml.jackson.core.JsonToken; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.http.HttpEntity; -import org.apache.http.HttpHost; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.HttpHost; import org.opensearch.client.Node; import org.opensearch.client.Node.Roles; import org.opensearch.client.Request; @@ -192,12 +192,12 @@ private static Node readNode(String nodeId, JsonParser parser, Scheme scheme) th publishAddressAsURI = URI.create(scheme + "://" + address); host = publishAddressAsURI.getHost(); } - publishedHost = new HttpHost(host, publishAddressAsURI.getPort(), publishAddressAsURI.getScheme()); + publishedHost = new HttpHost(publishAddressAsURI.getScheme(), host, publishAddressAsURI.getPort()); } else if (parser.currentToken() == JsonToken.START_ARRAY && "bound_address".equals(parser.getCurrentName())) { while (parser.nextToken() != JsonToken.END_ARRAY) { URI boundAddressAsURI = URI.create(scheme + "://" + parser.getValueAsString()); boundHosts.add( - new HttpHost(boundAddressAsURI.getHost(), boundAddressAsURI.getPort(), boundAddressAsURI.getScheme()) + new HttpHost(boundAddressAsURI.getScheme(), boundAddressAsURI.getHost(), boundAddressAsURI.getPort()) ); } } else if (parser.getCurrentToken() == JsonToken.START_OBJECT) { diff --git a/client/sniffer/src/test/java/org/opensearch/client/sniff/MockNodesSniffer.java b/client/sniffer/src/test/java/org/opensearch/client/sniff/MockNodesSniffer.java index cbf349e534deb..9b5e89fbeb038 100644 --- a/client/sniffer/src/test/java/org/opensearch/client/sniff/MockNodesSniffer.java +++ b/client/sniffer/src/test/java/org/opensearch/client/sniff/MockNodesSniffer.java @@ -32,7 +32,7 @@ package org.opensearch.client.sniff; -import org.apache.http.HttpHost; +import org.apache.hc.core5.http.HttpHost; import org.opensearch.client.Node; import java.util.Collections; diff --git a/client/sniffer/src/test/java/org/opensearch/client/sniff/OpenSearchNodesSnifferParseTests.java b/client/sniffer/src/test/java/org/opensearch/client/sniff/OpenSearchNodesSnifferParseTests.java index 58b60ac13dee8..fd38eceee6224 100644 --- a/client/sniffer/src/test/java/org/opensearch/client/sniff/OpenSearchNodesSnifferParseTests.java +++ b/client/sniffer/src/test/java/org/opensearch/client/sniff/OpenSearchNodesSnifferParseTests.java @@ -33,10 +33,11 @@ package org.opensearch.client.sniff; import com.fasterxml.jackson.core.JsonFactory; -import org.apache.http.HttpEntity; -import org.apache.http.HttpHost; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.InputStreamEntity; + +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.io.entity.InputStreamEntity; import org.opensearch.client.Node; import org.opensearch.client.Node.Roles; import org.opensearch.client.RestClientTestCase; diff --git 
a/client/sniffer/src/test/java/org/opensearch/client/sniff/OpenSearchNodesSnifferTests.java b/client/sniffer/src/test/java/org/opensearch/client/sniff/OpenSearchNodesSnifferTests.java index 1d06e9353726d..b678fb050e8f8 100644 --- a/client/sniffer/src/test/java/org/opensearch/client/sniff/OpenSearchNodesSnifferTests.java +++ b/client/sniffer/src/test/java/org/opensearch/client/sniff/OpenSearchNodesSnifferTests.java @@ -40,14 +40,13 @@ import com.sun.net.httpserver.HttpExchange; import com.sun.net.httpserver.HttpHandler; import com.sun.net.httpserver.HttpServer; -import org.apache.http.Consts; -import org.apache.http.HttpHost; -import org.apache.http.client.methods.HttpGet; import org.opensearch.client.Node; import org.opensearch.client.Response; import org.opensearch.client.ResponseException; import org.opensearch.client.RestClient; import org.opensearch.client.RestClientTestCase; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.core5.http.HttpHost; import org.junit.After; import org.junit.Before; @@ -56,6 +55,7 @@ import java.io.StringWriter; import java.net.InetAddress; import java.net.InetSocketAddress; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -181,7 +181,7 @@ public void handle(HttpExchange httpExchange) throws IOException { String nodesInfoBody = sniffResponse.nodesInfoBody; httpExchange.sendResponseHeaders(sniffResponse.nodesInfoResponseCode, nodesInfoBody.length()); try (OutputStream out = httpExchange.getResponseBody()) { - out.write(nodesInfoBody.getBytes(Consts.UTF_8)); + out.write(nodesInfoBody.getBytes(StandardCharsets.UTF_8)); return; } } @@ -210,14 +210,14 @@ private static SniffResponse buildSniffResponse(OpenSearchNodesSniffer.Scheme sc String nodeId = RandomStrings.randomAsciiOfLengthBetween(getRandom(), 5, 10); String host = "host" + i; int port = RandomNumbers.randomIntBetween(getRandom(), 9200, 9299); - HttpHost publishHost = new HttpHost(host, port, scheme.toString()); + HttpHost publishHost = new HttpHost(scheme.toString(), host, port); Set boundHosts = new HashSet<>(); boundHosts.add(publishHost); if (randomBoolean()) { int bound = between(1, 5); for (int b = 0; b < bound; b++) { - boundHosts.add(new HttpHost(host + b, port, scheme.toString())); + boundHosts.add(new HttpHost(scheme.toString(), host + b, port)); } } diff --git a/client/sniffer/src/test/java/org/opensearch/client/sniff/SniffOnFailureListenerTests.java b/client/sniffer/src/test/java/org/opensearch/client/sniff/SniffOnFailureListenerTests.java index e4d1058282f5c..faab6babcaca6 100644 --- a/client/sniffer/src/test/java/org/opensearch/client/sniff/SniffOnFailureListenerTests.java +++ b/client/sniffer/src/test/java/org/opensearch/client/sniff/SniffOnFailureListenerTests.java @@ -32,7 +32,7 @@ package org.opensearch.client.sniff; -import org.apache.http.HttpHost; +import org.apache.hc.core5.http.HttpHost; import org.opensearch.client.Node; import org.opensearch.client.RestClient; import org.opensearch.client.RestClientTestCase; diff --git a/client/sniffer/src/test/java/org/opensearch/client/sniff/SnifferBuilderTests.java b/client/sniffer/src/test/java/org/opensearch/client/sniff/SnifferBuilderTests.java index 25a3162e238ed..24ee540aa6364 100644 --- a/client/sniffer/src/test/java/org/opensearch/client/sniff/SnifferBuilderTests.java +++ b/client/sniffer/src/test/java/org/opensearch/client/sniff/SnifferBuilderTests.java @@ -33,7 +33,8 @@ package org.opensearch.client.sniff; import 
com.carrotsearch.randomizedtesting.generators.RandomNumbers; -import org.apache.http.HttpHost; + +import org.apache.hc.core5.http.HttpHost; import org.opensearch.client.RestClient; import org.opensearch.client.RestClientTestCase; diff --git a/client/sniffer/src/test/java/org/opensearch/client/sniff/SnifferTests.java b/client/sniffer/src/test/java/org/opensearch/client/sniff/SnifferTests.java index 304243e73c078..36923281dde6b 100644 --- a/client/sniffer/src/test/java/org/opensearch/client/sniff/SnifferTests.java +++ b/client/sniffer/src/test/java/org/opensearch/client/sniff/SnifferTests.java @@ -32,12 +32,12 @@ package org.opensearch.client.sniff; -import org.apache.http.HttpHost; import org.opensearch.client.Node; import org.opensearch.client.RestClient; import org.opensearch.client.RestClientTestCase; import org.opensearch.client.sniff.Sniffer.DefaultScheduler; import org.opensearch.client.sniff.Sniffer.Scheduler; +import org.apache.hc.core5.http.HttpHost; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; diff --git a/client/sniffer/src/test/java/org/opensearch/client/sniff/documentation/SnifferDocumentation.java b/client/sniffer/src/test/java/org/opensearch/client/sniff/documentation/SnifferDocumentation.java index 3b612aab80851..8f3e446d8aefb 100644 --- a/client/sniffer/src/test/java/org/opensearch/client/sniff/documentation/SnifferDocumentation.java +++ b/client/sniffer/src/test/java/org/opensearch/client/sniff/documentation/SnifferDocumentation.java @@ -32,7 +32,7 @@ package org.opensearch.client.sniff.documentation; -import org.apache.http.HttpHost; +import org.apache.hc.core5.http.HttpHost; import org.opensearch.client.Node; import org.opensearch.client.RestClient; import org.opensearch.client.sniff.OpenSearchNodesSniffer; @@ -69,7 +69,7 @@ public void usage() throws IOException { { //tag::sniffer-init RestClient restClient = RestClient.builder( - new HttpHost("localhost", 9200, "http")) + new HttpHost("http", "localhost", 9200)) .build(); Sniffer sniffer = Sniffer.builder(restClient).build(); //end::sniffer-init @@ -82,7 +82,7 @@ public void usage() throws IOException { { //tag::sniffer-interval RestClient restClient = RestClient.builder( - new HttpHost("localhost", 9200, "http")) + new HttpHost("http", "localhost", 9200)) .build(); Sniffer sniffer = Sniffer.builder(restClient) .setSniffIntervalMillis(60000).build(); @@ -105,7 +105,7 @@ public void usage() throws IOException { { //tag::sniffer-https RestClient restClient = RestClient.builder( - new HttpHost("localhost", 9200, "http")) + new HttpHost("http", "localhost", 9200)) .build(); NodesSniffer nodesSniffer = new OpenSearchNodesSniffer( restClient, @@ -118,7 +118,7 @@ public void usage() throws IOException { { //tag::sniff-request-timeout RestClient restClient = RestClient.builder( - new HttpHost("localhost", 9200, "http")) + new HttpHost("http", "localhost", 9200)) .build(); NodesSniffer nodesSniffer = new OpenSearchNodesSniffer( restClient, @@ -131,7 +131,7 @@ public void usage() throws IOException { { //tag::custom-nodes-sniffer RestClient restClient = RestClient.builder( - new HttpHost("localhost", 9200, "http")) + new HttpHost("http", "localhost", 9200)) .build(); NodesSniffer nodesSniffer = new NodesSniffer() { @Override diff --git a/client/test/build.gradle b/client/test/build.gradle index 07d874cf01ea7..13e9bd6b9e34a 100644 --- a/client/test/build.gradle +++ b/client/test/build.gradle @@ -35,7 +35,7 @@ sourceCompatibility = JavaVersion.VERSION_11 group = "${group}.client.test" 
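// As in the rest client and sniffer builds above, the HttpComponents 4.x artifact is
// swapped for its 5.x coordinates; a sketch of the recurring pattern (the version keys
// are defined in the project's shared versions map):
//   4.x: api "org.apache.httpcomponents:httpcore:${versions.httpcore}"
//   5.x: api "org.apache.httpcomponents.core5:httpcore5:${versions.httpcore5}"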
dependencies { - api "org.apache.httpcomponents:httpcore:${versions.httpcore}" + api "org.apache.httpcomponents.core5:httpcore5:${versions.httpcore5}" api "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" api "junit:junit:${versions.junit}" api "org.hamcrest:hamcrest:${versions.hamcrest}" diff --git a/client/test/src/main/java/org/opensearch/client/RestClientTestCase.java b/client/test/src/main/java/org/opensearch/client/RestClientTestCase.java index 2b3e867929e27..b4eacdbf88827 100644 --- a/client/test/src/main/java/org/opensearch/client/RestClientTestCase.java +++ b/client/test/src/main/java/org/opensearch/client/RestClientTestCase.java @@ -43,7 +43,8 @@ import com.carrotsearch.randomizedtesting.annotations.ThreadLeakScope; import com.carrotsearch.randomizedtesting.annotations.ThreadLeakZombies; import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite; -import org.apache.http.Header; + +import org.apache.hc.core5.http.Header; import java.util.ArrayList; import java.util.HashMap; diff --git a/client/test/src/main/java/org/opensearch/client/RestClientTestUtil.java b/client/test/src/main/java/org/opensearch/client/RestClientTestUtil.java index aeba9bde4bff4..6a01ed30e0c63 100644 --- a/client/test/src/main/java/org/opensearch/client/RestClientTestUtil.java +++ b/client/test/src/main/java/org/opensearch/client/RestClientTestUtil.java @@ -35,8 +35,9 @@ import com.carrotsearch.randomizedtesting.generators.RandomNumbers; import com.carrotsearch.randomizedtesting.generators.RandomPicks; import com.carrotsearch.randomizedtesting.generators.RandomStrings; -import org.apache.http.Header; -import org.apache.http.message.BasicHeader; + +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.message.BasicHeader; import java.util.ArrayList; import java.util.Arrays; diff --git a/distribution/archives/integ-test-zip/src/test/java/org/opensearch/test/rest/WaitForRefreshAndCloseIT.java b/distribution/archives/integ-test-zip/src/test/java/org/opensearch/test/rest/WaitForRefreshAndCloseIT.java index 37ffe32d19509..07576dacffb03 100644 --- a/distribution/archives/integ-test-zip/src/test/java/org/opensearch/test/rest/WaitForRefreshAndCloseIT.java +++ b/distribution/archives/integ-test-zip/src/test/java/org/opensearch/test/rest/WaitForRefreshAndCloseIT.java @@ -32,13 +32,14 @@ package org.opensearch.test.rest; -import org.apache.http.util.EntityUtils; import org.opensearch.action.ActionFuture; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.client.Request; import org.opensearch.client.Response; import org.opensearch.client.ResponseException; import org.opensearch.client.ResponseListener; +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.io.entity.EntityUtils; import org.junit.After; import org.junit.Before; @@ -145,6 +146,8 @@ public void onSuccess(Response response) { future.onResponse(EntityUtils.toString(response.getEntity())); } catch (IOException e) { future.onFailure(e); + } catch (ParseException e) { + future.onFailure(e); } } diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisModulePlugin.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisModulePlugin.java index 57865e15d523a..b0850d0b9144d 100644 --- a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisModulePlugin.java +++ 
b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/CommonAnalysisModulePlugin.java @@ -485,19 +485,10 @@ public List getPreConfiguredTokenFilters() { filters.add(PreConfiguredTokenFilter.singleton("dutch_stem", false, input -> new SnowballFilter(input, new DutchStemmer()))); filters.add(PreConfiguredTokenFilter.singleton("edge_ngram", false, false, input -> new EdgeNGramTokenFilter(input, 1))); filters.add(PreConfiguredTokenFilter.openSearchVersion("edgeNGram", false, false, (reader, version) -> { - if (version.onOrAfter(LegacyESVersion.V_7_0_0)) { - throw new IllegalArgumentException( - "The [edgeNGram] token filter name was deprecated in 6.4 and cannot be used in new indices. " - + "Please change the filter name to [edge_ngram] instead." - ); - } else { - deprecationLogger.deprecate( - "edgeNGram_deprecation", - "The [edgeNGram] token filter name is deprecated and will be removed in a future version. " - + "Please change the filter name to [edge_ngram] instead." - ); - } - return new EdgeNGramTokenFilter(reader, 1); + throw new IllegalArgumentException( + "The [edgeNGram] token filter name was deprecated in 6.4 and cannot be used in new indices. " + + "Please change the filter name to [edge_ngram] instead." + ); })); filters.add( PreConfiguredTokenFilter.singleton("elision", true, input -> new ElisionFilter(input, FrenchAnalyzer.DEFAULT_ARTICLES)) @@ -524,19 +515,10 @@ public List getPreConfiguredTokenFilters() { ); filters.add(PreConfiguredTokenFilter.singleton("ngram", false, false, reader -> new NGramTokenFilter(reader, 1, 2, false))); filters.add(PreConfiguredTokenFilter.openSearchVersion("nGram", false, false, (reader, version) -> { - if (version.onOrAfter(LegacyESVersion.V_7_0_0)) { - throw new IllegalArgumentException( - "The [nGram] token filter name was deprecated in 6.4 and cannot be used in new indices. " - + "Please change the filter name to [ngram] instead." - ); - } else { - deprecationLogger.deprecate( - "nGram_deprecation", - "The [nGram] token filter name is deprecated and will be removed in a future version. " - + "Please change the filter name to [ngram] instead." - ); - } - return new NGramTokenFilter(reader, 1, 2, false); + throw new IllegalArgumentException( + "The [nGram] token filter name was deprecated in 6.4 and cannot be used in new indices. " + + "Please change the filter name to [ngram] instead." 
+ ); })); filters.add(PreConfiguredTokenFilter.singleton("persian_normalization", true, PersianNormalizationFilter::new)); filters.add(PreConfiguredTokenFilter.singleton("porter_stem", false, PorterStemFilter::new)); @@ -581,18 +563,22 @@ public List getPreConfiguredTokenFilters() { ) ) ); - filters.add(PreConfiguredTokenFilter.openSearchVersion("word_delimiter_graph", false, false, (input, version) -> { - boolean adjustOffsets = version.onOrAfter(LegacyESVersion.V_7_3_0); - return new WordDelimiterGraphFilter( - input, - adjustOffsets, - WordDelimiterIterator.DEFAULT_WORD_DELIM_TABLE, - WordDelimiterGraphFilter.GENERATE_WORD_PARTS | WordDelimiterGraphFilter.GENERATE_NUMBER_PARTS - | WordDelimiterGraphFilter.SPLIT_ON_CASE_CHANGE | WordDelimiterGraphFilter.SPLIT_ON_NUMERICS - | WordDelimiterGraphFilter.STEM_ENGLISH_POSSESSIVE, - null - ); - })); + filters.add( + PreConfiguredTokenFilter.openSearchVersion( + "word_delimiter_graph", + false, + false, + (input, version) -> new WordDelimiterGraphFilter( + input, + true, + WordDelimiterIterator.DEFAULT_WORD_DELIM_TABLE, + WordDelimiterGraphFilter.GENERATE_WORD_PARTS | WordDelimiterGraphFilter.GENERATE_NUMBER_PARTS + | WordDelimiterGraphFilter.SPLIT_ON_CASE_CHANGE | WordDelimiterGraphFilter.SPLIT_ON_NUMERICS + | WordDelimiterGraphFilter.STEM_ENGLISH_POSSESSIVE, + null + ) + ) + ); return filters; } @@ -606,12 +592,12 @@ public List getPreConfiguredTokenizers() { tokenizers.add(PreConfiguredTokenizer.singleton("letter", LetterTokenizer::new)); tokenizers.add(PreConfiguredTokenizer.singleton("whitespace", WhitespaceTokenizer::new)); tokenizers.add(PreConfiguredTokenizer.singleton("ngram", NGramTokenizer::new)); - tokenizers.add(PreConfiguredTokenizer.openSearchVersion("edge_ngram", (version) -> { - if (version.onOrAfter(LegacyESVersion.V_7_3_0)) { - return new EdgeNGramTokenizer(NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE, NGramTokenizer.DEFAULT_MAX_NGRAM_SIZE); - } - return new EdgeNGramTokenizer(EdgeNGramTokenizer.DEFAULT_MIN_GRAM_SIZE, EdgeNGramTokenizer.DEFAULT_MAX_GRAM_SIZE); - })); + tokenizers.add( + PreConfiguredTokenizer.openSearchVersion( + "edge_ngram", + (version) -> new EdgeNGramTokenizer(NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE, NGramTokenizer.DEFAULT_MAX_NGRAM_SIZE) + ) + ); tokenizers.add(PreConfiguredTokenizer.singleton("pattern", () -> new PatternTokenizer(Regex.compile("\\W+", null), -1))); tokenizers.add(PreConfiguredTokenizer.singleton("thai", ThaiTokenizer::new)); // TODO deprecate and remove in API @@ -637,10 +623,7 @@ public List getPreConfiguredTokenizers() { + "Please change the tokenizer name to [edge_ngram] instead." 
); } - if (version.onOrAfter(LegacyESVersion.V_7_3_0)) { - return new EdgeNGramTokenizer(NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE, NGramTokenizer.DEFAULT_MAX_NGRAM_SIZE); - } - return new EdgeNGramTokenizer(EdgeNGramTokenizer.DEFAULT_MIN_GRAM_SIZE, EdgeNGramTokenizer.DEFAULT_MAX_GRAM_SIZE); + return new EdgeNGramTokenizer(NGramTokenizer.DEFAULT_MIN_NGRAM_SIZE, NGramTokenizer.DEFAULT_MAX_NGRAM_SIZE); })); tokenizers.add(PreConfiguredTokenizer.singleton("PathHierarchy", PathHierarchyTokenizer::new)); diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/EnglishPluralStemFilter.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/EnglishPluralStemFilter.java new file mode 100644 index 0000000000000..c30318a31527b --- /dev/null +++ b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/EnglishPluralStemFilter.java @@ -0,0 +1,182 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package org.opensearch.analysis.common; + +import org.apache.lucene.analysis.TokenFilter; +import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.en.EnglishMinimalStemFilter; +import org.apache.lucene.analysis.tokenattributes.CharTermAttribute; +import org.apache.lucene.analysis.tokenattributes.KeywordAttribute; + +import java.io.IOException; + +public final class EnglishPluralStemFilter extends TokenFilter { + private final EnglishPluralStemmer stemmer = new EnglishPluralStemmer(); + private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class); + private final KeywordAttribute keywordAttr = addAttribute(KeywordAttribute.class); + + public EnglishPluralStemFilter(TokenStream input) { + super(input); + } + + @Override + public boolean incrementToken() throws IOException { + if (input.incrementToken()) { + if (!keywordAttr.isKeyword()) { + final int newlen = stemmer.stem(termAtt.buffer(), termAtt.length()); + termAtt.setLength(newlen); + } + return true; + } else { + return false; + } + } + + /** + * Plural stemmer for English based on the {@link EnglishMinimalStemFilter} + *
<p>
+ * This stemmer removes plurals but beyond EnglishMinimalStemFilter adds
+ * four new suffix rules to remove dangling e characters:
+ * <ul>
+ * <li>xes - "boxes" becomes "box"</li>
+ * <li>sses - "dresses" becomes "dress"</li>
+ * <li>shes - "dishes" becomes "dish"</li>
+ * <li>tches - "watches" becomes "watch"</li>
+ * </ul>
+ * See https://github.com/elastic/elasticsearch/issues/42892
+ * <p>
+ * In addition the s stemmer logic is amended so that:
+ * <ul>
+ * <li>ees->ee, so that "bees" matches "bee"</li>
+ * <li>ies->y only on longer words, so that "spies" matches "spy"</li>
+ * <li>oes->o, so that "tomatoes" matches "tomato", but the e is retained for some words, e.g. "shoes" to "shoe"</li>
+ * </ul>
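+ * <p>
+ * A minimal sketch of the intended behaviour, calling the nested stemmer directly:
+ * <pre>{@code
+ * EnglishPluralStemmer stemmer = new EnglishPluralStemmer();
+ * char[] dresses = "dresses".toCharArray();
+ * int len = stemmer.stem(dresses, dresses.length); // 5, i.e. "dress"
+ * char[] shoes = "shoes".toCharArray();
+ * len = stemmer.stem(shoes, shoes.length);         // 4, i.e. "shoe" (an oes exception keeps the e)
+ * }</pre>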
+ */ + public static class EnglishPluralStemmer { + + // Words ending in oes that retain the e when stemmed + public static final char[][] oesExceptions = { "shoes".toCharArray(), "canoes".toCharArray(), "oboes".toCharArray() }; + // Words ending in ches that retain the e when stemmed + public static final char[][] chesExceptions = { + "cliches".toCharArray(), + "avalanches".toCharArray(), + "mustaches".toCharArray(), + "moustaches".toCharArray(), + "quiches".toCharArray(), + "headaches".toCharArray(), + "heartaches".toCharArray(), + "porsches".toCharArray(), + "tranches".toCharArray(), + "caches".toCharArray() }; + + @SuppressWarnings("fallthrough") + public int stem(char s[], int len) { + if (len < 3 || s[len - 1] != 's') return len; + + switch (s[len - 2]) { + case 'u': + case 's': + return len; + case 'e': + // Modified ies->y logic from original s-stemmer - only work on strings > 4 + // so spies -> spy still but pies->pie. + // The original code also special-cased aies and eies for no good reason as far as I can tell. + // ( no words of consequence - eg http://www.thefreedictionary.com/words-that-end-in-aies ) + if (len > 4 && s[len - 3] == 'i') { + s[len - 3] = 'y'; + return len - 2; + } + + // Suffix rules to remove any dangling "e" + if (len > 3) { + // xes (but >1 prefix so we can stem "boxes->box" but keep "axes->axe") + if (len > 4 && s[len - 3] == 'x') { + return len - 2; + } + // oes + if (len > 3 && s[len - 3] == 'o') { + if (isException(s, len, oesExceptions)) { + // Only remove the S + return len - 1; + } + // Remove the es + return len - 2; + } + if (len > 4) { + // shes/sses + if (s[len - 4] == 's' && (s[len - 3] == 'h' || s[len - 3] == 's')) { + return len - 2; + } + + // ches + if (len > 4) { + if (s[len - 4] == 'c' && s[len - 3] == 'h') { + if (isException(s, len, chesExceptions)) { + // Only remove the S + return len - 1; + } + // Remove the es + return len - 2; + + } + } + } + } + + default: + return len - 1; + } + } + + private boolean isException(char[] s, int len, char[][] exceptionsList) { + for (char[] oesRule : exceptionsList) { + int rulePos = oesRule.length - 1; + int sPos = len - 1; + boolean matched = true; + while (rulePos >= 0 && sPos >= 0) { + if (oesRule[rulePos] != s[sPos]) { + matched = false; + break; + } + rulePos--; + sPos--; + } + if (matched) { + return true; + } + } + return false; + } + } + +} diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/NGramTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/NGramTokenFilterFactory.java index 218bb74b84667..a6adf680a454c 100644 --- a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/NGramTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/NGramTokenFilterFactory.java @@ -34,7 +34,6 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.ngram.NGramTokenFilter; -import org.opensearch.LegacyESVersion; import org.opensearch.common.settings.Settings; import org.opensearch.env.Environment; import org.opensearch.index.IndexSettings; @@ -54,25 +53,15 @@ public class NGramTokenFilterFactory extends AbstractTokenFilterFactory { this.maxGram = settings.getAsInt("max_gram", 2); int ngramDiff = maxGram - minGram; if (ngramDiff > maxAllowedNgramDiff) { - if (indexSettings.getIndexVersionCreated().onOrAfter(LegacyESVersion.V_7_0_0)) { - throw new IllegalArgumentException( - "The difference between max_gram and min_gram in NGram Tokenizer must be less 
than or equal to: [" - + maxAllowedNgramDiff - + "] but was [" - + ngramDiff - + "]. This limit can be set by changing the [" - + IndexSettings.MAX_NGRAM_DIFF_SETTING.getKey() - + "] index level setting." - ); - } else { - deprecationLogger.deprecate( - "ngram_big_difference", - "Deprecated big difference between max_gram and min_gram in NGram Tokenizer," - + "expected difference must be less than or equal to: [" - + maxAllowedNgramDiff - + "]" - ); - } + throw new IllegalArgumentException( + "The difference between max_gram and min_gram in NGram Tokenizer must be less than or equal to: [" + + maxAllowedNgramDiff + + "] but was [" + + ngramDiff + + "]. This limit can be set by changing the [" + + IndexSettings.MAX_NGRAM_DIFF_SETTING.getKey() + + "] index level setting." + ); } preserveOriginal = settings.getAsBoolean(PRESERVE_ORIG_KEY, false); } diff --git a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/StemmerTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/StemmerTokenFilterFactory.java index 5d96f01265cf6..fc045447e159e 100644 --- a/modules/analysis-common/src/main/java/org/opensearch/analysis/common/StemmerTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/opensearch/analysis/common/StemmerTokenFilterFactory.java @@ -154,6 +154,8 @@ public TokenStream create(TokenStream tokenStream) { return new SnowballFilter(tokenStream, new EnglishStemmer()); } else if ("minimal_english".equalsIgnoreCase(language) || "minimalEnglish".equalsIgnoreCase(language)) { return new EnglishMinimalStemFilter(tokenStream); + } else if ("plural_english".equalsIgnoreCase(language) || "pluralEnglish".equalsIgnoreCase(language)) { + return new EnglishPluralStemFilter(tokenStream); } else if ("possessive_english".equalsIgnoreCase(language) || "possessiveEnglish".equalsIgnoreCase(language)) { return new EnglishPossessiveFilter(tokenStream); diff --git a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/StemmerTokenFilterFactoryTests.java b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/StemmerTokenFilterFactoryTests.java index 2cd7b74cd8c35..18d3727475065 100644 --- a/modules/analysis-common/src/test/java/org/opensearch/analysis/common/StemmerTokenFilterFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/opensearch/analysis/common/StemmerTokenFilterFactoryTests.java @@ -111,6 +111,83 @@ public void testPorter2FilterFactory() throws IOException { } } + public void testEnglishPluralFilter() throws IOException { + int iters = scaledRandomIntBetween(20, 100); + for (int i = 0; i < iters; i++) { + + Version v = VersionUtils.randomVersion(random()); + Settings settings = Settings.builder() + .put("index.analysis.filter.my_plurals.type", "stemmer") + .put("index.analysis.filter.my_plurals.language", "plural_english") + .put("index.analysis.analyzer.my_plurals.tokenizer", "whitespace") + .put("index.analysis.analyzer.my_plurals.filter", "my_plurals") + .put(SETTING_VERSION_CREATED, v) + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) + .build(); + + OpenSearchTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(settings, PLUGIN); + TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_plurals"); + assertThat(tokenFilter, instanceOf(StemmerTokenFilterFactory.class)); + Tokenizer tokenizer = new WhitespaceTokenizer(); + tokenizer.setReader(new StringReader("dresses")); + TokenStream create = 
tokenFilter.create(tokenizer); + IndexAnalyzers indexAnalyzers = analysis.indexAnalyzers; + NamedAnalyzer analyzer = indexAnalyzers.get("my_plurals"); + assertThat(create, instanceOf(EnglishPluralStemFilter.class)); + + // Check old EnglishMinimalStemmer ("S" stemmer) logic + assertAnalyzesTo(analyzer, "phones", new String[] { "phone" }); + assertAnalyzesTo(analyzer, "horses", new String[] { "horse" }); + assertAnalyzesTo(analyzer, "cameras", new String[] { "camera" }); + + // The original s stemmer gives up on stemming oes words because English has no fixed rule for the stem + // (see https://howtospell.co.uk/making-O-words-plural) + // This stemmer removes the es but retains e for a small number of exceptions + assertAnalyzesTo(analyzer, "mosquitoes", new String[] { "mosquito" }); + assertAnalyzesTo(analyzer, "heroes", new String[] { "hero" }); + // oes exceptions that retain the e. + assertAnalyzesTo(analyzer, "shoes", new String[] { "shoe" }); + assertAnalyzesTo(analyzer, "horseshoes", new String[] { "horseshoe" }); + assertAnalyzesTo(analyzer, "canoes", new String[] { "canoe" }); + assertAnalyzesTo(analyzer, "oboes", new String[] { "oboe" }); + + // Check improved EnglishPluralStemFilter logic + // sses + assertAnalyzesTo(analyzer, "dresses", new String[] { "dress" }); + assertAnalyzesTo(analyzer, "possess", new String[] { "possess" }); + assertAnalyzesTo(analyzer, "possesses", new String[] { "possess" }); + // xes + assertAnalyzesTo(analyzer, "boxes", new String[] { "box" }); + assertAnalyzesTo(analyzer, "axes", new String[] { "axe" }); + // shes + assertAnalyzesTo(analyzer, "dishes", new String[] { "dish" }); + assertAnalyzesTo(analyzer, "washes", new String[] { "wash" }); + // ees + assertAnalyzesTo(analyzer, "employees", new String[] { "employee" }); + assertAnalyzesTo(analyzer, "bees", new String[] { "bee" }); + // tches + assertAnalyzesTo(analyzer, "watches", new String[] { "watch" }); + assertAnalyzesTo(analyzer, "itches", new String[] { "itch" }); + // ies->y but only for length > 4 + assertAnalyzesTo(analyzer, "spies", new String[] { "spy" }); + assertAnalyzesTo(analyzer, "ties", new String[] { "tie" }); + assertAnalyzesTo(analyzer, "lies", new String[] { "lie" }); + assertAnalyzesTo(analyzer, "pies", new String[] { "pie" }); + assertAnalyzesTo(analyzer, "dies", new String[] { "die" }); + + assertAnalyzesTo(analyzer, "lunches", new String[] { "lunch" }); + assertAnalyzesTo(analyzer, "avalanches", new String[] { "avalanche" }); + assertAnalyzesTo(analyzer, "headaches", new String[] { "headache" }); + assertAnalyzesTo(analyzer, "caches", new String[] { "cache" }); + assertAnalyzesTo(analyzer, "beaches", new String[] { "beach" }); + assertAnalyzesTo(analyzer, "britches", new String[] { "britch" }); + assertAnalyzesTo(analyzer, "cockroaches", new String[] { "cockroach" }); + assertAnalyzesTo(analyzer, "cliches", new String[] { "cliche" }); + assertAnalyzesTo(analyzer, "quiches", new String[] { "quiche" }); + + } + } + public void testMultipleLanguagesThrowsException() throws IOException { Version v = VersionUtils.randomVersion(random()); Settings settings = Settings.builder() diff --git a/modules/geo/src/main/java/org/opensearch/geo/GeoModulePlugin.java b/modules/geo/src/main/java/org/opensearch/geo/GeoModulePlugin.java index 77abba7f54677..8ca1d2a0c214f 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/GeoModulePlugin.java +++ b/modules/geo/src/main/java/org/opensearch/geo/GeoModulePlugin.java @@ -35,9 +35,8 @@ import
org.opensearch.geo.search.aggregations.bucket.composite.GeoTileGridValuesSourceBuilder; import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoHashGridAggregationBuilder; import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoTileGridAggregationBuilder; -import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoTileGridAggregator; -import org.opensearch.geo.search.aggregations.bucket.geogrid.InternalGeoHashGrid; -import org.opensearch.geo.search.aggregations.bucket.geogrid.InternalGeoTileGrid; +import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoHashGrid; +import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoTileGrid; import org.opensearch.geo.search.aggregations.metrics.GeoBounds; import org.opensearch.geo.search.aggregations.metrics.GeoBoundsAggregationBuilder; import org.opensearch.geo.search.aggregations.metrics.GeoBoundsGeoShapeAggregator; @@ -78,18 +77,18 @@ public List getAggregations() { GeoHashGridAggregationBuilder.NAME, GeoHashGridAggregationBuilder::new, GeoHashGridAggregationBuilder.PARSER - ).addResultReader(InternalGeoHashGrid::new).setAggregatorRegistrar(GeoHashGridAggregationBuilder::registerAggregators); + ).addResultReader(GeoHashGrid::new).setAggregatorRegistrar(GeoHashGridAggregationBuilder::registerAggregators); final AggregationSpec geoTileGrid = new AggregationSpec( GeoTileGridAggregationBuilder.NAME, GeoTileGridAggregationBuilder::new, GeoTileGridAggregationBuilder.PARSER - ).addResultReader(InternalGeoTileGrid::new).setAggregatorRegistrar(GeoTileGridAggregationBuilder::registerAggregators); + ).addResultReader(GeoTileGrid::new).setAggregatorRegistrar(GeoTileGridAggregationBuilder::registerAggregators); return List.of(geoBounds, geoHashGrid, geoTileGrid); } /** - * Registering the {@link GeoTileGridAggregator} in the {@link CompositeAggregation}. + * Registering the geotile grid in the {@link CompositeAggregation}. * * @return a {@link List} of {@link CompositeAggregationSpec} */ diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoGrid.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/BaseGeoGrid.java similarity index 72% rename from modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoGrid.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/BaseGeoGrid.java index 9dbed7b27307a..b58c19a7186e6 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoGrid.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/BaseGeoGrid.java @@ -54,30 +54,30 @@ * All geo-grid hash-encoding in a grid are of the same precision and held internally as a single long * for efficiency's sake. 
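To make the "single long" remark in the javadoc above concrete, the sketch below packs a geotile-style (precision, x, y) cell into one long and unpacks it again. The bit layout here (6 bits of precision on top, 29 bits per axis) is an assumption chosen for illustration, not necessarily the exact encoding the geo module uses.

// Illustrative only: one possible way to hold a grid cell in a single long.
// Assumes precision fits in 6 bits and x/y each fit in 29 bits.
public final class GridKeySketch {
    private static final int AXIS_BITS = 29;

    static long encode(int precision, long x, long y) {
        // precision in the top bits, x in the middle, y at the bottom
        return ((long) precision << (2 * AXIS_BITS)) | (x << AXIS_BITS) | y;
    }

    static int precision(long key) {
        return (int) (key >>> (2 * AXIS_BITS));
    }

    static long x(long key) {
        return (key >>> AXIS_BITS) & ((1L << AXIS_BITS) - 1);
    }

    static long y(long key) {
        return key & ((1L << AXIS_BITS) - 1);
    }

    public static void main(String[] args) {
        long key = encode(7, 35, 112);
        System.out.println(precision(key) + "/" + x(key) + "/" + y(key)); // 7/35/112
    }
}

Keeping the whole cell in one primitive is what lets collection work on raw longs (via LongKeyedBucketOrds, as in the aggregator below) and only materialize bucket objects when results are built.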
* - * @opensearch.internal + * @opensearch.api */ -public abstract class InternalGeoGrid extends InternalMultiBucketAggregation< - InternalGeoGrid, - InternalGeoGridBucket> implements GeoGrid { +public abstract class BaseGeoGrid extends InternalMultiBucketAggregation + implements + GeoGrid { protected final int requiredSize; - protected final List buckets; + protected final List buckets; - InternalGeoGrid(String name, int requiredSize, List buckets, Map metadata) { + protected BaseGeoGrid(String name, int requiredSize, List buckets, Map metadata) { super(name, metadata); this.requiredSize = requiredSize; this.buckets = buckets; } - abstract Writeable.Reader getBucketReader(); + protected abstract Writeable.Reader getBucketReader(); /** * Read from a stream. */ - public InternalGeoGrid(StreamInput in) throws IOException { + public BaseGeoGrid(StreamInput in) throws IOException { super(in); requiredSize = readSize(in); - buckets = (List) in.readList(getBucketReader()); + buckets = (List) in.readList(getBucketReader()); } @Override @@ -86,24 +86,24 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeList(buckets); } - abstract InternalGeoGrid create(String name, int requiredSize, List buckets, Map metadata); + protected abstract BaseGeoGrid create(String name, int requiredSize, List buckets, Map metadata); @Override - public List getBuckets() { + public List getBuckets() { return unmodifiableList(buckets); } @Override - public InternalGeoGrid reduce(List aggregations, ReduceContext reduceContext) { - LongObjectPagedHashMap> buckets = null; + public BaseGeoGrid reduce(List aggregations, ReduceContext reduceContext) { + LongObjectPagedHashMap> buckets = null; for (InternalAggregation aggregation : aggregations) { - InternalGeoGrid grid = (InternalGeoGrid) aggregation; + BaseGeoGrid grid = (BaseGeoGrid) aggregation; if (buckets == null) { buckets = new LongObjectPagedHashMap<>(grid.buckets.size(), reduceContext.bigArrays()); } for (Object obj : grid.buckets) { - InternalGeoGridBucket bucket = (InternalGeoGridBucket) obj; - List existingBuckets = buckets.get(bucket.hashAsLong()); + BaseGeoGridBucket bucket = (BaseGeoGridBucket) obj; + List existingBuckets = buckets.get(bucket.hashAsLong()); if (existingBuckets == null) { existingBuckets = new ArrayList<>(aggregations.size()); buckets.put(bucket.hashAsLong(), existingBuckets); @@ -113,13 +113,13 @@ public InternalGeoGrid reduce(List aggregations, ReduceCont } final int size = Math.toIntExact(reduceContext.isFinalReduce() == false ? 
buckets.size() : Math.min(requiredSize, buckets.size())); - BucketPriorityQueue ordered = new BucketPriorityQueue<>(size); - for (LongObjectPagedHashMap.Cursor> cursor : buckets) { - List sameCellBuckets = cursor.value; + BucketPriorityQueue ordered = new BucketPriorityQueue<>(size); + for (LongObjectPagedHashMap.Cursor> cursor : buckets) { + List sameCellBuckets = cursor.value; ordered.insertWithOverflow(reduceBucket(sameCellBuckets, reduceContext)); } buckets.close(); - InternalGeoGridBucket[] list = new InternalGeoGridBucket[ordered.size()]; + BaseGeoGridBucket[] list = new BaseGeoGridBucket[ordered.size()]; for (int i = ordered.size() - 1; i >= 0; i--) { list[i] = ordered.pop(); } @@ -128,11 +128,11 @@ public InternalGeoGrid reduce(List aggregations, ReduceCont } @Override - protected InternalGeoGridBucket reduceBucket(List buckets, ReduceContext context) { + protected BaseGeoGridBucket reduceBucket(List buckets, ReduceContext context) { assert buckets.size() > 0; List aggregationsList = new ArrayList<>(buckets.size()); long docCount = 0; - for (InternalGeoGridBucket bucket : buckets) { + for (BaseGeoGridBucket bucket : buckets) { docCount += bucket.docCount; aggregationsList.add(bucket.aggregations); } @@ -140,12 +140,12 @@ protected InternalGeoGridBucket reduceBucket(List buckets return createBucket(buckets.get(0).hashAsLong, docCount, aggs); } - abstract B createBucket(long hashAsLong, long docCount, InternalAggregations aggregations); + protected abstract B createBucket(long hashAsLong, long docCount, InternalAggregations aggregations); @Override public XContentBuilder doXContentBody(XContentBuilder builder, Params params) throws IOException { builder.startArray(CommonFields.BUCKETS.getPreferredName()); - for (InternalGeoGridBucket bucket : buckets) { + for (BaseGeoGridBucket bucket : buckets) { bucket.toXContent(builder, params); } builder.endArray(); @@ -168,7 +168,7 @@ public boolean equals(Object obj) { if (obj == null || getClass() != obj.getClass()) return false; if (super.equals(obj) == false) return false; - InternalGeoGrid other = (InternalGeoGrid) obj; + BaseGeoGrid other = (BaseGeoGrid) obj; return Objects.equals(requiredSize, other.requiredSize) && Objects.equals(buckets, other.buckets); } diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoGridBucket.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/BaseGeoGridBucket.java similarity index 87% rename from modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoGridBucket.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/BaseGeoGridBucket.java index 93fcdbd098400..f362d2b3d33d6 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoGridBucket.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/BaseGeoGridBucket.java @@ -45,12 +45,12 @@ /** * Base implementation of geogrid aggs * - * @opensearch.internal + * @opensearch.api */ -public abstract class InternalGeoGridBucket extends InternalMultiBucketAggregation.InternalBucket +public abstract class BaseGeoGridBucket extends InternalMultiBucketAggregation.InternalBucket implements GeoGrid.Bucket, - Comparable { + Comparable { protected long hashAsLong; protected long docCount; @@ -58,7 +58,7 @@ public abstract class InternalGeoGridBucket ext long bucketOrd; - public InternalGeoGridBucket(long hashAsLong, long docCount, 
InternalAggregations aggregations) { + public BaseGeoGridBucket(long hashAsLong, long docCount, InternalAggregations aggregations) { this.docCount = docCount; this.aggregations = aggregations; this.hashAsLong = hashAsLong; @@ -67,7 +67,7 @@ public InternalGeoGridBucket(long hashAsLong, long docCount, InternalAggregation /** * Read from a stream. */ - public InternalGeoGridBucket(StreamInput in) throws IOException { + public BaseGeoGridBucket(StreamInput in) throws IOException { hashAsLong = in.readLong(); docCount = in.readVLong(); aggregations = InternalAggregations.readFrom(in); @@ -80,7 +80,7 @@ public void writeTo(StreamOutput out) throws IOException { aggregations.writeTo(out); } - long hashAsLong() { + public long hashAsLong() { return hashAsLong; } @@ -95,7 +95,7 @@ public Aggregations getAggregations() { } @Override - public int compareTo(InternalGeoGridBucket other) { + public int compareTo(BaseGeoGridBucket other) { if (this.hashAsLong > other.hashAsLong) { return 1; } @@ -119,7 +119,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; - InternalGeoGridBucket bucket = (InternalGeoGridBucket) o; + BaseGeoGridBucket bucket = (BaseGeoGridBucket) o; return hashAsLong == bucket.hashAsLong && docCount == bucket.docCount && Objects.equals(aggregations, bucket.aggregations); } diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/BucketPriorityQueue.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/BucketPriorityQueue.java index 70d0552b3e80b..83fcdf4f66424 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/BucketPriorityQueue.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/BucketPriorityQueue.java @@ -38,14 +38,14 @@ * * @opensearch.internal */ -class BucketPriorityQueue extends PriorityQueue { +class BucketPriorityQueue extends PriorityQueue { BucketPriorityQueue(int size) { super(size); } @Override - protected boolean lessThan(InternalGeoGridBucket o1, InternalGeoGridBucket o2) { + protected boolean lessThan(BaseGeoGridBucket o1, BaseGeoGridBucket o2) { int cmp = Long.compare(o2.getDocCount(), o1.getDocCount()); if (cmp == 0) { cmp = o2.compareTo(o1); diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/CellIdSource.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/CellIdSource.java index d40029e9a762d..89ce288770185 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/CellIdSource.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/CellIdSource.java @@ -43,7 +43,7 @@ * Wrapper class to help convert {@link MultiGeoPointValues} * to numeric long values for bucketing. 
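The javadoc above describes CellIdSource as an adapter from geo points to numeric cell ids. A minimal stand-alone sketch of that adapter shape follows; the GeoPointValue record and the toy one-degree encoder are hypothetical stand-ins for Lucene's doc-values types and for the real geohash/geotile encoders.

import java.util.List;

// Minimal sketch, not the real CellIdSource: adapts per-document geo points
// to long cell ids through a pluggable encoder of shape (lon, lat, precision) -> long.
public final class CellIdSketch {
    record GeoPointValue(double lat, double lon) {} // stand-in for doc values

    interface CellEncoder {
        long encode(double lon, double lat, int precision);
    }

    private final int precision;
    private final CellEncoder encoder;

    CellIdSketch(int precision, CellEncoder encoder) {
        this.precision = precision;
        this.encoder = encoder;
    }

    long[] cellIdsFor(List<GeoPointValue> docPoints) {
        // one numeric value per point; multi-valued fields yield several ids
        return docPoints.stream().mapToLong(p -> encoder.encode(p.lon(), p.lat(), precision)).toArray();
    }

    public static void main(String[] args) {
        // toy encoder bucketing into 1-degree cells; precision is ignored here
        CellIdSketch source = new CellIdSketch(1, (lon, lat, p) -> (long) ((Math.floor(lat) + 90) * 360 + Math.floor(lon) + 180));
        long[] ids = source.cellIdsFor(List.of(new GeoPointValue(48.8, 2.3), new GeoPointValue(48.2, 3.9)));
        System.out.println(ids[0] + " " + ids[1]); // 49862 49863: adjacent 1-degree cells
    }
}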
* - * @opensearch.internal + * @opensearch.api */ public class CellIdSource extends ValuesSource.Numeric { private final ValuesSource.GeoPoint valuesSource; diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGrid.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGrid.java index 4ae888640efc8..b2fe6e33ef95c 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGrid.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGrid.java @@ -39,13 +39,13 @@ * A geo-grid aggregation. Defines multiple buckets, each representing a cell in a geo-grid of a specific * precision. * - * @opensearch.internal + * @opensearch.api */ public interface GeoGrid extends MultiBucketsAggregation { /** * A bucket that is associated with a geo-grid cell. The key of the bucket is - * the {@link InternalGeoGridBucket#getKeyAsString()} of the cell + * the {@link BaseGeoGridBucket#getKeyAsString()} of the cell */ interface Bucket extends MultiBucketsAggregation.Bucket {} diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java index 4a904b3aa2b16..0ca2a28844f99 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java @@ -58,9 +58,9 @@ import java.util.function.Function; /** - * Base Aggregation Builder for geohash_grid and geotile_grid aggs + * Base Aggregation Builder for geogrid aggs * - * @opensearch.internal + * @opensearch.api */ public abstract class GeoGridAggregationBuilder extends ValuesSourceAggregationBuilder { /* recognized field names in JSON */ diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregator.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregator.java index 909772c61a960..db07ac8f947e5 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregator.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregator.java @@ -55,16 +55,16 @@ /** * Aggregates data expressed as longs (for efficiency's sake) but formats results as aggregation-specific strings. * - * @opensearch.internal + * @opensearch.api */ -public abstract class GeoGridAggregator extends BucketsAggregator { +public abstract class GeoGridAggregator extends BucketsAggregator { protected final int requiredSize; protected final int shardSize; protected final ValuesSource.Numeric valuesSource; protected final LongKeyedBucketOrds bucketOrds; - GeoGridAggregator( + protected GeoGridAggregator( String name, AggregatorFactories factories, ValuesSource.Numeric valuesSource, @@ -118,23 +118,23 @@ public void collect(int doc, long owningBucketOrd) throws IOException { }; } - abstract T buildAggregation(String name, int requiredSize, List buckets, Map metadata); + protected abstract T buildAggregation(String name, int requiredSize, List buckets, Map metadata); /** * This method is used to return a re-usable instance of the bucket when building * the aggregation. 
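The re-usable bucket mentioned in the javadoc above feeds a bounded priority queue: the aggregator fills one spare instance per candidate cell, and whatever insertWithOverflow evicts becomes the next spare, so a shard-level top-N pass allocates at most N+1 bucket objects. A self-contained sketch of that reuse pattern with toy types (java.util.PriorityQueue here, where the real code uses Lucene's PriorityQueue):

import java.util.PriorityQueue;

// Sketch of the spare-object pattern used when building top-N buckets.
// Toy types; only the reuse discipline is the point.
public final class SpareReuseSketch {
    static final class Bucket {
        long hash;
        long count;
    }

    public static void main(String[] args) {
        int topN = 2;
        PriorityQueue<Bucket> queue = new PriorityQueue<>((a, b) -> Long.compare(a.count, b.count));
        long[][] candidates = { { 1, 10 }, { 2, 3 }, { 3, 42 }, { 4, 7 } };
        Bucket spare = null;
        for (long[] c : candidates) {
            if (spare == null) {
                spare = new Bucket(); // analogous to newEmptyBucket()
            }
            spare.hash = c[0];
            spare.count = c[1];
            queue.offer(spare);
            // once the queue is full, evict the smallest and recycle it
            spare = queue.size() > topN ? queue.poll() : null;
        }
        queue.forEach(b -> System.out.println(b.hash + " -> " + b.count)); // cells 1 and 3 survive
    }
}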
- * @return a new {@link InternalGeoGridBucket} implementation with empty parameters + * @return a new {@link BaseGeoGridBucket} implementation with empty parameters */ - abstract InternalGeoGridBucket newEmptyBucket(); + protected abstract BaseGeoGridBucket newEmptyBucket(); @Override public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws IOException { - InternalGeoGridBucket[][] topBucketsPerOrd = new InternalGeoGridBucket[owningBucketOrds.length][]; + BaseGeoGridBucket[][] topBucketsPerOrd = new BaseGeoGridBucket[owningBucketOrds.length][]; for (int ordIdx = 0; ordIdx < owningBucketOrds.length; ordIdx++) { int size = (int) Math.min(bucketOrds.bucketsInOrd(owningBucketOrds[ordIdx]), shardSize); - BucketPriorityQueue ordered = new BucketPriorityQueue<>(size); - InternalGeoGridBucket spare = null; + BucketPriorityQueue ordered = new BucketPriorityQueue<>(size); + BaseGeoGridBucket spare = null; LongKeyedBucketOrds.BucketOrdsEnum ordsEnum = bucketOrds.ordsEnum(owningBucketOrds[ordIdx]); while (ordsEnum.next()) { if (spare == null) { @@ -149,7 +149,7 @@ public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws I spare = ordered.insertWithOverflow(spare); } - topBucketsPerOrd[ordIdx] = new InternalGeoGridBucket[ordered.size()]; + topBucketsPerOrd[ordIdx] = new BaseGeoGridBucket[ordered.size()]; for (int i = ordered.size() - 1; i >= 0; --i) { topBucketsPerOrd[ordIdx][i] = ordered.pop(); } @@ -163,7 +163,7 @@ public InternalAggregation[] buildAggregations(long[] owningBucketOrds) throws I } @Override - public InternalGeoGrid buildEmptyAggregation() { + public BaseGeoGrid buildEmptyAggregation() { return buildAggregation(name, requiredSize, Collections.emptyList(), metadata()); } diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGrid.java similarity index 70% rename from modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGrid.java index ff1247300939a..aa1d5504ad24f 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoHashGrid.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGrid.java @@ -43,40 +43,40 @@ * All geohashes in a grid are of the same precision and held internally as a single long * for efficiency's sake. 
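Since the GeoHashGrid javadoc above leans on geohash cells, here is the textbook geohash computation for reference: longitude and latitude ranges are halved alternately, each halving emits one bit, and every five bits become a base-32 character. This is the standard public algorithm, sketched independently rather than copied from the module (which keeps the hash in long form with the precision folded in).

// Classic geohash encoding: interleave lon/lat range-halving bits and map
// each 5-bit group onto the base-32 geohash alphabet.
public final class GeohashSketch {
    private static final String BASE32 = "0123456789bcdefghjkmnpqrstuvwxyz";

    static String encode(double lat, double lon, int chars) {
        double latLo = -90, latHi = 90, lonLo = -180, lonHi = 180;
        StringBuilder hash = new StringBuilder();
        boolean evenBit = true; // even bits refine longitude, odd bits latitude
        int bits = 0, value = 0;
        while (hash.length() < chars) {
            if (evenBit) {
                double mid = (lonLo + lonHi) / 2;
                if (lon >= mid) { value = (value << 1) | 1; lonLo = mid; } else { value <<= 1; lonHi = mid; }
            } else {
                double mid = (latLo + latHi) / 2;
                if (lat >= mid) { value = (value << 1) | 1; latLo = mid; } else { value <<= 1; latHi = mid; }
            }
            evenBit = !evenBit;
            if (++bits == 5) {
                hash.append(BASE32.charAt(value));
                bits = 0;
                value = 0;
            }
        }
        return hash.toString();
    }

    public static void main(String[] args) {
        System.out.println(encode(48.669, -4.329, 5)); // gbsuv
    }
}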
* - * @opensearch.internal + * @opensearch.api */ -public class InternalGeoHashGrid extends InternalGeoGrid { +public class GeoHashGrid extends BaseGeoGrid { - InternalGeoHashGrid(String name, int requiredSize, List buckets, Map metadata) { + GeoHashGrid(String name, int requiredSize, List buckets, Map metadata) { super(name, requiredSize, buckets, metadata); } - public InternalGeoHashGrid(StreamInput in) throws IOException { + public GeoHashGrid(StreamInput in) throws IOException { super(in); } @Override - public InternalGeoGrid create(List buckets) { - return new InternalGeoHashGrid(name, requiredSize, buckets, metadata); + public BaseGeoGrid create(List buckets) { + return new GeoHashGrid(name, requiredSize, buckets, metadata); } @Override - public InternalGeoGridBucket createBucket(InternalAggregations aggregations, InternalGeoGridBucket prototype) { + public BaseGeoGridBucket createBucket(InternalAggregations aggregations, BaseGeoGridBucket prototype) { return new InternalGeoHashGridBucket(prototype.hashAsLong, prototype.docCount, aggregations); } @Override - InternalGeoGrid create(String name, int requiredSize, List buckets, Map metadata) { - return new InternalGeoHashGrid(name, requiredSize, buckets, metadata); + protected BaseGeoGrid create(String name, int requiredSize, List buckets, Map metadata) { + return new GeoHashGrid(name, requiredSize, buckets, metadata); } @Override - InternalGeoHashGridBucket createBucket(long hashAsLong, long docCount, InternalAggregations aggregations) { + protected InternalGeoHashGridBucket createBucket(long hashAsLong, long docCount, InternalAggregations aggregations) { return new InternalGeoHashGridBucket(hashAsLong, docCount, aggregations); } @Override - Reader getBucketReader() { + protected Reader getBucketReader() { return InternalGeoHashGridBucket::new; } diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregationBuilder.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregationBuilder.java index bbaf9613fb216..760d7d643c0a5 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregationBuilder.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregationBuilder.java @@ -51,7 +51,7 @@ /** * Aggregation Builder for geohash_grid * - * @opensearch.internal + * @opensearch.api */ public class GeoHashGridAggregationBuilder extends GeoGridAggregationBuilder { public static final String NAME = "geohash_grid"; diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregator.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregator.java index 6ca7a4d8a9cb8..9ff9fe7d8f9ba 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregator.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregator.java @@ -47,9 +47,9 @@ * * @opensearch.internal */ -public class GeoHashGridAggregator extends GeoGridAggregator { +class GeoHashGridAggregator extends GeoGridAggregator { - public GeoHashGridAggregator( + GeoHashGridAggregator( String name, AggregatorFactories factories, ValuesSource.Numeric valuesSource, @@ -64,16 +64,17 @@ public GeoHashGridAggregator( } @Override - InternalGeoHashGrid buildAggregation(String name, int requiredSize, List buckets, Map 
metadata) { - return new InternalGeoHashGrid(name, requiredSize, buckets, metadata); + protected GeoHashGrid buildAggregation(String name, int requiredSize, List buckets, Map metadata) { + return new GeoHashGrid(name, requiredSize, buckets, metadata); } @Override - public InternalGeoHashGrid buildEmptyAggregation() { - return new InternalGeoHashGrid(name, requiredSize, Collections.emptyList(), metadata()); + public GeoHashGrid buildEmptyAggregation() { + return new GeoHashGrid(name, requiredSize, Collections.emptyList(), metadata()); } - InternalGeoGridBucket newEmptyBucket() { + @Override + protected BaseGeoGridBucket newEmptyBucket() { return new InternalGeoHashGridBucket(0, 0, null); } } diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregatorFactory.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregatorFactory.java index 1914c07e831f7..898a7d82a4dec 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregatorFactory.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridAggregatorFactory.java @@ -58,7 +58,7 @@ * * @opensearch.internal */ -public class GeoHashGridAggregatorFactory extends ValuesSourceAggregatorFactory { +class GeoHashGridAggregatorFactory extends ValuesSourceAggregatorFactory { private final int precision; private final int requiredSize; @@ -86,7 +86,7 @@ public class GeoHashGridAggregatorFactory extends ValuesSourceAggregatorFactory @Override protected Aggregator createUnmapped(SearchContext searchContext, Aggregator parent, Map metadata) throws IOException { - final InternalAggregation aggregation = new InternalGeoHashGrid(name, requiredSize, emptyList(), metadata); + final InternalAggregation aggregation = new GeoHashGrid(name, requiredSize, emptyList(), metadata); return new NonCollectingAggregator(name, searchContext, parent, factories, metadata) { @Override public InternalAggregation buildEmptyAggregation() { diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoTileGrid.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGrid.java similarity index 70% rename from modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoTileGrid.java rename to modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGrid.java index fa544b5893f0c..91c523c80855e 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoTileGrid.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGrid.java @@ -43,40 +43,40 @@ * All geohashes in a grid are of the same precision and held internally as a single long * for efficiency's sake. 
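GeoTileGrid cells, in contrast, follow the web-mercator "slippy map" zoom/x/y scheme. The formulas below are the standard tile math, shown as general background; the module's own encoder may differ in details such as latitude clamping.

// Standard zoom/x/y tile computation for web-mercator map tiles, the scheme
// geotile_grid keys are based on. General formulas, for illustration.
public final class GeoTileSketch {
    static int lonToTileX(double lon, int zoom) {
        return (int) Math.floor((lon + 180.0) / 360.0 * (1 << zoom));
    }

    static int latToTileY(double lat, int zoom) {
        double r = Math.toRadians(lat);
        double y = (1 - Math.log(Math.tan(r) + 1 / Math.cos(r)) / Math.PI) / 2;
        return (int) Math.floor(y * (1 << zoom));
    }

    public static void main(String[] args) {
        int zoom = 10;
        // London: expect tile 10/511/340 under the usual scheme
        System.out.println(zoom + "/" + lonToTileX(-0.1278, zoom) + "/" + latToTileY(51.5074, zoom));
    }
}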
* - * @opensearch.internal + * @opensearch.api */ -public class InternalGeoTileGrid extends InternalGeoGrid { +public class GeoTileGrid extends BaseGeoGrid { - InternalGeoTileGrid(String name, int requiredSize, List buckets, Map metadata) { + GeoTileGrid(String name, int requiredSize, List buckets, Map metadata) { super(name, requiredSize, buckets, metadata); } - public InternalGeoTileGrid(StreamInput in) throws IOException { + public GeoTileGrid(StreamInput in) throws IOException { super(in); } @Override - public InternalGeoGrid create(List buckets) { - return new InternalGeoTileGrid(name, requiredSize, buckets, metadata); + public BaseGeoGrid create(List buckets) { + return new GeoTileGrid(name, requiredSize, buckets, metadata); } @Override - public InternalGeoGridBucket createBucket(InternalAggregations aggregations, InternalGeoGridBucket prototype) { + public BaseGeoGridBucket createBucket(InternalAggregations aggregations, BaseGeoGridBucket prototype) { return new InternalGeoTileGridBucket(prototype.hashAsLong, prototype.docCount, aggregations); } @Override - InternalGeoGrid create(String name, int requiredSize, List buckets, Map metadata) { - return new InternalGeoTileGrid(name, requiredSize, buckets, metadata); + protected BaseGeoGrid create(String name, int requiredSize, List buckets, Map metadata) { + return new GeoTileGrid(name, requiredSize, buckets, metadata); } @Override - InternalGeoTileGridBucket createBucket(long hashAsLong, long docCount, InternalAggregations aggregations) { + protected InternalGeoTileGridBucket createBucket(long hashAsLong, long docCount, InternalAggregations aggregations) { return new InternalGeoTileGridBucket(hashAsLong, docCount, aggregations); } @Override - Reader getBucketReader() { + protected Reader getBucketReader() { return InternalGeoTileGridBucket::new; } diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregationBuilder.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregationBuilder.java index 76ad515f34fe5..0f1f87bdc57fa 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregationBuilder.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregationBuilder.java @@ -51,7 +51,7 @@ /** * Aggregation Builder for geotile_grid agg * - * @opensearch.internal + * @opensearch.api */ public class GeoTileGridAggregationBuilder extends GeoGridAggregationBuilder { public static final String NAME = "geotile_grid"; diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregator.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregator.java index a205a9afde41e..8faed4e9cd2d4 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregator.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregator.java @@ -48,9 +48,9 @@ * * @opensearch.internal */ -public class GeoTileGridAggregator extends GeoGridAggregator { +class GeoTileGridAggregator extends GeoGridAggregator { - public GeoTileGridAggregator( + GeoTileGridAggregator( String name, AggregatorFactories factories, ValuesSource.Numeric valuesSource, @@ -65,16 +65,17 @@ public GeoTileGridAggregator( } @Override - InternalGeoTileGrid buildAggregation(String name, int requiredSize, List buckets, Map 
metadata) { - return new InternalGeoTileGrid(name, requiredSize, buckets, metadata); + protected GeoTileGrid buildAggregation(String name, int requiredSize, List buckets, Map metadata) { + return new GeoTileGrid(name, requiredSize, buckets, metadata); } @Override - public InternalGeoTileGrid buildEmptyAggregation() { - return new InternalGeoTileGrid(name, requiredSize, Collections.emptyList(), metadata()); + public GeoTileGrid buildEmptyAggregation() { + return new GeoTileGrid(name, requiredSize, Collections.emptyList(), metadata()); } - InternalGeoGridBucket newEmptyBucket() { + @Override + protected BaseGeoGridBucket newEmptyBucket() { return new InternalGeoTileGridBucket(0, 0, null); } } diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregatorFactory.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregatorFactory.java index b830988a3d410..6eb73727ad6c8 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregatorFactory.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridAggregatorFactory.java @@ -57,7 +57,7 @@ * * @opensearch.internal */ -public class GeoTileGridAggregatorFactory extends ValuesSourceAggregatorFactory { +class GeoTileGridAggregatorFactory extends ValuesSourceAggregatorFactory { private final int precision; private final int requiredSize; @@ -85,7 +85,7 @@ public class GeoTileGridAggregatorFactory extends ValuesSourceAggregatorFactory @Override protected Aggregator createUnmapped(SearchContext searchContext, Aggregator parent, Map metadata) throws IOException { - final InternalAggregation aggregation = new InternalGeoTileGrid(name, requiredSize, Collections.emptyList(), metadata); + final InternalAggregation aggregation = new GeoTileGrid(name, requiredSize, Collections.emptyList(), metadata); return new NonCollectingAggregator(name, searchContext, parent, factories, metadata) { @Override public InternalAggregation buildEmptyAggregation() { diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoHashGridBucket.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoHashGridBucket.java index 659909e868651..6e7ed8a679681 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoHashGridBucket.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/InternalGeoHashGridBucket.java @@ -43,7 +43,7 @@ * * @opensearch.internal */ -public class InternalGeoHashGridBucket extends InternalGeoGridBucket { +class InternalGeoHashGridBucket extends BaseGeoGridBucket { InternalGeoHashGridBucket(long hashAsLong, long docCount, InternalAggregations aggregations) { super(hashAsLong, docCount, aggregations); } @@ -51,7 +51,7 @@ public class InternalGeoHashGridBucket extends InternalGeoGridBucket { +class InternalGeoTileGridBucket extends BaseGeoGridBucket { InternalGeoTileGridBucket(long hashAsLong, long docCount, InternalAggregations aggregations) { super(hashAsLong, docCount, aggregations); } @@ -52,7 +52,7 @@ public class InternalGeoTileGridBucket extends InternalGeoGridBucket implements GeoGrid { @@ -63,7 +63,7 @@ public static ObjectParser createParser( return parser; } - protected void setName(String name) { + public void setName(String name) { super.setName(name); } } diff --git 
a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoGridBucket.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoGridBucket.java index 80124cda50b19..cbe3a2ee89dd7 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoGridBucket.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoGridBucket.java @@ -40,7 +40,7 @@ /** * A single geo grid bucket result parsed between nodes * - * @opensearch.internal + * @opensearch.api */ public abstract class ParsedGeoGridBucket extends ParsedMultiBucketAggregation.ParsedBucket implements GeoGrid.Bucket { diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoHashGrid.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoHashGrid.java index 109524e755c4d..343149f8e19ab 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoHashGrid.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoHashGrid.java @@ -42,7 +42,7 @@ * * @opensearch.internal */ -public class ParsedGeoHashGrid extends ParsedGeoGrid { +class ParsedGeoHashGrid extends ParsedGeoGrid { private static final ObjectParser PARSER = createParser( ParsedGeoHashGrid::new, diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoHashGridBucket.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoHashGridBucket.java index 4e6e454b08324..6704273f45580 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoHashGridBucket.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoHashGridBucket.java @@ -41,7 +41,7 @@ * * @opensearch.internal */ -public class ParsedGeoHashGridBucket extends ParsedGeoGridBucket { +class ParsedGeoHashGridBucket extends ParsedGeoGridBucket { @Override public GeoPoint getKey() { diff --git a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoTileGrid.java b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoTileGrid.java index 8734c96a15578..cb64a0e153e87 100644 --- a/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoTileGrid.java +++ b/modules/geo/src/main/java/org/opensearch/geo/search/aggregations/bucket/geogrid/ParsedGeoTileGrid.java @@ -42,7 +42,7 @@ * * @opensearch.internal */ -public class ParsedGeoTileGrid extends ParsedGeoGrid { +class ParsedGeoTileGrid extends ParsedGeoGrid { private static final ObjectParser PARSER = createParser( ParsedGeoTileGrid::new, diff --git a/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileGridAggregationCompositeAggregatorTests.java b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileGridAggregationCompositeAggregatorTests.java index 3c7c292f9d193..bc7fde8d66d0a 100644 --- a/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileGridAggregationCompositeAggregatorTests.java +++ b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/composite/GeoTileGridAggregationCompositeAggregatorTests.java @@ -17,7 +17,6 @@ import org.opensearch.common.geo.GeoPoint; import 
org.opensearch.geo.GeoModulePlugin; import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoTileGridAggregationBuilder; -import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoTileGridAggregator; import org.opensearch.index.mapper.GeoPointFieldMapper; import org.opensearch.plugins.SearchPlugin; import org.opensearch.search.aggregations.bucket.composite.CompositeAggregationBuilder; @@ -31,7 +30,7 @@ import java.util.Map; /** - * Testing the {@link GeoTileGridAggregator} as part of CompositeAggregation. + * Testing the geo tile grid as part of CompositeAggregation. */ public class GeoTileGridAggregationCompositeAggregatorTests extends BaseCompositeAggregatorTestCase { diff --git a/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregatorTestCase.java b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregatorTestCase.java index d6153637f656d..5ec10a7f4f7cf 100644 --- a/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregatorTestCase.java +++ b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridAggregatorTestCase.java @@ -73,7 +73,7 @@ import static org.hamcrest.Matchers.equalTo; -public abstract class GeoGridAggregatorTestCase extends AggregatorTestCase { +public abstract class GeoGridAggregatorTestCase extends AggregatorTestCase { private static final String FIELD_NAME = "location"; protected static final double GEOHASH_TOLERANCE = 1E-5D; @@ -201,9 +201,9 @@ public void testAsSubAgg() throws IOException { Consumer verify = (terms) -> { Map> actual = new TreeMap<>(); for (StringTerms.Bucket tb : terms.getBuckets()) { - InternalGeoGrid gg = tb.getAggregations().get("gg"); + BaseGeoGrid gg = tb.getAggregations().get("gg"); Map sub = new TreeMap<>(); - for (InternalGeoGridBucket ggb : gg.getBuckets()) { + for (BaseGeoGridBucket ggb : gg.getBuckets()) { sub.put(ggb.getKeyAsString(), ggb.getDocCount()); } actual.put(tb.getKeyAsString(), sub); @@ -299,7 +299,7 @@ private void testCase( String field, int precision, GeoBoundingBox geoBoundingBox, - Consumer> verify, + Consumer> verify, CheckedConsumer buildIndex ) throws IOException { testCase(query, precision, geoBoundingBox, verify, buildIndex, createBuilder("_name").field(field)); @@ -309,7 +309,7 @@ private void testCase( Query query, int precision, GeoBoundingBox geoBoundingBox, - Consumer> verify, + Consumer> verify, CheckedConsumer buildIndex, GeoGridAggregationBuilder aggregationBuilder ) throws IOException { @@ -333,7 +333,7 @@ private void testCase( aggregator.preCollection(); indexSearcher.search(query, aggregator); aggregator.postCollection(); - verify.accept((InternalGeoGrid) aggregator.buildTopLevel()); + verify.accept((BaseGeoGrid) aggregator.buildTopLevel()); indexReader.close(); directory.close(); diff --git a/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridTestCase.java b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridTestCase.java index 432736a2b43fe..2a655239997b6 100644 --- a/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridTestCase.java +++ b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoGridTestCase.java @@ -50,16 +50,16 @@ import static org.hamcrest.Matchers.equalTo; -public abstract class GeoGridTestCase> extends - InternalMultiBucketAggregationTestCase { +public abstract class GeoGridTestCase> extends 
InternalMultiBucketAggregationTestCase< + T> { /** - * Instantiate a {@link InternalGeoGrid}-derived class using the same parameters as constructor. + * Instantiate a {@link BaseGeoGrid}-derived class using the same parameters as constructor. */ - protected abstract T createInternalGeoGrid(String name, int size, List buckets, Map metadata); + protected abstract T createInternalGeoGrid(String name, int size, List buckets, Map metadata); /** - * Instantiate a {@link InternalGeoGridBucket}-derived class using the same parameters as constructor. + * Instantiate a {@link BaseGeoGridBucket}-derived class using the same parameters as constructor. */ protected abstract B createInternalGeoGridBucket(Long key, long docCount, InternalAggregations aggregations); @@ -117,7 +117,7 @@ protected List getNamedXContents() { protected T createTestInstance(String name, Map metadata, InternalAggregations aggregations) { final int precision = randomPrecision(); int size = randomNumberOfBuckets(); - List buckets = new ArrayList<>(size); + List buckets = new ArrayList<>(size); for (int i = 0; i < size; i++) { double latitude = randomDoubleBetween(-90.0, 90.0, false); double longitude = randomDoubleBetween(-180.0, 180.0, false); @@ -176,7 +176,7 @@ protected Class implementationClass() { protected T mutateInstance(T instance) { String name = instance.getName(); int size = instance.getRequiredSize(); - List buckets = instance.getBuckets(); + List buckets = instance.getBuckets(); Map metadata = instance.getMetadata(); switch (between(0, 3)) { case 0: @@ -206,7 +206,7 @@ protected T mutateInstance(T instance) { } public void testCreateFromBuckets() { - InternalGeoGrid original = createTestInstance(); + BaseGeoGrid original = createTestInstance(); assertThat(original, equalTo(original.create(original.buckets))); } } diff --git a/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridTests.java b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridTests.java index c84c6ef5ec076..ada943b6dd369 100644 --- a/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridTests.java +++ b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoHashGridTests.java @@ -37,16 +37,11 @@ import java.util.List; import java.util.Map; -public class GeoHashGridTests extends GeoGridTestCase { +public class GeoHashGridTests extends GeoGridTestCase { @Override - protected InternalGeoHashGrid createInternalGeoGrid( - String name, - int size, - List buckets, - Map metadata - ) { - return new InternalGeoHashGrid(name, size, buckets, metadata); + protected GeoHashGrid createInternalGeoGrid(String name, int size, List buckets, Map metadata) { + return new GeoHashGrid(name, size, buckets, metadata); } @Override diff --git a/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridTests.java b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridTests.java index ead67e0455d94..b59e9ec2cff53 100644 --- a/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridTests.java +++ b/modules/geo/src/test/java/org/opensearch/geo/search/aggregations/bucket/geogrid/GeoTileGridTests.java @@ -37,16 +37,11 @@ import java.util.List; import java.util.Map; -public class GeoTileGridTests extends GeoGridTestCase { +public class GeoTileGridTests extends GeoGridTestCase { @Override - protected InternalGeoTileGrid 
createInternalGeoGrid( - String name, - int size, - List buckets, - Map metadata - ) { - return new InternalGeoTileGrid(name, size, buckets, metadata); + protected GeoTileGrid createInternalGeoGrid(String name, int size, List buckets, Map metadata) { + return new GeoTileGrid(name, size, buckets, metadata); } @Override diff --git a/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationBuilders.java b/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationBuilders.java index c0d7e51047c6b..706c73e7416f5 100644 --- a/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationBuilders.java +++ b/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationBuilders.java @@ -10,8 +10,8 @@ import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoHashGridAggregationBuilder; import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoTileGridAggregationBuilder; -import org.opensearch.geo.search.aggregations.bucket.geogrid.InternalGeoHashGrid; -import org.opensearch.geo.search.aggregations.bucket.geogrid.InternalGeoTileGrid; +import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoHashGrid; +import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoTileGrid; import org.opensearch.geo.search.aggregations.metrics.GeoBounds; import org.opensearch.geo.search.aggregations.metrics.GeoBoundsAggregationBuilder; @@ -24,14 +24,14 @@ public static GeoBoundsAggregationBuilder geoBounds(String name) { } /** - * Create a new {@link InternalGeoHashGrid} aggregation with the given name. + * Create a new {@link GeoHashGrid} aggregation with the given name. */ public static GeoHashGridAggregationBuilder geohashGrid(String name) { return new GeoHashGridAggregationBuilder(name); } /** - * Create a new {@link InternalGeoTileGrid} aggregation with the given name. + * Create a new {@link GeoTileGrid} aggregation with the given name. 
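For orientation, this is roughly how the helper above is used in the module's tests: build the geotile_grid aggregation, point it at a geo_point field, and pick a zoom level. A usage sketch only; it compiles against the geo module's test sourceset, and the "location" field name is a hypothetical example.

import org.opensearch.geo.search.aggregations.bucket.geogrid.GeoTileGridAggregationBuilder;
import org.opensearch.geo.tests.common.AggregationBuilders;

// Usage sketch for AggregationBuilders.geotileGrid; field name is illustrative.
final class GeoTileGridUsageSketch {
    static GeoTileGridAggregationBuilder tilesByLocation() {
        GeoTileGridAggregationBuilder grid = AggregationBuilders.geotileGrid("tiles");
        grid.field("location"); // geo_point field to bucket on
        grid.precision(7);      // zoom level of the tiles
        return grid;
    }
}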
*/ public static GeoTileGridAggregationBuilder geotileGrid(String name) { return new GeoTileGridAggregationBuilder(name); diff --git a/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationInspectionHelper.java b/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationInspectionHelper.java index 3473cf2d94b76..89debdf5abd95 100644 --- a/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationInspectionHelper.java +++ b/modules/geo/src/test/java/org/opensearch/geo/tests/common/AggregationInspectionHelper.java @@ -8,7 +8,7 @@ package org.opensearch.geo.tests.common; -import org.opensearch.geo.search.aggregations.bucket.geogrid.InternalGeoGrid; +import org.opensearch.geo.search.aggregations.bucket.geogrid.BaseGeoGrid; import org.opensearch.geo.search.aggregations.metrics.InternalGeoBounds; public class AggregationInspectionHelper { @@ -17,7 +17,7 @@ public static boolean hasValue(InternalGeoBounds agg) { return (agg.topLeft() == null && agg.bottomRight() == null) == false; } - public static boolean hasValue(InternalGeoGrid agg) { + public static boolean hasValue(BaseGeoGrid agg) { return agg.getBuckets().stream().anyMatch(bucket -> bucket.getDocCount() > 0); } } diff --git a/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MultiSearchTemplateResponse.java b/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MultiSearchTemplateResponse.java index 1802d03e20942..7c2c403fdd487 100644 --- a/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MultiSearchTemplateResponse.java +++ b/modules/lang-mustache/src/main/java/org/opensearch/script/mustache/MultiSearchTemplateResponse.java @@ -32,7 +32,6 @@ package org.opensearch.script.mustache; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; import org.opensearch.action.ActionResponse; import org.opensearch.action.search.MultiSearchResponse; @@ -125,11 +124,7 @@ public String toString() { MultiSearchTemplateResponse(StreamInput in) throws IOException { super(in); items = in.readArray(Item::new, Item[]::new); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - tookInMillis = in.readVLong(); - } else { - tookInMillis = -1L; - } + tookInMillis = in.readVLong(); } MultiSearchTemplateResponse(Item[] items, long tookInMillis) { @@ -159,9 +154,7 @@ public TimeValue getTook() { @Override public void writeTo(StreamOutput out) throws IOException { out.writeArray(items); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - out.writeVLong(tookInMillis); - } + out.writeVLong(tookInMillis); } @Override diff --git a/modules/opensearch-dashboards/src/javaRestTest/java/org/opensearch/dashboards/OpenSearchDashboardsSystemIndexIT.java b/modules/opensearch-dashboards/src/javaRestTest/java/org/opensearch/dashboards/OpenSearchDashboardsSystemIndexIT.java index 2584a9b41f14d..10ee9393b343f 100644 --- a/modules/opensearch-dashboards/src/javaRestTest/java/org/opensearch/dashboards/OpenSearchDashboardsSystemIndexIT.java +++ b/modules/opensearch-dashboards/src/javaRestTest/java/org/opensearch/dashboards/OpenSearchDashboardsSystemIndexIT.java @@ -34,7 +34,10 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; -import org.apache.http.util.EntityUtils; + +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.io.entity.EntityUtils; +import org.opensearch.OpenSearchParseException; import org.opensearch.client.Request; import 
org.opensearch.client.Response; import org.opensearch.common.xcontent.XContentHelper; @@ -73,7 +76,7 @@ public void testCreateIndex() throws IOException { assertThat(response.getStatusLine().getStatusCode(), is(200)); } - public void testAliases() throws IOException { + public void testAliases() throws IOException, ParseException { assumeFalse("In this test, .opensearch_dashboards is the alias name", ".opensearch_dashboards".equals(indexName)); Request request = new Request("PUT", "/_opensearch_dashboards/" + indexName); Response response = client().performRequest(request); @@ -96,7 +99,7 @@ public void testBulkToOpenSearchDashboardsIndex() throws IOException { assertThat(response.getStatusLine().getStatusCode(), is(200)); } - public void testRefresh() throws IOException { + public void testRefresh() throws IOException, ParseException { Request request = new Request("POST", "/_opensearch_dashboards/_bulk"); request.setJsonEntity("{ \"index\" : { \"_index\" : \"" + indexName + "\", \"_id\" : \"1\" } }\n{ \"foo\" : \"bar\" }\n"); Response response = client().performRequest(request); @@ -114,7 +117,7 @@ public void testRefresh() throws IOException { assertThat(responseBody, containsString("bar")); } - public void testGetFromOpenSearchDashboardsIndex() throws IOException { + public void testGetFromOpenSearchDashboardsIndex() throws IOException, ParseException { Request request = new Request("POST", "/_opensearch_dashboards/_bulk"); request.setJsonEntity("{ \"index\" : { \"_index\" : \"" + indexName + "\", \"_id\" : \"1\" } }\n{ \"foo\" : \"bar\" }\n"); request.addParameter("refresh", "true"); @@ -130,7 +133,7 @@ public void testGetFromOpenSearchDashboardsIndex() throws IOException { assertThat(responseBody, containsString("bar")); } - public void testMultiGetFromOpenSearchDashboardsIndex() throws IOException { + public void testMultiGetFromOpenSearchDashboardsIndex() throws IOException, ParseException { Request request = new Request("POST", "/_opensearch_dashboards/_bulk"); request.setJsonEntity( "{ \"index\" : { \"_index\" : \"" @@ -163,7 +166,7 @@ public void testMultiGetFromOpenSearchDashboardsIndex() throws IOException { assertThat(responseBody, containsString("tag")); } - public void testSearchFromOpenSearchDashboardsIndex() throws IOException { + public void testSearchFromOpenSearchDashboardsIndex() throws IOException, ParseException { Request request = new Request("POST", "/_opensearch_dashboards/_bulk"); request.setJsonEntity( "{ \"index\" : { \"_index\" : \"" @@ -241,7 +244,7 @@ public void testUpdateIndexSettings() throws IOException { assertThat(response.getStatusLine().getStatusCode(), is(200)); } - public void testGetIndex() throws IOException { + public void testGetIndex() throws IOException, ParseException { Request request = new Request("PUT", "/_opensearch_dashboards/" + indexName); Response response = client().performRequest(request); assertThat(response.getStatusLine().getStatusCode(), is(200)); @@ -278,7 +281,7 @@ public void testIndexingAndUpdatingDocs() throws IOException { assertThat(response.getStatusLine().getStatusCode(), is(200)); } - public void testScrollingDocs() throws IOException { + public void testScrollingDocs() throws IOException, OpenSearchParseException, ParseException { Request request = new Request("POST", "/_opensearch_dashboards/_bulk"); request.setJsonEntity( "{ \"index\" : { \"_index\" : \"" diff --git a/modules/reindex/src/main/java/org/opensearch/index/reindex/AbstractAsyncBulkByScrollAction.java 
b/modules/reindex/src/main/java/org/opensearch/index/reindex/AbstractAsyncBulkByScrollAction.java index 43adffc6f7671..bb1a9d190313f 100644 --- a/modules/reindex/src/main/java/org/opensearch/index/reindex/AbstractAsyncBulkByScrollAction.java +++ b/modules/reindex/src/main/java/org/opensearch/index/reindex/AbstractAsyncBulkByScrollAction.java @@ -33,7 +33,8 @@ package org.opensearch.index.reindex; import java.util.Optional; -import org.apache.http.HttpRequestInterceptor; + +import org.apache.hc.core5.http.HttpRequestInterceptor; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.action.ActionListener; diff --git a/modules/reindex/src/main/java/org/opensearch/index/reindex/ReindexSslConfig.java b/modules/reindex/src/main/java/org/opensearch/index/reindex/ReindexSslConfig.java index 34fcd245289be..f8e9018bce6df 100644 --- a/modules/reindex/src/main/java/org/opensearch/index/reindex/ReindexSslConfig.java +++ b/modules/reindex/src/main/java/org/opensearch/index/reindex/ReindexSslConfig.java @@ -32,10 +32,10 @@ package org.opensearch.index.reindex; -import org.apache.http.conn.ssl.DefaultHostnameVerifier; -import org.apache.http.conn.ssl.NoopHostnameVerifier; -import org.apache.http.nio.conn.ssl.SSLIOSessionStrategy; -import org.opensearch.common.Strings; +import org.apache.hc.client5.http.ssl.ClientTlsStrategyBuilder; +import org.apache.hc.client5.http.ssl.DefaultHostnameVerifier; +import org.apache.hc.client5.http.ssl.NoopHostnameVerifier; +import org.apache.hc.core5.http.nio.ssl.TlsStrategy; import org.opensearch.common.settings.SecureSetting; import org.opensearch.common.settings.SecureString; import org.opensearch.common.settings.Setting; @@ -161,16 +161,24 @@ private void reload() { } /** - * Encapsulate the loaded SSL configuration as a HTTP-client {@link SSLIOSessionStrategy}. + * Encapsulate the loaded SSL configuration as a HTTP-client {@link TlsStrategy}. * The returned strategy is immutable, but successive calls will return different objects that may have different * configurations if the underlying key/certificate files are modified. */ - SSLIOSessionStrategy getStrategy() { + TlsStrategy getStrategy() { final HostnameVerifier hostnameVerifier = configuration.getVerificationMode().isHostnameVerificationEnabled() ? 
new DefaultHostnameVerifier() : new NoopHostnameVerifier(); - final String[] protocols = configuration.getSupportedProtocols().toArray(Strings.EMPTY_ARRAY); - final String[] cipherSuites = configuration.getCipherSuites().toArray(Strings.EMPTY_ARRAY); - return new SSLIOSessionStrategy(context, protocols, cipherSuites, hostnameVerifier); + + final String[] protocols = configuration.getSupportedProtocols().toArray(new String[0]); + final String[] cipherSuites = configuration.getCipherSuites().toArray(new String[0]); + + return ClientTlsStrategyBuilder.create() + .setSslContext(context) + .setHostnameVerifier(hostnameVerifier) + .setCiphers(cipherSuites) + .setTlsVersions(protocols) + .build(); + } } diff --git a/modules/reindex/src/main/java/org/opensearch/index/reindex/Reindexer.java b/modules/reindex/src/main/java/org/opensearch/index/reindex/Reindexer.java index 8ade055d10f60..aa9accbd90e21 100644 --- a/modules/reindex/src/main/java/org/opensearch/index/reindex/Reindexer.java +++ b/modules/reindex/src/main/java/org/opensearch/index/reindex/Reindexer.java @@ -33,15 +33,18 @@ package org.opensearch.index.reindex; import java.util.Optional; -import org.apache.http.Header; -import org.apache.http.HttpHost; -import org.apache.http.HttpRequestInterceptor; -import org.apache.http.auth.AuthScope; -import org.apache.http.auth.UsernamePasswordCredentials; -import org.apache.http.client.CredentialsProvider; -import org.apache.http.impl.client.BasicCredentialsProvider; -import org.apache.http.impl.nio.reactor.IOReactorConfig; -import org.apache.http.message.BasicHeader; + +import org.apache.hc.client5.http.auth.AuthScope; +import org.apache.hc.client5.http.auth.UsernamePasswordCredentials; +import org.apache.hc.client5.http.impl.auth.BasicCredentialsProvider; +import org.apache.hc.client5.http.impl.nio.PoolingAsyncClientConnectionManager; +import org.apache.hc.client5.http.impl.nio.PoolingAsyncClientConnectionManagerBuilder; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.HttpRequestInterceptor; +import org.apache.hc.core5.http.message.BasicHeader; +import org.apache.hc.core5.reactor.IOReactorConfig; +import org.apache.hc.core5.util.Timeout; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.action.ActionListener; @@ -202,21 +205,23 @@ static RestClient buildRestClient( for (Map.Entry header : remoteInfo.getHeaders().entrySet()) { clientHeaders[i++] = new BasicHeader(header.getKey(), header.getValue()); } - final RestClientBuilder builder = RestClient.builder( - new HttpHost(remoteInfo.getHost(), remoteInfo.getPort(), remoteInfo.getScheme()) - ).setDefaultHeaders(clientHeaders).setRequestConfigCallback(c -> { - c.setConnectTimeout(Math.toIntExact(remoteInfo.getConnectTimeout().millis())); - c.setSocketTimeout(Math.toIntExact(remoteInfo.getSocketTimeout().millis())); + final HttpHost httpHost = new HttpHost(remoteInfo.getScheme(), remoteInfo.getHost(), remoteInfo.getPort()); + final RestClientBuilder builder = RestClient.builder(httpHost).setDefaultHeaders(clientHeaders).setRequestConfigCallback(c -> { + c.setConnectTimeout(Timeout.ofMilliseconds(Math.toIntExact(remoteInfo.getConnectTimeout().millis()))); + c.setResponseTimeout(Timeout.ofMilliseconds(Math.toIntExact(remoteInfo.getSocketTimeout().millis()))); return c; }).setHttpClientConfigCallback(c -> { // Enable basic auth if it is configured if (remoteInfo.getUsername() != null) { - UsernamePasswordCredentials creds = new 
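Note: the `getStrategy()` rewrite above is the HttpClient 4.x to 5.x TLS mapping in isolation: the constructor arguments of the removed `SSLIOSessionStrategy` become builder calls on `ClientTlsStrategyBuilder`. A minimal sketch, assuming only the types visible in this hunk (class and method names are illustrative):

```java
import javax.net.ssl.HostnameVerifier;
import javax.net.ssl.SSLContext;

import org.apache.hc.client5.http.ssl.ClientTlsStrategyBuilder;
import org.apache.hc.core5.http.nio.ssl.TlsStrategy;

final class TlsStrategies {
    static TlsStrategy from(SSLContext context, String[] protocols, String[] ciphers, HostnameVerifier verifier) {
        return ClientTlsStrategyBuilder.create()
            .setSslContext(context)        // was: SSLIOSessionStrategy's first argument
            .setTlsVersions(protocols)     // was: supported protocols array
            .setCiphers(ciphers)           // was: cipher suites array
            .setHostnameVerifier(verifier) // was: hostname verifier argument
            .build();
    }
}
```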
UsernamePasswordCredentials(remoteInfo.getUsername(), remoteInfo.getPassword()); - CredentialsProvider credentialsProvider = new BasicCredentialsProvider(); - credentialsProvider.setCredentials(AuthScope.ANY, creds); + UsernamePasswordCredentials creds = new UsernamePasswordCredentials( + remoteInfo.getUsername(), + remoteInfo.getPassword().toCharArray() + ); + BasicCredentialsProvider credentialsProvider = new BasicCredentialsProvider(); + credentialsProvider.setCredentials(new AuthScope(httpHost, null, "Basic"), creds); c.setDefaultCredentialsProvider(credentialsProvider); } else { - restInterceptor.ifPresent(interceptor -> c.addInterceptorLast(interceptor)); + restInterceptor.ifPresent(interceptor -> c.addRequestInterceptorLast(interceptor)); } // Stick the task id in the thread name so we can track down tasks from stack traces AtomicInteger threads = new AtomicInteger(); @@ -227,8 +232,13 @@ static RestClient buildRestClient( return t; }); // Limit ourselves to one reactor thread because for now the search process is single threaded. - c.setDefaultIOReactorConfig(IOReactorConfig.custom().setIoThreadCount(1).build()); - c.setSSLStrategy(sslConfig.getStrategy()); + c.setIOReactorConfig(IOReactorConfig.custom().setIoThreadCount(1).build()); + + final PoolingAsyncClientConnectionManager connectionManager = PoolingAsyncClientConnectionManagerBuilder.create() + .setTlsStrategy(sslConfig.getStrategy()) + .build(); + + c.setConnectionManager(connectionManager); return c; }); if (Strings.hasLength(remoteInfo.getPathPrefix()) && "/".equals(remoteInfo.getPathPrefix()) == false) { diff --git a/modules/reindex/src/main/java/org/opensearch/index/reindex/remote/RemoteRequestBuilders.java b/modules/reindex/src/main/java/org/opensearch/index/reindex/remote/RemoteRequestBuilders.java index 8467fbdeacd0e..873bd7c3b48cb 100644 --- a/modules/reindex/src/main/java/org/opensearch/index/reindex/remote/RemoteRequestBuilders.java +++ b/modules/reindex/src/main/java/org/opensearch/index/reindex/remote/RemoteRequestBuilders.java @@ -32,8 +32,8 @@ package org.opensearch.index.reindex.remote; -import org.apache.http.entity.ContentType; -import org.apache.http.nio.entity.NStringEntity; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.io.entity.StringEntity; import org.opensearch.OpenSearchException; import org.opensearch.Version; import org.opensearch.action.search.SearchRequest; @@ -240,7 +240,7 @@ static Request scroll(String scroll, TimeValue keepAlive, Version remoteVersion) if (remoteVersion.before(Version.fromId(2000099))) { // Versions before 2.0.0 extract the plain scroll_id from the body - request.setEntity(new NStringEntity(scroll, ContentType.TEXT_PLAIN)); + request.setEntity(new StringEntity(scroll, ContentType.TEXT_PLAIN)); return request; } @@ -258,7 +258,7 @@ static Request clearScroll(String scroll, Version remoteVersion) { if (remoteVersion.before(Version.fromId(2000099))) { // Versions before 2.0.0 extract the plain scroll_id from the body - request.setEntity(new NStringEntity(scroll, ContentType.TEXT_PLAIN)); + request.setEntity(new StringEntity(scroll, ContentType.TEXT_PLAIN)); return request; } try (XContentBuilder entity = JsonXContent.contentBuilder()) { diff --git a/modules/reindex/src/main/java/org/opensearch/index/reindex/remote/RemoteScrollableHitSource.java b/modules/reindex/src/main/java/org/opensearch/index/reindex/remote/RemoteScrollableHitSource.java index be691243ecf84..3a943450a1a89 100644 --- 
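Note: two HttpClient 5.x authentication differences drive the `buildRestClient` changes around here: `AuthScope.ANY` no longer exists, so credentials are scoped to a concrete host, and `UsernamePasswordCredentials` now takes the password as `char[]`. A minimal sketch of the same wiring in isolation (helper and class names are illustrative):

```java
import org.apache.hc.client5.http.auth.AuthScope;
import org.apache.hc.client5.http.auth.UsernamePasswordCredentials;
import org.apache.hc.client5.http.impl.auth.BasicCredentialsProvider;
import org.apache.hc.core5.http.HttpHost;

final class BasicAuth {
    static BasicCredentialsProvider scopedTo(HttpHost host, String user, String password) {
        final BasicCredentialsProvider provider = new BasicCredentialsProvider();
        // Credentials are bound to the target host; a null realm means
        // "any realm", and "Basic" restricts them to that auth scheme.
        provider.setCredentials(
            new AuthScope(host, null, "Basic"),
            new UsernamePasswordCredentials(user, password.toCharArray()) // password is char[] in 5.x
        );
        return provider;
    }
}
```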
a/modules/reindex/src/main/java/org/opensearch/index/reindex/remote/RemoteScrollableHitSource.java +++ b/modules/reindex/src/main/java/org/opensearch/index/reindex/remote/RemoteScrollableHitSource.java @@ -32,10 +32,11 @@ package org.opensearch.index.reindex.remote; -import org.apache.http.ContentTooLongException; -import org.apache.http.HttpEntity; -import org.apache.http.entity.ContentType; -import org.apache.http.util.EntityUtils; +import org.apache.hc.core5.http.ContentTooLongException; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.io.entity.EntityUtils; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.logging.log4j.util.Supplier; @@ -199,7 +200,7 @@ public void onSuccess(org.opensearch.client.Response response) { InputStream content = responseEntity.getContent(); XContentType xContentType = null; if (responseEntity.getContentType() != null) { - final String mimeType = ContentType.parse(responseEntity.getContentType().getValue()).getMimeType(); + final String mimeType = ContentType.parse(responseEntity.getContentType()).getMimeType(); xContentType = XContentType.fromMediaType(mimeType); } if (xContentType == null) { @@ -284,7 +285,11 @@ private static String bodyMessage(@Nullable HttpEntity entity) throws IOExceptio if (entity == null) { return "No error body."; } else { - return "body=" + EntityUtils.toString(entity); + try { + return "body=" + EntityUtils.toString(entity); + } catch (final ParseException ex) { + throw new IOException(ex); + } } } } diff --git a/modules/reindex/src/main/java/org/opensearch/index/reindex/spi/ReindexRestInterceptorProvider.java b/modules/reindex/src/main/java/org/opensearch/index/reindex/spi/ReindexRestInterceptorProvider.java index 034981c969b4b..0646c9b5d8705 100644 --- a/modules/reindex/src/main/java/org/opensearch/index/reindex/spi/ReindexRestInterceptorProvider.java +++ b/modules/reindex/src/main/java/org/opensearch/index/reindex/spi/ReindexRestInterceptorProvider.java @@ -6,7 +6,8 @@ package org.opensearch.index.reindex.spi; import java.util.Optional; -import org.apache.http.HttpRequestInterceptor; + +import org.apache.hc.core5.http.HttpRequestInterceptor; import org.opensearch.common.util.concurrent.ThreadContext; import org.opensearch.index.reindex.ReindexRequest; diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/remote/RemoteRequestBuildersTests.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/remote/RemoteRequestBuildersTests.java index c349bc54bcbd9..e7af54a0563d3 100644 --- a/modules/reindex/src/test/java/org/opensearch/index/reindex/remote/RemoteRequestBuildersTests.java +++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/remote/RemoteRequestBuildersTests.java @@ -32,8 +32,8 @@ package org.opensearch.index.reindex.remote; -import org.apache.http.HttpEntity; -import org.apache.http.entity.ContentType; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.HttpEntity; import org.opensearch.Version; import org.opensearch.action.search.SearchRequest; import org.opensearch.client.Request; @@ -245,7 +245,7 @@ public void testInitialSearchEntity() throws IOException { searchRequest.source(new SearchSourceBuilder()); String query = "{\"match_all\":{}}"; HttpEntity entity = initialSearch(searchRequest, new BytesArray(query), remoteVersion).getEntity(); - 
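Note: in HttpCore 5, `HttpEntity#getContentType()` returns the raw header value as a `String` rather than a `Header` object, which is why the `.getValue()` calls in the assertions around here disappear and why `ContentType.parse` is now handed the string directly. A minimal sketch (class and method names are illustrative):

```java
import org.apache.hc.core5.http.ContentType;
import org.apache.hc.core5.http.HttpEntity;

final class MimeTypes {
    static String mimeTypeOf(HttpEntity entity) {
        final String contentType = entity.getContentType(); // 5.x: a String, not a Header
        return contentType == null ? null : ContentType.parse(contentType).getMimeType();
    }
}
```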
assertEquals(ContentType.APPLICATION_JSON.toString(), entity.getContentType().getValue()); + assertEquals(ContentType.APPLICATION_JSON.toString(), entity.getContentType()); if (remoteVersion.onOrAfter(Version.fromId(1000099))) { assertEquals( "{\"query\":" + query + ",\"_source\":true}", @@ -261,7 +261,7 @@ public void testInitialSearchEntity() throws IOException { // Source filtering is included if set up searchRequest.source().fetchSource(new String[] { "in1", "in2" }, new String[] { "out" }); entity = initialSearch(searchRequest, new BytesArray(query), remoteVersion).getEntity(); - assertEquals(ContentType.APPLICATION_JSON.toString(), entity.getContentType().getValue()); + assertEquals(ContentType.APPLICATION_JSON.toString(), entity.getContentType()); assertEquals( "{\"query\":" + query + ",\"_source\":{\"includes\":[\"in1\",\"in2\"],\"excludes\":[\"out\"]}}", Streams.copyToString(new InputStreamReader(entity.getContent(), StandardCharsets.UTF_8)) @@ -287,7 +287,7 @@ public void testScrollParams() { public void testScrollEntity() throws IOException { String scroll = randomAlphaOfLength(30); HttpEntity entity = scroll(scroll, timeValueMillis(between(1, 1000)), Version.fromString("5.0.0")).getEntity(); - assertEquals(ContentType.APPLICATION_JSON.toString(), entity.getContentType().getValue()); + assertEquals(ContentType.APPLICATION_JSON.toString(), entity.getContentType()); assertThat( Streams.copyToString(new InputStreamReader(entity.getContent(), StandardCharsets.UTF_8)), containsString("\"" + scroll + "\"") @@ -295,14 +295,14 @@ public void testScrollEntity() throws IOException { // Test with version < 2.0.0 entity = scroll(scroll, timeValueMillis(between(1, 1000)), Version.fromId(1070499)).getEntity(); - assertEquals(ContentType.TEXT_PLAIN.toString(), entity.getContentType().getValue()); + assertEquals(ContentType.TEXT_PLAIN.toString(), entity.getContentType()); assertEquals(scroll, Streams.copyToString(new InputStreamReader(entity.getContent(), StandardCharsets.UTF_8))); } public void testClearScroll() throws IOException { String scroll = randomAlphaOfLength(30); Request request = clearScroll(scroll, Version.fromString("5.0.0")); - assertEquals(ContentType.APPLICATION_JSON.toString(), request.getEntity().getContentType().getValue()); + assertEquals(ContentType.APPLICATION_JSON.toString(), request.getEntity().getContentType()); assertThat( Streams.copyToString(new InputStreamReader(request.getEntity().getContent(), StandardCharsets.UTF_8)), containsString("\"" + scroll + "\"") @@ -311,7 +311,7 @@ public void testClearScroll() throws IOException { // Test with version < 2.0.0 request = clearScroll(scroll, Version.fromId(1070499)); - assertEquals(ContentType.TEXT_PLAIN.toString(), request.getEntity().getContentType().getValue()); + assertEquals(ContentType.TEXT_PLAIN.toString(), request.getEntity().getContentType()); assertEquals(scroll, Streams.copyToString(new InputStreamReader(request.getEntity().getContent(), StandardCharsets.UTF_8))); assertThat(request.getParameters().keySet(), empty()); } diff --git a/modules/reindex/src/test/java/org/opensearch/index/reindex/remote/RemoteScrollableHitSourceTests.java b/modules/reindex/src/test/java/org/opensearch/index/reindex/remote/RemoteScrollableHitSourceTests.java index 337bc67796f8e..c0e2bd14f55bc 100644 --- a/modules/reindex/src/test/java/org/opensearch/index/reindex/remote/RemoteScrollableHitSourceTests.java +++ b/modules/reindex/src/test/java/org/opensearch/index/reindex/remote/RemoteScrollableHitSourceTests.java @@ -32,31 +32,14 @@ 
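Note: the test rewrite below drops the Mockito mock of `CloseableHttpAsyncClient` in favour of a hand-rolled subclass, because in 5.x the execution entry point is the protected generic `doExecute` method. The shape, reduced to a reusable skeleton (class name is illustrative; the no-op overrides mirror the anonymous classes in the hunks that follow):

```java
import java.io.IOException;
import java.util.concurrent.Future;

import org.apache.hc.client5.http.impl.async.CloseableHttpAsyncClient;
import org.apache.hc.core5.concurrent.FutureCallback;
import org.apache.hc.core5.function.Supplier;
import org.apache.hc.core5.http.HttpHost;
import org.apache.hc.core5.http.nio.AsyncPushConsumer;
import org.apache.hc.core5.http.nio.AsyncRequestProducer;
import org.apache.hc.core5.http.nio.AsyncResponseConsumer;
import org.apache.hc.core5.http.nio.HandlerFactory;
import org.apache.hc.core5.http.protocol.HttpContext;
import org.apache.hc.core5.io.CloseMode;
import org.apache.hc.core5.reactor.IOReactorStatus;
import org.apache.hc.core5.util.TimeValue;

// Everything except doExecute is a lifecycle no-op; a concrete test double
// overrides doExecute and completes or fails the FutureCallback directly.
abstract class StubAsyncClient extends CloseableHttpAsyncClient {
    @Override public void close() throws IOException {}
    @Override public void close(CloseMode closeMode) {}
    @Override public void start() {}
    @Override public IOReactorStatus getStatus() { return null; }
    @Override public void awaitShutdown(TimeValue waitTime) throws InterruptedException {}
    @Override public void initiateShutdown() {}
    @Override public void register(String hostname, String uriPattern, Supplier<AsyncPushConsumer> supplier) {}

    @Override
    protected abstract <T> Future<T> doExecute(
        HttpHost target,
        AsyncRequestProducer requestProducer,
        AsyncResponseConsumer<T> responseConsumer,
        HandlerFactory<AsyncPushConsumer> pushHandlerFactory,
        HttpContext context,
        FutureCallback<T> callback
    );
}
```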
package org.opensearch.index.reindex.remote; -import org.apache.http.ContentTooLongException; -import org.apache.http.HttpEntity; -import org.apache.http.HttpEntityEnclosingRequest; -import org.apache.http.HttpHost; -import org.apache.http.HttpResponse; -import org.apache.http.ProtocolVersion; -import org.apache.http.StatusLine; -import org.apache.http.client.protocol.HttpClientContext; -import org.apache.http.concurrent.FutureCallback; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.InputStreamEntity; -import org.apache.http.entity.StringEntity; -import org.apache.http.impl.nio.client.CloseableHttpAsyncClient; -import org.apache.http.impl.nio.client.HttpAsyncClientBuilder; -import org.apache.http.message.BasicHttpResponse; -import org.apache.http.message.BasicStatusLine; -import org.apache.http.nio.protocol.HttpAsyncRequestProducer; -import org.apache.http.nio.protocol.HttpAsyncResponseConsumer; import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchStatusException; import org.opensearch.Version; import org.opensearch.action.bulk.BackoffPolicy; import org.opensearch.action.search.SearchRequest; -import org.opensearch.client.HeapBufferedAsyncResponseConsumer; import org.opensearch.client.RestClient; +import org.opensearch.client.http.HttpUriRequestProducer; +import org.opensearch.client.nio.HeapBufferedAsyncResponseConsumer; import org.opensearch.common.ParsingException; import org.opensearch.common.bytes.BytesArray; import org.opensearch.common.io.FileSystemUtils; @@ -74,13 +57,32 @@ import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; +import org.apache.hc.client5.http.impl.async.CloseableHttpAsyncClient; +import org.apache.hc.client5.http.impl.async.HttpAsyncClientBuilder; +import org.apache.hc.core5.concurrent.FutureCallback; +import org.apache.hc.core5.function.Supplier; +import org.apache.hc.core5.http.ClassicHttpRequest; +import org.apache.hc.core5.http.ClassicHttpResponse; +import org.apache.hc.core5.http.ContentTooLongException; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.io.entity.InputStreamEntity; +import org.apache.hc.core5.http.io.entity.StringEntity; +import org.apache.hc.core5.http.message.BasicClassicHttpResponse; +import org.apache.hc.core5.http.nio.AsyncPushConsumer; +import org.apache.hc.core5.http.nio.AsyncRequestProducer; +import org.apache.hc.core5.http.nio.AsyncResponseConsumer; +import org.apache.hc.core5.http.nio.HandlerFactory; +import org.apache.hc.core5.http.protocol.HttpContext; +import org.apache.hc.core5.io.CloseMode; +import org.apache.hc.core5.reactor.IOReactorStatus; import org.junit.After; import org.junit.Before; -import org.mockito.invocation.InvocationOnMock; -import org.mockito.stubbing.Answer; import java.io.IOException; import java.io.InputStreamReader; +import java.io.UncheckedIOException; import java.net.URL; import java.nio.charset.StandardCharsets; import java.util.Queue; @@ -97,7 +99,6 @@ import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; -import static org.mockito.Mockito.any; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; @@ -444,24 +445,49 @@ public void testWrapExceptionToPreserveStatus() throws IOException { @SuppressWarnings({ 
"unchecked", "rawtypes" }) public void testTooLargeResponse() throws Exception { ContentTooLongException tooLong = new ContentTooLongException("too long!"); - CloseableHttpAsyncClient httpClient = mock(CloseableHttpAsyncClient.class); - when( - httpClient.execute( - any(HttpAsyncRequestProducer.class), - any(HttpAsyncResponseConsumer.class), - any(HttpClientContext.class), - any(FutureCallback.class) - ) - ).then(new Answer>() { + CloseableHttpAsyncClient httpClient = new CloseableHttpAsyncClient() { + + @Override + public void close() throws IOException {} + + @Override + public void close(CloseMode closeMode) {} + + @Override + public void start() {} + + @Override + public void register(String hostname, String uriPattern, Supplier supplier) {} + + @Override + public void initiateShutdown() {} + @Override - public Future answer(InvocationOnMock invocationOnMock) throws Throwable { - HeapBufferedAsyncResponseConsumer consumer = (HeapBufferedAsyncResponseConsumer) invocationOnMock.getArguments()[1]; - FutureCallback callback = (FutureCallback) invocationOnMock.getArguments()[3]; - assertEquals(new ByteSizeValue(100, ByteSizeUnit.MB).bytesAsInt(), consumer.getBufferLimit()); + public IOReactorStatus getStatus() { + return null; + } + + @Override + protected Future doExecute( + HttpHost target, + AsyncRequestProducer requestProducer, + AsyncResponseConsumer responseConsumer, + HandlerFactory pushHandlerFactory, + HttpContext context, + FutureCallback callback + ) { + assertEquals( + new ByteSizeValue(100, ByteSizeUnit.MB).bytesAsInt(), + ((HeapBufferedAsyncResponseConsumer) responseConsumer).getBufferLimit() + ); callback.failed(tooLong); return null; } - }); + + @Override + public void awaitShutdown(org.apache.hc.core5.util.TimeValue waitTime) throws InterruptedException {} + }; + RemoteScrollableHitSource source = sourceWithMockedClient(true, httpClient); Throwable e = expectThrows(RuntimeException.class, source::start); @@ -539,46 +565,68 @@ private RemoteScrollableHitSource sourceWithMockedRemoteCall(boolean mockRemoteV } } - CloseableHttpAsyncClient httpClient = mock(CloseableHttpAsyncClient.class); - when( - httpClient.execute( - any(HttpAsyncRequestProducer.class), - any(HttpAsyncResponseConsumer.class), - any(HttpClientContext.class), - any(FutureCallback.class) - ) - ).thenAnswer(new Answer>() { - + final CloseableHttpAsyncClient httpClient = new CloseableHttpAsyncClient() { int responseCount = 0; @Override - public Future answer(InvocationOnMock invocationOnMock) throws Throwable { - // Throw away the current thread context to simulate running async httpclient's thread pool - threadPool.getThreadContext().stashContext(); - HttpAsyncRequestProducer requestProducer = (HttpAsyncRequestProducer) invocationOnMock.getArguments()[0]; - FutureCallback futureCallback = (FutureCallback) invocationOnMock.getArguments()[3]; - HttpEntityEnclosingRequest request = (HttpEntityEnclosingRequest) requestProducer.generateRequest(); - URL resource = resources[responseCount]; - String path = paths[responseCount++]; - ProtocolVersion protocolVersion = new ProtocolVersion("http", 1, 1); - if (path.startsWith("fail:")) { - String body = Streams.copyToString(new InputStreamReader(request.getEntity().getContent(), StandardCharsets.UTF_8)); - if (path.equals("fail:rejection.json")) { - StatusLine statusLine = new BasicStatusLine(protocolVersion, RestStatus.TOO_MANY_REQUESTS.getStatus(), ""); - BasicHttpResponse httpResponse = new BasicHttpResponse(statusLine); - futureCallback.completed(httpResponse); + 
public void close(CloseMode closeMode) {} + + @Override + public void close() throws IOException {} + + @Override + public void start() {} + + @Override + public IOReactorStatus getStatus() { + return null; + } + + @Override + public void awaitShutdown(org.apache.hc.core5.util.TimeValue waitTime) throws InterruptedException {} + + @Override + public void initiateShutdown() {} + + @Override + protected Future doExecute( + HttpHost target, + AsyncRequestProducer requestProducer, + AsyncResponseConsumer responseConsumer, + HandlerFactory pushHandlerFactory, + HttpContext context, + FutureCallback callback + ) { + try { + // Throw away the current thread context to simulate running async httpclient's thread pool + threadPool.getThreadContext().stashContext(); + ClassicHttpRequest request = getRequest(requestProducer); + URL resource = resources[responseCount]; + String path = paths[responseCount++]; + if (path.startsWith("fail:")) { + String body = Streams.copyToString(new InputStreamReader(request.getEntity().getContent(), StandardCharsets.UTF_8)); + if (path.equals("fail:rejection.json")) { + ClassicHttpResponse httpResponse = new BasicClassicHttpResponse(RestStatus.TOO_MANY_REQUESTS.getStatus(), ""); + callback.completed((T) httpResponse); + } else { + callback.failed(new RuntimeException(body)); + } } else { - futureCallback.failed(new RuntimeException(body)); + BasicClassicHttpResponse httpResponse = new BasicClassicHttpResponse(200, ""); + httpResponse.setEntity(new InputStreamEntity(FileSystemUtils.openFileURLStream(resource), contentType)); + callback.completed((T) httpResponse); } - } else { - StatusLine statusLine = new BasicStatusLine(protocolVersion, 200, ""); - HttpResponse httpResponse = new BasicHttpResponse(statusLine); - httpResponse.setEntity(new InputStreamEntity(FileSystemUtils.openFileURLStream(resource), contentType)); - futureCallback.completed(httpResponse); + return null; + } catch (IOException ex) { + throw new UncheckedIOException(ex); } - return null; } - }); + + @Override + public void register(String hostname, String uriPattern, Supplier supplier) {} + + }; + return sourceWithMockedClient(mockRemoteVersion, httpClient); } @@ -649,4 +697,9 @@ private T expectListenerFailure(Class expectedExcept assertNotNull(exception.get()); return exception.get(); } + + private static ClassicHttpRequest getRequest(AsyncRequestProducer requestProducer) { + assertThat(requestProducer, instanceOf(HttpUriRequestProducer.class)); + return ((HttpUriRequestProducer) requestProducer).getRequest(); + } } diff --git a/modules/repository-url/src/yamlRestTest/java/org/opensearch/repositories/url/RepositoryURLClientYamlTestSuiteIT.java b/modules/repository-url/src/yamlRestTest/java/org/opensearch/repositories/url/RepositoryURLClientYamlTestSuiteIT.java index 3d0c09fb2288c..cbadcba5ef6f0 100644 --- a/modules/repository-url/src/yamlRestTest/java/org/opensearch/repositories/url/RepositoryURLClientYamlTestSuiteIT.java +++ b/modules/repository-url/src/yamlRestTest/java/org/opensearch/repositories/url/RepositoryURLClientYamlTestSuiteIT.java @@ -34,9 +34,6 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; -import org.apache.http.HttpEntity; -import org.apache.http.entity.ContentType; -import org.apache.http.nio.entity.NStringEntity; import org.opensearch.client.Request; import org.opensearch.client.Response; import org.opensearch.common.Strings; @@ -49,6 +46,9 @@ import org.opensearch.rest.RestStatus; import 
org.opensearch.test.rest.yaml.ClientYamlTestCandidate; import org.opensearch.test.rest.yaml.OpenSearchClientYamlSuiteTestCase; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.io.entity.StringEntity; import org.junit.Before; import java.io.IOException; @@ -144,7 +144,7 @@ private static HttpEntity buildRepositorySettings(final String type, final Setti builder.endObject(); } builder.endObject(); - return new NStringEntity(Strings.toString(builder), ContentType.APPLICATION_JSON); + return new StringEntity(Strings.toString(builder), ContentType.APPLICATION_JSON); } } } diff --git a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/rest/discovery/Zen2RestApiIT.java b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/rest/discovery/Zen2RestApiIT.java index 96e21e0e05ff7..fbac1f1c52e95 100644 --- a/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/rest/discovery/Zen2RestApiIT.java +++ b/modules/transport-netty4/src/internalClusterTest/java/org/opensearch/rest/discovery/Zen2RestApiIT.java @@ -32,7 +32,6 @@ package org.opensearch.rest.discovery; -import org.apache.http.HttpHost; import org.opensearch.OpenSearchNetty4IntegTestCase; import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; import org.opensearch.client.Client; @@ -49,9 +48,11 @@ import org.opensearch.http.HttpServerTransport; import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.InternalTestCluster; +import org.apache.hc.core5.http.HttpHost; import org.hamcrest.Matchers; import java.io.IOException; +import java.net.URISyntaxException; import java.util.Collections; import java.util.List; @@ -124,6 +125,8 @@ public Settings onNodeStopped(String nodeName) throws IOException { .get(); assertFalse(nodeName, clusterHealthResponse.isTimedOut()); return Settings.EMPTY; + } catch (final URISyntaxException ex) { + throw new IOException(ex); } finally { restClient.setNodes(allNodes); } diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index 8ca151d1e90db..6be815e4a0f46 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -44,7 +44,7 @@ opensearchplugin { } dependencies { - api 'com.azure:azure-core:1.31.0' + api 'com.azure:azure-core:1.33.0' api 'com.azure:azure-storage-common:12.18.1' api 'com.azure:azure-core-http-netty:1.12.4' api "io.netty:netty-codec-dns:${versions.netty}" diff --git a/plugins/repository-azure/licenses/azure-core-1.31.0.jar.sha1 b/plugins/repository-azure/licenses/azure-core-1.31.0.jar.sha1 deleted file mode 100644 index 6a5076b3da301..0000000000000 --- a/plugins/repository-azure/licenses/azure-core-1.31.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -39f18dae02237f90f1cd23b56701d7f9d9525531 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/azure-core-1.33.0.jar.sha1 b/plugins/repository-azure/licenses/azure-core-1.33.0.jar.sha1 new file mode 100644 index 0000000000000..9077fc4ebf84b --- /dev/null +++ b/plugins/repository-azure/licenses/azure-core-1.33.0.jar.sha1 @@ -0,0 +1 @@ +93f105c2e923f0ab90521cc0e6e729b9c8304ad8 \ No newline at end of file diff --git a/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureBlobContainerRetriesTests.java b/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureBlobContainerRetriesTests.java index e8417f9ceaf2c..1478f48f16650 100644 --- 
a/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureBlobContainerRetriesTests.java +++ b/plugins/repository-azure/src/test/java/org/opensearch/repositories/azure/AzureBlobContainerRetriesTests.java @@ -40,8 +40,6 @@ import fixture.azure.AzureHttpHandler; import reactor.core.scheduler.Schedulers; -import org.apache.http.HttpStatus; - import org.opensearch.cluster.metadata.RepositoryMetadata; import org.opensearch.common.Strings; import org.opensearch.common.SuppressForbidden; @@ -63,7 +61,7 @@ import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; - +import org.apache.hc.core5.http.HttpStatus; import org.junit.After; import org.junit.AfterClass; import org.junit.Before; diff --git a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageRetryingInputStream.java b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageRetryingInputStream.java index 72d3e37466d09..5448799e7f81b 100644 --- a/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageRetryingInputStream.java +++ b/plugins/repository-gcs/src/main/java/org/opensearch/repositories/gcs/GoogleCloudStorageRetryingInputStream.java @@ -42,7 +42,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.LegacyESVersion; import org.opensearch.SpecialPermission; import org.opensearch.common.SuppressForbidden; import org.opensearch.core.internal.io.IOUtils; @@ -61,7 +60,7 @@ /** * Wrapper around reads from GCS that will retry blob downloads that fail part-way through, resuming from where the failure occurred. * This should be handled by the SDK but it isn't today. This should be revisited in the future (e.g. before removing - * the {@link LegacyESVersion#V_7_0_0} version constant) and removed if the SDK handles retries itself in the future. + * the {@code LegacyESVersion#V_7_0_0} version constant) and removed if the SDK handles retries itself in the future. 
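Note: switching `{@link}` to `{@code}` in these javadocs is deliberate. A `{@link LegacyESVersion#V_7_0_0}` must resolve against a symbol that this patch series is deleting and would fail the javadoc build, while `{@code ...}` is unresolved monospace text, so the reminder survives the constant's removal. Roughly:

```java
/**
 * Revisit before removing the {@code LegacyESVersion#V_7_0_0} constant.
 * {@code ...} is never resolved by the javadoc tool, so this comment
 * outlives the constant; a {@link} reference to it would not.
 */
```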
*/ class GoogleCloudStorageRetryingInputStream extends InputStream { diff --git a/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobContainerRetriesTests.java b/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobContainerRetriesTests.java index 616a1ae9feb4f..6850b204e0112 100644 --- a/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobContainerRetriesTests.java +++ b/plugins/repository-gcs/src/test/java/org/opensearch/repositories/gcs/GoogleCloudStorageBlobContainerRetriesTests.java @@ -37,8 +37,8 @@ import com.google.cloud.storage.StorageOptions; import com.sun.net.httpserver.HttpHandler; import fixture.gcs.FakeOAuth2HttpHandler; -import org.apache.http.HttpStatus; +import org.apache.hc.core5.http.HttpStatus; import org.opensearch.common.Nullable; import org.opensearch.common.Strings; import org.opensearch.common.SuppressForbidden; diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index 6fd91f78a63e6..792bdc6bacd4a 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -65,7 +65,7 @@ dependencies { } api 'org.apache.htrace:htrace-core4:4.2.0-incubating' api "org.apache.logging.log4j:log4j-core:${versions.log4j}" - api 'org.apache.avro:avro:1.11.0' + api 'org.apache.avro:avro:1.11.1' api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" api 'com.google.code.gson:gson:2.9.0' runtimeOnly 'com.google.guava:guava:31.1-jre' @@ -85,7 +85,7 @@ dependencies { api 'net.minidev:json-smart:2.4.8' api 'org.apache.zookeeper:zookeeper:3.8.0' api "io.netty:netty-all:${versions.netty}" - implementation 'com.fasterxml.woodstox:woodstox-core:6.3.0' + implementation 'com.fasterxml.woodstox:woodstox-core:6.3.1' implementation 'org.codehaus.woodstox:stax2-api:4.2.1' hdfsFixture project(':test:fixtures:hdfs-fixture') diff --git a/plugins/repository-hdfs/licenses/avro-1.11.0.jar.sha1 b/plugins/repository-hdfs/licenses/avro-1.11.0.jar.sha1 deleted file mode 100644 index 9a0601879a1fc..0000000000000 --- a/plugins/repository-hdfs/licenses/avro-1.11.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2b0c58e5b450d4f4931456952ad9520cae9c896c \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/avro-1.11.1.jar.sha1 b/plugins/repository-hdfs/licenses/avro-1.11.1.jar.sha1 new file mode 100644 index 0000000000000..f03424516b44e --- /dev/null +++ b/plugins/repository-hdfs/licenses/avro-1.11.1.jar.sha1 @@ -0,0 +1 @@ +81af5d4b9bdaaf4ba41bcb0df5241355ec34c630 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/woodstox-core-6.3.0.jar.sha1 b/plugins/repository-hdfs/licenses/woodstox-core-6.3.0.jar.sha1 deleted file mode 100644 index ebd85df98b39e..0000000000000 --- a/plugins/repository-hdfs/licenses/woodstox-core-6.3.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -03c1df4164b107ee22ad4f24bd453ec78a0efd95 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/woodstox-core-6.3.1.jar.sha1 b/plugins/repository-hdfs/licenses/woodstox-core-6.3.1.jar.sha1 new file mode 100644 index 0000000000000..fb4e67404cd42 --- /dev/null +++ b/plugins/repository-hdfs/licenses/woodstox-core-6.3.1.jar.sha1 @@ -0,0 +1 @@ +bf29b07ca4dd81ef3c0bc18c8bd5617510a81c5d \ No newline at end of file diff --git a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RetryingInputStream.java 
b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RetryingInputStream.java index 388f5b8d74a2b..f751d63232f79 100644 --- a/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RetryingInputStream.java +++ b/plugins/repository-s3/src/main/java/org/opensearch/repositories/s3/S3RetryingInputStream.java @@ -40,7 +40,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.LegacyESVersion; import org.opensearch.core.internal.io.IOUtils; import java.io.IOException; @@ -52,7 +51,7 @@ /** * Wrapper around an S3 object that will retry the {@link GetObjectRequest} if the download fails part-way through, resuming from where * the failure occurred. This should be handled by the SDK but it isn't today. This should be revisited in the future (e.g. before removing - * the {@link LegacyESVersion#V_7_0_0} version constant) and removed when the SDK handles retries itself. + * the {@code LegacyESVersion#V_7_0_0} version constant) and removed when the SDK handles retries itself. * * See https://github.com/aws/aws-sdk-java/issues/856 for the related SDK issue */ diff --git a/qa/ccs-unavailable-clusters/src/test/java/org/opensearch/search/CrossClusterSearchUnavailableClusterIT.java b/qa/ccs-unavailable-clusters/src/test/java/org/opensearch/search/CrossClusterSearchUnavailableClusterIT.java index 5691154882c9f..ebed71d90df9a 100644 --- a/qa/ccs-unavailable-clusters/src/test/java/org/opensearch/search/CrossClusterSearchUnavailableClusterIT.java +++ b/qa/ccs-unavailable-clusters/src/test/java/org/opensearch/search/CrossClusterSearchUnavailableClusterIT.java @@ -32,9 +32,9 @@ package org.opensearch.search; -import org.apache.http.HttpEntity; -import org.apache.http.entity.ContentType; -import org.apache.http.nio.entity.NStringEntity; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.io.entity.StringEntity; import org.apache.lucene.search.TotalHits; import org.opensearch.OpenSearchException; import org.opensearch.Version; @@ -343,7 +343,7 @@ private static HttpEntity buildUpdateSettingsRequestBody(Map set builder.endObject(); requestBody = Strings.toString(builder); } - return new NStringEntity(requestBody, ContentType.APPLICATION_JSON); + return new StringEntity(requestBody, ContentType.APPLICATION_JSON); } private static class HighLevelClient extends RestHighLevelClient { diff --git a/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/FullClusterRestartIT.java index 714d8a252579f..8eb4bc3230b41 100644 --- a/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/FullClusterRestartIT.java @@ -32,7 +32,6 @@ package org.opensearch.upgrades; -import org.apache.http.util.EntityUtils; import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.client.Request; @@ -53,6 +52,8 @@ import org.opensearch.test.XContentTestUtils; import org.opensearch.test.rest.OpenSearchRestTestCase; import org.opensearch.test.rest.yaml.ObjectPath; +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.io.entity.EntityUtils; import org.junit.Before; import java.io.IOException; @@ -286,7 +287,7 @@ public void testClusterState() throws Exception { } - public void 
testShrink() throws IOException { + public void testShrink() throws IOException, NumberFormatException, ParseException { String shrunkenIndex = index + "_shrunk"; int numDocs; if (isRunningAgainstOldCluster()) { @@ -329,9 +330,6 @@ public void testShrink() throws IOException { client().performRequest(updateSettingsRequest); Request shrinkIndexRequest = new Request("PUT", "/" + index + "/_shrink/" + shrunkenIndex); - if (getOldClusterVersion().before(LegacyESVersion.V_7_0_0)) { - shrinkIndexRequest.addParameter("copy_settings", "true"); - } shrinkIndexRequest.setJsonEntity("{\"settings\": {\"index.number_of_shards\": 1}}"); client().performRequest(shrinkIndexRequest); @@ -359,7 +357,7 @@ public void testShrink() throws IOException { assertEquals(numDocs, totalHits); } - public void testShrinkAfterUpgrade() throws IOException { + public void testShrinkAfterUpgrade() throws IOException, ParseException { String shrunkenIndex = index + "_shrunk"; int numDocs; if (isRunningAgainstOldCluster()) { @@ -447,7 +445,7 @@ public void testShrinkAfterUpgrade() throws IOException { *
  • Make sure the document count is correct * */ - public void testRollover() throws IOException { + public void testRollover() throws IOException, ParseException { if (isRunningAgainstOldCluster()) { Request createIndex = new Request("PUT", "/" + index + "-000001"); createIndex.setJsonEntity("{" @@ -529,7 +527,7 @@ void assertBasicSearchWorks(int count) throws IOException { } } - void assertAllSearchWorks(int count) throws IOException { + void assertAllSearchWorks(int count) throws IOException, ParseException { logger.info("--> testing _all search"); Map response = entityAsMap(client().performRequest(new Request("GET", "/" + index + "/_search"))); assertNoFailures(response); @@ -626,14 +624,14 @@ void assertStoredBinaryFields(int count) throws Exception { } } - static String toStr(Response response) throws IOException { + static String toStr(Response response) throws IOException, ParseException { return EntityUtils.toString(response.getEntity()); } /** * Tests that a single document survives. Super basic smoke test. */ - public void testSingleDoc() throws IOException { + public void testSingleDoc() throws IOException, ParseException { String docLocation = "/" + index + "/" + type + "/1"; String doc = "{\"test\": \"test\"}"; @@ -795,7 +793,7 @@ public void testRecovery() throws Exception { * old and new versions. All of the snapshots include an index, a template, * and some routing configuration. */ - public void testSnapshotRestore() throws IOException { + public void testSnapshotRestore() throws IOException, ParseException { int count; if (isRunningAgainstOldCluster()) { // Create the index @@ -1006,12 +1004,8 @@ public void testClosedIndices() throws Exception { closeIndex(index); } - if (getOldClusterVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - ensureGreenLongWait(index); - assertClosedIndex(index, true); - } else { - assertClosedIndex(index, false); - } + ensureGreenLongWait(index); + assertClosedIndex(index, true); if (isRunningAgainstOldCluster() == false) { openIndex(index); @@ -1067,7 +1061,7 @@ private void assertClosedIndex(final String index, final boolean checkRoutingTab } } - private void checkSnapshot(final String snapshotName, final int count, final Version tookOnVersion) throws IOException { + private void checkSnapshot(final String snapshotName, final int count, final Version tookOnVersion) throws IOException, ParseException { // Check the snapshot metadata, especially the version Request listSnapshotRequest = new Request("GET", "/_snapshot/repo/" + snapshotName); Map listSnapshotResponse = entityAsMap(client().performRequest(listSnapshotRequest)); @@ -1186,7 +1180,7 @@ private void indexDocument(String id) throws IOException { assertOK(client().performRequest(indexRequest)); } - private int countOfIndexedRandomDocuments() throws IOException { + private int countOfIndexedRandomDocuments() throws IOException, NumberFormatException, ParseException { return Integer.parseInt(loadInfoDocument(index + "_count")); } @@ -1201,7 +1195,7 @@ private void saveInfoDocument(String id, String value) throws IOException { client().performRequest(request); } - private String loadInfoDocument(String id) throws IOException { + private String loadInfoDocument(String id) throws IOException, ParseException { Request request = new Request("GET", "/info/_doc/" + id); request.addParameter("filter_path", "_source"); String doc = toStr(client().performRequest(request)); @@ -1253,7 +1247,7 @@ public void testPeerRecoveryRetentionLeases() throws Exception { settings.startObject("settings"); 
settings.field("number_of_shards", between(1, 5)); settings.field("number_of_replicas", between(0, 1)); - if (randomBoolean() || getOldClusterVersion().before(LegacyESVersion.V_7_0_0)) { + if (randomBoolean()) { // this is the default after v7.0.0, but is required before that settings.field("soft_deletes.enabled", true); } @@ -1436,10 +1430,6 @@ public void testSystemIndexMetadataIsUpgraded() throws Exception { // make sure .tasks index exists Request getTasksIndex = new Request("GET", "/.tasks"); getTasksIndex.addParameter("allow_no_indices", "false"); - if (getOldClusterVersion().before(LegacyESVersion.V_7_0_0)) { - getTasksIndex.addParameter("include_type_name", "false"); - } - getTasksIndex.setOptions(expectVersionSpecificWarnings(v -> { v.current(systemIndexWarning); v.compatible(systemIndexWarning); diff --git a/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/QueryBuilderBWCIT.java b/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/QueryBuilderBWCIT.java index de042cb2b7634..44ed426e13782 100644 --- a/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/QueryBuilderBWCIT.java +++ b/qa/full-cluster-restart/src/test/java/org/opensearch/upgrades/QueryBuilderBWCIT.java @@ -32,7 +32,8 @@ package org.opensearch.upgrades; -import org.apache.http.util.EntityUtils; +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.io.entity.EntityUtils; import org.opensearch.LegacyESVersion; import org.opensearch.client.Request; import org.opensearch.client.Response; @@ -46,6 +47,7 @@ import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentHelper; import org.opensearch.common.xcontent.json.JsonXContent; +import org.opensearch.index.mapper.MapperService; import org.opensearch.index.query.BoolQueryBuilder; import org.opensearch.index.query.ConstantScoreQueryBuilder; import org.opensearch.index.query.DisMaxQueryBuilder; @@ -157,7 +159,7 @@ private static void addCandidate(String querySource, QueryBuilder expectedQb) { } public void testQueryBuilderBWC() throws Exception { - final String type = getOldClusterVersion().before(LegacyESVersion.V_7_0_0) ? 
"doc" : "_doc"; + final String type = MapperService.SINGLE_MAPPING_NAME; String index = "queries"; if (isRunningAgainstOldCluster()) { XContentBuilder mappingsAndSettings = jsonBuilder(); @@ -234,7 +236,7 @@ public void testQueryBuilderBWC() throws Exception { } } - private static Map toMap(Response response) throws IOException { + private static Map toMap(Response response) throws IOException, ParseException { return toMap(EntityUtils.toString(response.getEntity())); } diff --git a/qa/mixed-cluster/src/test/java/org/opensearch/backwards/ExceptionIT.java b/qa/mixed-cluster/src/test/java/org/opensearch/backwards/ExceptionIT.java index f85a94cc9f556..35f530f22a141 100644 --- a/qa/mixed-cluster/src/test/java/org/opensearch/backwards/ExceptionIT.java +++ b/qa/mixed-cluster/src/test/java/org/opensearch/backwards/ExceptionIT.java @@ -8,7 +8,9 @@ package org.opensearch.backwards; -import org.apache.http.util.EntityUtils; +import org.apache.hc.core5.http.HttpStatus; +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.io.entity.EntityUtils; import org.opensearch.Version; import org.opensearch.client.Node; import org.opensearch.client.Request; @@ -21,8 +23,6 @@ import java.util.Collections; import java.util.Map; -import static org.apache.http.HttpStatus.SC_NOT_FOUND; - public class ExceptionIT extends OpenSearchRestTestCase { public void testOpensearchException() throws Exception { logClusterNodes(); @@ -38,13 +38,13 @@ public void testOpensearchException() throws Exception { } catch (ResponseException e) { logger.debug(e.getMessage()); Response response = e.getResponse(); - assertEquals(SC_NOT_FOUND, response.getStatusLine().getStatusCode()); + assertEquals(HttpStatus.SC_NOT_FOUND, response.getStatusLine().getStatusCode()); assertEquals("no_such_index", ObjectPath.createFromResponse(response).evaluate("error.index")); } } } - private void logClusterNodes() throws IOException { + private void logClusterNodes() throws IOException, ParseException { ObjectPath objectPath = ObjectPath.createFromResponse(client().performRequest(new Request("GET", "_nodes"))); Map nodes = objectPath.evaluate("nodes"); // As of 2.0, 'GET _cat/master' API is deprecated to promote inclusive language. 
diff --git a/qa/mixed-cluster/src/test/java/org/opensearch/backwards/IndexingIT.java b/qa/mixed-cluster/src/test/java/org/opensearch/backwards/IndexingIT.java index 69c4f0110a3ff..4746ad35a9406 100644 --- a/qa/mixed-cluster/src/test/java/org/opensearch/backwards/IndexingIT.java +++ b/qa/mixed-cluster/src/test/java/org/opensearch/backwards/IndexingIT.java @@ -31,7 +31,7 @@ package org.opensearch.backwards; -import org.apache.http.HttpHost; +import org.apache.hc.core5.http.HttpHost; import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.client.Request; @@ -50,6 +50,7 @@ import org.opensearch.test.rest.yaml.ObjectPath; import java.io.IOException; +import java.net.URISyntaxException; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; @@ -416,7 +417,7 @@ private List buildShards(String index, Nodes nodes, RestClient client) th return shards; } - private Nodes buildNodeAndVersions() throws IOException { + private Nodes buildNodeAndVersions() throws IOException, URISyntaxException { Response response = client().performRequest(new Request("GET", "_nodes")); ObjectPath objectPath = ObjectPath.createFromResponse(response); Map nodesAsMap = objectPath.evaluate("nodes"); @@ -426,7 +427,7 @@ private Nodes buildNodeAndVersions() throws IOException { id, objectPath.evaluate("nodes." + id + ".name"), Version.fromString(objectPath.evaluate("nodes." + id + ".version")), - HttpHost.create(objectPath.evaluate("nodes." + id + ".http.publish_address")))); + HttpHost.create((String)objectPath.evaluate("nodes." + id + ".http.publish_address")))); } response = client().performRequest(new Request("GET", "_cluster/state")); nodes.setClusterManagerNodeId(ObjectPath.createFromResponse(response).evaluate("master_node")); diff --git a/qa/os/build.gradle b/qa/os/build.gradle index 92c5e4f154ad8..9a1e6f781faec 100644 --- a/qa/os/build.gradle +++ b/qa/os/build.gradle @@ -42,6 +42,7 @@ dependencies { api "org.apache.httpcomponents:fluent-hc:${versions.httpclient}" api "org.apache.logging.log4j:log4j-api:${versions.log4j}" api "org.apache.logging.log4j:log4j-core:${versions.log4j}" + api "org.apache.logging.log4j:log4j-jul:${versions.log4j}" api "org.apache.logging.log4j:log4j-jcl:${versions.log4j}" api "commons-codec:commons-codec:${versions.commonscodec}" api "commons-logging:commons-logging:${versions.commonslogging}" diff --git a/qa/remote-clusters/src/test/java/org/opensearch/cluster/remote/test/AbstractMultiClusterRemoteTestCase.java b/qa/remote-clusters/src/test/java/org/opensearch/cluster/remote/test/AbstractMultiClusterRemoteTestCase.java index 61ccbab95850d..5f73144501f94 100644 --- a/qa/remote-clusters/src/test/java/org/opensearch/cluster/remote/test/AbstractMultiClusterRemoteTestCase.java +++ b/qa/remote-clusters/src/test/java/org/opensearch/cluster/remote/test/AbstractMultiClusterRemoteTestCase.java @@ -31,7 +31,7 @@ package org.opensearch.cluster.remote.test; -import org.apache.http.HttpHost; +import org.apache.hc.core5.http.HttpHost; import org.junit.AfterClass; import org.junit.Before; import org.opensearch.action.admin.cluster.health.ClusterHealthRequest; @@ -104,8 +104,8 @@ private HighLevelClient(RestClient restClient) { private RestHighLevelClient buildClient(final String url) throws IOException { int portSeparator = url.lastIndexOf(':'); - HttpHost httpHost = new HttpHost(url.substring(0, portSeparator), - Integer.parseInt(url.substring(portSeparator + 1)), getProtocol()); + HttpHost httpHost = new HttpHost(getProtocol(), url.substring(0, 
portSeparator), + Integer.parseInt(url.substring(portSeparator + 1))); return new HighLevelClient(buildClient(restAdminSettings(), new HttpHost[]{httpHost})); } diff --git a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java index f34e5f7bc121a..ed4bf11041c88 100644 --- a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java +++ b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/IndexingIT.java @@ -31,7 +31,8 @@ package org.opensearch.upgrades; -import org.apache.http.util.EntityUtils; +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.io.entity.EntityUtils; import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.client.Request; @@ -61,7 +62,7 @@ */ public class IndexingIT extends AbstractRollingTestCase { - public void testIndexing() throws IOException { + public void testIndexing() throws IOException, ParseException { switch (CLUSTER_TYPE) { case OLD: break; @@ -181,18 +182,7 @@ public void testAutoIdWithOpTypeCreate() throws IOException { } } - if (minNodeVersion.before(LegacyESVersion.V_7_5_0)) { - ResponseException e = expectThrows(ResponseException.class, () -> client().performRequest(bulk)); - assertEquals(400, e.getResponse().getStatusLine().getStatusCode()); - assertThat(e.getMessage(), - // if request goes to 7.5+ node - either(containsString("optype create not supported for indexing requests without explicit id until")) - // if request goes to < 7.5 node - .or(containsString("an id must be provided if version type or value are set") - )); - } else { - client().performRequest(bulk); - } + client().performRequest(bulk); break; case UPGRADED: client().performRequest(bulk); @@ -214,7 +204,7 @@ private void bulk(String index, String valueSuffix, int count) throws IOExceptio client().performRequest(bulk); } - private void assertCount(String index, int count) throws IOException { + private void assertCount(String index, int count) throws IOException, ParseException { Request searchTestIndexRequest = new Request("POST", "/" + index + "/_search"); searchTestIndexRequest.addParameter(TOTAL_HITS_AS_INT_PARAM, "true"); searchTestIndexRequest.addParameter("filter_path", "hits.total"); diff --git a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/JodaCompatibilityIT.java b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/JodaCompatibilityIT.java deleted file mode 100644 index 0ef1e3a5050af..0000000000000 --- a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/JodaCompatibilityIT.java +++ /dev/null @@ -1,282 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.upgrades; - -import org.apache.http.HttpStatus; -import org.apache.http.util.EntityUtils; -import org.opensearch.LegacyESVersion; -import org.opensearch.client.Node; -import org.opensearch.client.Request; -import org.opensearch.client.RequestOptions; -import org.opensearch.client.Response; -import org.opensearch.client.WarningsHandler; -import org.opensearch.common.Booleans; -import org.opensearch.common.io.stream.StreamInput; -import org.opensearch.search.DocValueFormat; -import org.junit.BeforeClass; - -import java.io.IOException; -import java.nio.charset.StandardCharsets; -import java.util.Collections; -import java.util.List; -import java.util.function.Consumer; - -import static org.opensearch.rest.action.search.RestSearchAction.TOTAL_HITS_AS_INT_PARAM; - -/** - * This is test is meant to verify that when upgrading from 6.x version to 7.7 or newer it is able to parse date fields with joda pattern. - * - * The test is indexing documents and searches with use of joda or java pattern. - * In order to make sure that serialization logic is used a search call is executed 3 times (using all nodes). - * It cannot be guaranteed that serialization logic will always be used as it might happen that - * all shards are allocated on the same node and client is connecting to it. - * Because of this warnings assertions have to be ignored. - * - * A special flag used when serializing {@link DocValueFormat.DateTime#writeTo DocValueFormat.DateTime::writeTo} - * is used to indicate that an index was created in 6.x and has a joda pattern. The same flag is read when - * {@link DocValueFormat.DateTime#DateTime(StreamInput)} deserializing. - * When upgrading from 7.0-7.6 to 7.7 there is no way to tell if a pattern was created in 6.x as this flag cannot be added. - * Hence a skip assume section in init() - * - * @see org.opensearch.search.DocValueFormat.DateTime - */ -public class JodaCompatibilityIT extends AbstractRollingTestCase { - - @BeforeClass - public static void init(){ - assumeTrue("upgrading from 7.0-7.6 will fail parsing joda formats", - UPGRADE_FROM_VERSION.before(LegacyESVersion.V_7_0_0)); - } - - public void testJodaBackedDocValueAndDateFields() throws Exception { - switch (CLUSTER_TYPE) { - case OLD: - Request createTestIndex = indexWithDateField("joda_time", "YYYY-MM-dd'T'HH:mm:ssZZ"); - createTestIndex.setOptions(ignoreWarnings()); - - Response resp = client().performRequest(createTestIndex); - assertEquals(HttpStatus.SC_OK, resp.getStatusLine().getStatusCode()); - - postNewDoc("joda_time", 1); - - break; - case MIXED: - int minute = Booleans.parseBoolean(System.getProperty("tests.first_round")) ? 
2 : 3; - postNewDoc("joda_time", minute); - - Request search = dateRangeSearch("joda_time"); - search.setOptions(ignoreWarnings()); - - performOnAllNodes(search, r -> assertEquals(HttpStatus.SC_OK, r.getStatusLine().getStatusCode())); - break; - case UPGRADED: - postNewDoc("joda_time", 4); - - search = searchWithAgg("joda_time"); - search.setOptions(ignoreWarnings()); - //making sure all nodes were used for search - performOnAllNodes(search, r -> assertResponseHasAllDocuments(r)); - break; - } - } - - public void testJavaBackedDocValueAndDateFields() throws Exception { - switch (CLUSTER_TYPE) { - case OLD: - Request createTestIndex = indexWithDateField("java_time", "8yyyy-MM-dd'T'HH:mm:ssXXX"); - Response resp = client().performRequest(createTestIndex); - assertEquals(HttpStatus.SC_OK, resp.getStatusLine().getStatusCode()); - - postNewDoc("java_time", 1); - - break; - case MIXED: - int minute = Booleans.parseBoolean(System.getProperty("tests.first_round")) ? 2 : 3; - postNewDoc("java_time", minute); - - Request search = dateRangeSearch("java_time"); - Response searchResp = client().performRequest(search); - assertEquals(HttpStatus.SC_OK, searchResp.getStatusLine().getStatusCode()); - break; - case UPGRADED: - postNewDoc("java_time", 4); - - search = searchWithAgg("java_time"); - //making sure all nodes were used for search - performOnAllNodes(search, r -> assertResponseHasAllDocuments(r)); - - break; - } - } - - private RequestOptions ignoreWarnings() { - RequestOptions.Builder options = RequestOptions.DEFAULT.toBuilder(); - options.setWarningsHandler(WarningsHandler.PERMISSIVE); - return options.build(); - } - - private void performOnAllNodes(Request search, Consumer consumer) throws IOException { - List nodes = client().getNodes(); - for (Node node : nodes) { - client().setNodes(Collections.singletonList(node)); - Response response = client().performRequest(search); - consumer.accept(response); - assertEquals(HttpStatus.SC_OK, response.getStatusLine().getStatusCode()); - } - client().setNodes(nodes); - } - - private void assertResponseHasAllDocuments(Response searchResp) { - assertEquals(HttpStatus.SC_OK, searchResp.getStatusLine().getStatusCode()); - try { - assertEquals(removeWhiteSpace("{" + - " \"_shards\": {" + - " \"total\": 3," + - " \"successful\": 3" + - " },"+ - " \"hits\": {" + - " \"total\": 4," + - " \"hits\": [" + - " {" + - " \"_source\": {" + - " \"datetime\": \"2020-01-01T00:00:01+01:00\"" + - " }" + - " }," + - " {" + - " \"_source\": {" + - " \"datetime\": \"2020-01-01T00:00:02+01:00\"" + - " }" + - " }," + - " {" + - " \"_source\": {" + - " \"datetime\": \"2020-01-01T00:00:03+01:00\"" + - " }" + - " }," + - " {" + - " \"_source\": {" + - " \"datetime\": \"2020-01-01T00:00:04+01:00\"" + - " }" + - " }" + - " ]" + - " }" + - "}"), - EntityUtils.toString(searchResp.getEntity(), StandardCharsets.UTF_8)); - } catch (IOException e) { - throw new AssertionError("Exception during response parising", e); - } - } - - private String removeWhiteSpace(String input) { - return input.replaceAll("[\\n\\r\\t\\ ]", ""); - } - - private Request dateRangeSearch(String endpoint) { - Request search = new Request("GET", endpoint+"/_search"); - search.addParameter(TOTAL_HITS_AS_INT_PARAM, "true"); - search.addParameter("filter_path", "hits.total,hits.hits._source.datetime,_shards.total,_shards.successful"); - search.setJsonEntity("" + - "{\n" + - " \"track_total_hits\": true,\n" + - " \"sort\": \"datetime\",\n" + - " \"query\": {\n" + - " \"range\": {\n" + - " \"datetime\": {\n" + - " 
\"gte\": \"2020-01-01T00:00:00+01:00\",\n" + - " \"lte\": \"2020-01-02T00:00:00+01:00\"\n" + - " }\n" + - " }\n" + - " }\n" + - "}\n" - ); - return search; - } - - private Request searchWithAgg(String endpoint) throws IOException { - Request search = new Request("GET", endpoint+"/_search"); - search.addParameter(TOTAL_HITS_AS_INT_PARAM, "true"); - search.addParameter("filter_path", "hits.total,hits.hits._source.datetime,_shards.total,_shards.successful"); - - search.setJsonEntity("{\n" + - " \"track_total_hits\": true,\n" + - " \"sort\": \"datetime\",\n" + - " \"query\": {\n" + - " \"range\": {\n" + - " \"datetime\": {\n" + - " \"gte\": \"2020-01-01T00:00:00+01:00\",\n" + - " \"lte\": \"2020-01-02T00:00:00+01:00\"\n" + - " }\n" + - " }\n" + - " },\n" + - " \"aggs\" : {\n" + - " \"docs_per_year\" : {\n" + - " \"date_histogram\" : {\n" + - " \"field\" : \"date\",\n" + - " \"calendar_interval\" : \"year\"\n" + - " }\n" + - " }\n" + - " }\n" + - "}\n" - ); - return search; - } - private Request indexWithDateField(String indexName, String format) { - Request createTestIndex = new Request("PUT", indexName); - createTestIndex.addParameter("include_type_name", "false"); - createTestIndex.setJsonEntity("{\n" + - " \"settings\": {\n" + - " \"index.number_of_shards\": 3\n" + - " },\n" + - " \"mappings\": {\n" + - " \"properties\": {\n" + - " \"datetime\": {\n" + - " \"type\": \"date\",\n" + - " \"format\": \"" + format + "\"\n" + - " }\n" + - " }\n" + - " }\n" + - "}" - ); - return createTestIndex; - } - - private void postNewDoc(String endpoint, int minute) throws IOException { - Request putDoc = new Request("POST", endpoint+"/_doc"); - putDoc.addParameter("refresh", "true"); - putDoc.addParameter("wait_for_active_shards", "all"); - putDoc.setJsonEntity("{\n" + - " \"datetime\": \"2020-01-01T00:00:0" + minute + "+01:00\"\n" + - "}" - ); - Response resp = client().performRequest(putDoc); - assertEquals(HttpStatus.SC_CREATED, resp.getStatusLine().getStatusCode()); - } -} diff --git a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/MappingIT.java b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/MappingIT.java deleted file mode 100644 index 07b1d67fde7ff..0000000000000 --- a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/MappingIT.java +++ /dev/null @@ -1,70 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. 
- */ - -package org.opensearch.upgrades; - -import org.opensearch.LegacyESVersion; -import org.opensearch.client.Request; -import org.opensearch.client.Response; -import org.opensearch.common.xcontent.support.XContentMapValues; - -public class MappingIT extends AbstractRollingTestCase { - /** - * Create a mapping that explicitly disables the _all field (possible in 6x, see #37429) - * and check that it can be upgraded to 7x. - */ - public void testAllFieldDisable6x() throws Exception { - assumeTrue("_all", UPGRADE_FROM_VERSION.before(LegacyESVersion.V_7_0_0)); - switch (CLUSTER_TYPE) { - case OLD: - Request createTestIndex = new Request("PUT", "all-index"); - createTestIndex.addParameter("include_type_name", "false"); - createTestIndex.setJsonEntity( - "{ \"settings\": { \"index.number_of_shards\": 1 }, " + - "\"mappings\": {\"_all\": { \"enabled\": false }, \"properties\": { \"field\": { \"type\": \"text\" }}}}" - ); - createTestIndex.setOptions(expectWarnings("[_all] is deprecated in 6.0+ and will be removed in 7.0. As a replacement," + - " " + "you can use [copy_to] on mapping fields to create your own catch all field.")); - Response resp = client().performRequest(createTestIndex); - assertEquals(200, resp.getStatusLine().getStatusCode()); - break; - - default: - final Request request = new Request("GET", "all-index"); - Response response = client().performRequest(request); - assertEquals(200, response.getStatusLine().getStatusCode()); - Object enabled = XContentMapValues.extractValue("all-index.mappings._all.enabled", entityAsMap(response)); - assertNotNull(enabled); - assertEquals(false, enabled); - break; - } - } -} diff --git a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/RecoveryIT.java b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/RecoveryIT.java index cbf91fa9d71e7..4fd82af9603a9 100644 --- a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/RecoveryIT.java +++ b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/RecoveryIT.java @@ -31,7 +31,6 @@ package org.opensearch.upgrades; -import org.apache.http.util.EntityUtils; import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.action.support.PlainActionFuture; @@ -47,8 +46,10 @@ import org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.common.xcontent.support.XContentMapValues; import org.opensearch.index.IndexSettings; +import org.opensearch.index.mapper.MapperService; import org.opensearch.rest.RestStatus; import org.opensearch.test.rest.yaml.ObjectPath; +import org.apache.hc.core5.http.io.entity.EntityUtils; import org.hamcrest.Matcher; import org.hamcrest.Matchers; @@ -270,6 +271,7 @@ public void testRelocationWithConcurrentIndexing() throws Exception { updateIndexSettings(index, Settings.builder().put(INDEX_ROUTING_ALLOCATION_ENABLE_SETTING.getKey(), "none")); break; case MIXED: + // todo: verify this test can be removed in 3.0.0 final String newNode = getNodeId(v -> v.equals(Version.CURRENT)); final String oldNode = getNodeId(v -> v.before(Version.CURRENT)); // remove the replica and guaranteed the primary is placed on the old node @@ -348,11 +350,7 @@ public void testRecovery() throws Exception { if (randomBoolean()) { indexDocs(index, i, 1); // update } else if (randomBoolean()) { - if (getNodeId(v -> v.onOrAfter(LegacyESVersion.V_7_0_0)) == null) { - client().performRequest(new Request("DELETE", index + "/test/" + i)); - } else { - client().performRequest(new Request("DELETE", index + "/_doc/" + i)); - } + 
client().performRequest(new Request("DELETE", index + "/" + MapperService.SINGLE_MAPPING_NAME + "/" + i)); } } } @@ -458,15 +456,10 @@ public void testRecoveryClosedIndex() throws Exception { closeIndex(indexName); } - final Version indexVersionCreated = indexVersionCreated(indexName); - if (indexVersionCreated.onOrAfter(LegacyESVersion.V_7_2_0)) { - // index was created on a version that supports the replication of closed indices, - // so we expect the index to be closed and replicated - ensureGreen(indexName); - assertClosedIndex(indexName, true); - } else { - assertClosedIndex(indexName, false); - } + // index was created on a version that supports the replication of closed indices, + // so we expect the index to be closed and replicated + ensureGreen(indexName); + assertClosedIndex(indexName, true); } /** @@ -492,14 +485,10 @@ public void testCloseIndexDuringRollingUpgrade() throws Exception { closeIndex(indexName); } - if (minimumNodeVersion.onOrAfter(LegacyESVersion.V_7_2_0)) { - // index is created on a version that supports the replication of closed indices, - // so we expect the index to be closed and replicated - ensureGreen(indexName); - assertClosedIndex(indexName, true); - } else { - assertClosedIndex(indexName, false); - } + // index is created on a version that supports the replication of closed indices, + // so we expect the index to be closed and replicated + ensureGreen(indexName); + assertClosedIndex(indexName, true); } /** @@ -526,27 +515,20 @@ public void testClosedIndexNoopRecovery() throws Exception { closeIndex(indexName); } - final Version indexVersionCreated = indexVersionCreated(indexName); - if (indexVersionCreated.onOrAfter(LegacyESVersion.V_7_2_0)) { - // index was created on a version that supports the replication of closed indices, - // so we expect the index to be closed and replicated - ensureGreen(indexName); - assertClosedIndex(indexName, true); - if (minimumNodeVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - switch (CLUSTER_TYPE) { - case OLD: break; - case MIXED: - assertNoopRecoveries(indexName, s -> s.startsWith(CLUSTER_NAME + "-0")); - break; - case UPGRADED: - assertNoopRecoveries(indexName, s -> s.startsWith(CLUSTER_NAME)); - break; - } - } - } else { - assertClosedIndex(indexName, false); - } + // index was created on a version that supports the replication of closed indices, + // so we expect the index to be closed and replicated + ensureGreen(indexName); + assertClosedIndex(indexName, true); + switch (CLUSTER_TYPE) { + case OLD: break; + case MIXED: + assertNoopRecoveries(indexName, s -> s.startsWith(CLUSTER_NAME + "-0")); + break; + case UPGRADED: + assertNoopRecoveries(indexName, s -> s.startsWith(CLUSTER_NAME)); + break; + } } /** * Returns the version in which the given index has been created diff --git a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/SystemIndicesUpgradeIT.java b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/SystemIndicesUpgradeIT.java index c50af0084b000..634dc0628f27a 100644 --- a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/SystemIndicesUpgradeIT.java +++ b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/SystemIndicesUpgradeIT.java @@ -59,13 +59,8 @@ public void testSystemIndicesUpgrades() throws Exception { Request bulk = new Request("POST", "/_bulk"); bulk.addParameter("refresh", "true"); - if (UPGRADE_FROM_VERSION.before(LegacyESVersion.V_7_0_0)) { - bulk.setJsonEntity("{\"index\": {\"_index\": \"test_index_old\", \"_type\" : \"_doc\"}}\n" + - "{\"f1\": \"v1\", \"f2\": 
\"v2\"}\n"); - } else { - bulk.setJsonEntity("{\"index\": {\"_index\": \"test_index_old\"}\n" + - "{\"f1\": \"v1\", \"f2\": \"v2\"}\n"); - } + bulk.setJsonEntity("{\"index\": {\"_index\": \"test_index_old\"}\n" + + "{\"f1\": \"v1\", \"f2\": \"v2\"}\n"); client().performRequest(bulk); // start a async reindex job @@ -91,10 +86,6 @@ public void testSystemIndicesUpgrades() throws Exception { // make sure .tasks index exists Request getTasksIndex = new Request("GET", "/.tasks"); getTasksIndex.addParameter("allow_no_indices", "false"); - if (UPGRADE_FROM_VERSION.before(LegacyESVersion.V_7_0_0)) { - getTasksIndex.addParameter("include_type_name", "false"); - } - getTasksIndex.setOptions(expectVersionSpecificWarnings(v -> { v.current(systemIndexWarning); v.compatible(systemIndexWarning); diff --git a/qa/smoke-test-http/src/test/java/org/opensearch/http/DetailedErrorsDisabledIT.java b/qa/smoke-test-http/src/test/java/org/opensearch/http/DetailedErrorsDisabledIT.java index 6178167c98e98..0c845bb2d34e5 100644 --- a/qa/smoke-test-http/src/test/java/org/opensearch/http/DetailedErrorsDisabledIT.java +++ b/qa/smoke-test-http/src/test/java/org/opensearch/http/DetailedErrorsDisabledIT.java @@ -34,7 +34,8 @@ import java.io.IOException; -import org.apache.http.util.EntityUtils; +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.io.entity.EntityUtils; import org.opensearch.client.Request; import org.opensearch.client.Response; import org.opensearch.client.ResponseException; @@ -60,7 +61,7 @@ protected Settings nodeSettings(int nodeOrdinal) { .build(); } - public void testThatErrorTraceParamReturns400() throws IOException { + public void testThatErrorTraceParamReturns400() throws IOException, ParseException { Request request = new Request("DELETE", "/"); request.addParameter("error_trace", "true"); ResponseException e = expectThrows(ResponseException.class, () -> diff --git a/qa/smoke-test-http/src/test/java/org/opensearch/http/DetailedErrorsEnabledIT.java b/qa/smoke-test-http/src/test/java/org/opensearch/http/DetailedErrorsEnabledIT.java index 090a572ef0d6a..e2ccf86d31dbf 100644 --- a/qa/smoke-test-http/src/test/java/org/opensearch/http/DetailedErrorsEnabledIT.java +++ b/qa/smoke-test-http/src/test/java/org/opensearch/http/DetailedErrorsEnabledIT.java @@ -32,7 +32,8 @@ package org.opensearch.http; -import org.apache.http.util.EntityUtils; +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.io.entity.EntityUtils; import org.opensearch.client.Request; import org.opensearch.client.Response; import org.opensearch.client.ResponseException; @@ -47,7 +48,7 @@ */ public class DetailedErrorsEnabledIT extends HttpSmokeTestCase { - public void testThatErrorTraceWorksByDefault() throws IOException { + public void testThatErrorTraceWorksByDefault() throws IOException, ParseException { try { Request request = new Request("DELETE", "/"); request.addParameter("error_trace", "true"); diff --git a/qa/smoke-test-http/src/test/java/org/opensearch/http/HttpCompressionIT.java b/qa/smoke-test-http/src/test/java/org/opensearch/http/HttpCompressionIT.java index 1925ecc5cd346..5514fae996a39 100644 --- a/qa/smoke-test-http/src/test/java/org/opensearch/http/HttpCompressionIT.java +++ b/qa/smoke-test-http/src/test/java/org/opensearch/http/HttpCompressionIT.java @@ -31,9 +31,10 @@ package org.opensearch.http; -import org.apache.http.HttpHeaders; -import org.apache.http.client.entity.GzipDecompressingEntity; -import org.apache.http.util.EntityUtils; +import 
org.apache.hc.client5.http.entity.GzipDecompressingEntity; +import org.apache.hc.core5.http.HttpHeaders; +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.io.entity.EntityUtils; import org.opensearch.client.Request; import org.opensearch.client.RequestOptions; import org.opensearch.client.Response; @@ -56,7 +57,7 @@ public class HttpCompressionIT extends OpenSearchRestTestCase { " }\n" + "}"; - public void testCompressesResponseIfRequested() throws IOException { + public void testCompressesResponseIfRequested() throws IOException, ParseException { Request request = new Request("POST", "/company/_doc/2"); request.setJsonEntity(SAMPLE_DOCUMENT); Response response = client().performRequest(request); diff --git a/qa/smoke-test-http/src/test/java/org/opensearch/http/NoHandlerIT.java b/qa/smoke-test-http/src/test/java/org/opensearch/http/NoHandlerIT.java index c3d766abe96ca..8e6dea7edd0f8 100644 --- a/qa/smoke-test-http/src/test/java/org/opensearch/http/NoHandlerIT.java +++ b/qa/smoke-test-http/src/test/java/org/opensearch/http/NoHandlerIT.java @@ -32,7 +32,8 @@ package org.opensearch.http; -import org.apache.http.util.EntityUtils; +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.io.entity.EntityUtils; import org.opensearch.client.Request; import org.opensearch.client.RequestOptions; import org.opensearch.client.Response; @@ -46,7 +47,7 @@ public class NoHandlerIT extends HttpSmokeTestCase { - public void testNoHandlerRespectsAcceptHeader() throws IOException { + public void testNoHandlerRespectsAcceptHeader() throws IOException, ParseException { runTestNoHandlerRespectsAcceptHeader( "application/json", "application/json; charset=UTF-8", @@ -58,7 +59,7 @@ public void testNoHandlerRespectsAcceptHeader() throws IOException { } private void runTestNoHandlerRespectsAcceptHeader( - final String accept, final String contentType, final String expect) throws IOException { + final String accept, final String contentType, final String expect) throws IOException, ParseException { Request request = new Request("GET", "/foo/bar/baz/qux/quux"); RequestOptions.Builder options = request.getOptions().toBuilder(); options.addHeader("Accept", accept); diff --git a/qa/smoke-test-http/src/test/java/org/opensearch/http/RestHttpResponseHeadersIT.java b/qa/smoke-test-http/src/test/java/org/opensearch/http/RestHttpResponseHeadersIT.java index b8257272ba65b..74b85ace37b81 100644 --- a/qa/smoke-test-http/src/test/java/org/opensearch/http/RestHttpResponseHeadersIT.java +++ b/qa/smoke-test-http/src/test/java/org/opensearch/http/RestHttpResponseHeadersIT.java @@ -30,7 +30,7 @@ package org.opensearch.http; -import org.apache.http.util.EntityUtils; +import org.apache.hc.core5.http.io.entity.EntityUtils; import org.opensearch.client.Request; import org.opensearch.client.Response; import org.opensearch.client.ResponseException; diff --git a/qa/smoke-test-http/src/test/java/org/opensearch/http/SearchRestCancellationIT.java b/qa/smoke-test-http/src/test/java/org/opensearch/http/SearchRestCancellationIT.java index a13d406f7b133..42c7357de3f07 100644 --- a/qa/smoke-test-http/src/test/java/org/opensearch/http/SearchRestCancellationIT.java +++ b/qa/smoke-test-http/src/test/java/org/opensearch/http/SearchRestCancellationIT.java @@ -31,8 +31,8 @@ package org.opensearch.http; -import org.apache.http.entity.ContentType; -import org.apache.http.nio.entity.NByteArrayEntity; +import org.apache.hc.core5.http.ContentType; +import 
org.apache.hc.core5.http.io.entity.ByteArrayEntity; import org.apache.logging.log4j.LogManager; import org.apache.lucene.util.SetOnce; import org.opensearch.action.admin.cluster.node.info.NodeInfo; @@ -109,7 +109,7 @@ public void testAutomaticCancellationMultiSearchDuringQueryPhase() throws Except new Script(ScriptType.INLINE, "mockscript", ScriptedBlockPlugin.SCRIPT_NAME, Collections.emptyMap())))); Request restRequest = new Request("POST", "/_msearch"); byte[] requestBody = MultiSearchRequest.writeMultiLineFormat(multiSearchRequest, contentType.xContent()); - restRequest.setEntity(new NByteArrayEntity(requestBody, createContentType(contentType))); + restRequest.setEntity(new ByteArrayEntity(requestBody, createContentType(contentType))); verifyCancellationDuringQueryPhase(MultiSearchAction.NAME, restRequest); } @@ -158,7 +158,7 @@ public void testAutomaticCancellationMultiSearchDuringFetchPhase() throws Except new Script(ScriptType.INLINE, "mockscript", ScriptedBlockPlugin.SCRIPT_NAME, Collections.emptyMap())))); Request restRequest = new Request("POST", "/_msearch"); byte[] requestBody = MultiSearchRequest.writeMultiLineFormat(multiSearchRequest, contentType.xContent()); - restRequest.setEntity(new NByteArrayEntity(requestBody, createContentType(contentType))); + restRequest.setEntity(new ByteArrayEntity(requestBody, createContentType(contentType))); verifyCancellationDuringFetchPhase(MultiSearchAction.NAME, restRequest); } diff --git a/qa/wildfly/src/main/java/org/opensearch/wildfly/transport/RestHighLevelClientProducer.java b/qa/wildfly/src/main/java/org/opensearch/wildfly/transport/RestHighLevelClientProducer.java index f85c3efcbb6e8..2b1abe45f7723 100644 --- a/qa/wildfly/src/main/java/org/opensearch/wildfly/transport/RestHighLevelClientProducer.java +++ b/qa/wildfly/src/main/java/org/opensearch/wildfly/transport/RestHighLevelClientProducer.java @@ -32,20 +32,22 @@ package org.opensearch.wildfly.transport; -import org.apache.http.HttpHost; +import org.apache.hc.core5.http.HttpHost; import org.opensearch.client.RestClient; import org.opensearch.client.RestHighLevelClient; import org.opensearch.common.SuppressForbidden; import org.opensearch.common.io.PathUtils; import javax.enterprise.inject.Produces; + +import java.net.URISyntaxException; import java.nio.file.Path; @SuppressWarnings("unused") public final class RestHighLevelClientProducer { @Produces - public RestHighLevelClient createRestHighLevelClient() { + public RestHighLevelClient createRestHighLevelClient() throws URISyntaxException { String httpUri = System.getProperty("opensearch.uri"); return new RestHighLevelClient(RestClient.builder(HttpHost.create(httpUri))); diff --git a/qa/wildfly/src/test/java/org/opensearch/wildfly/WildflyIT.java b/qa/wildfly/src/test/java/org/opensearch/wildfly/WildflyIT.java index 7961ca69c2d29..2f2b355baedaf 100644 --- a/qa/wildfly/src/test/java/org/opensearch/wildfly/WildflyIT.java +++ b/qa/wildfly/src/test/java/org/opensearch/wildfly/WildflyIT.java @@ -32,14 +32,15 @@ package org.opensearch.wildfly; -import org.apache.http.client.methods.CloseableHttpResponse; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.client.methods.HttpPut; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; -import org.apache.http.impl.client.CloseableHttpClient; -import org.apache.http.impl.client.HttpClientBuilder; -import org.apache.http.util.EntityUtils; +import org.apache.hc.client5.http.classic.methods.HttpGet; +import 
org.apache.hc.client5.http.classic.methods.HttpPut; +import org.apache.hc.client5.http.impl.classic.CloseableHttpClient; +import org.apache.hc.client5.http.impl.classic.CloseableHttpResponse; +import org.apache.hc.client5.http.impl.classic.HttpClientBuilder; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.io.entity.EntityUtils; +import org.apache.hc.core5.http.io.entity.StringEntity; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.tests.util.LuceneTestCase; @@ -78,7 +79,7 @@ private String buildBaseUrl() { return "http://localhost:" + port + "/example-app/transport"; } - public void testRestClient() throws URISyntaxException, IOException { + public void testRestClient() throws URISyntaxException, IOException, ParseException { final String baseUrl = buildBaseUrl(); try (CloseableHttpClient client = HttpClientBuilder.create().build()) { @@ -100,7 +101,7 @@ public void testRestClient() throws URISyntaxException, IOException { put.setEntity(new StringEntity(body, ContentType.APPLICATION_JSON)); try (CloseableHttpResponse response = client.execute(put)) { - int status = response.getStatusLine().getStatusCode(); + int status = response.getCode(); assertThat( "expected a 201 response but got: " + status + " - body: " + EntityUtils.toString(response.getEntity()), status, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.delete_decommission_awareness.json b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.delete_decommission_awareness.json new file mode 100644 index 0000000000000..13ea101169e60 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/cluster.delete_decommission_awareness.json @@ -0,0 +1,19 @@ +{ + "cluster.delete_decommission_awareness": { + "documentation": { + "url": "https://opensearch.org/docs/latest/opensearch/rest-api/decommission/", + "description": "Delete any existing decommission." + }, + "stability": "experimental", + "url": { + "paths": [ + { + "path": "/_cluster/decommission/awareness/", + "methods": [ + "DELETE" + ] + } + ] + } + } +} diff --git a/server/build.gradle b/server/build.gradle index 9d9d12e798eab..d50be48afc023 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -129,6 +129,7 @@ dependencies { // logging api "org.apache.logging.log4j:log4j-api:${versions.log4j}" + api "org.apache.logging.log4j:log4j-jul:${versions.log4j}" api "org.apache.logging.log4j:log4j-core:${versions.log4j}", optional // jna diff --git a/server/licenses/log4j-jul-2.17.1.jar.sha1 b/server/licenses/log4j-jul-2.17.1.jar.sha1 new file mode 100644 index 0000000000000..4afb381a696e9 --- /dev/null +++ b/server/licenses/log4j-jul-2.17.1.jar.sha1 @@ -0,0 +1 @@ +881333b463d47828eda7443b19811763367b1916 \ No newline at end of file diff --git a/client/rest/licenses/httpcore-nio-LICENSE.txt b/server/licenses/log4j-jul-LICENSE.txt similarity index 100% rename from client/rest/licenses/httpcore-nio-LICENSE.txt rename to server/licenses/log4j-jul-LICENSE.txt diff --git a/server/licenses/log4j-jul-NOTICE.txt b/server/licenses/log4j-jul-NOTICE.txt new file mode 100644 index 0000000000000..243a0391fb574 --- /dev/null +++ b/server/licenses/log4j-jul-NOTICE.txt @@ -0,0 +1,20 @@ +Apache Log4j +Copyright 1999-2021 Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). 
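The recurring mechanical change in the test hunks above and below is the Apache HttpClient 4 to 5 migration: EntityUtils moves to org.apache.hc.core5 and its toString now declares ParseException, NByteArrayEntity becomes ByteArrayEntity, and status codes are read with response.getCode(). A condensed sketch of the 5.x call shape (the fetch helper is invented; the client calls are the HttpClient 5.x API used in this diff):

import java.io.IOException;

import org.apache.hc.client5.http.classic.methods.HttpGet;
import org.apache.hc.client5.http.impl.classic.CloseableHttpClient;
import org.apache.hc.client5.http.impl.classic.CloseableHttpResponse;
import org.apache.hc.client5.http.impl.classic.HttpClientBuilder;
import org.apache.hc.core5.http.ParseException;
import org.apache.hc.core5.http.io.entity.EntityUtils;

final class HttpClient5Sketch {
    static String fetch(String url) throws IOException, ParseException {
        try (CloseableHttpClient client = HttpClientBuilder.create().build();
             CloseableHttpResponse response = client.execute(new HttpGet(url))) {
            // HttpClient 4.x: response.getStatusLine().getStatusCode()
            int status = response.getCode();
            if (status != 200) {
                throw new IOException("unexpected status: " + status);
            }
            // EntityUtils.toString declares ParseException in core5
            return EntityUtils.toString(response.getEntity());
        }
    }
}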
+ +ResolverUtil.java +Copyright 2005-2006 Tim Fennell + +Dumbster SMTP test server +Copyright 2004 Jason Paul Kitchen + +TypeUtil.java +Copyright 2002-2012 Ramnivas Laddad, Juergen Hoeller, Chris Beams + +picocli (http://picocli.info) +Copyright 2017 Remko Popma + +TimeoutBlockingWaitStrategy.java and parts of Util.java +Copyright 2011 LMAX Ltd. diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIT.java index a44cf05a4bdc4..11d1af608fbee 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/cluster/stats/ClusterStatsIT.java @@ -89,6 +89,7 @@ public void testNodeCounts() { expectedCounts.put(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE.roleName(), 1); expectedCounts.put(DiscoveryNodeRole.INGEST_ROLE.roleName(), 1); expectedCounts.put(DiscoveryNodeRole.REMOTE_CLUSTER_CLIENT_ROLE.roleName(), 1); + expectedCounts.put(DiscoveryNodeRole.SEARCH_ROLE.roleName(), 0); expectedCounts.put(ClusterStatsNodes.Counts.COORDINATING_ONLY, 0); int numNodes = randomIntBetween(1, 5); @@ -160,6 +161,7 @@ public void testNodeCountsWithDeprecatedMasterRole() { expectedCounts.put(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE.roleName(), 1); expectedCounts.put(DiscoveryNodeRole.INGEST_ROLE.roleName(), 0); expectedCounts.put(DiscoveryNodeRole.REMOTE_CLUSTER_CLIENT_ROLE.roleName(), 0); + expectedCounts.put(DiscoveryNodeRole.SEARCH_ROLE.roleName(), 0); expectedCounts.put(ClusterStatsNodes.Counts.COORDINATING_ONLY, 0); ClusterStatsResponse response = client().admin().cluster().prepareClusterStats().get(); diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/WeightedRoutingIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/WeightedRoutingIT.java index 0b4eae81cde86..bba07d878a42c 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/routing/WeightedRoutingIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/routing/WeightedRoutingIT.java @@ -244,6 +244,48 @@ public void testGetWeightedRouting_WeightsAreSet() throws IOException { assertEquals("3.0", weightedRoutingResponse.getLocalNodeWeight()); } + public void testWeightedRoutingMetadataOnOSProcessRestart() throws Exception { + Settings commonSettings = Settings.builder() + .put("cluster.routing.allocation.awareness.attributes", "zone") + .put("cluster.routing.allocation.awareness.force.zone.values", "a,b,c") + .build(); + + internalCluster().startNodes( + Settings.builder().put(commonSettings).put("node.attr.zone", "a").build(), + Settings.builder().put(commonSettings).put("node.attr.zone", "b").build(), + Settings.builder().put(commonSettings).put("node.attr.zone", "c").build() + ); + + logger.info("--> waiting for nodes to form a cluster"); + ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForNodes("3").execute().actionGet(); + assertThat(health.isTimedOut(), equalTo(false)); + + ensureGreen(); + + logger.info("--> setting shard routing weights for weighted round robin"); + Map weights = Map.of("a", 1.0, "b", 2.0, "c", 3.0); + WeightedRouting weightedRouting = new WeightedRouting("zone", weights); + // put api call to set weights + ClusterPutWeightedRoutingResponse response = client().admin() + .cluster() + .prepareWeightedRouting() + .setWeightedRouting(weightedRouting) + .get(); + 
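+        // Acknowledgement only proves the cluster manager accepted the weights;
+        // the restart at the end of this test checks that the metadata also
+        // deserializes cleanly from persisted cluster state.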
assertEquals(response.isAcknowledged(), true);
+
+        ensureStableCluster(3);
+
+        // routing weights are set in cluster metadata
+        assertNotNull(internalCluster().clusterService().state().metadata().weightedRoutingMetadata());
+
+        ensureGreen();
+
+        // Restart a random data node and check that the OpenSearch process comes back healthy
+        internalCluster().restartRandomDataNode();
+        ensureGreen();
+        assertNotNull(internalCluster().clusterService().state().metadata().weightedRoutingMetadata());
+    }
+
     public void testDeleteWeightedRouting_WeightsNotSet() {
         Settings commonSettings = Settings.builder()
             .put("cluster.routing.allocation.awareness.attributes", "zone")
             .put("cluster.routing.allocation.awareness.force.zone.values", "a,b,c")
             .build();
diff --git a/server/src/internalClusterTest/java/org/opensearch/gateway/ReplicaShardAllocatorIT.java b/server/src/internalClusterTest/java/org/opensearch/gateway/ReplicaShardAllocatorIT.java
index db46fb4424848..6d05ecd0b56b0 100644
--- a/server/src/internalClusterTest/java/org/opensearch/gateway/ReplicaShardAllocatorIT.java
+++ b/server/src/internalClusterTest/java/org/opensearch/gateway/ReplicaShardAllocatorIT.java
@@ -32,7 +32,6 @@
 package org.opensearch.gateway;

-import org.opensearch.LegacyESVersion;
 import org.opensearch.action.admin.indices.stats.ShardStats;
 import org.opensearch.cluster.metadata.IndexMetadata;
 import org.opensearch.cluster.node.DiscoveryNode;
@@ -512,7 +511,7 @@ public void testPeerRecoveryForClosedIndices() throws Exception {
 }

 /**
- * If the recovery source is on an old node (before {@link LegacyESVersion#V_7_2_0}) then the recovery target
+ * If the recovery source is on an old node (before {@code LegacyESVersion#V_7_2_0}
    ) then the recovery target * won't have the safe commit after phase1 because the recovery source does not send the global checkpoint in the clean_files * step. And if the recovery fails and retries, then the recovery stage might not transition properly. This test simulates * this behavior by changing the global checkpoint in phase1 to unassigned. diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesLifecycleListenerIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesLifecycleListenerIT.java index 72f28e94528ba..17a7d4c84b6fe 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/IndicesLifecycleListenerIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/IndicesLifecycleListenerIT.java @@ -32,9 +32,7 @@ package org.opensearch.indices; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; -import org.opensearch.Version; import org.opensearch.action.admin.cluster.state.ClusterStateResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.IndexMetadata; @@ -246,13 +244,8 @@ public void testIndexStateShardChanged() throws Throwable { assertThat(stateChangeListenerNode1.afterCloseSettings.getAsInt(SETTING_NUMBER_OF_SHARDS, -1), equalTo(6)); assertThat(stateChangeListenerNode1.afterCloseSettings.getAsInt(SETTING_NUMBER_OF_REPLICAS, -1), equalTo(1)); - if (Version.CURRENT.onOrAfter(LegacyESVersion.V_7_2_0)) { - assertShardStatesMatch(stateChangeListenerNode1, 6, CLOSED, CREATED, RECOVERING, POST_RECOVERY, STARTED); - assertShardStatesMatch(stateChangeListenerNode2, 6, CLOSED, CREATED, RECOVERING, POST_RECOVERY, STARTED); - } else { - assertShardStatesMatch(stateChangeListenerNode1, 6, CLOSED); - assertShardStatesMatch(stateChangeListenerNode2, 6, CLOSED); - } + assertShardStatesMatch(stateChangeListenerNode1, 6, CLOSED, CREATED, RECOVERING, POST_RECOVERY, STARTED); + assertShardStatesMatch(stateChangeListenerNode2, 6, CLOSED, CREATED, RECOVERING, POST_RECOVERY, STARTED); } private static void assertShardStatesMatch( diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java index b4287f201489b..c0fabb8becf6b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/DedicatedClusterSnapshotRestoreIT.java @@ -37,7 +37,6 @@ import org.opensearch.Version; import org.opensearch.action.ActionFuture; -import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotRequest; import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; import org.opensearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse; import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; @@ -47,7 +46,6 @@ import org.opensearch.action.admin.indices.stats.ShardStats; import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.support.ActiveShardCount; -import org.opensearch.action.support.PlainActionFuture; import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.client.Client; import org.opensearch.client.node.NodeClient; @@ -1386,44 +1384,6 @@ public void testPartialSnapshotAllShardsMissing() throws Exception { assertThat(createSnapshotResponse.getSnapshotInfo().state(), 
is(SnapshotState.PARTIAL)); } - /** - * Tests for the legacy snapshot path that is normally executed if the cluster contains any nodes older than - * {@link SnapshotsService#NO_REPO_INITIALIZE_VERSION}. - * Makes sure that blocking as well as non-blocking snapshot create paths execute cleanly as well as that error handling works out - * correctly by testing a snapshot name collision. - */ - public void testCreateSnapshotLegacyPath() throws Exception { - final String clusterManagerNode = internalCluster().startClusterManagerOnlyNode(); - internalCluster().startDataOnlyNode(); - final String repoName = "test-repo"; - createRepository(repoName, "fs"); - createIndex("some-index"); - - final SnapshotsService snapshotsService = internalCluster().getClusterManagerNodeInstance(SnapshotsService.class); - final Snapshot snapshot1 = PlainActionFuture.get( - f -> snapshotsService.createSnapshotLegacy(new CreateSnapshotRequest(repoName, "snap-1"), f) - ); - awaitNoMoreRunningOperations(clusterManagerNode); - - final InvalidSnapshotNameException sne = expectThrows( - InvalidSnapshotNameException.class, - () -> PlainActionFuture.get( - f -> snapshotsService.executeSnapshotLegacy(new CreateSnapshotRequest(repoName, snapshot1.getSnapshotId().getName()), f) - ) - ); - - assertThat(sne.getMessage(), containsString("snapshot with the same name already exists")); - final SnapshotInfo snapshot2 = PlainActionFuture.get( - f -> snapshotsService.executeSnapshotLegacy(new CreateSnapshotRequest(repoName, "snap-2"), f) - ); - assertThat(snapshot2.state(), is(SnapshotState.SUCCESS)); - - final SnapshotInfo snapshot3 = PlainActionFuture.get( - f -> snapshotsService.executeSnapshotLegacy(new CreateSnapshotRequest(repoName, "snap-3").indices("does-not-exist-*"), f) - ); - assertThat(snapshot3.state(), is(SnapshotState.SUCCESS)); - } - public void testSnapshotDeleteRelocatingPrimaryIndex() throws Exception { internalCluster().startClusterManagerOnlyNode(); final List dataNodes = internalCluster().startDataOnlyNodes(2); diff --git a/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java b/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java new file mode 100644 index 0000000000000..96fcf0053c9ab --- /dev/null +++ b/server/src/internalClusterTest/java/org/opensearch/snapshots/SearchableSnapshotIT.java @@ -0,0 +1,135 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ +package org.opensearch.snapshots; + +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.List; + +import org.hamcrest.MatcherAssert; +import org.junit.BeforeClass; +import org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse; +import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; +import org.opensearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequest; +import org.opensearch.client.Client; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.routing.GroupShardsIterator; +import org.opensearch.cluster.routing.ShardIterator; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.common.io.PathUtils; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; +import org.opensearch.index.Index; +import org.opensearch.monitor.fs.FsInfo; + +import com.carrotsearch.randomizedtesting.generators.RandomPicks; + +import static org.hamcrest.Matchers.equalTo; 
+import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.is; +import static org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest.Metric.FS; +import static org.opensearch.common.util.CollectionUtils.iterableAsArrayList; + +public final class SearchableSnapshotIT extends AbstractSnapshotIntegTestCase { + + @BeforeClass + public static void assumeFeatureFlag() { + assumeTrue( + "Searchable snapshot feature flag is enabled", + Boolean.parseBoolean(System.getProperty(FeatureFlags.SEARCHABLE_SNAPSHOT)) + ); + } + + @Override + protected boolean addMockInternalEngine() { + return false; + } + + public void testCreateSearchableSnapshot() throws Exception { + final Client client = client(); + createRepository("test-repo", "fs"); + createIndex( + "test-idx-1", + Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "0").put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, "1").build() + ); + createIndex( + "test-idx-2", + Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "0").put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, "1").build() + ); + ensureGreen(); + indexRandomDocs("test-idx-1", 100); + indexRandomDocs("test-idx-2", 100); + + logger.info("--> snapshot"); + final CreateSnapshotResponse createSnapshotResponse = client.admin() + .cluster() + .prepareCreateSnapshot("test-repo", "test-snap") + .setWaitForCompletion(true) + .setIndices("test-idx-1", "test-idx-2") + .get(); + MatcherAssert.assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0)); + MatcherAssert.assertThat( + createSnapshotResponse.getSnapshotInfo().successfulShards(), + equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()) + ); + + assertTrue(client.admin().indices().prepareDelete("test-idx-1", "test-idx-2").get().isAcknowledged()); + + logger.info("--> restore indices as 'remote_snapshot'"); + client.admin() + .cluster() + .prepareRestoreSnapshot("test-repo", "test-snap") + .setRenamePattern("(.+)") + .setRenameReplacement("$1-copy") + .setStorageType(RestoreSnapshotRequest.StorageType.REMOTE_SNAPSHOT) + .setWaitForCompletion(true) + .execute() + .actionGet(); + ensureGreen(); + + assertDocCount("test-idx-1-copy", 100L); + assertDocCount("test-idx-2-copy", 100L); + assertIndexDirectoryDoesNotExist("test-idx-1-copy", "test-idx-2-copy"); + } + + /** + * Picks a shard out of the cluster state for each given index and asserts + * that the 'index' directory does not exist in the node's file system. + * This assertion is digging a bit into the implementation details to + * verify that the Lucene segment files are not copied from the snapshot + * repository to the node's local disk for a remote snapshot index. + */ + private void assertIndexDirectoryDoesNotExist(String... 
indexNames) { + final ClusterState state = client().admin().cluster().prepareState().get().getState(); + for (String indexName : indexNames) { + final Index index = state.metadata().index(indexName).getIndex(); + // Get the primary shards for the given index + final GroupShardsIterator shardIterators = state.getRoutingTable() + .activePrimaryShardsGrouped(new String[] { indexName }, false); + // Randomly pick one of the shards + final List iterators = iterableAsArrayList(shardIterators); + final ShardIterator shardIterator = RandomPicks.randomFrom(random(), iterators); + final ShardRouting shardRouting = shardIterator.nextOrNull(); + assertNotNull(shardRouting); + assertTrue(shardRouting.primary()); + assertTrue(shardRouting.assignedToNode()); + // Get the file system stats for the assigned node + final String nodeId = shardRouting.currentNodeId(); + final NodesStatsResponse nodeStats = client().admin().cluster().prepareNodesStats(nodeId).addMetric(FS.metricName()).get(); + for (FsInfo.Path info : nodeStats.getNodes().get(0).getFs()) { + // Build the expected path for the index data for a "normal" + // index and assert it does not exist + final String path = info.getPath(); + final Path file = PathUtils.get(path) + .resolve("indices") + .resolve(index.getUUID()) + .resolve(Integer.toString(shardRouting.getId())) + .resolve("index"); + MatcherAssert.assertThat("Expect file not to exist: " + file, Files.exists(file), is(false)); + } + } + } +} diff --git a/server/src/main/java/org/opensearch/Build.java b/server/src/main/java/org/opensearch/Build.java index 364b17ad4aa33..13c951b10cfe3 100644 --- a/server/src/main/java/org/opensearch/Build.java +++ b/server/src/main/java/org/opensearch/Build.java @@ -207,58 +207,27 @@ public String date() { } public static Build readBuild(StreamInput in) throws IOException { - final String distribution; - final Type type; // the following is new for opensearch: we write the distribution to support any "forks" - if (in.getVersion().onOrAfter(Version.V_1_0_0)) { - distribution = in.readString(); - } else { - distribution = "other"; - } - - // The following block is kept for existing BWS tests to pass. - // TODO - clean up this code when we remove all v6 bwc tests. 
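Condensed from the SearchableSnapshotIT added above, the call that distinguishes a searchable-snapshot restore is the storage type on the restore request; a hedged usage sketch (assumes a Client as in the test, with the experimental feature flag enabled):

// Restore as 'remote_snapshot': shards read Lucene segments from the snapshot
// repository on demand instead of copying them to the local "index" directory,
// which is exactly what assertIndexDirectoryDoesNotExist verifies.
client.admin()
    .cluster()
    .prepareRestoreSnapshot("test-repo", "test-snap")
    .setStorageType(RestoreSnapshotRequest.StorageType.REMOTE_SNAPSHOT)
    .setWaitForCompletion(true)
    .get();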
- // TODO - clean this up when OSS flavor is removed in all of the code base - // (Integ test zip still write OSS as distribution) - // See issue: https://github.com/opendistro-for-elasticsearch/search/issues/159 - if (in.getVersion().before(Version.V_1_3_0)) { - String flavor = in.readString(); - } + final String distribution = in.readString(); // be lenient when reading on the wire, the enumeration values from other versions might be different than what we know - type = Type.fromDisplayName(in.readString(), false); + final Type type = Type.fromDisplayName(in.readString(), false); String hash = in.readString(); String date = in.readString(); boolean snapshot = in.readBoolean(); - - final String version; - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - version = in.readString(); - } else { - version = in.getVersion().toString(); - } + final String version = in.readString(); return new Build(type, hash, date, snapshot, version, distribution); } public static void writeBuild(Build build, StreamOutput out) throws IOException { // the following is new for opensearch: we write the distribution name to support any "forks" of the code - if (out.getVersion().onOrAfter(Version.V_1_0_0)) { - out.writeString(build.distribution); - } + out.writeString(build.distribution); - // The following block is kept for existing BWS tests to pass. - // TODO - clean up this code when we remove all v6 bwc tests. - // TODO - clean this up when OSS flavor is removed in all of the code base - if (out.getVersion().before(Version.V_1_3_0)) { - out.writeString("oss"); - } final Type buildType = build.type(); out.writeString(buildType.displayName()); out.writeString(build.hash()); out.writeString(build.date()); out.writeBoolean(build.isSnapshot()); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - out.writeString(build.getQualifiedVersion()); - } + out.writeString(build.getQualifiedVersion()); } /** diff --git a/server/src/main/java/org/opensearch/LegacyESVersion.java b/server/src/main/java/org/opensearch/LegacyESVersion.java index d4ac3c7d2f8b1..6e2aeaa5f7b4f 100644 --- a/server/src/main/java/org/opensearch/LegacyESVersion.java +++ b/server/src/main/java/org/opensearch/LegacyESVersion.java @@ -48,21 +48,6 @@ */ public class LegacyESVersion extends Version { - public static final LegacyESVersion V_7_0_0 = new LegacyESVersion(7000099, org.apache.lucene.util.Version.LUCENE_8_0_0); - public static final LegacyESVersion V_7_0_1 = new LegacyESVersion(7000199, org.apache.lucene.util.Version.LUCENE_8_0_0); - public static final LegacyESVersion V_7_1_0 = new LegacyESVersion(7010099, org.apache.lucene.util.Version.LUCENE_8_0_0); - public static final LegacyESVersion V_7_1_1 = new LegacyESVersion(7010199, org.apache.lucene.util.Version.LUCENE_8_0_0); - public static final LegacyESVersion V_7_2_0 = new LegacyESVersion(7020099, org.apache.lucene.util.Version.LUCENE_8_0_0); - public static final LegacyESVersion V_7_2_1 = new LegacyESVersion(7020199, org.apache.lucene.util.Version.LUCENE_8_0_0); - public static final LegacyESVersion V_7_3_0 = new LegacyESVersion(7030099, org.apache.lucene.util.Version.LUCENE_8_1_0); - public static final LegacyESVersion V_7_3_1 = new LegacyESVersion(7030199, org.apache.lucene.util.Version.LUCENE_8_1_0); - public static final LegacyESVersion V_7_3_2 = new LegacyESVersion(7030299, org.apache.lucene.util.Version.LUCENE_8_1_0); - public static final LegacyESVersion V_7_4_0 = new LegacyESVersion(7040099, org.apache.lucene.util.Version.LUCENE_8_2_0); - public static final 
LegacyESVersion V_7_4_1 = new LegacyESVersion(7040199, org.apache.lucene.util.Version.LUCENE_8_2_0); - public static final LegacyESVersion V_7_4_2 = new LegacyESVersion(7040299, org.apache.lucene.util.Version.LUCENE_8_2_0); - public static final LegacyESVersion V_7_5_0 = new LegacyESVersion(7050099, org.apache.lucene.util.Version.LUCENE_8_3_0); - public static final LegacyESVersion V_7_5_1 = new LegacyESVersion(7050199, org.apache.lucene.util.Version.LUCENE_8_3_0); - public static final LegacyESVersion V_7_5_2 = new LegacyESVersion(7050299, org.apache.lucene.util.Version.LUCENE_8_3_0); public static final LegacyESVersion V_7_6_0 = new LegacyESVersion(7060099, org.apache.lucene.util.Version.LUCENE_8_4_0); public static final LegacyESVersion V_7_6_1 = new LegacyESVersion(7060199, org.apache.lucene.util.Version.LUCENE_8_4_0); public static final LegacyESVersion V_7_6_2 = new LegacyESVersion(7060299, org.apache.lucene.util.Version.LUCENE_8_4_0); diff --git a/server/src/main/java/org/opensearch/OpenSearchException.java b/server/src/main/java/org/opensearch/OpenSearchException.java index 4b6ca173ec692..78bda1cf088cd 100644 --- a/server/src/main/java/org/opensearch/OpenSearchException.java +++ b/server/src/main/java/org/opensearch/OpenSearchException.java @@ -49,7 +49,6 @@ import org.opensearch.index.Index; import org.opensearch.index.shard.ShardId; import org.opensearch.rest.RestStatus; -import org.opensearch.search.SearchException; import org.opensearch.search.aggregations.MultiBucketConsumerService; import org.opensearch.transport.TcpTransport; @@ -317,10 +316,6 @@ public void writeTo(StreamOutput out) throws IOException { public static OpenSearchException readException(StreamInput input, int id) throws IOException { CheckedFunction opensearchException = ID_TO_SUPPLIER.get(id); if (opensearchException == null) { - if (id == 127 && input.getVersion().before(LegacyESVersion.V_7_5_0)) { - // was SearchContextException - return new SearchException(input); - } throw new IllegalStateException("unknown exception for id: " + id); } return opensearchException.apply(input); @@ -601,7 +596,7 @@ public static void generateFailureXContent(XContentBuilder builder, Params param } t = t.getCause(); } - builder.field(ERROR, ExceptionsHelper.summaryMessage(t)); + builder.field(ERROR, ExceptionsHelper.summaryMessage(e)); return; } @@ -1533,7 +1528,7 @@ private enum OpenSearchExceptionHandle { org.opensearch.cluster.coordination.CoordinationStateRejectedException.class, org.opensearch.cluster.coordination.CoordinationStateRejectedException::new, 150, - LegacyESVersion.V_7_0_0 + UNKNOWN_VERSION_ADDED ), SNAPSHOT_IN_PROGRESS_EXCEPTION( org.opensearch.snapshots.SnapshotInProgressException.class, @@ -1569,13 +1564,13 @@ private enum OpenSearchExceptionHandle { org.opensearch.index.seqno.RetentionLeaseInvalidRetainingSeqNoException.class, org.opensearch.index.seqno.RetentionLeaseInvalidRetainingSeqNoException::new, 156, - LegacyESVersion.V_7_5_0 + UNKNOWN_VERSION_ADDED ), INGEST_PROCESSOR_EXCEPTION( org.opensearch.ingest.IngestProcessorException.class, org.opensearch.ingest.IngestProcessorException::new, 157, - LegacyESVersion.V_7_5_0 + UNKNOWN_VERSION_ADDED ), PEER_RECOVERY_NOT_FOUND_EXCEPTION( org.opensearch.indices.recovery.PeerRecoveryNotFound.class, diff --git a/server/src/main/java/org/opensearch/Version.java b/server/src/main/java/org/opensearch/Version.java index 1bffe9ec98ec5..3387eee2dffc8 100644 --- a/server/src/main/java/org/opensearch/Version.java +++ b/server/src/main/java/org/opensearch/Version.java @@ 
-412,7 +412,7 @@ private Version computeMinIndexCompatVersion() { } else if (major == 7 || major == 1) { return LegacyESVersion.fromId(6000026); } else if (major == 2) { - return LegacyESVersion.V_7_0_0; + return LegacyESVersion.fromId(7000099); } else { bwcMajor = major - 1; } diff --git a/server/src/main/java/org/opensearch/action/ActionFuture.java b/server/src/main/java/org/opensearch/action/ActionFuture.java index 77b748f50bfbf..d796180eda021 100644 --- a/server/src/main/java/org/opensearch/action/ActionFuture.java +++ b/server/src/main/java/org/opensearch/action/ActionFuture.java @@ -40,7 +40,7 @@ /** * An extension to {@link Future} allowing for simplified "get" operations. * - * @opensearch.internal + * @opensearch.api */ public interface ActionFuture extends Future { diff --git a/server/src/main/java/org/opensearch/action/ActionListener.java b/server/src/main/java/org/opensearch/action/ActionListener.java index 8f632449c7d91..645ed4deec006 100644 --- a/server/src/main/java/org/opensearch/action/ActionListener.java +++ b/server/src/main/java/org/opensearch/action/ActionListener.java @@ -46,7 +46,7 @@ /** * A listener for action responses or failures. * - * @opensearch.internal + * @opensearch.api */ public interface ActionListener { /** diff --git a/server/src/main/java/org/opensearch/action/ActionListenerResponseHandler.java b/server/src/main/java/org/opensearch/action/ActionListenerResponseHandler.java index 26e9ba8621c53..af8fde4c9893c 100644 --- a/server/src/main/java/org/opensearch/action/ActionListenerResponseHandler.java +++ b/server/src/main/java/org/opensearch/action/ActionListenerResponseHandler.java @@ -46,7 +46,7 @@ * A simple base class for action response listeners, defaulting to using the SAME executor (as its * very common on response handlers). 
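ActionFuture and ActionListener are promoted from @opensearch.internal to @opensearch.api in the hunks above; a small sketch of the listener contract this formally exposes (ActionListener.wrap is existing API, the class is illustrative):

import org.opensearch.action.ActionListener;

final class ListenerSketch {
    // wrap() routes exceptions thrown by the response consumer to onFailure.
    static ActionListener<String> printing() {
        return ActionListener.wrap(
            response -> System.out.println("success: " + response),
            exception -> System.err.println("failure: " + exception)
        );
    }
}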
* - * @opensearch.internal + * @opensearch.api */ public class ActionListenerResponseHandler implements TransportResponseHandler { diff --git a/server/src/main/java/org/opensearch/action/ActionModule.java b/server/src/main/java/org/opensearch/action/ActionModule.java index 6b53a7d6a2888..84bc9b395c5dc 100644 --- a/server/src/main/java/org/opensearch/action/ActionModule.java +++ b/server/src/main/java/org/opensearch/action/ActionModule.java @@ -42,6 +42,8 @@ import org.opensearch.action.admin.cluster.configuration.TransportClearVotingConfigExclusionsAction; import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateAction; import org.opensearch.action.admin.cluster.decommission.awareness.get.TransportGetDecommissionStateAction; +import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateAction; +import org.opensearch.action.admin.cluster.decommission.awareness.delete.TransportDeleteDecommissionStateAction; import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionAction; import org.opensearch.action.admin.cluster.decommission.awareness.put.TransportDecommissionAction; import org.opensearch.action.admin.cluster.health.ClusterHealthAction; @@ -313,6 +315,7 @@ import org.opensearch.rest.action.admin.cluster.RestClusterStatsAction; import org.opensearch.rest.action.admin.cluster.RestClusterUpdateSettingsAction; import org.opensearch.rest.action.admin.cluster.RestCreateSnapshotAction; +import org.opensearch.rest.action.admin.cluster.RestDeleteDecommissionStateAction; import org.opensearch.rest.action.admin.cluster.RestDeleteRepositoryAction; import org.opensearch.rest.action.admin.cluster.RestDeleteSnapshotAction; import org.opensearch.rest.action.admin.cluster.RestDeleteStoredScriptAction; @@ -703,6 +706,7 @@ public void reg // Decommission actions actions.register(DecommissionAction.INSTANCE, TransportDecommissionAction.class); actions.register(GetDecommissionStateAction.INSTANCE, TransportGetDecommissionStateAction.class); + actions.register(DeleteDecommissionStateAction.INSTANCE, TransportDeleteDecommissionStateAction.class); return unmodifiableMap(actions.getRegistry()); } @@ -885,6 +889,7 @@ public void initRestHandlers(Supplier nodesInCluster) { registerHandler.accept(new RestDeletePitAction()); registerHandler.accept(new RestGetAllPitsAction(nodesInCluster)); registerHandler.accept(new RestPitSegmentsAction(nodesInCluster)); + registerHandler.accept(new RestDeleteDecommissionStateAction()); for (ActionPlugin plugin : actionPlugins) { for (RestHandler handler : plugin.getRestHandlers( diff --git a/server/src/main/java/org/opensearch/action/ActionRequest.java b/server/src/main/java/org/opensearch/action/ActionRequest.java index c6d8eb9f273d6..a6879dd98691a 100644 --- a/server/src/main/java/org/opensearch/action/ActionRequest.java +++ b/server/src/main/java/org/opensearch/action/ActionRequest.java @@ -39,9 +39,9 @@ import java.io.IOException; /** - * Base action request + * Base action request implemented by plugins. 
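With ActionRequest now documented as a plugin-facing base class, a hypothetical subclass showing the validate() contract (ExampleRequest and its field are invented; ValidateActions.addValidationError is the existing helper):

import org.opensearch.action.ActionRequest;
import org.opensearch.action.ActionRequestValidationException;

import static org.opensearch.action.ValidateActions.addValidationError;

public class ExampleRequest extends ActionRequest {
    private String target;

    @Override
    public ActionRequestValidationException validate() {
        ActionRequestValidationException errors = null;
        if (target == null) {
            errors = addValidationError("target is required", errors);
        }
        return errors; // null signals a valid request
    }
}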
 *
- * @opensearch.internal
+ * @opensearch.api
 */
 public abstract class ActionRequest extends TransportRequest {
diff --git a/server/src/main/java/org/opensearch/action/ActionRequestBuilder.java b/server/src/main/java/org/opensearch/action/ActionRequestBuilder.java
index d1fddb076b350..27358a0412468 100644
--- a/server/src/main/java/org/opensearch/action/ActionRequestBuilder.java
+++ b/server/src/main/java/org/opensearch/action/ActionRequestBuilder.java
@@ -40,7 +40,7 @@
 /**
  * Base Action Request Builder
  *
- * @opensearch.internal
+ * @opensearch.api
  */
 public abstract class ActionRequestBuilder {
diff --git a/server/src/main/java/org/opensearch/action/ActionRequestValidationException.java b/server/src/main/java/org/opensearch/action/ActionRequestValidationException.java
index d7da932c4dfc2..ffba4d2eb50c0 100644
--- a/server/src/main/java/org/opensearch/action/ActionRequestValidationException.java
+++ b/server/src/main/java/org/opensearch/action/ActionRequestValidationException.java
@@ -35,8 +35,8 @@
 import org.opensearch.common.ValidationException;

 /**
- * Base exception for an action request validation
+ * Base exception for an action request validation, extendable by plugins
  *
- * @opensearch.internal
+ * @opensearch.api
  */
 public class ActionRequestValidationException extends ValidationException {}
diff --git a/server/src/main/java/org/opensearch/action/ActionResponse.java b/server/src/main/java/org/opensearch/action/ActionResponse.java
index c72fc87ccfaf8..ab0544365c0b9 100644
--- a/server/src/main/java/org/opensearch/action/ActionResponse.java
+++ b/server/src/main/java/org/opensearch/action/ActionResponse.java
@@ -38,9 +38,9 @@
 import java.io.IOException;

 /**
- * Base class for responses to action requests.
+ * Base class for responses to action requests, implementable by plugins.
  *
- * @opensearch.internal
+ * @opensearch.api
  */
 public abstract class ActionResponse extends TransportResponse {
diff --git a/server/src/main/java/org/opensearch/action/ActionRunnable.java b/server/src/main/java/org/opensearch/action/ActionRunnable.java
index c718b33bd404a..2c3f70afda75d 100644
--- a/server/src/main/java/org/opensearch/action/ActionRunnable.java
+++ b/server/src/main/java/org/opensearch/action/ActionRunnable.java
@@ -41,7 +41,7 @@
  * Base class for {@link Runnable}s that need to call {@link ActionListener#onFailure(Exception)} in case an uncaught
  * exception or error is thrown while the actual action is run.
  *
- * @opensearch.internal
+ * @opensearch.api
  */
 public abstract class ActionRunnable extends AbstractRunnable {
diff --git a/server/src/main/java/org/opensearch/action/ActionType.java b/server/src/main/java/org/opensearch/action/ActionType.java
index 9c17061990abe..c22cddd6fad71 100644
--- a/server/src/main/java/org/opensearch/action/ActionType.java
+++ b/server/src/main/java/org/opensearch/action/ActionType.java
@@ -39,7 +39,7 @@
 /**
  * A generic action. Should strive to make it a singleton.
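The singleton guidance in ActionType's javadoc is the same shape as the decommission actions added later in this diff; a hypothetical minimal instance (ExampleAction and its NAME are invented; AcknowledgedResponse::new is the existing reader):

import org.opensearch.action.ActionType;
import org.opensearch.action.support.master.AcknowledgedResponse;

public class ExampleAction extends ActionType<AcknowledgedResponse> {
    public static final ExampleAction INSTANCE = new ExampleAction();
    public static final String NAME = "cluster:admin/example";

    private ExampleAction() {
        super(NAME, AcknowledgedResponse::new); // reader for the wire response
    }
}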
* - * @opensearch.internal + * @opensearch.api */ public class ActionType { diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateAction.java new file mode 100644 index 0000000000000..3aff666d388be --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateAction.java @@ -0,0 +1,25 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.cluster.decommission.awareness.delete; + +import org.opensearch.action.ActionType; + +/** + * Delete decommission state action. + * + * @opensearch.internal + */ +public class DeleteDecommissionStateAction extends ActionType { + public static final DeleteDecommissionStateAction INSTANCE = new DeleteDecommissionStateAction(); + public static final String NAME = "cluster:admin/decommission/awareness/delete"; + + private DeleteDecommissionStateAction() { + super(NAME, DeleteDecommissionStateResponse::new); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateRequest.java new file mode 100644 index 0000000000000..205be54a36c33 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateRequest.java @@ -0,0 +1,40 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.admin.cluster.decommission.awareness.delete; + +import org.opensearch.action.ActionRequestValidationException; +import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +/** + * Request for deleting decommission request. 
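A usage sketch for the endpoint this action backs, matching the rest-api-spec entry added earlier in this diff (DELETE /_cluster/decommission/awareness/); the helper and class names are invented:

import java.io.IOException;

import org.opensearch.client.Request;
import org.opensearch.client.Response;
import org.opensearch.client.RestClient;

final class DeleteDecommissionRestSketch {
    // Returns true when the cluster acknowledged clearing the decommission state.
    static boolean clearDecommission(RestClient client) throws IOException {
        Response response = client.performRequest(new Request("DELETE", "/_cluster/decommission/awareness/"));
        return response.getStatusLine().getStatusCode() == 200;
    }
}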
+ *
+ * @opensearch.internal
+ */
+public class DeleteDecommissionStateRequest extends ClusterManagerNodeRequest<DeleteDecommissionStateRequest> {
+
+    public DeleteDecommissionStateRequest() {}
+
+    public DeleteDecommissionStateRequest(StreamInput in) throws IOException {
+        super(in);
+    }
+
+    @Override
+    public ActionRequestValidationException validate() {
+        return null;
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        super.writeTo(out);
+    }
+}
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateRequestBuilder.java
new file mode 100644
index 0000000000000..08f194c53f18e
--- /dev/null
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateRequestBuilder.java
@@ -0,0 +1,27 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.action.admin.cluster.decommission.awareness.delete;
+
+import org.opensearch.action.support.clustermanager.ClusterManagerNodeOperationRequestBuilder;
+import org.opensearch.client.OpenSearchClient;
+
+/**
+ * Builder for the delete decommission state request.
+ *
+ * @opensearch.internal
+ */
+public class DeleteDecommissionStateRequestBuilder extends ClusterManagerNodeOperationRequestBuilder<
+    DeleteDecommissionStateRequest,
+    DeleteDecommissionStateResponse,
+    DeleteDecommissionStateRequestBuilder> {
+
+    public DeleteDecommissionStateRequestBuilder(OpenSearchClient client, DeleteDecommissionStateAction action) {
+        super(client, action, new DeleteDecommissionStateRequest());
+    }
+}
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateResponse.java
new file mode 100644
index 0000000000000..2ff634966586a
--- /dev/null
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/DeleteDecommissionStateResponse.java
@@ -0,0 +1,36 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.action.admin.cluster.decommission.awareness.delete;
+
+import org.opensearch.action.support.master.AcknowledgedResponse;
+import org.opensearch.common.io.stream.StreamInput;
+import org.opensearch.common.io.stream.StreamOutput;
+
+import java.io.IOException;
+
+/**
+ * Response returned after deleting the decommission state.
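+ * <p>
+ * Extends {@link AcknowledgedResponse}, so it carries only the acknowledged flag and delegates all
+ * serialization to the superclass.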
+ *
+ * @opensearch.internal
+ */
+public class DeleteDecommissionStateResponse extends AcknowledgedResponse {
+
+    public DeleteDecommissionStateResponse(StreamInput in) throws IOException {
+        super(in);
+    }
+
+    public DeleteDecommissionStateResponse(boolean acknowledged) {
+        super(acknowledged);
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        super.writeTo(out);
+    }
+}
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/TransportDeleteDecommissionStateAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/TransportDeleteDecommissionStateAction.java
new file mode 100644
index 0000000000000..7d8f4bdd8304c
--- /dev/null
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/TransportDeleteDecommissionStateAction.java
@@ -0,0 +1,86 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.action.admin.cluster.decommission.awareness.delete;
+
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+import org.opensearch.action.ActionListener;
+import org.opensearch.action.support.ActionFilters;
+import org.opensearch.action.support.clustermanager.TransportClusterManagerNodeAction;
+import org.opensearch.cluster.ClusterState;
+import org.opensearch.cluster.block.ClusterBlockException;
+import org.opensearch.cluster.block.ClusterBlockLevel;
+import org.opensearch.cluster.decommission.DecommissionService;
+import org.opensearch.cluster.metadata.IndexNameExpressionResolver;
+import org.opensearch.cluster.service.ClusterService;
+import org.opensearch.common.inject.Inject;
+import org.opensearch.common.io.stream.StreamInput;
+import org.opensearch.threadpool.ThreadPool;
+import org.opensearch.transport.TransportService;
+
+import java.io.IOException;
+
+/**
+ * Transport action for deleting the decommission state.
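+ * <p>
+ * Resolved on the elected cluster manager: {@code checkBlock} rejects the request while a global
+ * METADATA_WRITE block is in place, and {@code clusterManagerOperation} delegates to
+ * {@code DecommissionService#startRecommissionAction}, which performs the recommission and
+ * completes the listener.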
+ *
+ * @opensearch.internal
+ */
+public class TransportDeleteDecommissionStateAction extends TransportClusterManagerNodeAction<
+    DeleteDecommissionStateRequest,
+    DeleteDecommissionStateResponse> {
+
+    private static final Logger logger = LogManager.getLogger(TransportDeleteDecommissionStateAction.class);
+    private final DecommissionService decommissionService;
+
+    @Inject
+    public TransportDeleteDecommissionStateAction(
+        TransportService transportService,
+        ClusterService clusterService,
+        ThreadPool threadPool,
+        ActionFilters actionFilters,
+        IndexNameExpressionResolver indexNameExpressionResolver,
+        DecommissionService decommissionService
+    ) {
+        super(
+            DeleteDecommissionStateAction.NAME,
+            transportService,
+            clusterService,
+            threadPool,
+            actionFilters,
+            DeleteDecommissionStateRequest::new,
+            indexNameExpressionResolver
+        );
+        this.decommissionService = decommissionService;
+    }
+
+    @Override
+    protected String executor() {
+        return ThreadPool.Names.SAME;
+    }
+
+    @Override
+    protected DeleteDecommissionStateResponse read(StreamInput in) throws IOException {
+        return new DeleteDecommissionStateResponse(in);
+    }
+
+    @Override
+    protected ClusterBlockException checkBlock(DeleteDecommissionStateRequest request, ClusterState state) {
+        return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE);
+    }
+
+    @Override
+    protected void clusterManagerOperation(
+        DeleteDecommissionStateRequest request,
+        ClusterState state,
+        ActionListener<DeleteDecommissionStateResponse> listener
+    ) {
+        logger.info("Received delete decommission request [{}]", request);
+        this.decommissionService.startRecommissionAction(listener);
+    }
+}
diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/package-info.java b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/package-info.java
new file mode 100644
index 0000000000000..c2cfc03baa45e
--- /dev/null
+++ b/server/src/main/java/org/opensearch/action/admin/cluster/decommission/awareness/delete/package-info.java
@@ -0,0 +1,10 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/** Delete decommission transport handlers.
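+ * Holds the action type, request, builder, response, and cluster-manager transport handler for the
+ * delete decommission state (recommission) API.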
*/ +package org.opensearch.action.admin.cluster.decommission.awareness.delete; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthRequest.java index 1dedf481dec56..84a7616fe6b06 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthRequest.java @@ -32,7 +32,6 @@ package org.opensearch.action.admin.cluster.health; -import org.opensearch.LegacyESVersion; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.IndicesRequest; import org.opensearch.action.support.ActiveShardCount; @@ -90,11 +89,7 @@ public ClusterHealthRequest(StreamInput in) throws IOException { waitForEvents = Priority.readFrom(in); } waitForNoInitializingShards = in.readBoolean(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - indicesOptions = IndicesOptions.readIndicesOptions(in); - } else { - indicesOptions = IndicesOptions.lenientExpandOpen(); - } + indicesOptions = IndicesOptions.readIndicesOptions(in); } @Override @@ -122,9 +117,7 @@ public void writeTo(StreamOutput out) throws IOException { Priority.writeTo(waitForEvents, out); } out.writeBoolean(waitForNoInitializingShards); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - indicesOptions.writeIndicesOptions(out); - } + indicesOptions.writeIndicesOptions(out); } @Override diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java index e8429580ec887..4c71993251f4f 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/hotthreads/TransportNodesHotThreadsAction.java @@ -35,7 +35,6 @@ import org.opensearch.OpenSearchException; import org.opensearch.action.FailedNodeException; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.nodes.BaseNodeRequest; import org.opensearch.action.support.nodes.TransportNodesAction; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; @@ -43,6 +42,7 @@ import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.monitor.jvm.HotThreads; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportService; import java.io.IOException; @@ -117,7 +117,7 @@ protected NodeHotThreads nodeOperation(NodeRequest request) { * * @opensearch.internal */ - public static class NodeRequest extends BaseNodeRequest { + public static class NodeRequest extends TransportRequest { NodesHotThreadsRequest request; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/TransportNodesInfoAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/TransportNodesInfoAction.java index 7bcf83ba28111..ee7b287b878e7 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/info/TransportNodesInfoAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/info/TransportNodesInfoAction.java @@ -34,7 +34,6 @@ import org.opensearch.action.FailedNodeException; import 
org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.nodes.BaseNodeRequest; import org.opensearch.action.support.nodes.TransportNodesAction; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; @@ -42,6 +41,7 @@ import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.node.NodeService; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportService; import java.io.IOException; @@ -126,7 +126,7 @@ protected NodeInfo nodeOperation(NodeInfoRequest nodeRequest) { * * @opensearch.internal */ - public static class NodeInfoRequest extends BaseNodeRequest { + public static class NodeInfoRequest extends TransportRequest { NodesInfoRequest request; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java index e31f5f304c836..b721c8f005974 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsRequest.java @@ -114,16 +114,14 @@ boolean hasPassword() { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - if (this.secureSettingsPassword == null) { - out.writeOptionalBytesReference(null); - } else { - final byte[] passwordBytes = CharArrays.toUtf8Bytes(this.secureSettingsPassword.getChars()); - try { - out.writeOptionalBytesReference(new BytesArray(passwordBytes)); - } finally { - Arrays.fill(passwordBytes, (byte) 0); - } + if (this.secureSettingsPassword == null) { + out.writeOptionalBytesReference(null); + } else { + final byte[] passwordBytes = CharArrays.toUtf8Bytes(this.secureSettingsPassword.getChars()); + try { + out.writeOptionalBytesReference(new BytesArray(passwordBytes)); + } finally { + Arrays.fill(passwordBytes, (byte) 0); } } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java index d7ad4357fa046..920c66bc5c543 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/reload/TransportNodesReloadSecureSettingsAction.java @@ -39,7 +39,6 @@ import org.opensearch.action.ActionListener; import org.opensearch.action.FailedNodeException; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.nodes.BaseNodeRequest; import org.opensearch.action.support.nodes.TransportNodesAction; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.service.ClusterService; @@ -54,6 +53,7 @@ import org.opensearch.plugins.ReloadablePlugin; import org.opensearch.tasks.Task; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportService; import java.io.IOException; @@ -188,7 +188,7 @@ protected NodesReloadSecureSettingsResponse.NodeResponse nodeOperation(NodeReque * * @opensearch.internal */ - public static class NodeRequest extends BaseNodeRequest { + 
public static class NodeRequest extends TransportRequest { NodesReloadSecureSettingsRequest request; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java index 644c7f02d45f0..5d5d54c8fe7ed 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java @@ -34,7 +34,6 @@ import org.opensearch.action.FailedNodeException; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.nodes.BaseNodeRequest; import org.opensearch.action.support.nodes.TransportNodesAction; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; @@ -42,6 +41,7 @@ import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.node.NodeService; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportService; import java.io.IOException; @@ -127,7 +127,7 @@ protected NodeStats nodeOperation(NodeStatsRequest nodeStatsRequest) { * * @opensearch.internal */ - public static class NodeStatsRequest extends BaseNodeRequest { + public static class NodeStatsRequest extends TransportRequest { NodesStatsRequest request; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/TransportNodesUsageAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/TransportNodesUsageAction.java index c7612f7e15838..dbd3673149efe 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/TransportNodesUsageAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/node/usage/TransportNodesUsageAction.java @@ -34,7 +34,6 @@ import org.opensearch.action.FailedNodeException; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.nodes.BaseNodeRequest; import org.opensearch.action.support.nodes.TransportNodesAction; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.inject.Inject; @@ -42,6 +41,7 @@ import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.search.aggregations.support.AggregationUsageService; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportService; import org.opensearch.usage.UsageService; @@ -117,7 +117,7 @@ protected NodeUsage nodeOperation(NodeUsageRequest nodeUsageRequest) { * * @opensearch.internal */ - public static class NodeUsageRequest extends BaseNodeRequest { + public static class NodeUsageRequest extends TransportRequest { NodesUsageRequest request; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java index a3804db687a2d..07b918e427784 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/repositories/cleanup/TransportCleanupRepositoryAction.java @@ -34,8 +34,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import 
org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.LegacyESVersion; -import org.opensearch.Version; import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRunnable; import org.opensearch.action.StepListener; @@ -91,8 +89,6 @@ public final class TransportCleanupRepositoryAction extends TransportClusterMana private static final Logger logger = LogManager.getLogger(TransportCleanupRepositoryAction.class); - private static final Version MIN_VERSION = LegacyESVersion.V_7_4_0; - private final RepositoriesService repositoriesService; private final SnapshotsService snapshotsService; @@ -179,17 +175,7 @@ protected void clusterManagerOperation( ClusterState state, ActionListener listener ) { - if (state.nodes().getMinNodeVersion().onOrAfter(MIN_VERSION)) { - cleanupRepo(request.name(), ActionListener.map(listener, CleanupRepositoryResponse::new)); - } else { - throw new IllegalArgumentException( - "Repository cleanup is only supported from version [" - + MIN_VERSION - + "] but the oldest node version in the cluster is [" - + state.nodes().getMinNodeVersion() - + ']' - ); - } + cleanupRepo(request.name(), ActionListener.map(listener, CleanupRepositoryResponse::new)); } @Override diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java index d78a4c95246b4..cb64718ed5843 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java @@ -60,7 +60,6 @@ import static org.opensearch.common.settings.Settings.readSettingsFromStream; import static org.opensearch.common.settings.Settings.writeSettingsToStream; import static org.opensearch.common.xcontent.support.XContentMapValues.nodeBooleanValue; -import static org.opensearch.snapshots.SnapshotInfo.METADATA_FIELD_INTRODUCED; /** * Create snapshot request @@ -124,9 +123,7 @@ public CreateSnapshotRequest(StreamInput in) throws IOException { includeGlobalState = in.readBoolean(); waitForCompletion = in.readBoolean(); partial = in.readBoolean(); - if (in.getVersion().onOrAfter(METADATA_FIELD_INTRODUCED)) { - userMetadata = in.readMap(); - } + userMetadata = in.readMap(); } @Override @@ -140,9 +137,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(includeGlobalState); out.writeBoolean(waitForCompletion); out.writeBoolean(partial); - if (out.getVersion().onOrAfter(METADATA_FIELD_INTRODUCED)) { - out.writeMap(userMetadata); - } + out.writeMap(userMetadata); } @Override diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java index ed4af6d915792..f604a30121797 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java @@ -103,18 +103,10 @@ protected void clusterManagerOperation( ClusterState state, final ActionListener listener ) { - if (state.nodes().getMinNodeVersion().before(SnapshotsService.NO_REPO_INITIALIZE_VERSION)) { - if (request.waitForCompletion()) { - snapshotsService.executeSnapshotLegacy(request, 
ActionListener.map(listener, CreateSnapshotResponse::new)); - } else { - snapshotsService.createSnapshotLegacy(request, ActionListener.map(listener, snapshot -> new CreateSnapshotResponse())); - } + if (request.waitForCompletion()) { + snapshotsService.executeSnapshot(request, ActionListener.map(listener, CreateSnapshotResponse::new)); } else { - if (request.waitForCompletion()) { - snapshotsService.executeSnapshot(request, ActionListener.map(listener, CreateSnapshotResponse::new)); - } else { - snapshotsService.createSnapshot(request, ActionListener.map(listener, snapshot -> new CreateSnapshotResponse())); - } + snapshotsService.createSnapshot(request, ActionListener.map(listener, snapshot -> new CreateSnapshotResponse())); } } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java index 1b673217a248b..3ecf5ab19c0e4 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java @@ -33,6 +33,7 @@ package org.opensearch.action.admin.cluster.snapshots.restore; import org.opensearch.LegacyESVersion; +import org.opensearch.Version; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.IndicesOptions; import org.opensearch.action.support.clustermanager.ClusterManagerNodeRequest; @@ -42,6 +43,7 @@ import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.ToXContentObject; import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentType; @@ -68,6 +70,38 @@ public class RestoreSnapshotRequest extends ClusterManagerNodeRequest source) { } else { throw new IllegalArgumentException("malformed ignore_index_settings section, should be an array of strings"); } + } else if (name.equals("storage_type")) { + if (FeatureFlags.isEnabled(FeatureFlags.SEARCHABLE_SNAPSHOT)) { + if (entry.getValue() instanceof String) { + storageType(StorageType.fromString((String) entry.getValue())); + } else { + throw new IllegalArgumentException("malformed storage_type"); + } + } else { + throw new IllegalArgumentException( + "Unsupported parameter " + name + ". 
Feature flag is not enabled for this experimental feature" + ); + } } else { if (IndicesOptions.isIndicesOptions(name) == false) { throw new IllegalArgumentException("Unknown parameter " + name); @@ -579,6 +648,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.value(ignoreIndexSetting); } builder.endArray(); + if (FeatureFlags.isEnabled(FeatureFlags.SEARCHABLE_SNAPSHOT) && storageType != null) { + storageType.toXContent(builder); + } builder.endObject(); return builder; } @@ -605,7 +677,8 @@ public boolean equals(Object o) { && Objects.equals(renameReplacement, that.renameReplacement) && Objects.equals(indexSettings, that.indexSettings) && Arrays.equals(ignoreIndexSettings, that.ignoreIndexSettings) - && Objects.equals(snapshotUuid, that.snapshotUuid); + && Objects.equals(snapshotUuid, that.snapshotUuid) + && Objects.equals(storageType, that.storageType); } @Override @@ -621,7 +694,8 @@ public int hashCode() { partial, includeAliases, indexSettings, - snapshotUuid + snapshotUuid, + storageType ); result = 31 * result + Arrays.hashCode(indices); result = 31 * result + Arrays.hashCode(ignoreIndexSettings); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java index 68397851699fb..0104637a00035 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequestBuilder.java @@ -248,4 +248,12 @@ public RestoreSnapshotRequestBuilder setIgnoreIndexSettings(List ignoreI request.ignoreIndexSettings(ignoreIndexSettings); return this; } + + /** + * Sets the storage type + */ + public RestoreSnapshotRequestBuilder setStorageType(RestoreSnapshotRequest.StorageType storageType) { + request.storageType(storageType); + return this; + } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotStatus.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotStatus.java index 8fd1ed22a0d14..5fa908a039887 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotStatus.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/SnapshotStatus.java @@ -32,7 +32,6 @@ package org.opensearch.action.admin.cluster.snapshots.status; -import org.opensearch.LegacyESVersion; import org.opensearch.cluster.SnapshotsInProgress; import org.opensearch.cluster.SnapshotsInProgress.State; import org.opensearch.common.Nullable; @@ -92,15 +91,8 @@ public class SnapshotStatus implements ToXContentObject, Writeable { state = State.fromValue(in.readByte()); shards = Collections.unmodifiableList(in.readList(SnapshotIndexShardStatus::new)); includeGlobalState = in.readOptionalBoolean(); - final long startTime; - final long time; - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - startTime = in.readLong(); - time = in.readLong(); - } else { - startTime = 0L; - time = 0L; - } + final long startTime = in.readLong(); + final long time = in.readLong(); updateShardStats(startTime, time); } @@ -207,10 +199,8 @@ public void writeTo(StreamOutput out) throws IOException { out.writeByte(state.value()); out.writeList(shards); out.writeOptionalBoolean(includeGlobalState); - if 
(out.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - out.writeLong(stats.getStartTime()); - out.writeLong(stats.getTime()); - } + out.writeLong(stats.getStartTime()); + out.writeLong(stats.getTime()); } @Override diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java index 86d0499a23f9e..e9bf564afaf32 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java @@ -36,7 +36,6 @@ import org.opensearch.action.ActionType; import org.opensearch.action.FailedNodeException; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.nodes.BaseNodeRequest; import org.opensearch.action.support.nodes.BaseNodeResponse; import org.opensearch.action.support.nodes.BaseNodesRequest; import org.opensearch.action.support.nodes.BaseNodesResponse; @@ -51,6 +50,7 @@ import org.opensearch.snapshots.Snapshot; import org.opensearch.snapshots.SnapshotShardsService; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportService; import java.io.IOException; @@ -207,7 +207,7 @@ protected void writeNodesTo(StreamOutput out, List nodes) th * * @opensearch.internal */ - public static class NodeRequest extends BaseNodeRequest { + public static class NodeRequest extends TransportRequest { private final List snapshots; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateResponse.java index 89cd112d30c79..d2d7d843e19db 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateResponse.java @@ -32,14 +32,12 @@ package org.opensearch.action.admin.cluster.state; -import org.opensearch.LegacyESVersion; import org.opensearch.action.ActionResponse; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; -import org.opensearch.common.unit.ByteSizeValue; import java.io.IOException; import java.util.Objects; @@ -59,9 +57,6 @@ public ClusterStateResponse(StreamInput in) throws IOException { super(in); clusterName = new ClusterName(in); clusterState = in.readOptionalWriteable(innerIn -> ClusterState.readFrom(innerIn, null)); - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - new ByteSizeValue(in); - } waitForTimedOut = in.readBoolean(); } @@ -98,9 +93,6 @@ public boolean isWaitForTimedOut() { public void writeTo(StreamOutput out) throws IOException { clusterName.writeTo(out); out.writeOptionalWriteable(clusterState); - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - ByteSizeValue.ZERO.writeTo(out); - } out.writeBoolean(waitForTimedOut); } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java index a13932e137ab0..401813a6174fa 100644 --- 
a/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/stats/TransportClusterStatsAction.java @@ -40,7 +40,6 @@ import org.opensearch.action.admin.indices.stats.CommonStatsFlags; import org.opensearch.action.admin.indices.stats.ShardStats; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.nodes.BaseNodeRequest; import org.opensearch.action.support.nodes.TransportNodesAction; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.health.ClusterHealthStatus; @@ -57,6 +56,7 @@ import org.opensearch.indices.IndicesService; import org.opensearch.node.NodeService; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportService; import org.opensearch.transport.Transports; @@ -216,7 +216,7 @@ protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeReq * * @opensearch.internal */ - public static class ClusterStatsNodeRequest extends BaseNodeRequest { + public static class ClusterStatsNodeRequest extends TransportRequest { ClusterStatsRequest request; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java index 62f51aa3f3bff..bb28623430f2d 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/alias/IndicesAliasesRequest.java @@ -88,11 +88,7 @@ public class IndicesAliasesRequest extends AcknowledgedRequest tokens, DetailAnalyzeResponse detail) { } public Response(StreamInput in) throws IOException { - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { - AnalyzeToken[] tokenArray = in.readOptionalArray(AnalyzeToken::new, AnalyzeToken[]::new); - tokens = tokenArray != null ? Arrays.asList(tokenArray) : null; - } else { - int size = in.readVInt(); - if (size > 0) { - tokens = new ArrayList<>(size); - for (int i = 0; i < size; i++) { - tokens.add(new AnalyzeToken(in)); - } - } else { - tokens = null; - } - } + AnalyzeToken[] tokenArray = in.readOptionalArray(AnalyzeToken::new, AnalyzeToken[]::new); + tokens = tokenArray != null ? 
Arrays.asList(tokenArray) : null; detail = in.readOptionalWriteable(DetailAnalyzeResponse::new); } @@ -371,22 +358,11 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { - AnalyzeToken[] tokenArray = null; - if (tokens != null) { - tokenArray = tokens.toArray(new AnalyzeToken[0]); - } - out.writeOptionalArray(tokenArray); - } else { - if (tokens != null) { - out.writeVInt(tokens.size()); - for (AnalyzeToken token : tokens) { - token.writeTo(out); - } - } else { - out.writeVInt(0); - } + AnalyzeToken[] tokenArray = null; + if (tokens != null) { + tokenArray = tokens.toArray(new AnalyzeToken[0]); } + out.writeOptionalArray(tokenArray); out.writeOptionalWriteable(detail); } @@ -766,19 +742,7 @@ public AnalyzeTokenList(String name, AnalyzeToken[] tokens) { AnalyzeTokenList(StreamInput in) throws IOException { name = in.readString(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { - tokens = in.readOptionalArray(AnalyzeToken::new, AnalyzeToken[]::new); - } else { - int size = in.readVInt(); - if (size > 0) { - tokens = new AnalyzeToken[size]; - for (int i = 0; i < size; i++) { - tokens[i] = new AnalyzeToken(in); - } - } else { - tokens = null; - } - } + tokens = in.readOptionalArray(AnalyzeToken::new, AnalyzeToken[]::new); } public String getName() { @@ -811,18 +775,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(name); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { - out.writeOptionalArray(tokens); - } else { - if (tokens != null) { - out.writeVInt(tokens.length); - for (AnalyzeToken token : tokens) { - token.writeTo(out); - } - } else { - out.writeVInt(0); - } - } + out.writeOptionalArray(tokens); } } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequest.java index b16cabfda4d67..1095cec447442 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexRequest.java @@ -32,7 +32,6 @@ package org.opensearch.action.admin.indices.close; -import org.opensearch.LegacyESVersion; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.IndicesRequest; import org.opensearch.action.support.ActiveShardCount; @@ -61,11 +60,7 @@ public CloseIndexRequest(StreamInput in) throws IOException { super(in); indices = in.readStringArray(); indicesOptions = IndicesOptions.readIndicesOptions(in); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - waitForActiveShards = ActiveShardCount.readFrom(in); - } else { - waitForActiveShards = ActiveShardCount.NONE; - } + waitForActiveShards = ActiveShardCount.readFrom(in); } public CloseIndexRequest() {} @@ -143,8 +138,6 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeStringArray(indices); indicesOptions.writeIndicesOptions(out); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - waitForActiveShards.writeTo(out); - } + waitForActiveShards.writeTo(out); } } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexResponse.java 
b/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexResponse.java index 1fc9017359a8c..0388ea47bfc69 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/close/CloseIndexResponse.java @@ -31,7 +31,6 @@ package org.opensearch.action.admin.indices.close; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; import org.opensearch.action.support.DefaultShardOperationFailedException; import org.opensearch.action.support.master.ShardsAcknowledgedResponse; @@ -49,7 +48,6 @@ import java.util.List; import java.util.Objects; -import static java.util.Collections.emptyList; import static java.util.Collections.unmodifiableList; /** @@ -62,12 +60,8 @@ public class CloseIndexResponse extends ShardsAcknowledgedResponse { private final List indices; CloseIndexResponse(StreamInput in) throws IOException { - super(in, in.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { - indices = unmodifiableList(in.readList(IndexResult::new)); - } else { - indices = unmodifiableList(emptyList()); - } + super(in, true); + indices = unmodifiableList(in.readList(IndexResult::new)); } public CloseIndexResponse(final boolean acknowledged, final boolean shardsAcknowledged, final List indices) { @@ -82,12 +76,8 @@ public List getIndices() { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - writeShardsAcknowledged(out); - } - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { - out.writeList(indices); - } + writeShardsAcknowledged(out); + out.writeList(indices); } @Override diff --git a/server/src/main/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java b/server/src/main/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java index fe39e2a254301..691b2c7c95730 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseAction.java @@ -33,7 +33,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.opensearch.LegacyESVersion; import org.opensearch.action.ActionListener; import org.opensearch.action.admin.indices.flush.FlushRequest; import org.opensearch.action.support.ActionFilters; @@ -205,11 +204,7 @@ public static class ShardRequest extends ReplicationRequest { ShardRequest(StreamInput in) throws IOException { super(in); clusterBlock = new ClusterBlock(in); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { - phase1 = in.readBoolean(); - } else { - phase1 = false; - } + phase1 = in.readBoolean(); } public ShardRequest(final ShardId shardId, final ClusterBlock clusterBlock, final boolean phase1, final TaskId parentTaskId) { @@ -228,9 +223,7 @@ public String toString() { public void writeTo(final StreamOutput out) throws IOException { super.writeTo(out); clusterBlock.writeTo(out); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { - out.writeBoolean(phase1); - } + out.writeBoolean(phase1); } public ClusterBlock clusterBlock() { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java 
b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java index 95837d82be7ac..302c2aad64bb4 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/create/CreateIndexRequest.java @@ -32,7 +32,6 @@ package org.opensearch.action.admin.indices.create; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchGenerationException; import org.opensearch.OpenSearchParseException; import org.opensearch.Version; @@ -126,9 +125,6 @@ public CreateIndexRequest(StreamInput in) throws IOException { for (int i = 0; i < aliasesSize; i++) { aliases.add(new Alias(in)); } - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - in.readBoolean(); // updateAllTypes - } waitForActiveShards = ActiveShardCount.readFrom(in); } @@ -505,9 +501,6 @@ public void writeTo(StreamOutput out) throws IOException { for (Alias alias : aliases) { alias.writeTo(out); } - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeBoolean(true); // updateAllTypes - } waitForActiveShards.writeTo(out); } } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/find/NodeFindDanglingIndexRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/find/NodeFindDanglingIndexRequest.java index 6026dd10c607b..6885de74e4479 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/dangling/find/NodeFindDanglingIndexRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/find/NodeFindDanglingIndexRequest.java @@ -34,16 +34,16 @@ import java.io.IOException; -import org.opensearch.action.support.nodes.BaseNodeRequest; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.transport.TransportRequest; /** * Used when querying every node in the cluster for a specific dangling index. * * @opensearch.internal */ -public class NodeFindDanglingIndexRequest extends BaseNodeRequest { +public class NodeFindDanglingIndexRequest extends TransportRequest { private final String indexUUID; public NodeFindDanglingIndexRequest(String indexUUID) { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/NodeListDanglingIndicesRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/NodeListDanglingIndicesRequest.java index 9b737fff8316e..696daf75942fb 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/NodeListDanglingIndicesRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/dangling/list/NodeListDanglingIndicesRequest.java @@ -32,9 +32,9 @@ package org.opensearch.action.admin.indices.dangling.list; -import org.opensearch.action.support.nodes.BaseNodeRequest; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.transport.TransportRequest; import java.io.IOException; @@ -43,7 +43,7 @@ * * @opensearch.internal */ -public class NodeListDanglingIndicesRequest extends BaseNodeRequest { +public class NodeListDanglingIndicesRequest extends TransportRequest { /** * Filter the response by index UUID. Leave as null to find all indices. 
*/ diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java index 64f76db5e1549..6d238a385231f 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/get/TransportGetFieldMappingsIndexAction.java @@ -33,7 +33,6 @@ package org.opensearch.action.admin.indices.mapping.get; import org.opensearch.OpenSearchException; -import org.opensearch.Version; import org.opensearch.action.admin.indices.mapping.get.GetFieldMappingsResponse.FieldMappingMetadata; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.single.shard.TransportSingleShardAction; @@ -120,8 +119,7 @@ protected ShardsIterator shards(ClusterState state, InternalRequest request) { protected GetFieldMappingsResponse shardOperation(final GetFieldMappingsIndexRequest request, ShardId shardId) { assert shardId != null; IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex()); - Version indexCreatedVersion = indexService.mapperService().getIndexSettings().getIndexVersionCreated(); - Predicate metadataFieldPredicate = (f) -> indicesService.isMetadataField(indexCreatedVersion, f); + Predicate metadataFieldPredicate = (f) -> indicesService.isMetadataField(f); Predicate fieldPredicate = metadataFieldPredicate.or(indicesService.getFieldFilter().apply(shardId.getIndexName())); DocumentMapper documentMapper = indexService.mapperService().documentMapper(); diff --git a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java index 85fd74f0762a5..a8eeedd4a3e4c 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/mapping/put/PutMappingRequest.java @@ -117,9 +117,6 @@ public PutMappingRequest(StreamInput in) throws IOException { } } source = in.readString(); - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - in.readBoolean(); // updateAllTypes - } concreteIndex = in.readOptionalWriteable(Index::new); origin = in.readOptionalString(); if (in.getVersion().onOrAfter(LegacyESVersion.V_7_9_0)) { @@ -349,9 +346,6 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(MapperService.SINGLE_MAPPING_NAME); } out.writeString(source); - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeBoolean(true); // updateAllTypes - } out.writeOptionalWriteable(concreteIndex); out.writeOptionalString(origin); if (out.getVersion().onOrAfter(LegacyESVersion.V_7_9_0)) { diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoresResponse.java b/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoresResponse.java index bd5d9c651af7a..484bc93496fc8 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoresResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/shards/IndicesShardStoresResponse.java @@ -34,7 +34,6 @@ import com.carrotsearch.hppc.cursors.IntObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; -import org.opensearch.LegacyESVersion; import 
org.opensearch.OpenSearchException; import org.opensearch.action.ActionResponse; import org.opensearch.action.support.DefaultShardOperationFailedException; @@ -247,13 +246,8 @@ public Failure(String nodeId, String index, int shardId, Throwable reason) { } private Failure(StreamInput in) throws IOException { - if (in.getVersion().before(LegacyESVersion.V_7_4_0)) { - nodeId = in.readString(); - } readFrom(in, this); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - nodeId = in.readString(); - } + nodeId = in.readString(); } public String nodeId() { @@ -266,13 +260,8 @@ static Failure readFailure(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().before(LegacyESVersion.V_7_4_0)) { - out.writeString(nodeId); - } super.writeTo(out); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - out.writeString(nodeId); - } + out.writeString(nodeId); } @Override diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequest.java index 50784e60a3f19..f5d9528422b58 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequest.java @@ -31,7 +31,6 @@ package org.opensearch.action.admin.indices.shrink; -import org.opensearch.LegacyESVersion; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.IndicesRequest; import org.opensearch.action.admin.indices.alias.Alias; @@ -122,9 +121,6 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); targetIndexRequest.writeTo(out); out.writeString(sourceIndex); - if (type == ResizeType.CLONE && out.getVersion().before(LegacyESVersion.V_7_4_0)) { - throw new IllegalArgumentException("can't send clone request to a node that's older than " + LegacyESVersion.V_7_4_0); - } out.writeEnum(type); out.writeOptionalBoolean(copySettings); } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java b/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java index 9a24d8a42dc9d..fd3d6daa9c393 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/stats/CommonStatsFlags.java @@ -32,7 +32,6 @@ package org.opensearch.action.admin.indices.stats; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.common.Strings; import org.opensearch.common.io.stream.StreamInput; @@ -87,9 +86,7 @@ public CommonStatsFlags(StreamInput in) throws IOException { fieldDataFields = in.readStringArray(); completionDataFields = in.readStringArray(); includeSegmentFileSizes = in.readBoolean(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - includeUnloadedSegments = in.readBoolean(); - } + includeUnloadedSegments = in.readBoolean(); if (in.getVersion().onOrAfter(Version.V_1_2_0)) { includeAllShardIndexingPressureTrackers = in.readBoolean(); includeOnlyTopIndexingPressureMetrics = in.readBoolean(); @@ -111,9 +108,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeStringArrayNullable(fieldDataFields); out.writeStringArrayNullable(completionDataFields); out.writeBoolean(includeSegmentFileSizes); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - 
out.writeBoolean(includeUnloadedSegments); - } + out.writeBoolean(includeUnloadedSegments); if (out.getVersion().onOrAfter(Version.V_1_2_0)) { out.writeBoolean(includeAllShardIndexingPressureTrackers); out.writeBoolean(includeOnlyTopIndexingPressureMetrics); diff --git a/server/src/main/java/org/opensearch/action/delete/DeleteRequest.java b/server/src/main/java/org/opensearch/action/delete/DeleteRequest.java index ce723df0c383a..86880c0211c1d 100644 --- a/server/src/main/java/org/opensearch/action/delete/DeleteRequest.java +++ b/server/src/main/java/org/opensearch/action/delete/DeleteRequest.java @@ -33,7 +33,6 @@ package org.opensearch.action.delete; import org.apache.lucene.util.RamUsageEstimator; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.CompositeIndicesRequest; @@ -96,9 +95,6 @@ public DeleteRequest(@Nullable ShardId shardId, StreamInput in) throws IOExcepti } id = in.readString(); routing = in.readOptionalString(); - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - in.readOptionalString(); // _parent - } version = in.readLong(); versionType = VersionType.fromValue(in.readByte()); ifSeqNo = in.readZLong(); @@ -280,9 +276,6 @@ private void writeBody(StreamOutput out) throws IOException { } out.writeString(id); out.writeOptionalString(routing()); - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeOptionalString(null); // _parent - } out.writeLong(version); out.writeByte(versionType.getValue()); out.writeZLong(ifSeqNo); diff --git a/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesRequest.java b/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesRequest.java index 688568ba9a6d6..50d60560bdfe4 100644 --- a/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesRequest.java +++ b/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesRequest.java @@ -72,11 +72,7 @@ public FieldCapabilitiesRequest(StreamInput in) throws IOException { indices = in.readStringArray(); indicesOptions = IndicesOptions.readIndicesOptions(in); mergeResults = in.readBoolean(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - includeUnmapped = in.readBoolean(); - } else { - includeUnmapped = false; - } + includeUnmapped = in.readBoolean(); indexFilter = in.getVersion().onOrAfter(LegacyESVersion.V_7_9_0) ? in.readOptionalNamedWriteable(QueryBuilder.class) : null; nowInMillis = in.getVersion().onOrAfter(LegacyESVersion.V_7_9_0) ? 
in.readOptionalLong() : null; } @@ -109,9 +105,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeStringArray(indices); indicesOptions.writeIndicesOptions(out); out.writeBoolean(mergeResults); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - out.writeBoolean(includeUnmapped); - } + out.writeBoolean(includeUnmapped); if (out.getVersion().onOrAfter(LegacyESVersion.V_7_9_0)) { out.writeOptionalNamedWriteable(indexFilter); out.writeOptionalLong(nowInMillis); diff --git a/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesResponse.java b/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesResponse.java index e5f644987182c..847cca25ceb35 100644 --- a/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesResponse.java +++ b/server/src/main/java/org/opensearch/action/fieldcaps/FieldCapabilitiesResponse.java @@ -32,7 +32,6 @@ package org.opensearch.action.fieldcaps; -import org.opensearch.LegacyESVersion; import org.opensearch.action.ActionResponse; import org.opensearch.common.ParseField; import org.opensearch.common.Strings; @@ -87,11 +86,7 @@ private FieldCapabilitiesResponse( public FieldCapabilitiesResponse(StreamInput in) throws IOException { super(in); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - indices = in.readStringArray(); - } else { - indices = Strings.EMPTY_ARRAY; - } + indices = in.readStringArray(); this.responseMap = in.readMap(StreamInput::readString, FieldCapabilitiesResponse::readField); indexResponses = in.readList(FieldCapabilitiesIndexResponse::new); } @@ -138,9 +133,7 @@ private static Map readField(StreamInput in) throws I @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - out.writeStringArray(indices); - } + out.writeStringArray(indices); out.writeMap(responseMap, StreamOutput::writeString, FieldCapabilitiesResponse::writeField); out.writeList(indexResponses); } diff --git a/server/src/main/java/org/opensearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java b/server/src/main/java/org/opensearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java index 99962741299ca..7d9ab4ff93f59 100644 --- a/server/src/main/java/org/opensearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java +++ b/server/src/main/java/org/opensearch/action/fieldcaps/TransportFieldCapabilitiesIndexAction.java @@ -153,8 +153,7 @@ private FieldCapabilitiesIndexResponse shardOperation(final FieldCapabilitiesInd for (String field : fieldNames) { MappedFieldType ft = mapperService.fieldType(field); if (ft != null) { - if (indicesService.isMetadataField(mapperService.getIndexSettings().getIndexVersionCreated(), field) - || fieldPredicate.test(ft.name())) { + if (indicesService.isMetadataField(field) || fieldPredicate.test(ft.name())) { IndexFieldCapabilities fieldCap = new IndexFieldCapabilities( field, ft.familyTypeName(), diff --git a/server/src/main/java/org/opensearch/action/get/GetRequest.java b/server/src/main/java/org/opensearch/action/get/GetRequest.java index 5f740ba789bb2..64148f070cc16 100644 --- a/server/src/main/java/org/opensearch/action/get/GetRequest.java +++ b/server/src/main/java/org/opensearch/action/get/GetRequest.java @@ -32,7 +32,6 @@ package org.opensearch.action.get; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.RealtimeRequest; @@ -89,9 +88,6 @@ public 
GetRequest() {} } id = in.readString(); routing = in.readOptionalString(); - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - in.readOptionalString(); - } preference = in.readOptionalString(); refresh = in.readBoolean(); storedFields = in.readOptionalStringArray(); @@ -260,9 +256,6 @@ public void writeTo(StreamOutput out) throws IOException { } out.writeString(id); out.writeOptionalString(routing); - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeOptionalString(null); - } out.writeOptionalString(preference); out.writeBoolean(refresh); diff --git a/server/src/main/java/org/opensearch/action/get/MultiGetRequest.java b/server/src/main/java/org/opensearch/action/get/MultiGetRequest.java index 00df8657736ae..91f506dafafe1 100644 --- a/server/src/main/java/org/opensearch/action/get/MultiGetRequest.java +++ b/server/src/main/java/org/opensearch/action/get/MultiGetRequest.java @@ -32,7 +32,6 @@ package org.opensearch.action.get; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchParseException; import org.opensearch.Version; import org.opensearch.action.ActionRequest; @@ -114,9 +113,6 @@ public Item(StreamInput in) throws IOException { } id = in.readString(); routing = in.readOptionalString(); - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - in.readOptionalString(); // _parent - } storedFields = in.readOptionalStringArray(); version = in.readLong(); versionType = VersionType.fromValue(in.readByte()); @@ -211,9 +207,6 @@ public void writeTo(StreamOutput out) throws IOException { } out.writeString(id); out.writeOptionalString(routing); - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeOptionalString(null); // _parent - } out.writeOptionalStringArray(storedFields); out.writeLong(version); out.writeByte(versionType.getValue()); diff --git a/server/src/main/java/org/opensearch/action/index/IndexRequest.java b/server/src/main/java/org/opensearch/action/index/IndexRequest.java index f863c4a11340e..ceff8dcbc4b55 100644 --- a/server/src/main/java/org/opensearch/action/index/IndexRequest.java +++ b/server/src/main/java/org/opensearch/action/index/IndexRequest.java @@ -148,20 +148,13 @@ public IndexRequest(@Nullable ShardId shardId, StreamInput in) throws IOExceptio } id = in.readOptionalString(); routing = in.readOptionalString(); - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - in.readOptionalString(); // _parent - } source = in.readBytesReference(); opType = OpType.fromId(in.readByte()); version = in.readLong(); versionType = VersionType.fromValue(in.readByte()); pipeline = in.readOptionalString(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_5_0)) { - finalPipeline = in.readOptionalString(); - } - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_5_0)) { - isPipelineResolved = in.readBoolean(); - } + finalPipeline = in.readOptionalString(); + isPipelineResolved = in.readBoolean(); isRetry = in.readBoolean(); autoGeneratedTimestamp = in.readLong(); if (in.readBoolean()) { @@ -642,7 +635,7 @@ public void resolveRouting(Metadata metadata) { } public void checkAutoIdWithOpTypeCreateSupportedByVersion(Version version) { - if (id == null && opType == OpType.CREATE && version.before(LegacyESVersion.V_7_5_0)) { + if (id == null && opType == OpType.CREATE && version.before(LegacyESVersion.fromId(7050099))) { throw new IllegalArgumentException( "optype create not supported for indexing requests without explicit id until all nodes " + "are on version 7.5.0 or higher" ); @@ -669,20 +662,13 @@ private void 
writeBody(StreamOutput out) throws IOException { } out.writeOptionalString(id); out.writeOptionalString(routing); - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeOptionalString(null); // _parent - } out.writeBytesReference(source); out.writeByte(opType.getId()); out.writeLong(version); out.writeByte(versionType.getValue()); out.writeOptionalString(pipeline); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_5_0)) { - out.writeOptionalString(finalPipeline); - } - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_5_0)) { - out.writeBoolean(isPipelineResolved); - } + out.writeOptionalString(finalPipeline); + out.writeBoolean(isPipelineResolved); out.writeBoolean(isRetry); out.writeLong(autoGeneratedTimestamp); if (contentType != null) { diff --git a/server/src/main/java/org/opensearch/action/ingest/SimulateDocumentBaseResult.java b/server/src/main/java/org/opensearch/action/ingest/SimulateDocumentBaseResult.java index 2440a1802912b..f36ca0e7d7379 100644 --- a/server/src/main/java/org/opensearch/action/ingest/SimulateDocumentBaseResult.java +++ b/server/src/main/java/org/opensearch/action/ingest/SimulateDocumentBaseResult.java @@ -31,7 +31,6 @@ package org.opensearch.action.ingest; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; import org.opensearch.common.ParseField; import org.opensearch.common.io.stream.StreamInput; @@ -94,34 +93,14 @@ public SimulateDocumentBaseResult(Exception failure) { * Read from a stream. */ public SimulateDocumentBaseResult(StreamInput in) throws IOException { - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - failure = in.readException(); - ingestDocument = in.readOptionalWriteable(WriteableIngestDocument::new); - } else { - if (in.readBoolean()) { - ingestDocument = null; - failure = in.readException(); - } else { - ingestDocument = new WriteableIngestDocument(in); - failure = null; - } - } + failure = in.readException(); + ingestDocument = in.readOptionalWriteable(WriteableIngestDocument::new); } @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - out.writeException(failure); - out.writeOptionalWriteable(ingestDocument); - } else { - if (failure == null) { - out.writeBoolean(false); - ingestDocument.writeTo(out); - } else { - out.writeBoolean(true); - out.writeException(failure); - } - } + out.writeException(failure); + out.writeOptionalWriteable(ingestDocument); } public IngestDocument getIngestDocument() { diff --git a/server/src/main/java/org/opensearch/action/main/MainResponse.java b/server/src/main/java/org/opensearch/action/main/MainResponse.java index 691bbda512275..0fbfdab9ba294 100644 --- a/server/src/main/java/org/opensearch/action/main/MainResponse.java +++ b/server/src/main/java/org/opensearch/action/main/MainResponse.java @@ -33,7 +33,6 @@ package org.opensearch.action.main; import org.opensearch.Build; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.action.ActionResponse; import org.opensearch.cluster.ClusterName; @@ -71,9 +70,6 @@ public class MainResponse extends ActionResponse implements ToXContentObject { clusterName = new ClusterName(in); clusterUuid = in.readString(); build = Build.readBuild(in); - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - in.readBoolean(); - } } public MainResponse(String nodeName, Version version, ClusterName clusterName, String clusterUuid, Build build) { @@ -111,9 +107,6 @@ public void writeTo(StreamOutput out) 
throws IOException { clusterName.writeTo(out); out.writeString(clusterUuid); Build.writeBuild(build, out); - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeBoolean(true); - } } @Override diff --git a/server/src/main/java/org/opensearch/action/search/GetAllPitNodeRequest.java b/server/src/main/java/org/opensearch/action/search/GetAllPitNodeRequest.java index c90f75e3c0aed..de0c0dd9bbfc3 100644 --- a/server/src/main/java/org/opensearch/action/search/GetAllPitNodeRequest.java +++ b/server/src/main/java/org/opensearch/action/search/GetAllPitNodeRequest.java @@ -9,16 +9,16 @@ package org.opensearch.action.search; -import org.opensearch.action.support.nodes.BaseNodeRequest; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.transport.TransportRequest; import java.io.IOException; /** * Inner node get all pits request */ -public class GetAllPitNodeRequest extends BaseNodeRequest { +public class GetAllPitNodeRequest extends TransportRequest { public GetAllPitNodeRequest() { super(); diff --git a/server/src/main/java/org/opensearch/action/search/MultiSearchResponse.java b/server/src/main/java/org/opensearch/action/search/MultiSearchResponse.java index 6c25a16a65c75..c4ba3becbc151 100644 --- a/server/src/main/java/org/opensearch/action/search/MultiSearchResponse.java +++ b/server/src/main/java/org/opensearch/action/search/MultiSearchResponse.java @@ -32,7 +32,6 @@ package org.opensearch.action.search; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; import org.opensearch.ExceptionsHelper; import org.opensearch.action.ActionResponse; @@ -147,11 +146,7 @@ public MultiSearchResponse(StreamInput in) throws IOException { for (int i = 0; i < items.length; i++) { items[i] = new Item(in); } - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - tookInMillis = in.readVLong(); - } else { - tookInMillis = 0L; - } + tookInMillis = in.readVLong(); } public MultiSearchResponse(Item[] items, long tookInMillis) { @@ -184,9 +179,7 @@ public void writeTo(StreamOutput out) throws IOException { for (Item item : items) { item.writeTo(out); } - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - out.writeVLong(tookInMillis); - } + out.writeVLong(tookInMillis); } @Override diff --git a/server/src/main/java/org/opensearch/action/search/SearchRequest.java b/server/src/main/java/org/opensearch/action/search/SearchRequest.java index da34dab6383d9..e4dd0d0b1a116 100644 --- a/server/src/main/java/org/opensearch/action/search/SearchRequest.java +++ b/server/src/main/java/org/opensearch/action/search/SearchRequest.java @@ -251,9 +251,7 @@ public SearchRequest(StreamInput in) throws IOException { absoluteStartMillis = DEFAULT_ABSOLUTE_START_MILLIS; finalReduce = true; } - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - ccsMinimizeRoundtrips = in.readBoolean(); - } + ccsMinimizeRoundtrips = in.readBoolean(); if (in.getVersion().onOrAfter(Version.V_1_1_0)) { cancelAfterTimeInterval = in.readOptionalTimeValue(); @@ -288,9 +286,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(absoluteStartMillis); out.writeBoolean(finalReduce); } - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - out.writeBoolean(ccsMinimizeRoundtrips); - } + out.writeBoolean(ccsMinimizeRoundtrips); if (out.getVersion().onOrAfter(Version.V_1_1_0)) { out.writeOptionalTimeValue(cancelAfterTimeInterval); diff --git 
a/server/src/main/java/org/opensearch/action/support/nodes/BaseNodeRequest.java b/server/src/main/java/org/opensearch/action/support/nodes/BaseNodeRequest.java deleted file mode 100644 index b5ff1d60ff75b..0000000000000 --- a/server/src/main/java/org/opensearch/action/support/nodes/BaseNodeRequest.java +++ /dev/null @@ -1,69 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.action.support.nodes; - -import org.opensearch.LegacyESVersion; -import org.opensearch.common.io.stream.StreamInput; -import org.opensearch.common.io.stream.StreamOutput; -import org.opensearch.transport.TransportRequest; - -import java.io.IOException; - -/** - * Base class for node transport requests - * - * @opensearch.internal - * - * @deprecated this class is deprecated and classes will extend TransportRequest directly - */ -// TODO: this class can be removed in main once 7.x is bumped to 7.4.0 -@Deprecated -public abstract class BaseNodeRequest extends TransportRequest { - - public BaseNodeRequest() {} - - public BaseNodeRequest(StreamInput in) throws IOException { - super(in); - if (in.getVersion().before(LegacyESVersion.V_7_3_0)) { - in.readString(); // previously nodeId - } - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - super.writeTo(out); - if (out.getVersion().before(LegacyESVersion.V_7_3_0)) { - out.writeString(""); // previously nodeId - } - } -} diff --git a/server/src/main/java/org/opensearch/action/support/nodes/TransportNodesAction.java b/server/src/main/java/org/opensearch/action/support/nodes/TransportNodesAction.java index 18fcdfad0bcc4..a12e9b753599d 100644 --- a/server/src/main/java/org/opensearch/action/support/nodes/TransportNodesAction.java +++ b/server/src/main/java/org/opensearch/action/support/nodes/TransportNodesAction.java @@ -70,7 +70,7 @@ public abstract class TransportNodesAction< NodesRequest extends BaseNodesRequest, NodesResponse extends BaseNodesResponse, - NodeRequest extends BaseNodeRequest, + NodeRequest extends TransportRequest, NodeResponse extends BaseNodeResponse> extends HandledTransportAction { protected final ThreadPool threadPool; diff --git a/server/src/main/java/org/opensearch/action/termvectors/TermVectorsRequest.java b/server/src/main/java/org/opensearch/action/termvectors/TermVectorsRequest.java index cf2d10d2f1db3..dcd5feda0004a 100644 --- a/server/src/main/java/org/opensearch/action/termvectors/TermVectorsRequest.java +++ 
b/server/src/main/java/org/opensearch/action/termvectors/TermVectorsRequest.java @@ -32,7 +32,6 @@ package org.opensearch.action.termvectors; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchParseException; import org.opensearch.Version; import org.opensearch.action.ActionRequestValidationException; @@ -189,10 +188,6 @@ public TermVectorsRequest() {} xContentType = in.readEnum(XContentType.class); } routing = in.readOptionalString(); - - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - in.readOptionalString(); // _parent - } preference = in.readOptionalString(); long flags = in.readVLong(); @@ -541,9 +536,6 @@ public void writeTo(StreamOutput out) throws IOException { out.writeEnum(xContentType); } out.writeOptionalString(routing); - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeOptionalString(null); // _parent - } out.writeOptionalString(preference); long longFlags = 0; for (Flag flag : flagsEnum) { diff --git a/server/src/main/java/org/opensearch/action/update/UpdateRequest.java b/server/src/main/java/org/opensearch/action/update/UpdateRequest.java index d434f134f4321..abd3c31597c18 100644 --- a/server/src/main/java/org/opensearch/action/update/UpdateRequest.java +++ b/server/src/main/java/org/opensearch/action/update/UpdateRequest.java @@ -170,9 +170,6 @@ public UpdateRequest(@Nullable ShardId shardId, StreamInput in) throws IOExcepti } id = in.readString(); routing = in.readOptionalString(); - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - in.readOptionalString(); // _parent - } if (in.readBoolean()) { script = new Script(in); } @@ -181,26 +178,11 @@ public UpdateRequest(@Nullable ShardId shardId, StreamInput in) throws IOExcepti if (in.readBoolean()) { doc = new IndexRequest(shardId, in); } - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - String[] fields = in.readOptionalStringArray(); - if (fields != null) { - throw new IllegalArgumentException("[fields] is no longer supported"); - } - } fetchSourceContext = in.readOptionalWriteable(FetchSourceContext::new); if (in.readBoolean()) { upsertRequest = new IndexRequest(shardId, in); } docAsUpsert = in.readBoolean(); - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - long version = in.readLong(); - VersionType versionType = VersionType.readFromStream(in); - if (version != Versions.MATCH_ANY || versionType != VersionType.INTERNAL) { - throw new UnsupportedOperationException( - "versioned update requests have been removed in 7.0. 
Use if_seq_no and if_primary_term" - ); - } - } ifSeqNo = in.readZLong(); ifPrimaryTerm = in.readVLong(); detectNoop = in.readBoolean(); @@ -893,10 +875,6 @@ private void doWrite(StreamOutput out, boolean thin) throws IOException { } out.writeString(id); out.writeOptionalString(routing); - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeOptionalString(null); // _parent - } - boolean hasScript = script != null; out.writeBoolean(hasScript); if (hasScript) { @@ -917,9 +895,6 @@ private void doWrite(StreamOutput out, boolean thin) throws IOException { doc.writeTo(out); } } - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeOptionalStringArray(null); - } out.writeOptionalWriteable(fetchSourceContext); if (upsertRequest == null) { out.writeBoolean(false); @@ -935,10 +910,6 @@ private void doWrite(StreamOutput out, boolean thin) throws IOException { } } out.writeBoolean(docAsUpsert); - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeLong(Versions.MATCH_ANY); - out.writeByte(VersionType.INTERNAL.getValue()); - } out.writeZLong(ifSeqNo); out.writeVLong(ifPrimaryTerm); out.writeBoolean(detectNoop); diff --git a/server/src/main/java/org/opensearch/client/ClusterAdminClient.java b/server/src/main/java/org/opensearch/client/ClusterAdminClient.java index d4c920b33fd33..77ddb5e17c742 100644 --- a/server/src/main/java/org/opensearch/client/ClusterAdminClient.java +++ b/server/src/main/java/org/opensearch/client/ClusterAdminClient.java @@ -37,6 +37,9 @@ import org.opensearch.action.admin.cluster.allocation.ClusterAllocationExplainRequest; import org.opensearch.action.admin.cluster.allocation.ClusterAllocationExplainRequestBuilder; import org.opensearch.action.admin.cluster.allocation.ClusterAllocationExplainResponse; +import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateRequest; +import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateRequestBuilder; +import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateResponse; import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateRequest; import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateRequestBuilder; import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateResponse; @@ -881,4 +884,19 @@ public interface ClusterAdminClient extends OpenSearchClient { * Get Decommissioned attribute */ GetDecommissionStateRequestBuilder prepareGetDecommission(); + + /** + * Deletes the decommission metadata. + */ + ActionFuture deleteDecommissionState(DeleteDecommissionStateRequest request); + + /** + * Deletes the decommission metadata. + */ + void deleteDecommissionState(DeleteDecommissionStateRequest request, ActionListener listener); + + /** + * Deletes the decommission metadata. 
+ */ + DeleteDecommissionStateRequestBuilder prepareDeleteDecommissionRequest(); } diff --git a/server/src/main/java/org/opensearch/client/Requests.java b/server/src/main/java/org/opensearch/client/Requests.java index 039dec7b2b2db..21f2a2d906602 100644 --- a/server/src/main/java/org/opensearch/client/Requests.java +++ b/server/src/main/java/org/opensearch/client/Requests.java @@ -32,6 +32,7 @@ package org.opensearch.client; +import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateRequest; import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateRequest; import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionRequest; import org.opensearch.action.admin.cluster.health.ClusterHealthRequest; @@ -598,4 +599,11 @@ public static DecommissionRequest decommissionRequest() { public static GetDecommissionStateRequest getDecommissionStateRequest() { return new GetDecommissionStateRequest(); } + + /** + * Creates a new delete decommission request. + */ + public static DeleteDecommissionStateRequest deleteDecommissionStateRequest() { + return new DeleteDecommissionStateRequest(); + } } diff --git a/server/src/main/java/org/opensearch/client/support/AbstractClient.java b/server/src/main/java/org/opensearch/client/support/AbstractClient.java index 4a27619f3e9ca..b42010d4253d5 100644 --- a/server/src/main/java/org/opensearch/client/support/AbstractClient.java +++ b/server/src/main/java/org/opensearch/client/support/AbstractClient.java @@ -43,6 +43,10 @@ import org.opensearch.action.admin.cluster.allocation.ClusterAllocationExplainRequest; import org.opensearch.action.admin.cluster.allocation.ClusterAllocationExplainRequestBuilder; import org.opensearch.action.admin.cluster.allocation.ClusterAllocationExplainResponse; +import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateAction; +import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateRequest; +import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateRequestBuilder; +import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateResponse; import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateAction; import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateRequest; import org.opensearch.action.admin.cluster.decommission.awareness.get.GetDecommissionStateRequestBuilder; @@ -1427,6 +1431,23 @@ public GetDecommissionStateRequestBuilder prepareGetDecommission() { return new GetDecommissionStateRequestBuilder(this, GetDecommissionStateAction.INSTANCE); } + @Override + public ActionFuture deleteDecommissionState(DeleteDecommissionStateRequest request) { + return execute(DeleteDecommissionStateAction.INSTANCE, request); + } + + @Override + public void deleteDecommissionState( + DeleteDecommissionStateRequest request, + ActionListener listener + ) { + execute(DeleteDecommissionStateAction.INSTANCE, request, listener); + } + + @Override + public DeleteDecommissionStateRequestBuilder prepareDeleteDecommissionRequest() { + return new DeleteDecommissionStateRequestBuilder(this, DeleteDecommissionStateAction.INSTANCE); + } } static class IndicesAdmin implements IndicesAdminClient { diff --git a/server/src/main/java/org/opensearch/cluster/RepositoryCleanupInProgress.java b/server/src/main/java/org/opensearch/cluster/RepositoryCleanupInProgress.java index 
2cf9d66fee2bd..291aa88a3fb3e 100644 --- a/server/src/main/java/org/opensearch/cluster/RepositoryCleanupInProgress.java +++ b/server/src/main/java/org/opensearch/cluster/RepositoryCleanupInProgress.java @@ -114,7 +114,7 @@ public String toString() { @Override public Version getMinimalSupportedVersion() { - return LegacyESVersion.V_7_4_0; + return LegacyESVersion.fromId(7040099); } /** diff --git a/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java b/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java index 0ff373b6116de..4cbf0cfe70adb 100644 --- a/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java +++ b/server/src/main/java/org/opensearch/cluster/SnapshotsInProgress.java @@ -67,7 +67,6 @@ import java.util.stream.Collectors; import static org.opensearch.snapshots.SnapshotInfo.DATA_STREAMS_IN_SNAPSHOT; -import static org.opensearch.snapshots.SnapshotInfo.METADATA_FIELD_INTRODUCED; /** * Meta data about snapshots that are currently executing @@ -296,11 +295,7 @@ private Entry(StreamInput in) throws IOException { shards = in.readImmutableMap(ShardId::new, ShardSnapshotStatus::readFrom); repositoryStateId = in.readLong(); failure = in.readOptionalString(); - if (in.getVersion().onOrAfter(METADATA_FIELD_INTRODUCED)) { - userMetadata = in.readMap(); - } else { - userMetadata = null; - } + userMetadata = in.readMap(); if (in.getVersion().onOrAfter(VERSION_IN_SNAPSHOT_VERSION)) { version = Version.readVersion(in); } else if (in.getVersion().onOrAfter(SnapshotsService.SHARD_GEN_IN_REPO_DATA_VERSION)) { @@ -736,9 +731,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeMap(shards); out.writeLong(repositoryStateId); out.writeOptionalString(failure); - if (out.getVersion().onOrAfter(METADATA_FIELD_INTRODUCED)) { - out.writeMap(userMetadata); - } + out.writeMap(userMetadata); if (out.getVersion().onOrAfter(VERSION_IN_SNAPSHOT_VERSION)) { Version.writeVersion(version, out); } else if (out.getVersion().onOrAfter(SnapshotsService.SHARD_GEN_IN_REPO_DATA_VERSION)) { diff --git a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java index 1c7e7cd0419e2..fbb345ea3a441 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java @@ -36,7 +36,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.util.SetOnce; -import org.opensearch.LegacyESVersion; import org.opensearch.action.ActionListener; import org.opensearch.cluster.ClusterChangedEvent; import org.opensearch.cluster.ClusterName; @@ -106,6 +105,7 @@ import java.util.stream.StreamSupport; import static org.opensearch.cluster.coordination.NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_ID; +import static org.opensearch.cluster.decommission.DecommissionService.nodeCommissioned; import static org.opensearch.gateway.ClusterStateUpdaters.hideStateIfNotRecovered; import static org.opensearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK; import static org.opensearch.monitor.StatusInfo.Status.UNHEALTHY; @@ -139,6 +139,7 @@ public class Coordinator extends AbstractLifecycleComponent implements Discovery private final Settings settings; private final boolean singleNodeDiscovery; + private volatile boolean localNodeCommissioned; private final ElectionStrategy electionStrategy; private final TransportService 
transportService; private final ClusterManagerService clusterManagerService; @@ -219,7 +220,8 @@ public Coordinator( this::joinLeaderInTerm, this.onJoinValidators, rerouteService, - nodeHealthService + nodeHealthService, + this::onNodeCommissionStatusChange ); this.persistedStateSupplier = persistedStateSupplier; this.noClusterManagerBlockService = new NoClusterManagerBlockService(settings, clusterSettings); @@ -282,6 +284,7 @@ public Coordinator( joinHelper::logLastFailedJoinAttempt ); this.nodeHealthService = nodeHealthService; + this.localNodeCommissioned = true; } private ClusterFormationState getClusterFormationState() { @@ -597,6 +600,9 @@ private void handleJoinRequest(JoinRequest joinRequest, JoinHelper.JoinCallback joinRequest.getSourceNode().getVersion(), stateForJoinValidation.getNodes().getMinNodeVersion() ); + // we are checking source node commission status here to reject any join request coming from a decommissioned node + // even before executing the join task to fail fast + JoinTaskExecutor.ensureNodeCommissioned(joinRequest.getSourceNode(), stateForJoinValidation.metadata()); } sendValidateJoinRequest(stateForJoinValidation, joinRequest, joinCallback); } else { @@ -1425,6 +1431,17 @@ protected void onFoundPeersUpdated() { } } + // package-visible for testing + synchronized void onNodeCommissionStatusChange(boolean localNodeCommissioned) { + this.localNodeCommissioned = localNodeCommissioned; + peerFinder.onNodeCommissionStatusChange(localNodeCommissioned); + } + + // package-visible for testing + boolean localNodeCommissioned() { + return localNodeCommissioned; + } + private void startElectionScheduler() { assert electionScheduler == null : electionScheduler; @@ -1451,6 +1468,14 @@ public void run() { return; } + // if either the localNodeCommissioned flag or the last accepted state thinks it should skip pre voting, we will + // acknowledge it + if (nodeCommissioned(lastAcceptedState.nodes().getLocalNode(), lastAcceptedState.metadata()) == false + || localNodeCommissioned == false) { + logger.debug("skip prevoting as local node is decommissioned"); + return; + } + if (prevotingRound != null) { prevotingRound.close(); } @@ -1771,14 +1796,8 @@ protected void sendApplyCommit( } } - // TODO: only here temporarily for BWC development, remove once complete - public static Settings.Builder addZen1Attribute(boolean isZen1Node, Settings.Builder builder) { - return builder.put("node.attr.zen1", isZen1Node); - } - // TODO: only here temporarily for BWC development, remove once complete public static boolean isZen1Node(DiscoveryNode discoveryNode) { - return discoveryNode.getVersion().before(LegacyESVersion.V_7_0_0) - || (Booleans.isTrue(discoveryNode.getAttributes().getOrDefault("zen1", "false"))); + return Booleans.isTrue(discoveryNode.getAttributes().getOrDefault("zen1", "false")); } } diff --git a/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java b/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java index 656e6d220720f..a66152b8016ee 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/JoinHelper.java @@ -42,6 +42,7 @@ import org.opensearch.cluster.ClusterStateTaskListener; import org.opensearch.cluster.NotClusterManagerException; import org.opensearch.cluster.coordination.Coordinator.Mode; +import org.opensearch.cluster.decommission.NodeDecommissionedException; import org.opensearch.cluster.metadata.Metadata; import 
org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.RerouteService; @@ -57,6 +58,7 @@ import org.opensearch.monitor.StatusInfo; import org.opensearch.threadpool.ThreadPool; import org.opensearch.threadpool.ThreadPool.Names; +import org.opensearch.transport.RemoteTransportException; import org.opensearch.transport.TransportChannel; import org.opensearch.transport.TransportException; import org.opensearch.transport.TransportRequest; @@ -78,6 +80,7 @@ import java.util.Set; import java.util.concurrent.atomic.AtomicReference; import java.util.function.BiConsumer; +import java.util.function.Consumer; import java.util.function.Function; import java.util.function.LongSupplier; import java.util.function.Supplier; @@ -118,6 +121,7 @@ public class JoinHelper { private final AtomicReference<FailedJoinAttempt> lastFailedJoinAttempt = new AtomicReference<>(); private final Supplier<JoinTaskExecutor> joinTaskExecutorGenerator; + private final Consumer<Boolean> nodeCommissioned; JoinHelper( Settings settings, @@ -130,12 +134,14 @@ public class JoinHelper { Function<StartJoinRequest, Join> joinLeaderInTerm, Collection<BiConsumer<DiscoveryNode, ClusterState>> joinValidators, RerouteService rerouteService, - NodeHealthService nodeHealthService + NodeHealthService nodeHealthService, + Consumer<Boolean> nodeCommissioned ) { this.clusterManagerService = clusterManagerService; this.transportService = transportService; this.nodeHealthService = nodeHealthService; this.joinTimeout = JOIN_TIMEOUT_SETTING.get(settings); + this.nodeCommissioned = nodeCommissioned; this.joinTaskExecutorGenerator = () -> new JoinTaskExecutor(settings, allocationService, logger, rerouteService, transportService) { private final long term = currentTermSupplier.getAsLong(); @@ -342,6 +348,7 @@ public void handleResponse(Empty response) { pendingOutgoingJoins.remove(dedupKey); logger.debug("successfully joined {} with {}", destination, joinRequest); lastFailedJoinAttempt.set(null); + nodeCommissioned.accept(true); onCompletion.run(); } @@ -352,6 +359,13 @@ public void handleException(TransportException exp) { FailedJoinAttempt attempt = new FailedJoinAttempt(destination, joinRequest, exp); attempt.logNow(); lastFailedJoinAttempt.set(attempt); + if (exp instanceof RemoteTransportException && (exp.getCause() instanceof NodeDecommissionedException)) { + logger.info( + "local node is decommissioned [{}]. 
Will not be able to join the cluster", + exp.getCause().getMessage() + ); + nodeCommissioned.accept(false); + } onCompletion.run(); } diff --git a/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java b/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java index 814aa17255931..ac237db85ee5b 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java @@ -39,9 +39,6 @@ import org.opensearch.cluster.ClusterStateTaskExecutor; import org.opensearch.cluster.NotClusterManagerException; import org.opensearch.cluster.block.ClusterBlocks; -import org.opensearch.cluster.decommission.DecommissionAttribute; -import org.opensearch.cluster.decommission.DecommissionAttributeMetadata; -import org.opensearch.cluster.decommission.DecommissionStatus; import org.opensearch.cluster.decommission.NodeDecommissionedException; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.Metadata; @@ -64,6 +61,7 @@ import java.util.function.BiConsumer; import java.util.stream.Collectors; +import static org.opensearch.cluster.decommission.DecommissionService.nodeCommissioned; import static org.opensearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK; /** @@ -196,6 +194,9 @@ public ClusterTasksResult execute(ClusterState currentState, List jo // we do this validation quite late to prevent race conditions between nodes joining and importing dangling indices // we have to reject nodes that don't support all indices we have in this cluster ensureIndexCompatibility(node.getVersion(), currentState.getMetadata()); + // we have added the same check in handleJoinRequest method and adding it here as this method + // would guarantee that a decommissioned node would never be able to join the cluster and ensures correctness + ensureNodeCommissioned(node, currentState.metadata()); nodesBuilder.add(node); nodesChanged = true; minClusterNodeVersion = Version.min(minClusterNodeVersion, node.getVersion()); @@ -203,7 +204,7 @@ public ClusterTasksResult execute(ClusterState currentState, List jo if (node.isClusterManagerNode()) { joiniedNodeNameIds.put(node.getName(), node.getId()); } - } catch (IllegalArgumentException | IllegalStateException e) { + } catch (IllegalArgumentException | IllegalStateException | NodeDecommissionedException e) { results.failure(joinTask, e); continue; } @@ -477,22 +478,13 @@ public static void ensureMajorVersionBarrier(Version joiningNodeVersion, Version } public static void ensureNodeCommissioned(DiscoveryNode node, Metadata metadata) { - DecommissionAttributeMetadata decommissionAttributeMetadata = metadata.decommissionAttributeMetadata(); - if (decommissionAttributeMetadata != null) { - DecommissionAttribute decommissionAttribute = decommissionAttributeMetadata.decommissionAttribute(); - DecommissionStatus status = decommissionAttributeMetadata.status(); - if (decommissionAttribute != null && status != null) { - // We will let the node join the cluster if the current status is in FAILED state - if (node.getAttributes().get(decommissionAttribute.attributeName()).equals(decommissionAttribute.attributeValue()) - && (status.equals(DecommissionStatus.IN_PROGRESS) || status.equals(DecommissionStatus.SUCCESSFUL))) { - throw new NodeDecommissionedException( - "node [{}] has decommissioned attribute [{}] with current status of decommissioning [{}]", - node.toString(), - decommissionAttribute.toString(), - 
status.status() - ); - } - } + if (nodeCommissioned(node, metadata) == false) { + throw new NodeDecommissionedException( + "node [{}] has decommissioned attribute [{}] with current status of decommissioning [{}]", + node.toString(), + metadata.decommissionAttributeMetadata().decommissionAttribute().toString(), + metadata.decommissionAttributeMetadata().status().status() + ); } } diff --git a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionService.java b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionService.java index fcab411f073ba..b2c8bfbc0cdc8 100644 --- a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionService.java +++ b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionService.java @@ -13,8 +13,8 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.OpenSearchTimeoutException; import org.opensearch.action.ActionListener; +import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateResponse; import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionResponse; -import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateObserver; import org.opensearch.cluster.ClusterStateUpdateTask; @@ -389,10 +389,6 @@ private Set<DiscoveryNode> filterNodesWithDecommissionAttribute( return nodesWithDecommissionAttribute; } - private static boolean nodeHasDecommissionedAttribute(DiscoveryNode discoveryNode, DecommissionAttribute decommissionAttribute) { - return discoveryNode.getAttributes().get(decommissionAttribute.attributeName()).equals(decommissionAttribute.attributeValue()); - } - private static void validateAwarenessAttribute( final DecommissionAttribute decommissionAttribute, List<String> awarenessAttributes, @@ -483,7 +479,7 @@ public void onFailure(Exception e) { }; } - public void startRecommissionAction(final ActionListener<AcknowledgedResponse> listener) { + public void startRecommissionAction(final ActionListener<DeleteDecommissionStateResponse> listener) { /* * For abandoned requests, we might not really know if it actually restored the exclusion list. * And can land up in cases where even after recommission, exclusions are set (which is unexpected). @@ -506,7 +502,7 @@ public void onFailure(Exception e) { }, false); } - void deleteDecommissionState(ActionListener<AcknowledgedResponse> listener) { + void deleteDecommissionState(ActionListener<DeleteDecommissionStateResponse> listener) { clusterService.submitStateUpdateTask("delete_decommission_state", new ClusterStateUpdateTask(Priority.URGENT) { @Override public ClusterState execute(ClusterState currentState) { @@ -527,8 +523,42 @@ public void onFailure(String source, Exception e) { public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { // Cluster state processed for deleting the decommission attribute. 
assert newState.metadata().decommissionAttributeMetadata() == null; - listener.onResponse(new AcknowledgedResponse(true)); + listener.onResponse(new DeleteDecommissionStateResponse(true)); } }); } + + /** + * Utility method to check whether the node has the decommissioned attribute + * + * @param discoveryNode node to check on + * @param decommissionAttribute attribute to be checked with + * @return true if the node has the decommissioned attribute, false otherwise + */ + public static boolean nodeHasDecommissionedAttribute(DiscoveryNode discoveryNode, DecommissionAttribute decommissionAttribute) { + String nodeAttributeValue = discoveryNode.getAttributes().get(decommissionAttribute.attributeName()); + return nodeAttributeValue != null && nodeAttributeValue.equals(decommissionAttribute.attributeValue()); + } + + /** + * Utility method to check whether the node is commissioned + * + * @param discoveryNode node to check on + * @param metadata current cluster metadata, used to check the commissioning status of the node + * @return true if the node is commissioned, false otherwise + */ + public static boolean nodeCommissioned(DiscoveryNode discoveryNode, Metadata metadata) { + DecommissionAttributeMetadata decommissionAttributeMetadata = metadata.decommissionAttributeMetadata(); + if (decommissionAttributeMetadata != null) { + DecommissionAttribute decommissionAttribute = decommissionAttributeMetadata.decommissionAttribute(); + DecommissionStatus status = decommissionAttributeMetadata.status(); + if (decommissionAttribute != null && status != null) { + if (nodeHasDecommissionedAttribute(discoveryNode, decommissionAttribute) + && (status.equals(DecommissionStatus.IN_PROGRESS) || status.equals(DecommissionStatus.SUCCESSFUL))) { + return false; + } + } + } + return true; + } } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java index cd1c92a8b109f..b6ca8c52cd818 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java @@ -1016,11 +1016,7 @@ private static class IndexMetadataDiff implements Diff<IndexMetadata> { version = in.readLong(); mappingVersion = in.readVLong(); settingsVersion = in.readVLong(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - aliasesVersion = in.readVLong(); - } else { - aliasesVersion = 1; - } + aliasesVersion = in.readVLong(); state = State.fromId(in.readByte()); settings = Settings.readSettingsFromStream(in); primaryTerms = in.readVLongArray(); @@ -1051,9 +1047,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeLong(version); out.writeVLong(mappingVersion); out.writeVLong(settingsVersion); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - out.writeVLong(aliasesVersion); - } + out.writeVLong(aliasesVersion); out.writeByte(state.id); Settings.writeSettingsToStream(settings, out); out.writeVLongArray(primaryTerms); @@ -1093,11 +1087,7 @@ public static IndexMetadata readFrom(StreamInput in) throws IOException { builder.version(in.readLong()); builder.mappingVersion(in.readVLong()); builder.settingsVersion(in.readVLong()); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - builder.aliasesVersion(in.readVLong()); - } else { - builder.aliasesVersion(1); - } + builder.aliasesVersion(in.readVLong()); builder.setRoutingNumShards(in.readInt()); builder.state(State.fromId(in.readByte())); builder.settings(readSettingsFromStream(in)); @@
-1140,9 +1130,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeLong(version); out.writeVLong(mappingVersion); out.writeVLong(settingsVersion); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - out.writeVLong(aliasesVersion); - } + out.writeVLong(aliasesVersion); out.writeInt(routingNumShards); out.writeByte(state.id()); writeSettingsToStream(settings, out); @@ -1821,8 +1809,8 @@ public static IndexMetadata fromXContent(XContentParser parser) throws IOExcepti if (Assertions.ENABLED) { assert settingsVersion : "settings version should be present for indices"; } - if (Assertions.ENABLED && Version.indexCreated(builder.settings).onOrAfter(LegacyESVersion.V_7_2_0)) { - assert aliasesVersion : "aliases version should be present for indices created on or after 7.2.0"; + if (Assertions.ENABLED) { + assert aliasesVersion : "aliases version should be present for indices"; } return builder.build(); } diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MappingMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/MappingMetadata.java index 35ee222541771..223127783621e 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MappingMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MappingMetadata.java @@ -32,7 +32,6 @@ package org.opensearch.cluster.metadata; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchParseException; import org.opensearch.cluster.AbstractDiffable; import org.opensearch.cluster.Diff; @@ -161,9 +160,6 @@ public void writeTo(StreamOutput out) throws IOException { source().writeTo(out); // routing out.writeBoolean(routingRequired); - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeBoolean(false); // hasParentField - } } @Override @@ -190,9 +186,6 @@ public MappingMetadata(StreamInput in) throws IOException { source = CompressedXContent.readCompressedString(in); // routing routingRequired = in.readBoolean(); - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - in.readBoolean(); // hasParentField - } } public static Diff readDiffFrom(StreamInput in) throws IOException { diff --git a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java index eb5e8bbc2d49b..8e73a72d43219 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/Metadata.java @@ -39,7 +39,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; import org.apache.lucene.util.CollectionUtil; -import org.opensearch.LegacyESVersion; import org.opensearch.action.AliasesRequest; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterState.FeatureAware; @@ -981,22 +980,12 @@ private static class MetadataDiff implements Diff { MetadataDiff(StreamInput in) throws IOException { clusterUUID = in.readString(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - clusterUUIDCommitted = in.readBoolean(); - } + clusterUUIDCommitted = in.readBoolean(); version = in.readLong(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - coordinationMetadata = new CoordinationMetadata(in); - } else { - coordinationMetadata = CoordinationMetadata.EMPTY_METADATA; - } + coordinationMetadata = new CoordinationMetadata(in); transientSettings = Settings.readSettingsFromStream(in); persistentSettings = Settings.readSettingsFromStream(in); - if 
(in.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { - hashesOfConsistentSettings = DiffableStringMap.readDiffFrom(in); - } else { - hashesOfConsistentSettings = DiffableStringMap.DiffableStringMapDiff.EMPTY; - } + hashesOfConsistentSettings = DiffableStringMap.readDiffFrom(in); indices = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), INDEX_METADATA_DIFF_VALUE_READER); templates = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), TEMPLATES_DIFF_VALUE_READER); customs = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), CUSTOM_VALUE_SERIALIZER); @@ -1005,18 +994,12 @@ private static class MetadataDiff implements Diff { @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(clusterUUID); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - out.writeBoolean(clusterUUIDCommitted); - } + out.writeBoolean(clusterUUIDCommitted); out.writeLong(version); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - coordinationMetadata.writeTo(out); - } + coordinationMetadata.writeTo(out); Settings.writeSettingsToStream(transientSettings, out); Settings.writeSettingsToStream(persistentSettings, out); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { - hashesOfConsistentSettings.writeTo(out); - } + hashesOfConsistentSettings.writeTo(out); indices.writeTo(out); templates.writeTo(out); customs.writeTo(out); @@ -1043,17 +1026,11 @@ public static Metadata readFrom(StreamInput in) throws IOException { Builder builder = new Builder(); builder.version = in.readLong(); builder.clusterUUID = in.readString(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - builder.clusterUUIDCommitted = in.readBoolean(); - } - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - builder.coordinationMetadata(new CoordinationMetadata(in)); - } + builder.clusterUUIDCommitted = in.readBoolean(); + builder.coordinationMetadata(new CoordinationMetadata(in)); builder.transientSettings(readSettingsFromStream(in)); builder.persistentSettings(readSettingsFromStream(in)); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { - builder.hashesOfConsistentSettings(DiffableStringMap.readFrom(in)); - } + builder.hashesOfConsistentSettings(DiffableStringMap.readFrom(in)); int size = in.readVInt(); for (int i = 0; i < size; i++) { builder.put(IndexMetadata.readFrom(in), false); @@ -1074,17 +1051,11 @@ public static Metadata readFrom(StreamInput in) throws IOException { public void writeTo(StreamOutput out) throws IOException { out.writeLong(version); out.writeString(clusterUUID); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - out.writeBoolean(clusterUUIDCommitted); - } - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - coordinationMetadata.writeTo(out); - } + out.writeBoolean(clusterUUIDCommitted); + coordinationMetadata.writeTo(out); writeSettingsToStream(transientSettings, out); writeSettingsToStream(persistentSettings, out); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { - hashesOfConsistentSettings.writeTo(out); - } + hashesOfConsistentSettings.writeTo(out); out.writeVInt(indices.size()); for (IndexMetadata indexMetadata : this) { indexMetadata.writeTo(out); diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java index d78e5e872fd2b..36e25b5458b76 100644 --- 
a/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataCreateIndexService.java @@ -36,7 +36,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; import org.opensearch.ResourceAlreadyExistsException; import org.opensearch.Version; @@ -1385,21 +1384,17 @@ static void prepareResizeIndexSettings( * the less default split operations are supported */ public static int calculateNumRoutingShards(int numShards, Version indexVersionCreated) { - if (indexVersionCreated.onOrAfter(LegacyESVersion.V_7_0_0)) { - // only select this automatically for indices that are created on or after 7.0 this will prevent this new behaviour - // until we have a fully upgraded cluster. Additionally it will make integratin testing easier since mixed clusters - // will always have the behavior of the min node in the cluster. - // - // We use as a default number of routing shards the higher number that can be expressed - // as {@code numShards * 2^x`} that is less than or equal to the maximum number of shards: 1024. - int log2MaxNumShards = 10; // logBase2(1024) - int log2NumShards = 32 - Integer.numberOfLeadingZeros(numShards - 1); // ceil(logBase2(numShards)) - int numSplits = log2MaxNumShards - log2NumShards; - numSplits = Math.max(1, numSplits); // Ensure the index can be split at least once - return numShards * 1 << numSplits; - } else { - return numShards; - } + // Every supported index is created on or after 7.0, so this behaviour now applies unconditionally. It also keeps + // integration testing simple since mixed clusters will always have the behavior of the min node in the cluster. + // + // We use as a default number of routing shards the higher number that can be expressed + // as {@code numShards * 2^x} that is less than or equal to the maximum number of shards: 1024. 
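+ // For example, numShards = 5 gives log2NumShards = 3 and numSplits = 7, so the default is 5 * 2^7 = 640 routing shards.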
+ int log2MaxNumShards = 10; // logBase2(1024) + int log2NumShards = 32 - Integer.numberOfLeadingZeros(numShards - 1); // ceil(logBase2(numShards)) + int numSplits = log2MaxNumShards - log2NumShards; + numSplits = Math.max(1, numSplits); // Ensure the index can be split at least once + return numShards * 1 << numSplits; } public static void validateTranslogRetentionSettings(Settings indexSettings) { diff --git a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexStateService.java b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexStateService.java index 4f1000e3407fd..6de69c5a6f8f4 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexStateService.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/MetadataIndexStateService.java @@ -36,7 +36,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; import org.opensearch.Version; import org.opensearch.action.ActionListener; @@ -840,10 +839,6 @@ static Tuple> closeRoutingTable( final Map verifyResult ) { - // Remove the index routing table of closed indices if the cluster is in a mixed version - // that does not support the replication of closed indices - final boolean removeRoutingTable = currentState.nodes().getMinNodeVersion().before(LegacyESVersion.V_7_2_0); - final Metadata.Builder metadata = Metadata.builder(currentState.metadata()); final ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks()); final RoutingTable.Builder routingTable = RoutingTable.builder(currentState.routingTable()); @@ -916,16 +911,11 @@ static Tuple> closeRoutingTable( blocks.removeIndexBlockWithId(index.getName(), INDEX_CLOSED_BLOCK_ID); blocks.addIndexBlock(index.getName(), INDEX_CLOSED_BLOCK); final IndexMetadata.Builder updatedMetadata = IndexMetadata.builder(indexMetadata).state(IndexMetadata.State.CLOSE); - if (removeRoutingTable) { - metadata.put(updatedMetadata); - routingTable.remove(index.getName()); - } else { - metadata.put( - updatedMetadata.settingsVersion(indexMetadata.getSettingsVersion() + 1) - .settings(Settings.builder().put(indexMetadata.getSettings()).put(VERIFIED_BEFORE_CLOSE_SETTING.getKey(), true)) - ); - routingTable.addAsFromOpenToClose(metadata.getSafe(index)); - } + metadata.put( + updatedMetadata.settingsVersion(indexMetadata.getSettingsVersion() + 1) + .settings(Settings.builder().put(indexMetadata.getSettings()).put(VERIFIED_BEFORE_CLOSE_SETTING.getKey(), true)) + ); + routingTable.addAsFromOpenToClose(metadata.getSafe(index)); logger.debug("closing index {} succeeded", index); closedIndices.add(index.getName()); diff --git a/server/src/main/java/org/opensearch/cluster/metadata/WeightedRoutingMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/WeightedRoutingMetadata.java index 27beb21f28f7c..07cdc949c4529 100644 --- a/server/src/main/java/org/opensearch/cluster/metadata/WeightedRoutingMetadata.java +++ b/server/src/main/java/org/opensearch/cluster/metadata/WeightedRoutingMetadata.java @@ -90,8 +90,6 @@ public static WeightedRoutingMetadata fromXContent(XContentParser parser) throws Map weights = new HashMap<>(); WeightedRouting weightedRouting = null; XContentParser.Token token; - // move to the first alias - parser.nextToken(); String awarenessField = null; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { diff --git 
a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java index 199d79070c050..ec72ea8b49829 100644 --- a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java +++ b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNode.java @@ -49,7 +49,6 @@ import java.util.Collections; import java.util.HashMap; import java.util.HashSet; -import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Objects; @@ -323,50 +322,30 @@ public DiscoveryNode(StreamInput in) throws IOException { } int rolesSize = in.readVInt(); final Set roles = new HashSet<>(rolesSize); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { - for (int i = 0; i < rolesSize; i++) { - final String roleName = in.readString(); - final String roleNameAbbreviation = in.readString(); - final boolean canContainData; - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_10_0)) { - canContainData = in.readBoolean(); - } else { - canContainData = roleName.equals(DiscoveryNodeRole.DATA_ROLE.roleName()); - } - final DiscoveryNodeRole role = roleMap.get(roleName); - if (role == null) { - if (in.getVersion().onOrAfter(Version.V_2_1_0)) { - roles.add(new DiscoveryNodeRole.DynamicRole(roleName, roleNameAbbreviation, canContainData)); - } else { - roles.add(new DiscoveryNodeRole.UnknownRole(roleName, roleNameAbbreviation, canContainData)); - } - } else { - assert roleName.equals(role.roleName()) : "role name [" + roleName + "] does not match role [" + role.roleName() + "]"; - assert roleNameAbbreviation.equals(role.roleNameAbbreviation()) : "role name abbreviation [" - + roleName - + "] does not match role [" - + role.roleNameAbbreviation() - + "]"; - roles.add(role); - } + for (int i = 0; i < rolesSize; i++) { + final String roleName = in.readString(); + final String roleNameAbbreviation = in.readString(); + final boolean canContainData; + if (in.getVersion().onOrAfter(LegacyESVersion.V_7_10_0)) { + canContainData = in.readBoolean(); + } else { + canContainData = roleName.equals(DiscoveryNodeRole.DATA_ROLE.roleName()); } - } else { - // an old node will only send us legacy roles since pluggable roles is a new concept - for (int i = 0; i < rolesSize; i++) { - final LegacyRole legacyRole = in.readEnum(LegacyRole.class); - switch (legacyRole) { - case MASTER: - roles.add(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE); - break; - case DATA: - roles.add(DiscoveryNodeRole.DATA_ROLE); - break; - case INGEST: - roles.add(DiscoveryNodeRole.INGEST_ROLE); - break; - default: - throw new AssertionError(legacyRole.roleName()); + final DiscoveryNodeRole role = roleMap.get(roleName); + if (role == null) { + if (in.getVersion().onOrAfter(Version.V_2_1_0)) { + roles.add(new DiscoveryNodeRole.DynamicRole(roleName, roleNameAbbreviation, canContainData)); + } else { + roles.add(new DiscoveryNodeRole.UnknownRole(roleName, roleNameAbbreviation, canContainData)); } + } else { + assert roleName.equals(role.roleName()) : "role name [" + roleName + "] does not match role [" + role.roleName() + "]"; + assert roleNameAbbreviation.equals(role.roleNameAbbreviation()) : "role name abbreviation [" + + roleName + + "] does not match role [" + + role.roleNameAbbreviation() + + "]"; + roles.add(role); } } this.roles = Collections.unmodifiableSortedSet(new TreeSet<>(roles)); @@ -386,30 +365,13 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(entry.getKey()); out.writeString(entry.getValue()); } - if 
(out.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { - out.writeVInt(roles.size()); - for (final DiscoveryNodeRole role : roles) { - final DiscoveryNodeRole compatibleRole = role.getCompatibilityRole(out.getVersion()); - out.writeString(compatibleRole.roleName()); - out.writeString(compatibleRole.roleNameAbbreviation()); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_10_0)) { - out.writeBoolean(compatibleRole.canContainData()); - } - } - } else { - // an old node will only understand legacy roles since pluggable roles is a new concept - final List rolesToWrite = roles.stream() - .filter(DiscoveryNodeRole.LEGACY_ROLES::contains) - .collect(Collectors.toList()); - out.writeVInt(rolesToWrite.size()); - for (final DiscoveryNodeRole role : rolesToWrite) { - if (role.isClusterManager()) { - out.writeEnum(LegacyRole.MASTER); - } else if (role.equals(DiscoveryNodeRole.DATA_ROLE)) { - out.writeEnum(LegacyRole.DATA); - } else if (role.equals(DiscoveryNodeRole.INGEST_ROLE)) { - out.writeEnum(LegacyRole.INGEST); - } + out.writeVInt(roles.size()); + for (final DiscoveryNodeRole role : roles) { + final DiscoveryNodeRole compatibleRole = role.getCompatibilityRole(out.getVersion()); + out.writeString(compatibleRole.roleName()); + out.writeString(compatibleRole.roleNameAbbreviation()); + if (out.getVersion().onOrAfter(LegacyESVersion.V_7_10_0)) { + out.writeBoolean(compatibleRole.canContainData()); } } if (out.getVersion().before(Version.V_1_0_0) && version.onOrAfter(Version.V_1_0_0)) { @@ -498,6 +460,15 @@ public boolean isRemoteClusterClient() { return roles.contains(DiscoveryNodeRole.REMOTE_CLUSTER_CLIENT_ROLE); } + /** + * Returns whether the node is dedicated to provide search capability. + * + * @return true if the node contains search role, false otherwise + */ + public boolean isSearchNode() { + return roles.contains(DiscoveryNodeRole.SEARCH_ROLE); + } + /** * Returns a set of all the roles that the node has. The roles are returned in sorted order by the role name. *

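The hunk above wires the new dedicated search role into DiscoveryNode. As a minimal sketch of how a caller might use the new accessor, the helper below is illustrative and not part of this patch; only isSearchNode() and the SEARCH_ROLE constant come from this change:

    import java.util.ArrayList;
    import java.util.List;

    import org.opensearch.cluster.node.DiscoveryNode;
    import org.opensearch.cluster.node.DiscoveryNodes;

    // Hypothetical helper: gathers the nodes carrying the new 'search' role.
    public final class SearchNodeSelector {

        private SearchNodeSelector() {}

        public static List<DiscoveryNode> selectSearchNodes(DiscoveryNodes nodes) {
            final List<DiscoveryNode> searchNodes = new ArrayList<>();
            // DiscoveryNodes is Iterable over all nodes in the cluster state
            for (final DiscoveryNode node : nodes) {
                if (node.isSearchNode()) { // new accessor added above
                    searchNodes.add(node);
                }
            }
            return searchNodes;
        }
    }

The role name and abbreviation registered in the next hunk ("search" / "s") are the values a node would list under node.roles to become a dedicated search node.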
diff --git a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodeRole.java b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodeRole.java index 5685667c05b1a..bfc44378632d8 100644 --- a/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodeRole.java +++ b/server/src/main/java/org/opensearch/cluster/node/DiscoveryNodeRole.java @@ -290,11 +290,24 @@ public Setting<Boolean> legacySetting() { }; + /** + * Represents the role for a search node, which is dedicated to providing search capability. + */ + public static final DiscoveryNodeRole SEARCH_ROLE = new DiscoveryNodeRole("search", "s", true) { + + @Override + public Setting<Boolean> legacySetting() { + // the search role was added in 2.4, so there is no legacy setting to configure + return null; + } + + }; + /** * The built-in node roles. */ public static SortedSet<DiscoveryNodeRole> BUILT_IN_ROLES = Collections.unmodifiableSortedSet( - new TreeSet<>(Arrays.asList(DATA_ROLE, INGEST_ROLE, CLUSTER_MANAGER_ROLE, REMOTE_CLUSTER_CLIENT_ROLE)) + new TreeSet<>(Arrays.asList(DATA_ROLE, INGEST_ROLE, CLUSTER_MANAGER_ROLE, REMOTE_CLUSTER_CLIENT_ROLE, SEARCH_ROLE)) ); /** diff --git a/server/src/main/java/org/opensearch/cluster/routing/RecoverySource.java b/server/src/main/java/org/opensearch/cluster/routing/RecoverySource.java index 539773296ed74..728bf9d1ae90e 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/RecoverySource.java +++ b/server/src/main/java/org/opensearch/cluster/routing/RecoverySource.java @@ -38,6 +38,7 @@ import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.io.stream.Writeable; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.xcontent.ToXContent; import org.opensearch.common.xcontent.ToXContentObject; import org.opensearch.common.xcontent.XContentBuilder; @@ -257,12 +258,24 @@ public static class SnapshotRecoverySource extends RecoverySource { private final Snapshot snapshot; private final IndexId index; private final Version version; + private final boolean isSearchableSnapshot; public SnapshotRecoverySource(String restoreUUID, Snapshot snapshot, Version version, IndexId indexId) { + this(restoreUUID, snapshot, version, indexId, false); + } + + public SnapshotRecoverySource( + String restoreUUID, + Snapshot snapshot, + Version version, + IndexId indexId, + boolean isSearchableSnapshot + ) { this.restoreUUID = restoreUUID; this.snapshot = Objects.requireNonNull(snapshot); this.version = Objects.requireNonNull(version); this.index = Objects.requireNonNull(indexId); + this.isSearchableSnapshot = isSearchableSnapshot; } SnapshotRecoverySource(StreamInput in) throws IOException { @@ -274,6 +287,11 @@ public SnapshotRecoverySource(String restoreUUID, Snapshot snapshot, Version ver } else { index = new IndexId(in.readString(), IndexMetadata.INDEX_UUID_NA_VALUE); } + if (FeatureFlags.isEnabled(FeatureFlags.SEARCHABLE_SNAPSHOT) && in.getVersion().onOrAfter(Version.V_3_0_0)) { + isSearchableSnapshot = in.readBoolean(); + } else { + isSearchableSnapshot = false; + } } public String restoreUUID() { @@ -298,6 +316,10 @@ public Version version() { return version; } + public boolean isSearchableSnapshot() { + return isSearchableSnapshot; + } + @Override protected void writeAdditionalFields(StreamOutput out) throws IOException { out.writeString(restoreUUID); @@ -308,6 +330,9 @@ protected void writeAdditionalFields(StreamOutput out) throws IOException { } else { out.writeString(index.getName()); } + if
(FeatureFlags.isEnabled(FeatureFlags.SEARCHABLE_SNAPSHOT) && out.getVersion().onOrAfter(Version.V_3_0_0)) { + out.writeBoolean(isSearchableSnapshot); + } } @Override @@ -321,7 +346,8 @@ public void addAdditionalFields(XContentBuilder builder, ToXContent.Params param .field("snapshot", snapshot.getSnapshotId().getName()) .field("version", version.toString()) .field("index", index.getName()) - .field("restoreUUID", restoreUUID); + .field("restoreUUID", restoreUUID) + .field("isSearchableSnapshot", isSearchableSnapshot); } @Override @@ -342,12 +368,13 @@ public boolean equals(Object o) { return restoreUUID.equals(that.restoreUUID) && snapshot.equals(that.snapshot) && index.equals(that.index) - && version.equals(that.version); + && version.equals(that.version) + && isSearchableSnapshot == that.isSearchableSnapshot; } @Override public int hashCode() { - return Objects.hash(restoreUUID, snapshot, index, version); + return Objects.hash(restoreUUID, snapshot, index, version, isSearchableSnapshot); } } diff --git a/server/src/main/java/org/opensearch/cluster/routing/UnassignedInfo.java b/server/src/main/java/org/opensearch/cluster/routing/UnassignedInfo.java index 49c18fe44cc04..ed5a23cd09e40 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/UnassignedInfo.java +++ b/server/src/main/java/org/opensearch/cluster/routing/UnassignedInfo.java @@ -33,7 +33,6 @@ package org.opensearch.cluster.routing; import org.opensearch.ExceptionsHelper; -import org.opensearch.LegacyESVersion; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.routing.allocation.RoutingAllocation; @@ -314,19 +313,11 @@ public UnassignedInfo(StreamInput in) throws IOException { this.failure = in.readException(); this.failedAllocations = in.readVInt(); this.lastAllocationStatus = AllocationStatus.readFrom(in); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_5_0)) { - this.failedNodeIds = Collections.unmodifiableSet(in.readSet(StreamInput::readString)); - } else { - this.failedNodeIds = Collections.emptySet(); - } + this.failedNodeIds = Collections.unmodifiableSet(in.readSet(StreamInput::readString)); } public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().before(LegacyESVersion.V_7_0_0) && reason == Reason.INDEX_CLOSED) { - out.writeByte((byte) Reason.REINITIALIZED.ordinal()); - } else { - out.writeByte((byte) reason.ordinal()); - } + out.writeByte((byte) reason.ordinal()); out.writeLong(unassignedTimeMillis); // Do not serialize unassignedTimeNanos as System.nanoTime() cannot be compared across different JVMs out.writeBoolean(delayed); @@ -334,9 +325,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeException(failure); out.writeVInt(failedAllocations); lastAllocationStatus.writeTo(out); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_5_0)) { - out.writeCollection(failedNodeIds, StreamOutput::writeString); - } + out.writeCollection(failedNodeIds, StreamOutput::writeString); } /** diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/DiskThresholdMonitor.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/DiskThresholdMonitor.java index a89271261ed14..0a6cfd8c04977 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/DiskThresholdMonitor.java +++ b/server/src/main/java/org/opensearch/cluster/routing/allocation/DiskThresholdMonitor.java @@ -37,7 +37,6 @@ import org.apache.logging.log4j.LogManager; import 
org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.LegacyESVersion; import org.opensearch.action.ActionListener; import org.opensearch.action.support.GroupedActionListener; import org.opensearch.client.Client; @@ -54,7 +53,6 @@ import org.opensearch.common.Priority; import org.opensearch.common.Strings; import org.opensearch.common.collect.ImmutableOpenMap; -import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.set.Sets; @@ -88,7 +86,6 @@ public class DiskThresholdMonitor { private final RerouteService rerouteService; private final AtomicLong lastRunTimeMillis = new AtomicLong(Long.MIN_VALUE); private final AtomicBoolean checkInProgress = new AtomicBoolean(); - private final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(logger.getName()); /** * The IDs of the nodes that were over the low threshold in the last check (and maybe over another threshold too). Tracked so that we @@ -121,14 +118,6 @@ public DiskThresholdMonitor( this.rerouteService = rerouteService; this.diskThresholdSettings = new DiskThresholdSettings(settings, clusterSettings); this.client = client; - if (diskThresholdSettings.isAutoReleaseIndexEnabled() == false) { - deprecationLogger.deprecate( - DiskThresholdSettings.AUTO_RELEASE_INDEX_ENABLED_KEY.replace(".", "_"), - "[{}] will be removed in version {}", - DiskThresholdSettings.AUTO_RELEASE_INDEX_ENABLED_KEY, - LegacyESVersion.V_7_4_0.major + 1 - ); - } } private void checkFinished() { @@ -371,23 +360,7 @@ public void onNewInfo(ClusterInfo info) { .collect(Collectors.toSet()); if (indicesToAutoRelease.isEmpty() == false) { - if (diskThresholdSettings.isAutoReleaseIndexEnabled()) { - logger.info("releasing read-only-allow-delete block on indices: [{}]", indicesToAutoRelease); - updateIndicesReadOnly(indicesToAutoRelease, listener, false); - } else { - deprecationLogger.deprecate( - DiskThresholdSettings.AUTO_RELEASE_INDEX_ENABLED_KEY.replace(".", "_"), - "[{}] will be removed in version {}", - DiskThresholdSettings.AUTO_RELEASE_INDEX_ENABLED_KEY, - LegacyESVersion.V_7_4_0.major + 1 - ); - logger.debug( - "[{}] disabled, not releasing read-only-allow-delete block on indices: [{}]", - DiskThresholdSettings.AUTO_RELEASE_INDEX_ENABLED_KEY, - indicesToAutoRelease - ); - listener.onResponse(null); - } + updateIndicesReadOnly(indicesToAutoRelease, listener, false); } else { logger.trace("no auto-release required"); listener.onResponse(null); @@ -421,11 +394,9 @@ private void markNodesMissingUsageIneligibleForRelease( ) { for (RoutingNode routingNode : routingNodes) { if (usages.containsKey(routingNode.nodeId()) == false) { - if (routingNode != null) { - for (ShardRouting routing : routingNode) { - String indexName = routing.index().getName(); - indicesToMarkIneligibleForAutoRelease.add(indexName); - } + for (ShardRouting routing : routingNode) { + String indexName = routing.index().getName(); + indicesToMarkIneligibleForAutoRelease.add(indexName); } } } diff --git a/server/src/main/java/org/opensearch/cluster/routing/allocation/DiskThresholdSettings.java b/server/src/main/java/org/opensearch/cluster/routing/allocation/DiskThresholdSettings.java index 0ce0b1bd7b688..56a1ccad112c5 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/allocation/DiskThresholdSettings.java +++ 
b/server/src/main/java/org/opensearch/cluster/routing/allocation/DiskThresholdSettings.java @@ -33,6 +33,7 @@ package org.opensearch.cluster.routing.allocation; import org.opensearch.OpenSearchParseException; +import org.opensearch.Version; import org.opensearch.common.Strings; import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Setting; @@ -108,18 +109,15 @@ public class DiskThresholdSettings { private volatile TimeValue rerouteInterval; private volatile Double freeDiskThresholdFloodStage; private volatile ByteSizeValue freeBytesThresholdFloodStage; - private static final boolean autoReleaseIndexEnabled; - public static final String AUTO_RELEASE_INDEX_ENABLED_KEY = "opensearch.disk.auto_release_flood_stage_block"; static { + assert Version.CURRENT.major == Version.V_2_0_0.major + 1; // this check is unnecessary in v4 + final String AUTO_RELEASE_INDEX_ENABLED_KEY = "opensearch.disk.auto_release_flood_stage_block"; + final String property = System.getProperty(AUTO_RELEASE_INDEX_ENABLED_KEY); - if (property == null) { - autoReleaseIndexEnabled = true; - } else if (Boolean.FALSE.toString().equals(property)) { - autoReleaseIndexEnabled = false; - } else { + if (property != null) { throw new IllegalArgumentException( - AUTO_RELEASE_INDEX_ENABLED_KEY + " may only be unset or set to [false] but was [" + property + "]" + "system property [" + AUTO_RELEASE_INDEX_ENABLED_KEY + "] has been removed in 3.0.0 and is not supported anymore" ); } } @@ -371,10 +369,6 @@ public ByteSizeValue getFreeBytesThresholdFloodStage() { return freeBytesThresholdFloodStage; } - public boolean isAutoReleaseIndexEnabled() { - return autoReleaseIndexEnabled; - } - public boolean includeRelocations() { return includeRelocations; } diff --git a/server/src/main/java/org/opensearch/common/Rounding.java b/server/src/main/java/org/opensearch/common/Rounding.java index c396f6c88fd57..7160cb1e6d233 100644 --- a/server/src/main/java/org/opensearch/common/Rounding.java +++ b/server/src/main/java/org/opensearch/common/Rounding.java @@ -459,20 +459,13 @@ static class TimeUnitRounding extends Rounding { } TimeUnitRounding(StreamInput in) throws IOException { - this( - DateTimeUnit.resolve(in.readByte()), - in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0) ? in.readZoneId() : DateUtils.of(in.readString()) - ); + this(DateTimeUnit.resolve(in.readByte()), in.readZoneId()); } @Override public void innerWriteTo(StreamOutput out) throws IOException { out.writeByte(unit.getId()); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - out.writeZoneId(timeZone); - } else { - out.writeString(DateUtils.zoneIdToDateTimeZone(timeZone).getID()); - } + out.writeZoneId(timeZone); } @Override @@ -924,17 +917,13 @@ static class TimeIntervalRounding extends Rounding { } TimeIntervalRounding(StreamInput in) throws IOException { - this(in.readVLong(), in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0) ? 
in.readZoneId() : DateUtils.of(in.readString())); + this(in.readVLong(), in.readZoneId()); } @Override public void innerWriteTo(StreamOutput out) throws IOException { out.writeVLong(interval); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - out.writeZoneId(timeZone); - } else { - out.writeString(DateUtils.zoneIdToDateTimeZone(timeZone).getID()); - } + out.writeZoneId(timeZone); } @Override diff --git a/server/src/main/java/org/opensearch/common/breaker/CircuitBreakingException.java b/server/src/main/java/org/opensearch/common/breaker/CircuitBreakingException.java index ee9977bfa2eb0..fda089cf26942 100644 --- a/server/src/main/java/org/opensearch/common/breaker/CircuitBreakingException.java +++ b/server/src/main/java/org/opensearch/common/breaker/CircuitBreakingException.java @@ -31,7 +31,6 @@ package org.opensearch.common.breaker; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; @@ -55,11 +54,7 @@ public CircuitBreakingException(StreamInput in) throws IOException { super(in); byteLimit = in.readLong(); bytesWanted = in.readLong(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - durability = in.readEnum(CircuitBreaker.Durability.class); - } else { - durability = CircuitBreaker.Durability.PERMANENT; - } + durability = in.readEnum(CircuitBreaker.Durability.class); } public CircuitBreakingException(String message, CircuitBreaker.Durability durability) { @@ -78,9 +73,7 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeLong(byteLimit); out.writeLong(bytesWanted); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - out.writeEnum(durability); - } + out.writeEnum(durability); } public long getBytesWanted() { diff --git a/server/src/main/java/org/opensearch/common/joda/Joda.java b/server/src/main/java/org/opensearch/common/joda/Joda.java index 9ecb3f2236e7c..7a82b8ce49d21 100644 --- a/server/src/main/java/org/opensearch/common/joda/Joda.java +++ b/server/src/main/java/org/opensearch/common/joda/Joda.java @@ -32,8 +32,6 @@ package org.opensearch.common.joda; -import org.opensearch.LegacyESVersion; -import org.opensearch.Version; import org.opensearch.common.Strings; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.time.DateFormatter; @@ -388,17 +386,6 @@ public DateTimeField getField(Chronology chronology) { } }; - /** - * Checks if a pattern is Joda-style. - * Joda style patterns are not always compatible with java.time patterns. 
- * @param version - creation version of the index where pattern was used - * @param pattern - the pattern to check - * @return - true if pattern is joda style, otherwise false - */ - public static boolean isJodaPattern(Version version, String pattern) { - return version.before(LegacyESVersion.V_7_0_0) && pattern.startsWith("8") == false; - } - /** * parses epoch timers * diff --git a/server/src/main/java/org/opensearch/common/logging/LogConfigurator.java b/server/src/main/java/org/opensearch/common/logging/LogConfigurator.java index e42727b2aa2af..c0405f9e52b77 100644 --- a/server/src/main/java/org/opensearch/common/logging/LogConfigurator.java +++ b/server/src/main/java/org/opensearch/common/logging/LogConfigurator.java @@ -259,6 +259,7 @@ private static void configureLoggerLevels(final Settings settings) { */ @SuppressForbidden(reason = "sets system property for logging configuration") private static void setLogConfigurationSystemProperty(final Path logsPath, final Settings settings) { + System.setProperty("java.util.logging.manager", "org.apache.logging.log4j.jul.LogManager"); System.setProperty("opensearch.logs.base_path", logsPath.toString()); System.setProperty("opensearch.logs.cluster_name", ClusterName.CLUSTER_NAME_SETTING.get(settings).value()); System.setProperty("opensearch.logs.node_name", Node.NODE_NAME_SETTING.get(settings)); diff --git a/server/src/main/java/org/opensearch/common/lucene/Lucene.java b/server/src/main/java/org/opensearch/common/lucene/Lucene.java index 2692a8fa2b914..7b69dff020bc4 100644 --- a/server/src/main/java/org/opensearch/common/lucene/Lucene.java +++ b/server/src/main/java/org/opensearch/common/lucene/Lucene.java @@ -97,7 +97,6 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.Version; import org.opensearch.ExceptionsHelper; -import org.opensearch.LegacyESVersion; import org.opensearch.common.Nullable; import org.opensearch.common.Strings; import org.opensearch.common.SuppressForbidden; @@ -322,10 +321,7 @@ public static boolean exists(IndexSearcher searcher, Query query) throws IOExcep public static TotalHits readTotalHits(StreamInput in) throws IOException { long totalHits = in.readVLong(); - TotalHits.Relation totalHitsRelation = TotalHits.Relation.EQUAL_TO; - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - totalHitsRelation = in.readEnum(TotalHits.Relation.class); - } + TotalHits.Relation totalHitsRelation = in.readEnum(TotalHits.Relation.class); return new TotalHits(totalHits, totalHitsRelation); } @@ -444,11 +440,7 @@ public static ScoreDoc readScoreDoc(StreamInput in) throws IOException { public static void writeTotalHits(StreamOutput out, TotalHits totalHits) throws IOException { out.writeVLong(totalHits.value); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - out.writeEnum(totalHits.relation); - } else if (totalHits.value > 0 && totalHits.relation != TotalHits.Relation.EQUAL_TO) { - throw new IllegalArgumentException("Cannot serialize approximate total hit counts to nodes that are on a version < 7.0.0"); - } + out.writeEnum(totalHits.relation); } public static void writeTopDocs(StreamOutput out, TopDocsAndMaxScore topDocs) throws IOException { @@ -648,20 +640,16 @@ public static void writeSortField(StreamOutput out, SortField sortField) throws } private static Number readExplanationValue(StreamInput in) throws IOException { - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - final int numberType = in.readByte(); - switch (numberType) { - case 0: - return in.readFloat(); - case 1: -
return in.readDouble(); - case 2: - return in.readZLong(); - default: - throw new IOException("Unexpected number type: " + numberType); - } - } else { - return in.readFloat(); + final int numberType = in.readByte(); + switch (numberType) { + case 0: + return in.readFloat(); + case 1: + return in.readDouble(); + case 2: + return in.readZLong(); + default: + throw new IOException("Unexpected number type: " + numberType); } } @@ -680,19 +668,15 @@ public static Explanation readExplanation(StreamInput in) throws IOException { } private static void writeExplanationValue(StreamOutput out, Number value) throws IOException { - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - if (value instanceof Float) { - out.writeByte((byte) 0); - out.writeFloat(value.floatValue()); - } else if (value instanceof Double) { - out.writeByte((byte) 1); - out.writeDouble(value.doubleValue()); - } else { - out.writeByte((byte) 2); - out.writeZLong(value.longValue()); - } - } else { + if (value instanceof Float) { + out.writeByte((byte) 0); out.writeFloat(value.floatValue()); + } else if (value instanceof Double) { + out.writeByte((byte) 1); + out.writeDouble(value.doubleValue()); + } else { + out.writeByte((byte) 2); + out.writeZLong(value.longValue()); } } diff --git a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java index 1665614c18496..54579031aac08 100644 --- a/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/ClusterSettings.java @@ -534,6 +534,7 @@ public void apply(Settings value, Settings current, Settings previous) { PersistentTasksClusterService.CLUSTER_TASKS_ALLOCATION_RECHECK_INTERVAL_SETTING, EnableAssignmentDecider.CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING, PeerFinder.DISCOVERY_FIND_PEERS_INTERVAL_SETTING, + PeerFinder.DISCOVERY_FIND_PEERS_INTERVAL_DURING_DECOMMISSION_SETTING, PeerFinder.DISCOVERY_REQUEST_PEERS_TIMEOUT_SETTING, ClusterFormationFailureHelper.DISCOVERY_CLUSTER_FORMATION_WARNING_TIMEOUT_SETTING, ElectionSchedulerFactory.ELECTION_INITIAL_TIMEOUT_SETTING, diff --git a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java index 7be9adc786f24..079fc38415328 100644 --- a/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/IndexScopedSettings.java @@ -221,12 +221,19 @@ public final class IndexScopedSettings extends AbstractScopedSettings { */ public static final Map<String, List<Setting>> FEATURE_FLAGGED_INDEX_SETTINGS = Map.of( FeatureFlags.REPLICATION_TYPE, - Collections.singletonList(IndexMetadata.INDEX_REPLICATION_TYPE_SETTING), + List.of(IndexMetadata.INDEX_REPLICATION_TYPE_SETTING), FeatureFlags.REMOTE_STORE, - Arrays.asList( + List.of( IndexMetadata.INDEX_REMOTE_STORE_ENABLED_SETTING, IndexMetadata.INDEX_REMOTE_TRANSLOG_STORE_ENABLED_SETTING, IndexMetadata.INDEX_REMOTE_STORE_REPOSITORY_SETTING + ), + FeatureFlags.SEARCHABLE_SNAPSHOT, + List.of( + IndexSettings.SEARCHABLE_SNAPSHOT_REPOSITORY, + IndexSettings.SEARCHABLE_SNAPSHOT_INDEX_ID, + IndexSettings.SEARCHABLE_SNAPSHOT_ID_NAME, + IndexSettings.SEARCHABLE_SNAPSHOT_ID_UUID ) ); diff --git a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java index fa39dc9ac5aa0..7297479776da9 100644 ---
a/server/src/main/java/org/opensearch/common/util/FeatureFlags.java +++ b/server/src/main/java/org/opensearch/common/util/FeatureFlags.java @@ -29,6 +29,14 @@ public class FeatureFlags { */ public static final String REMOTE_STORE = "opensearch.experimental.feature.remote_store.enabled"; + /** + * Gates the functionality of a new parameter to the snapshot restore API + * that allows for creation of a new index type that searches a snapshot + * directly in a remote repository without restoring all index data to disk + * ahead of time. + */ + public static final String SEARCHABLE_SNAPSHOT = "opensearch.experimental.feature.searchable_snapshot.enabled"; + /** * Used to test feature flags whose values are expected to be booleans. * This method returns true if the value is "true" (case-insensitive), diff --git a/server/src/main/java/org/opensearch/discovery/PeerFinder.java b/server/src/main/java/org/opensearch/discovery/PeerFinder.java index a601a6fbe4d82..e8b6c72c512a2 100644 --- a/server/src/main/java/org/opensearch/discovery/PeerFinder.java +++ b/server/src/main/java/org/opensearch/discovery/PeerFinder.java @@ -84,6 +84,14 @@ public abstract class PeerFinder { Setting.Property.NodeScope ); + // the time between attempts to find all peers while the node is decommissioned; defaults to 2 minutes + public static final Setting<TimeValue> DISCOVERY_FIND_PEERS_INTERVAL_DURING_DECOMMISSION_SETTING = Setting.timeSetting( + "discovery.find_peers_interval_during_decommission", + TimeValue.timeValueSeconds(120L), + TimeValue.timeValueMillis(1000), + Setting.Property.NodeScope + ); + public static final Setting<TimeValue> DISCOVERY_REQUEST_PEERS_TIMEOUT_SETTING = Setting.timeSetting( "discovery.request_peers_timeout", TimeValue.timeValueMillis(3000), @@ -91,7 +99,8 @@ public abstract class PeerFinder { Setting.Property.NodeScope ); - private final TimeValue findPeersInterval; + private final Settings settings; + private TimeValue findPeersInterval; private final TimeValue requestPeersTimeout; private final Object mutex = new Object(); @@ -112,6 +121,7 @@ public PeerFinder( TransportAddressConnector transportAddressConnector, ConfiguredHostsResolver configuredHostsResolver ) { + this.settings = settings; findPeersInterval = DISCOVERY_FIND_PEERS_INTERVAL_SETTING.get(settings); requestPeersTimeout = DISCOVERY_REQUEST_PEERS_TIMEOUT_SETTING.get(settings); this.transportService = transportService; @@ -128,6 +138,23 @@ public PeerFinder( ); } + public synchronized void onNodeCommissionStatusChange(boolean localNodeCommissioned) { + findPeersInterval = localNodeCommissioned + ?
DISCOVERY_FIND_PEERS_INTERVAL_SETTING.get(settings) + : DISCOVERY_FIND_PEERS_INTERVAL_DURING_DECOMMISSION_SETTING.get(settings); + logger.info( + "setting findPeersInterval to [{}] as node commission status = [{}] for local node [{}]", + findPeersInterval, + localNodeCommissioned, + transportService.getLocalNode() + ); + } + + // package private for tests + TimeValue getFindPeersInterval() { + return findPeersInterval; + } + public void activate(final DiscoveryNodes lastAcceptedNodes) { logger.trace("activating with {}", lastAcceptedNodes); diff --git a/server/src/main/java/org/opensearch/env/NodeMetadata.java b/server/src/main/java/org/opensearch/env/NodeMetadata.java index 3944ecfd72d4c..03e92424c4517 100644 --- a/server/src/main/java/org/opensearch/env/NodeMetadata.java +++ b/server/src/main/java/org/opensearch/env/NodeMetadata.java @@ -32,7 +32,6 @@ package org.opensearch.env; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.common.ParseField; import org.opensearch.common.xcontent.ObjectParser; @@ -93,7 +92,7 @@ public Version nodeVersion() { public NodeMetadata upgradeToCurrentVersion() { if (nodeVersion.equals(Version.V_EMPTY)) { - assert Version.CURRENT.major <= LegacyESVersion.V_7_0_0.major + 1 : "version is required in the node metadata from v9 onwards"; + assert Version.CURRENT.major <= Version.V_3_0_0.major + 1 : "version is required in the node metadata from v4 onwards"; return new NodeMetadata(nodeId, Version.CURRENT); } @@ -127,8 +126,7 @@ public void setNodeVersionId(int nodeVersionId) { public NodeMetadata build() { final Version nodeVersion; if (this.nodeVersion == null) { - assert Version.CURRENT.major <= LegacyESVersion.V_7_0_0.major + 1 - : "version is required in the node metadata from v9 onwards"; + assert Version.CURRENT.major <= Version.V_3_0_0.major + 1 : "version is required in the node metadata from v4 onwards"; nodeVersion = Version.V_EMPTY; } else { nodeVersion = this.nodeVersion; } diff --git a/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java b/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java index f70fdea153893..48dd0ddf90413 100644 --- a/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java +++ b/server/src/main/java/org/opensearch/gateway/GatewayMetaState.java @@ -37,7 +37,6 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.util.SetOnce; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; import org.opensearch.ExceptionsHelper; import org.opensearch.Version; @@ -136,8 +135,8 @@ public void start( long currentTerm = onDiskState.currentTerm; if (onDiskState.empty()) { - assert Version.CURRENT.major <= LegacyESVersion.V_7_0_0.major + 1 - : "legacy metadata loader is not needed anymore from v9 onwards"; + assert Version.CURRENT.major <= Version.V_3_0_0.major + 1 + : "legacy metadata loader is not needed anymore from v4 onwards"; final Tuple<Manifest, Metadata> legacyState = metaStateService.loadFullState(); if (legacyState.v1().isEmpty() == false) { metadata = legacyState.v2(); diff --git a/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayMetaState.java b/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayMetaState.java index be914c6a40a83..ba1490a7929bd 100644 --- a/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayMetaState.java +++ b/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayMetaState.java @@ -37,7 +37,6 @@
import org.opensearch.action.FailedNodeException; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.PlainActionFuture; -import org.opensearch.action.support.nodes.BaseNodeRequest; import org.opensearch.action.support.nodes.BaseNodeResponse; import org.opensearch.action.support.nodes.BaseNodesRequest; import org.opensearch.action.support.nodes.BaseNodesResponse; @@ -52,6 +51,7 @@ import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.unit.TimeValue; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportService; import java.io.IOException; @@ -168,7 +168,7 @@ protected void writeNodesTo(StreamOutput out, List<NodeGatewayMetaState> nodes) * * @opensearch.internal */ - public static class NodeRequest extends BaseNodeRequest { + public static class NodeRequest extends TransportRequest { NodeRequest() {} NodeRequest(StreamInput in) throws IOException { diff --git a/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayStartedShards.java b/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayStartedShards.java index c43f539243d7a..fb114bff9aa12 100644 --- a/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayStartedShards.java +++ b/server/src/main/java/org/opensearch/gateway/TransportNodesListGatewayStartedShards.java @@ -40,7 +40,6 @@ import org.opensearch.action.ActionType; import org.opensearch.action.FailedNodeException; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.nodes.BaseNodeRequest; import org.opensearch.action.support.nodes.BaseNodeResponse; import org.opensearch.action.support.nodes.BaseNodesRequest; import org.opensearch.action.support.nodes.BaseNodesResponse; @@ -65,6 +64,7 @@ import org.opensearch.indices.IndicesService; import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportService; import java.io.IOException; @@ -159,7 +159,8 @@ protected NodeGatewayStartedShards nodeOperation(NodeRequest request) { nodeEnv.availableShardPaths(request.shardId) ); if (shardStateMetadata != null) { - if (indicesService.getShardOrNull(shardId) == null) { + if (indicesService.getShardOrNull(shardId) == null + && shardStateMetadata.indexDataLocation == ShardStateMetadata.IndexDataLocation.LOCAL) { final String customDataPath; if (request.getCustomDataPath() != null) { customDataPath = request.getCustomDataPath(); @@ -307,7 +308,7 @@ protected void writeNodesTo(StreamOutput out, List<NodeGatewayStartedShards> nodes) * * @opensearch.internal */ - public static class NodeRequest extends BaseNodeRequest { + public static class NodeRequest extends TransportRequest { private final ShardId shardId; @Nullable diff --git a/server/src/main/java/org/opensearch/index/IndexModule.java b/server/src/main/java/org/opensearch/index/IndexModule.java index e52a2ba39ed52..2a462f6165184 100644 --- a/server/src/main/java/org/opensearch/index/IndexModule.java +++ b/server/src/main/java/org/opensearch/index/IndexModule.java @@ -70,12 +70,14 @@ import org.opensearch.index.shard.SearchOperationListener; import org.opensearch.index.similarity.SimilarityService; import org.opensearch.index.store.FsDirectoryFactory; +import org.opensearch.index.store.RemoteSnapshotDirectoryFactory; import org.opensearch.indices.IndicesQueryCache; import
org.opensearch.indices.breaker.CircuitBreakerService; import org.opensearch.indices.fielddata.cache.IndicesFieldDataCache; import org.opensearch.indices.mapper.MapperRegistry; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.plugins.IndexStorePlugin; +import org.opensearch.repositories.RepositoriesService; import org.opensearch.script.ScriptService; import org.opensearch.search.aggregations.support.ValuesSourceRegistry; import org.opensearch.threadpool.ThreadPool; @@ -94,6 +96,7 @@ import java.util.function.BooleanSupplier; import java.util.function.Consumer; import java.util.function.Function; +import java.util.function.Supplier; /** * IndexModule represents the central extension point for index level custom implementations like: @@ -390,15 +393,6 @@ IndexEventListener freeze() { // pkg private for testing } } - public static boolean isBuiltinType(String storeType) { - for (Type type : Type.values()) { - if (type.match(storeType)) { - return true; - } - } - return false; - } - /** * Type of file system * @@ -409,7 +403,8 @@ public enum Type { NIOFS("niofs"), MMAPFS("mmapfs"), SIMPLEFS("simplefs"), - FS("fs"); + FS("fs"), + REMOTE_SNAPSHOT("remote_snapshot"); private final String settingsKey; private final boolean deprecated; @@ -426,7 +421,7 @@ public enum Type { private static final Map<String, Type> TYPES; static { - final Map<String, Type> types = new HashMap<>(4); + final Map<String, Type> types = new HashMap<>(values().length); for (final Type type : values()) { types.put(type.settingsKey, type); } @@ -441,6 +436,10 @@ public boolean isDeprecated() { return deprecated; } + static boolean containsSettingsKey(String key) { + return TYPES.containsKey(key); + } + public static Type fromSettingsKey(final String key) { final Type type = TYPES.get(key); if (type == null) { @@ -459,6 +458,13 @@ public boolean match(String setting) { return getSettingsKey().equals(setting); } + /** + * Convenience method to check whether the given IndexSettings contains + * an {@link #INDEX_STORE_TYPE_SETTING} set to the value of this type. 
+ */ + public boolean match(IndexSettings settings) { + return match(INDEX_STORE_TYPE_SETTING.get(settings.getSettings())); + } } public static Type defaultStoreType(final boolean allowMmap) { @@ -562,7 +568,7 @@ private static IndexStorePlugin.DirectoryFactory getDirectoryFactory( if (storeType.isEmpty() || Type.FS.getSettingsKey().equals(storeType)) { type = defaultStoreType(allowMmap); } else { - if (isBuiltinType(storeType)) { + if (Type.containsSettingsKey(storeType)) { type = Type.fromSettingsKey(storeType); } else { type = null; @@ -572,7 +578,7 @@ private static IndexStorePlugin.DirectoryFactory getDirectoryFactory( throw new IllegalArgumentException("store type [" + storeType + "] is not allowed because mmap is disabled"); } final IndexStorePlugin.DirectoryFactory factory; - if (storeType.isEmpty() || isBuiltinType(storeType)) { + if (storeType.isEmpty()) { factory = DEFAULT_DIRECTORY_FACTORY; } else { factory = indexStoreFactories.get(storeType); @@ -641,4 +647,26 @@ private void ensureNotFrozen() { } } + public static Map<String, IndexStorePlugin.DirectoryFactory> createBuiltInDirectoryFactories( + Supplier<RepositoriesService> repositoriesService + ) { + final Map<String, IndexStorePlugin.DirectoryFactory> factories = new HashMap<>(); + for (Type type : Type.values()) { + switch (type) { + case HYBRIDFS: + case NIOFS: + case FS: + case MMAPFS: + case SIMPLEFS: + factories.put(type.getSettingsKey(), DEFAULT_DIRECTORY_FACTORY); + break; + case REMOTE_SNAPSHOT: + factories.put(type.getSettingsKey(), new RemoteSnapshotDirectoryFactory(repositoriesService)); + break; + default: + throw new IllegalStateException("No directory factory mapping for built-in type " + type); + } + } + return factories; + } } diff --git a/server/src/main/java/org/opensearch/index/IndexSettings.java b/server/src/main/java/org/opensearch/index/IndexSettings.java index 9c7f4804755d4..7648f0a192ce7 100644 --- a/server/src/main/java/org/opensearch/index/IndexSettings.java +++ b/server/src/main/java/org/opensearch/index/IndexSettings.java @@ -34,7 +34,6 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.index.MergePolicy; import org.apache.lucene.sandbox.index.MergeOnFlushMergePolicy; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.common.Strings; @@ -551,6 +550,30 @@ public final class IndexSettings { Property.Dynamic ); + public static final Setting<String> SEARCHABLE_SNAPSHOT_REPOSITORY = Setting.simpleString( + "index.searchable_snapshot.repository", + Property.IndexScope, + Property.InternalIndex + ); + + public static final Setting<String> SEARCHABLE_SNAPSHOT_ID_UUID = Setting.simpleString( + "index.searchable_snapshot.snapshot_id.uuid", + Property.IndexScope, + Property.InternalIndex + ); + + public static final Setting<String> SEARCHABLE_SNAPSHOT_ID_NAME = Setting.simpleString( + "index.searchable_snapshot.snapshot_id.name", + Property.IndexScope, + Property.InternalIndex + ); + + public static final Setting<String> SEARCHABLE_SNAPSHOT_INDEX_ID = Setting.simpleString( + "index.searchable_snapshot.index.id", + Property.IndexScope, + Property.InternalIndex + ); + private final Index index; private final Version version; private final Logger logger; @@ -1122,8 +1145,7 @@ public int getTranslogRetentionTotalFiles() { } private static boolean shouldDisableTranslogRetention(Settings settings) { - return INDEX_SOFT_DELETES_SETTING.get(settings) - && IndexMetadata.SETTING_INDEX_VERSION_CREATED.get(settings).onOrAfter(LegacyESVersion.V_7_4_0); + return INDEX_SOFT_DELETES_SETTING.get(settings); } /** diff --git
a/server/src/main/java/org/opensearch/index/analysis/AnalysisRegistry.java b/server/src/main/java/org/opensearch/index/analysis/AnalysisRegistry.java index 8ec2b70001fc9..7a78d97edf360 100644 --- a/server/src/main/java/org/opensearch/index/analysis/AnalysisRegistry.java +++ b/server/src/main/java/org/opensearch/index/analysis/AnalysisRegistry.java @@ -35,7 +35,6 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.core.KeywordTokenizer; import org.apache.lucene.analysis.core.WhitespaceTokenizer; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; @@ -213,12 +212,10 @@ public Analyzer getAnalyzer(String analyzer) throws IOException { } }); } else if ("standard_html_strip".equals(analyzer)) { - if (Version.CURRENT.onOrAfter(LegacyESVersion.V_7_0_0)) { - throw new IllegalArgumentException( - "[standard_html_strip] analyzer is not supported for new indices, " - + "use a custom analyzer using [standard] tokenizer and [html_strip] char_filter, plus [lowercase] filter" - ); - } + throw new IllegalArgumentException( + "[standard_html_strip] analyzer is not supported for new indices, " + + "use a custom analyzer using [standard] tokenizer and [html_strip] char_filter, plus [lowercase] filter" + ); } return analyzerProvider.get(environment, analyzer).get(); diff --git a/server/src/main/java/org/opensearch/index/analysis/ShingleTokenFilterFactory.java b/server/src/main/java/org/opensearch/index/analysis/ShingleTokenFilterFactory.java index 701a9302fc164..e66ae20508dfe 100644 --- a/server/src/main/java/org/opensearch/index/analysis/ShingleTokenFilterFactory.java +++ b/server/src/main/java/org/opensearch/index/analysis/ShingleTokenFilterFactory.java @@ -35,8 +35,6 @@ import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.miscellaneous.DisableGraphAttribute; import org.apache.lucene.analysis.shingle.ShingleFilter; -import org.opensearch.LegacyESVersion; -import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.settings.Settings; import org.opensearch.env.Environment; import org.opensearch.index.IndexSettings; @@ -48,8 +46,6 @@ */ public class ShingleTokenFilterFactory extends AbstractTokenFilterFactory { - private static final DeprecationLogger DEPRECATION_LOGGER = DeprecationLogger.getLogger(ShingleTokenFilterFactory.class); - private final Factory factory; public ShingleTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { @@ -61,27 +57,17 @@ public ShingleTokenFilterFactory(IndexSettings indexSettings, Environment enviro int shingleDiff = maxShingleSize - minShingleSize + (outputUnigrams ? 1 : 0); if (shingleDiff > maxAllowedShingleDiff) { - if (indexSettings.getIndexVersionCreated().onOrAfter(LegacyESVersion.V_7_0_0)) { - throw new IllegalArgumentException( - "In Shingle TokenFilter the difference between max_shingle_size and min_shingle_size (and +1 if outputting unigrams)" - + " must be less than or equal to: [" - + maxAllowedShingleDiff - + "] but was [" - + shingleDiff - + "]. This limit" - + " can be set by changing the [" - + IndexSettings.MAX_SHINGLE_DIFF_SETTING.getKey() - + "] index level setting." 
- ); - } else { - deprecationLogger.deprecate( - "excessive_shingle_diff", - "Deprecated big difference between maxShingleSize and minShingleSize" - + " in Shingle TokenFilter, expected difference must be less than or equal to: [" - + maxAllowedShingleDiff - + "]" - ); - } + throw new IllegalArgumentException( + "In Shingle TokenFilter the difference between max_shingle_size and min_shingle_size (and +1 if outputting unigrams)" + + " must be less than or equal to: [" + + maxAllowedShingleDiff + + "] but was [" + + shingleDiff + + "]. This limit" + + " can be set by changing the [" + + IndexSettings.MAX_SHINGLE_DIFF_SETTING.getKey() + + "] index level setting." + ); } Boolean outputUnigramsIfNoShingles = settings.getAsBoolean("output_unigrams_if_no_shingles", false); @@ -105,16 +91,7 @@ public TokenStream create(TokenStream tokenStream) { @Override public TokenFilterFactory getSynonymFilter() { - if (indexSettings.getIndexVersionCreated().onOrAfter(LegacyESVersion.V_7_0_0)) { - throw new IllegalArgumentException("Token filter [" + name() + "] cannot be used to parse synonyms"); - } else { - DEPRECATION_LOGGER.deprecate( - name() + "_synonym_tokenfilters", - "Token filter " + name() + "] will not be usable to parse synonyms after v7.0" - ); - } - return this; - + throw new IllegalArgumentException("Token filter [" + name() + "] cannot be used to parse synonyms"); } public Factory getInnerFactory() { diff --git a/server/src/main/java/org/opensearch/index/engine/ReadOnlyEngine.java b/server/src/main/java/org/opensearch/index/engine/ReadOnlyEngine.java index f426768119c1d..dceb26bc33aa7 100644 --- a/server/src/main/java/org/opensearch/index/engine/ReadOnlyEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/ReadOnlyEngine.java @@ -39,8 +39,6 @@ import org.apache.lucene.search.ReferenceManager; import org.apache.lucene.store.Directory; import org.apache.lucene.store.Lock; -import org.opensearch.LegacyESVersion; -import org.opensearch.Version; import org.opensearch.common.concurrent.GatedCloseable; import org.opensearch.common.lucene.Lucene; import org.opensearch.common.lucene.index.OpenSearchDirectoryReader; @@ -174,25 +172,21 @@ protected void ensureMaxSeqNoEqualsToGlobalCheckpoint(final SeqNoStats seqNoStat if (requireCompleteHistory == false) { return; } - // Before 8.0 the global checkpoint is not known and up to date when the engine is created after + // Before 3.0 the global checkpoint is not known and up to date when the engine is created after // peer recovery, so we only check the max seq no / global checkpoint coherency when the global // checkpoint is different from the unassigned sequence number value. // In addition to that we only execute the check if the index the engine belongs to has been // created after the refactoring of the Close Index API and its TransportVerifyShardBeforeCloseAction // that guarantee that all operations have been flushed to Lucene. 
- final Version indexVersionCreated = engineConfig.getIndexSettings().getIndexVersionCreated(); - if (indexVersionCreated.onOrAfter(LegacyESVersion.V_7_2_0) - || (seqNoStats.getGlobalCheckpoint() != SequenceNumbers.UNASSIGNED_SEQ_NO)) { - assert assertMaxSeqNoEqualsToGlobalCheckpoint(seqNoStats.getMaxSeqNo(), seqNoStats.getGlobalCheckpoint()); - if (seqNoStats.getMaxSeqNo() != seqNoStats.getGlobalCheckpoint()) { - throw new IllegalStateException( - "Maximum sequence number [" - + seqNoStats.getMaxSeqNo() - + "] from last commit does not match global checkpoint [" - + seqNoStats.getGlobalCheckpoint() - + "]" - ); - } + assert assertMaxSeqNoEqualsToGlobalCheckpoint(seqNoStats.getMaxSeqNo(), seqNoStats.getGlobalCheckpoint()); + if (seqNoStats.getMaxSeqNo() != seqNoStats.getGlobalCheckpoint()) { + throw new IllegalStateException( + "Maximum sequence number [" + + seqNoStats.getMaxSeqNo() + + "] from last commit does not match global checkpoint [" + + seqNoStats.getGlobalCheckpoint() + + "]" + ); } } diff --git a/server/src/main/java/org/opensearch/index/get/GetResult.java b/server/src/main/java/org/opensearch/index/get/GetResult.java index e2f23353f250e..5da4f8d5c7833 100644 --- a/server/src/main/java/org/opensearch/index/get/GetResult.java +++ b/server/src/main/java/org/opensearch/index/get/GetResult.java @@ -32,7 +32,6 @@ package org.opensearch.index.get; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchParseException; import org.opensearch.Version; import org.opensearch.common.Strings; @@ -105,20 +104,8 @@ public GetResult(StreamInput in) throws IOException { if (source.length() == 0) { source = null; } - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { - documentFields = readFields(in); - metaFields = readFields(in); - } else { - Map<String, DocumentField> fields = readFields(in); - documentFields = new HashMap<>(); - metaFields = new HashMap<>(); - fields.forEach( - (fieldName, docField) -> (MapperService.META_FIELDS_BEFORE_7DOT8.contains(fieldName) ?
metaFields : documentFields).put( - fieldName, - docField - ) - ); - } + documentFields = readFields(in); + metaFields = readFields(in); } else { metaFields = Collections.emptyMap(); documentFields = Collections.emptyMap(); @@ -446,12 +433,8 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(exists); if (exists) { out.writeBytesReference(source); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { - writeFields(out, documentFields); - writeFields(out, metaFields); - } else { - writeFields(out, this.getFields()); - } + writeFields(out, documentFields); + writeFields(out, metaFields); } } diff --git a/server/src/main/java/org/opensearch/index/mapper/DateFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/DateFieldMapper.java index 8e01c1f41f078..4b19fe4c5de79 100644 --- a/server/src/main/java/org/opensearch/index/mapper/DateFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/DateFieldMapper.java @@ -45,7 +45,6 @@ import org.opensearch.Version; import org.opensearch.common.Nullable; import org.opensearch.common.geo.ShapeRelation; -import org.opensearch.common.joda.Joda; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.lucene.BytesRefs; import org.opensearch.common.time.DateFormatter; @@ -260,11 +259,7 @@ public Builder( private DateFormatter buildFormatter() { try { - if (Joda.isJodaPattern(indexCreatedVersion, format.getValue())) { - return Joda.forPattern(format.getValue()).withLocale(locale.getValue()); - } else { - return DateFormatter.forPattern(format.getValue()).withLocale(locale.getValue()); - } + return DateFormatter.forPattern(format.getValue()).withLocale(locale.getValue()); } catch (IllegalArgumentException e) { throw new IllegalArgumentException("Error parsing [format] on field [" + name() + "]: " + e.getMessage(), e); } diff --git a/server/src/main/java/org/opensearch/index/mapper/DocumentMapper.java b/server/src/main/java/org/opensearch/index/mapper/DocumentMapper.java index 1e5b3b4a9c93e..23d58fa18b7e3 100644 --- a/server/src/main/java/org/opensearch/index/mapper/DocumentMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/DocumentMapper.java @@ -40,7 +40,6 @@ import org.apache.lucene.search.Weight; import org.apache.lucene.util.BytesRef; import org.opensearch.OpenSearchGenerationException; -import org.opensearch.Version; import org.opensearch.common.bytes.BytesArray; import org.opensearch.common.compress.CompressedXContent; import org.opensearch.common.settings.Settings; @@ -90,12 +89,8 @@ public Builder(RootObjectMapper.Builder builder, MapperService mapperService) { this.builderContext = new Mapper.BuilderContext(indexSettings, new ContentPath(1)); this.rootObjectMapper = builder.build(builderContext); - final String type = rootObjectMapper.name(); final DocumentMapper existingMapper = mapperService.documentMapper(); - final Version indexCreatedVersion = mapperService.getIndexSettings().getIndexVersionCreated(); - final Map<String, MetadataFieldMapper.TypeParser> metadataMapperParsers = mapperService.mapperRegistry.getMetadataMapperParsers( - indexCreatedVersion - ); + final Map<String, MetadataFieldMapper.TypeParser> metadataMapperParsers = mapperService.mapperRegistry.getMetadataMapperParsers(); for (Map.Entry<String, MetadataFieldMapper.TypeParser> entry : metadataMapperParsers.entrySet()) { final String name = entry.getKey(); final MetadataFieldMapper existingMetadataMapper = existingMapper == null diff --git a/server/src/main/java/org/opensearch/index/mapper/DocumentMapperParser.java b/server/src/main/java/org/opensearch/index/mapper/DocumentMapperParser.java index
9fa088396a38b..237d69e3ad244 100644 --- a/server/src/main/java/org/opensearch/index/mapper/DocumentMapperParser.java +++ b/server/src/main/java/org/opensearch/index/mapper/DocumentMapperParser.java @@ -91,7 +91,7 @@ public DocumentMapperParser( this.scriptService = scriptService; this.typeParsers = mapperRegistry.getMapperParsers(); this.indexVersionCreated = indexSettings.getIndexVersionCreated(); - this.rootTypeParsers = mapperRegistry.getMetadataMapperParsers(indexVersionCreated); + this.rootTypeParsers = mapperRegistry.getMetadataMapperParsers(); } public Mapper.TypeParser.ParserContext parserContext() { diff --git a/server/src/main/java/org/opensearch/index/mapper/LegacyGeoShapeFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/LegacyGeoShapeFieldMapper.java index 71d76c6a835c2..a2224e7214f4b 100644 --- a/server/src/main/java/org/opensearch/index/mapper/LegacyGeoShapeFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/LegacyGeoShapeFieldMapper.java @@ -41,7 +41,6 @@ import org.apache.lucene.spatial.prefix.tree.PackedQuadPrefixTree; import org.apache.lucene.spatial.prefix.tree.QuadPrefixTree; import org.apache.lucene.spatial.prefix.tree.SpatialPrefixTree; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchParseException; import org.opensearch.Version; import org.opensearch.common.Explicit; @@ -577,11 +576,7 @@ public void doXContentBody(XContentBuilder builder, boolean includeDefaults, Par } else if (includeDefaults && fieldType().treeLevels() == 0) { // defaults only make sense if tree levels are not specified builder.field(DeprecatedParameters.Names.PRECISION.getPreferredName(), DistanceUnit.METERS.toString(50)); } - - if (indexCreatedVersion.onOrAfter(LegacyESVersion.V_7_0_0)) { - builder.field(DeprecatedParameters.Names.STRATEGY.getPreferredName(), fieldType().strategy().getStrategyName()); - } - + builder.field(DeprecatedParameters.Names.STRATEGY.getPreferredName(), fieldType().strategy().getStrategyName()); if (includeDefaults || fieldType().distanceErrorPct() != fieldType().defaultDistanceErrorPct) { builder.field(DeprecatedParameters.Names.DISTANCE_ERROR_PCT.getPreferredName(), fieldType().distanceErrorPct()); } diff --git a/server/src/main/java/org/opensearch/index/mapper/MapperService.java b/server/src/main/java/org/opensearch/index/mapper/MapperService.java index f0d0b77396b0e..af37ddc41b567 100644 --- a/server/src/main/java/org/opensearch/index/mapper/MapperService.java +++ b/server/src/main/java/org/opensearch/index/mapper/MapperService.java @@ -36,7 +36,6 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.DelegatingAnalyzerWrapper; import org.opensearch.Assertions; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.MappingMetadata; @@ -228,8 +227,7 @@ public MapperService( this.mapperRegistry = mapperRegistry; this.idFieldDataEnabled = idFieldDataEnabled; - if (INDEX_MAPPER_DYNAMIC_SETTING.exists(indexSettings.getSettings()) - && indexSettings.getIndexVersionCreated().onOrAfter(LegacyESVersion.V_7_0_0)) { + if (INDEX_MAPPER_DYNAMIC_SETTING.exists(indexSettings.getSettings())) { throw new IllegalArgumentException("Setting " + INDEX_MAPPER_DYNAMIC_SETTING.getKey() + " was removed after version 6.0.0"); } } @@ -674,7 +672,7 @@ public void close() throws IOException { * this method considers all mapper plugins */ public boolean isMetadataField(String field) { - return 
mapperRegistry.isMetadataField(indexVersionCreated, field); + return mapperRegistry.isMetadataField(field); } /** diff --git a/server/src/main/java/org/opensearch/index/mapper/RangeFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/RangeFieldMapper.java index 5257609e0cba9..faf9fd5182690 100644 --- a/server/src/main/java/org/opensearch/index/mapper/RangeFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/RangeFieldMapper.java @@ -40,7 +40,6 @@ import org.opensearch.common.Explicit; import org.opensearch.common.collect.Tuple; import org.opensearch.common.geo.ShapeRelation; -import org.opensearch.common.joda.Joda; import org.opensearch.common.lucene.Lucene; import org.opensearch.common.network.InetAddresses; import org.opensearch.common.settings.Setting; @@ -197,12 +196,7 @@ protected RangeFieldType setupFieldType(BuilderContext context) { // The builder context may not have index created version, falling back to indexCreatedVersion // property of this mapper builder. - DateFormatter dateTimeFormatter; - if (Joda.isJodaPattern(context.indexCreatedVersionOrDefault(indexCreatedVersion), format.getValue())) { - dateTimeFormatter = Joda.forPattern(format.getValue()).withLocale(locale.getValue()); - } else { - dateTimeFormatter = DateFormatter.forPattern(format.getValue()).withLocale(locale.getValue()); - } + DateFormatter dateTimeFormatter = DateFormatter.forPattern(format.getValue()).withLocale(locale.getValue()); return new RangeFieldType( buildFullName(context), index.getValue(), diff --git a/server/src/main/java/org/opensearch/index/mapper/TextFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/TextFieldMapper.java index 82bd8ebb4d362..15aae1774213a 100644 --- a/server/src/main/java/org/opensearch/index/mapper/TextFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/TextFieldMapper.java @@ -69,7 +69,6 @@ import org.apache.lucene.util.automaton.Automata; import org.apache.lucene.util.automaton.Automaton; import org.apache.lucene.util.automaton.Operations; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.common.collect.Iterators; import org.opensearch.common.lucene.Lucene; @@ -422,7 +421,7 @@ private PrefixFieldMapper buildPrefixMapper(BuilderContext context, FieldType fi * or a multi-field). This way search will continue to work on old indices and new indices * will use the expected full name. */ - String fullName = indexCreatedVersion.before(LegacyESVersion.V_7_2_1) ? name() : buildFullName(context); + String fullName = buildFullName(context); // Copy the index options of the main field to allow phrase queries on // the prefix field. 
FieldType pft = new FieldType(fieldType); diff --git a/server/src/main/java/org/opensearch/index/query/IntervalsSourceProvider.java b/server/src/main/java/org/opensearch/index/query/IntervalsSourceProvider.java index cc69a44c9c80c..e1cb0cdff4ebd 100644 --- a/server/src/main/java/org/opensearch/index/query/IntervalsSourceProvider.java +++ b/server/src/main/java/org/opensearch/index/query/IntervalsSourceProvider.java @@ -41,7 +41,6 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.automaton.CompiledAutomaton; import org.apache.lucene.util.automaton.RegExp; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.common.ParseField; import org.opensearch.common.ParsingException; @@ -166,11 +165,7 @@ public Match(StreamInput in) throws IOException { } this.analyzer = in.readOptionalString(); this.filter = in.readOptionalWriteable(IntervalFilter::new); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - this.useField = in.readOptionalString(); - } else { - this.useField = null; - } + this.useField = in.readOptionalString(); } @Override @@ -234,9 +229,7 @@ public void writeTo(StreamOutput out) throws IOException { } out.writeOptionalString(analyzer); out.writeOptionalWriteable(filter); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - out.writeOptionalString(useField); - } + out.writeOptionalString(useField); } @Override diff --git a/server/src/main/java/org/opensearch/index/query/MultiMatchQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/MultiMatchQueryBuilder.java index 4e7b6fb51291b..fe3bcd81e72be 100644 --- a/server/src/main/java/org/opensearch/index/query/MultiMatchQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/MultiMatchQueryBuilder.java @@ -34,7 +34,6 @@ import org.apache.lucene.search.FuzzyQuery; import org.apache.lucene.search.Query; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchParseException; import org.opensearch.common.ParseField; import org.opensearch.common.ParsingException; @@ -254,9 +253,6 @@ public MultiMatchQueryBuilder(StreamInput in) throws IOException { maxExpansions = in.readVInt(); minimumShouldMatch = in.readOptionalString(); fuzzyRewrite = in.readOptionalString(); - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - in.readOptionalBoolean(); // unused use_dis_max flag - } tieBreaker = in.readOptionalFloat(); lenient = in.readOptionalBoolean(); cutoffFrequency = in.readOptionalFloat(); @@ -282,9 +278,6 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeVInt(maxExpansions); out.writeOptionalString(minimumShouldMatch); out.writeOptionalString(fuzzyRewrite); - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeOptionalBoolean(null); - } out.writeOptionalFloat(tieBreaker); out.writeOptionalBoolean(lenient); out.writeOptionalFloat(cutoffFrequency); diff --git a/server/src/main/java/org/opensearch/index/query/VectorGeoShapeQueryProcessor.java b/server/src/main/java/org/opensearch/index/query/VectorGeoShapeQueryProcessor.java index e11b22e9296cf..d50585ae0aebf 100644 --- a/server/src/main/java/org/opensearch/index/query/VectorGeoShapeQueryProcessor.java +++ b/server/src/main/java/org/opensearch/index/query/VectorGeoShapeQueryProcessor.java @@ -67,7 +67,7 @@ public class VectorGeoShapeQueryProcessor { public Query geoShapeQuery(Geometry shape, String fieldName, ShapeRelation relation, QueryShardContext context) { // CONTAINS queries are not supported by VECTOR strategy for indices 
created before version 7.5.0 (Lucene 8.3.0) - if (relation == ShapeRelation.CONTAINS && context.indexVersionCreated().before(LegacyESVersion.V_7_5_0)) { + if (relation == ShapeRelation.CONTAINS && context.indexVersionCreated().before(LegacyESVersion.fromId(7050099))) { throw new QueryShardException(context, ShapeRelation.CONTAINS + " query relation not supported for Field [" + fieldName + "]."); } // wrap geoQuery as a ConstantScoreQuery diff --git a/server/src/main/java/org/opensearch/index/query/functionscore/ScriptScoreQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/functionscore/ScriptScoreQueryBuilder.java index 9605ba424bfb0..ef606ce35b84f 100644 --- a/server/src/main/java/org/opensearch/index/query/functionscore/ScriptScoreQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/functionscore/ScriptScoreQueryBuilder.java @@ -33,7 +33,6 @@ package org.opensearch.index.query.functionscore; import org.apache.lucene.search.Query; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; import org.opensearch.common.ParseField; import org.opensearch.common.io.stream.StreamInput; @@ -123,22 +122,14 @@ public ScriptScoreQueryBuilder(QueryBuilder query, Script script) { public ScriptScoreQueryBuilder(StreamInput in) throws IOException { super(in); query = in.readNamedWriteable(QueryBuilder.class); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_5_0)) { - script = new Script(in); - } else { - script = in.readNamedWriteable(ScriptScoreFunctionBuilder.class).getScript(); - } + script = new Script(in); minScore = in.readOptionalFloat(); } @Override protected void doWriteTo(StreamOutput out) throws IOException { out.writeNamedWriteable(query); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_5_0)) { - script.writeTo(out); - } else { - out.writeNamedWriteable(new ScriptScoreFunctionBuilder(script)); - } + script.writeTo(out); out.writeOptionalFloat(minScore); } diff --git a/server/src/main/java/org/opensearch/index/refresh/RefreshStats.java b/server/src/main/java/org/opensearch/index/refresh/RefreshStats.java index 8a54e5105c61e..fc874608fb3b1 100644 --- a/server/src/main/java/org/opensearch/index/refresh/RefreshStats.java +++ b/server/src/main/java/org/opensearch/index/refresh/RefreshStats.java @@ -32,7 +32,6 @@ package org.opensearch.index.refresh; -import org.opensearch.LegacyESVersion; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.io.stream.Writeable; @@ -68,10 +67,8 @@ public RefreshStats() {} public RefreshStats(StreamInput in) throws IOException { total = in.readVLong(); totalTimeInMillis = in.readVLong(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - externalTotal = in.readVLong(); - externalTotalTimeInMillis = in.readVLong(); - } + externalTotal = in.readVLong(); + externalTotalTimeInMillis = in.readVLong(); listeners = in.readVInt(); } @@ -79,10 +76,8 @@ public RefreshStats(StreamInput in) throws IOException { public void writeTo(StreamOutput out) throws IOException { out.writeVLong(total); out.writeVLong(totalTimeInMillis); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - out.writeVLong(externalTotal); - out.writeVLong(externalTotalTimeInMillis); - } + out.writeVLong(externalTotal); + out.writeVLong(externalTotalTimeInMillis); out.writeVInt(listeners); } diff --git a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java 
b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java index 55d95381923b3..ea1604c16190b 100644 --- a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java +++ b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java @@ -223,7 +223,7 @@ public class ReplicationTracker extends AbstractIndexShardComponent implements L /** * Whether there should be a peer recovery retention lease (PRRL) for every tracked shard copy. Always true on indices created from - * {@link LegacyESVersion#V_7_4_0} onwards, because these versions create PRRLs properly. May be false on indices created in an + * {@code LegacyESVersion#V_7_4_0} onwards, because these versions create PRRLs properly. May be false on indices created in an * earlier version if we recently did a rolling upgrade and * {@link ReplicationTracker#createMissingPeerRecoveryRetentionLeases(ActionListener)} has not yet completed. Is only permitted * to change from false to true; can be removed once support for pre-PRRL indices is no longer needed. @@ -996,9 +996,7 @@ public ReplicationTracker( this.routingTable = null; this.replicationGroup = null; this.hasAllPeerRecoveryRetentionLeases = indexSettings.getIndexVersionCreated().onOrAfter(LegacyESVersion.V_7_6_0) - || (indexSettings.isSoftDeleteEnabled() - && indexSettings.getIndexVersionCreated().onOrAfter(LegacyESVersion.V_7_4_0) - && indexSettings.getIndexMetadata().getState() == IndexMetadata.State.OPEN); + || (indexSettings.isSoftDeleteEnabled() && indexSettings.getIndexMetadata().getState() == IndexMetadata.State.OPEN); this.fileBasedRecoveryThreshold = IndexSettings.FILE_BASED_RECOVERY_THRESHOLD_SETTING.get(indexSettings.getSettings()); this.safeCommitInfoSupplier = safeCommitInfoSupplier; this.onReplicationGroupUpdated = onReplicationGroupUpdated; @@ -1126,7 +1124,7 @@ public synchronized void activatePrimaryMode(final long localCheckpoint) { /** * Creates a peer recovery retention lease for this shard, if one does not already exist and this shard is the sole shard copy in the * replication group. If one does not already exist and yet there are other shard copies in this group then we must have just done - * a rolling upgrade from a version before {@link LegacyESVersion#V_7_4_0}, in which case the missing leases should be created + * a rolling upgrade from a version before {@code LegacyESVersion#V_7_4_0}, in which case the missing leases should be created * asynchronously by the caller using {@link ReplicationTracker#createMissingPeerRecoveryRetentionLeases(ActionListener)}. 
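 * <p>Illustrative example: a primary that is the sole copy in its replication group adds its
 * own lease here immediately, while a primary that still sees other shard copies skips this
 * step and relies on the asynchronous backfill described above.</p>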
*/ private void addPeerRecoveryRetentionLeaseForSolePrimary() { @@ -1495,7 +1493,7 @@ public synchronized void activateWithPrimaryContext(PrimaryContext primaryContex assert primaryMode == false; if (primaryContext.checkpoints.containsKey(shardAllocationId) == false) { // can happen if the old primary was on an old version - assert indexSettings.getIndexVersionCreated().before(LegacyESVersion.V_7_3_0); + assert indexSettings.getIndexVersionCreated().before(LegacyESVersion.fromId(7000099)); throw new IllegalStateException("primary context [" + primaryContext + "] does not contain " + shardAllocationId); } final Runnable runAfter = getClusterManagerUpdateOperationFromCurrentState(); @@ -1528,7 +1526,7 @@ public synchronized boolean hasAllPeerRecoveryRetentionLeases() { /** * Create any required peer-recovery retention leases that do not currently exist because we just did a rolling upgrade from a version - * prior to {@link LegacyESVersion#V_7_4_0} that does not create peer-recovery retention leases. + * prior to {@code LegacyESVersion#V_7_4_0} that does not create peer-recovery retention leases. */ public synchronized void createMissingPeerRecoveryRetentionLeases(ActionListener listener) { if (hasAllPeerRecoveryRetentionLeases == false) { diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index d05f7c34f80ce..3a3c4b19a02f6 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -52,7 +52,6 @@ import org.apache.lucene.util.ThreadInterruptedException; import org.opensearch.Assertions; import org.opensearch.ExceptionsHelper; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRunnable; @@ -679,7 +678,7 @@ public void onFailure(Exception e) { this.shardRouting = newRouting; assert this.shardRouting.primary() == false || this.shardRouting.started() == false || // note that we use started and not - // active to avoid relocating shards + // active to avoid relocating shards this.indexShardOperationPermits.isBlocked() || // if permits are blocked, we are still transitioning this.replicationTracker.isPrimaryMode() : "a started primary with non-pending operation term must be in primary mode " + this.shardRouting; @@ -1991,7 +1990,12 @@ public void openEngineAndRecoverFromTranslog() throws IOException { translogRecoveryStats::incrementRecoveredOperations ); }; - loadGlobalCheckpointToReplicationTracker(); + + // Do not load the global checkpoint if this is a remote snapshot index + if (IndexModule.Type.REMOTE_SNAPSHOT.match(indexSettings) == false) { + loadGlobalCheckpointToReplicationTracker(); + } + innerOpenEngineAndTranslog(replicationTracker); getEngine().translogManager() .recoverFromTranslog(translogRecoveryRunner, getEngine().getProcessedLocalCheckpoint(), Long.MAX_VALUE); @@ -3090,13 +3094,18 @@ public void startRecovery( } break; case SNAPSHOT: - final String repo = ((SnapshotRecoverySource) recoveryState.getRecoverySource()).snapshot().getRepository(); - executeRecovery( - "from snapshot", - recoveryState, - recoveryListener, - l -> restoreFromRepository(repositoriesService.repository(repo), l) - ); + final SnapshotRecoverySource recoverySource = (SnapshotRecoverySource) recoveryState.getRecoverySource(); + if (recoverySource.isSearchableSnapshot()) { + executeRecovery("from snapshot (remote)", 
recoveryState, recoveryListener, this::recoverFromStore); + } else { + final String repo = recoverySource.snapshot().getRepository(); + executeRecovery( + "from snapshot", + recoveryState, + recoveryListener, + l -> restoreFromRepository(repositoriesService.repository(repo), l) + ); + } break; case LOCAL_SHARDS: final IndexMetadata indexMetadata = indexSettings().getIndexMetadata(); @@ -3187,7 +3196,7 @@ public RetentionLease addPeerRecoveryRetentionLease( ) { assert assertPrimaryMode(); // only needed for BWC reasons involving rolling upgrades from versions that do not support PRRLs: - assert indexSettings.getIndexVersionCreated().before(LegacyESVersion.V_7_4_0) || indexSettings.isSoftDeleteEnabled() == false; + assert indexSettings.isSoftDeleteEnabled() == false; return replicationTracker.addPeerRecoveryRetentionLease(nodeId, globalCheckpoint, listener); } @@ -3257,10 +3266,15 @@ private static void persistMetadata( writeReason = "routing changed from " + currentRouting + " to " + newRouting; } logger.trace("{} writing shard state, reason [{}]", shardId, writeReason); + + final ShardStateMetadata.IndexDataLocation indexDataLocation = IndexSettings.SEARCHABLE_SNAPSHOT_REPOSITORY.exists( + indexSettings.getSettings() + ) ? ShardStateMetadata.IndexDataLocation.REMOTE : ShardStateMetadata.IndexDataLocation.LOCAL; final ShardStateMetadata newShardStateMetadata = new ShardStateMetadata( newRouting.primary(), indexSettings.getUUID(), - newRouting.allocationId() + newRouting.allocationId(), + indexDataLocation ); ShardStateMetadata.FORMAT.writeAndCleanup(newShardStateMetadata, shardPath.getShardStatePath()); } else { diff --git a/server/src/main/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommand.java b/server/src/main/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommand.java index ccc620fc8cf64..c7e380f842fa0 100644 --- a/server/src/main/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommand.java +++ b/server/src/main/java/org/opensearch/index/shard/RemoveCorruptedShardDataCommand.java @@ -484,7 +484,8 @@ private void newAllocationId(ShardPath shardPath, Terminal terminal) throws IOEx final ShardStateMetadata newShardStateMetadata = new ShardStateMetadata( shardStateMetadata.primary, shardStateMetadata.indexUUID, - newAllocationId + newAllocationId, + ShardStateMetadata.IndexDataLocation.LOCAL ); ShardStateMetadata.FORMAT.writeAndCleanup(newShardStateMetadata, shardStatePath); diff --git a/server/src/main/java/org/opensearch/index/shard/ShardStateMetadata.java b/server/src/main/java/org/opensearch/index/shard/ShardStateMetadata.java index 9cd9149cda913..9e334bc6ffd54 100644 --- a/server/src/main/java/org/opensearch/index/shard/ShardStateMetadata.java +++ b/server/src/main/java/org/opensearch/index/shard/ShardStateMetadata.java @@ -56,17 +56,39 @@ public final class ShardStateMetadata { private static final String PRIMARY_KEY = "primary"; private static final String INDEX_UUID_KEY = "index_uuid"; private static final String ALLOCATION_ID_KEY = "allocation_id"; + private static final String INDEX_DATA_LOCATION_KEY = "index_data_location"; + + /** + * Enumeration of types of data locations for an index + */ + public enum IndexDataLocation { + /** + * Indicates index data is on the local disk + */ + LOCAL, + /** + * Indicates index data is remote, such as for a searchable snapshot + * index + */ + REMOTE + } public final String indexUUID; public final boolean primary; @Nullable public final AllocationId allocationId; // can be null if we read from legacy format (see 
fromXContent and MultiDataPathUpgrader) + public final IndexDataLocation indexDataLocation; public ShardStateMetadata(boolean primary, String indexUUID, AllocationId allocationId) { + this(primary, indexUUID, allocationId, IndexDataLocation.LOCAL); + } + + public ShardStateMetadata(boolean primary, String indexUUID, AllocationId allocationId, IndexDataLocation indexDataLocation) { assert indexUUID != null; this.primary = primary; this.indexUUID = indexUUID; this.allocationId = allocationId; + this.indexDataLocation = Objects.requireNonNull(indexDataLocation); } @Override @@ -89,6 +111,9 @@ public boolean equals(Object o) { if (Objects.equals(allocationId, that.allocationId) == false) { return false; } + if (Objects.equals(indexDataLocation, that.indexDataLocation) == false) { + return false; + } return true; } @@ -98,17 +123,16 @@ public int hashCode() { int result = indexUUID.hashCode(); result = 31 * result + (allocationId != null ? allocationId.hashCode() : 0); result = 31 * result + (primary ? 1 : 0); + result = 31 * result + indexDataLocation.hashCode(); return result; } @Override public String toString() { - return "primary [" + primary + "], allocation [" + allocationId + "]"; + return "primary [" + primary + "], allocation [" + allocationId + "], index data location [" + indexDataLocation + "]"; } - public static final MetadataStateFormat FORMAT = new MetadataStateFormat( - SHARD_STATE_FILE_PREFIX - ) { + public static final MetadataStateFormat FORMAT = new MetadataStateFormat<>(SHARD_STATE_FILE_PREFIX) { @Override protected XContentBuilder newXContentBuilder(XContentType type, OutputStream stream) throws IOException { @@ -124,6 +148,11 @@ public void toXContent(XContentBuilder builder, ShardStateMetadata shardStateMet if (shardStateMetadata.allocationId != null) { builder.field(ALLOCATION_ID_KEY, shardStateMetadata.allocationId); } + // Omit the index data location field if it is LOCAL (the implicit default) + // to maintain compatibility for local indices + if (shardStateMetadata.indexDataLocation != IndexDataLocation.LOCAL) { + builder.field(INDEX_DATA_LOCATION_KEY, shardStateMetadata.indexDataLocation); + } } @Override @@ -136,6 +165,7 @@ public ShardStateMetadata fromXContent(XContentParser parser) throws IOException String currentFieldName = null; String indexUUID = IndexMetadata.INDEX_UUID_NA_VALUE; AllocationId allocationId = null; + IndexDataLocation indexDataLocation = IndexDataLocation.LOCAL; while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { if (token == XContentParser.Token.FIELD_NAME) { currentFieldName = parser.currentName(); @@ -144,6 +174,13 @@ public ShardStateMetadata fromXContent(XContentParser parser) throws IOException primary = parser.booleanValue(); } else if (INDEX_UUID_KEY.equals(currentFieldName)) { indexUUID = parser.text(); + } else if (INDEX_DATA_LOCATION_KEY.equals(currentFieldName)) { + final String stringValue = parser.text(); + try { + indexDataLocation = IndexDataLocation.valueOf(stringValue); + } catch (IllegalArgumentException e) { + throw new CorruptStateException("unexpected value for data location [" + stringValue + "]"); + } } else { throw new CorruptStateException("unexpected field in shard state [" + currentFieldName + "]"); } @@ -160,7 +197,7 @@ public ShardStateMetadata fromXContent(XContentParser parser) throws IOException if (primary == null) { throw new CorruptStateException("missing value for [primary] in shard state"); } - return new ShardStateMetadata(primary, indexUUID, allocationId); + return new 
ShardStateMetadata(primary, indexUUID, allocationId, indexDataLocation); } }; } diff --git a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java index 06916c4cc87fe..6ca5036808818 100644 --- a/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java +++ b/server/src/main/java/org/opensearch/index/shard/StoreRecovery.java @@ -104,9 +104,6 @@ final class StoreRecovery { */ void recoverFromStore(final IndexShard indexShard, ActionListener listener) { if (canRecover(indexShard)) { - RecoverySource.Type recoveryType = indexShard.recoveryState().getRecoverySource().getType(); - assert recoveryType == RecoverySource.Type.EMPTY_STORE || recoveryType == RecoverySource.Type.EXISTING_STORE - : "expected store recovery type but was: " + recoveryType; ActionListener.completeWith(recoveryListener(indexShard, listener), () -> { logger.debug("starting recovery from store ..."); internalRecoverFromStore(indexShard); diff --git a/server/src/main/java/org/opensearch/index/similarity/SimilarityProviders.java b/server/src/main/java/org/opensearch/index/similarity/SimilarityProviders.java index 1e3ec368df411..139b8fffbac3a 100644 --- a/server/src/main/java/org/opensearch/index/similarity/SimilarityProviders.java +++ b/server/src/main/java/org/opensearch/index/similarity/SimilarityProviders.java @@ -63,7 +63,6 @@ import org.apache.lucene.search.similarities.NormalizationH2; import org.apache.lucene.search.similarities.NormalizationH3; import org.apache.lucene.search.similarities.NormalizationZ; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.common.logging.DeprecationLogger; import org.opensearch.common.settings.Settings; @@ -157,22 +156,9 @@ private static BasicModel parseBasicModel(Version indexCreatedVersion, Settings if (model == null) { String replacement = LEGACY_BASIC_MODELS.get(basicModel); if (replacement != null) { - if (indexCreatedVersion.onOrAfter(LegacyESVersion.V_7_0_0)) { - throw new IllegalArgumentException( - "Basic model [" + basicModel + "] isn't supported anymore, " + "please use another model." - ); - } else { - deprecationLogger.deprecate( - basicModel + "_similarity_model_replaced", - "Basic model [" - + basicModel - + "] isn't supported anymore and has arbitrarily been replaced with [" - + replacement - + "]." - ); - model = BASIC_MODELS.get(replacement); - assert model != null; - } + throw new IllegalArgumentException( + "Basic model [" + basicModel + "] isn't supported anymore, " + "please use another model." + ); } } @@ -195,22 +181,9 @@ private static AfterEffect parseAfterEffect(Version indexCreatedVersion, Setting if (effect == null) { String replacement = LEGACY_AFTER_EFFECTS.get(afterEffect); if (replacement != null) { - if (indexCreatedVersion.onOrAfter(LegacyESVersion.V_7_0_0)) { - throw new IllegalArgumentException( - "After effect [" + afterEffect + "] isn't supported anymore, please use another effect." - ); - } else { - deprecationLogger.deprecate( - afterEffect + "_after_effect_replaced", - "After effect [" - + afterEffect - + "] isn't supported anymore and has arbitrarily been replaced with [" - + replacement - + "]." - ); - effect = AFTER_EFFECTS.get(replacement); - assert effect != null; - } + throw new IllegalArgumentException( + "After effect [" + afterEffect + "] isn't supported anymore, please use another effect." 
+ ); } } @@ -294,14 +267,7 @@ static void assertSettingsIsSubsetOf(String type, Version version, Settings sett unknownSettings.removeAll(Arrays.asList(supportedSettings)); unknownSettings.remove("type"); // used to figure out which sim this is if (unknownSettings.isEmpty() == false) { - if (version.onOrAfter(LegacyESVersion.V_7_0_0)) { - throw new IllegalArgumentException("Unknown settings for similarity of type [" + type + "]: " + unknownSettings); - } else { - deprecationLogger.deprecate( - "unknown_similarity_setting", - "Unknown settings for similarity of type [" + type + "]: " + unknownSettings - ); - } + throw new IllegalArgumentException("Unknown settings for similarity of type [" + type + "]: " + unknownSettings); } } diff --git a/server/src/main/java/org/opensearch/index/similarity/SimilarityService.java b/server/src/main/java/org/opensearch/index/similarity/SimilarityService.java index d575ec508acb6..c3fc7ffbb0fe5 100644 --- a/server/src/main/java/org/opensearch/index/similarity/SimilarityService.java +++ b/server/src/main/java/org/opensearch/index/similarity/SimilarityService.java @@ -39,12 +39,10 @@ import org.apache.lucene.search.Explanation; import org.apache.lucene.search.TermStatistics; import org.apache.lucene.search.similarities.BooleanSimilarity; -import org.apache.lucene.search.similarities.ClassicSimilarity; import org.apache.lucene.search.similarities.PerFieldSimilarityWrapper; import org.apache.lucene.search.similarities.Similarity; import org.apache.lucene.search.similarities.Similarity.SimScorer; import org.apache.lucene.util.BytesRef; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.common.TriFunction; import org.opensearch.common.logging.DeprecationLogger; @@ -76,25 +74,12 @@ public final class SimilarityService extends AbstractIndexComponent { static { Map>> defaults = new HashMap<>(); defaults.put(CLASSIC_SIMILARITY, version -> { - if (version.onOrAfter(LegacyESVersion.V_7_0_0)) { - return () -> { - throw new IllegalArgumentException( - "The [classic] similarity may not be used anymore. Please use the [BM25] " - + "similarity or build a custom [scripted] similarity instead." - ); - }; - } else { - final ClassicSimilarity similarity = SimilarityProviders.createClassicSimilarity(Settings.EMPTY, version); - return () -> { - deprecationLogger.deprecate( - "classic_similarity", - "The [classic] similarity is now deprecated in favour of BM25, which is generally " - + "accepted as a better alternative. Use the [BM25] similarity or build a custom [scripted] similarity " - + "instead." - ); - return similarity; - }; - } + return () -> { + throw new IllegalArgumentException( + "The [classic] similarity may not be used anymore. Please use the [BM25] " + + "similarity or build a custom [scripted] similarity instead." + ); + }; }); defaults.put("BM25", version -> { final LegacyBM25Similarity similarity = SimilarityProviders.createBM25Similarity(Settings.EMPTY, version); @@ -107,20 +92,10 @@ public final class SimilarityService extends AbstractIndexComponent { Map> builtIn = new HashMap<>(); builtIn.put(CLASSIC_SIMILARITY, (settings, version, script) -> { - if (version.onOrAfter(LegacyESVersion.V_7_0_0)) { - throw new IllegalArgumentException( - "The [classic] similarity may not be used anymore. Please use the [BM25] " - + "similarity or build a custom [scripted] similarity instead." 
- ); - } else { - deprecationLogger.deprecate( - "classic_similarity", - "The [classic] similarity is now deprecated in favour of BM25, which is generally " - + "accepted as a better alternative. Use the [BM25] similarity or build a custom [scripted] similarity " - + "instead." - ); - return SimilarityProviders.createClassicSimilarity(settings, version); - } + throw new IllegalArgumentException( + "The [classic] similarity may not be used anymore. Please use the [BM25] " + + "similarity or build a custom [scripted] similarity instead." + ); }); builtIn.put("BM25", (settings, version, scriptService) -> SimilarityProviders.createBM25Similarity(settings, version)); builtIn.put("boolean", (settings, version, scriptService) -> SimilarityProviders.createBooleanSimilarity(settings, version)); @@ -258,10 +233,7 @@ private static void validateScoresArePositive(Version indexCreatedVersion, Simil for (int freq = 1; freq <= 10; ++freq) { float score = scorer.score(freq, norm); if (score < 0) { - fail( - indexCreatedVersion, - "Similarities should not return negative scores:\n" + scorer.explain(Explanation.match(freq, "term freq"), norm) - ); + fail("Similarities should not return negative scores:\n" + scorer.explain(Explanation.match(freq, "term freq"), norm)); break; } } @@ -288,7 +260,6 @@ private static void validateScoresDoNotDecreaseWithFreq(Version indexCreatedVers float score = scorer.score(freq, norm); if (score < previousScore) { fail( - indexCreatedVersion, "Similarity scores should not decrease when term frequency increases:\n" + scorer.explain(Explanation.match(freq - 1, "term freq"), norm) + "\n" @@ -327,7 +298,6 @@ private static void validateScoresDoNotIncreaseWithNorm(Version indexCreatedVers float score = scorer.score(1, norm); if (score > previousScore) { fail( - indexCreatedVersion, "Similarity scores should not increase when norm increases:\n" + scorer.explain(Explanation.match(1, "term freq"), norm - 1) + "\n" @@ -340,12 +310,8 @@ private static void validateScoresDoNotIncreaseWithNorm(Version indexCreatedVers } } - private static void fail(Version indexCreatedVersion, String message) { - if (indexCreatedVersion.onOrAfter(LegacyESVersion.V_7_0_0)) { - throw new IllegalArgumentException(message); - } else { - deprecationLogger.deprecate("similarity_failure", message); - } + private static void fail(String message) { + throw new IllegalArgumentException(message); } } diff --git a/server/src/main/java/org/opensearch/index/store/InMemoryRemoteSnapshotDirectory.java b/server/src/main/java/org/opensearch/index/store/InMemoryRemoteSnapshotDirectory.java new file mode 100644 index 0000000000000..0757d88a4099a --- /dev/null +++ b/server/src/main/java/org/opensearch/index/store/InMemoryRemoteSnapshotDirectory.java @@ -0,0 +1,169 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.index.store; + +import java.io.IOException; +import java.io.InputStream; +import java.util.Collection; +import java.util.Collections; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; + +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.IOContext; +import org.apache.lucene.store.IndexInput; +import org.apache.lucene.store.IndexOutput; +import org.apache.lucene.store.Lock; +import org.apache.lucene.store.NoLockFactory; +import org.apache.lucene.util.SetOnce; +import org.opensearch.common.blobstore.BlobContainer; +import org.opensearch.common.blobstore.BlobPath; +import 
org.opensearch.common.lucene.store.ByteArrayIndexInput; +import org.opensearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot; +import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.snapshots.SnapshotId; + +/** + * Trivial in-memory implementation of a Directory that reads from a snapshot + * in a repository. This is functional but is only temporary to demonstrate + * functional searchable snapshot functionality. The proper implementation will + * be implemented per https://github.com/opensearch-project/OpenSearch/issues/3114. + * + * @opensearch.internal + */ +public final class InMemoryRemoteSnapshotDirectory extends Directory { + + private final BlobStoreRepository blobStoreRepository; + private final SnapshotId snapshotId; + private final BlobPath blobPath; + private final SetOnce blobContainer = new SetOnce<>(); + private final SetOnce> fileInfoMap = new SetOnce<>(); + + public InMemoryRemoteSnapshotDirectory(BlobStoreRepository blobStoreRepository, BlobPath blobPath, SnapshotId snapshotId) { + this.blobStoreRepository = blobStoreRepository; + this.snapshotId = snapshotId; + this.blobPath = blobPath; + } + + @Override + public String[] listAll() throws IOException { + return fileInfoMap().keySet().toArray(new String[0]); + } + + @Override + public void deleteFile(String name) throws IOException {} + + @Override + public IndexOutput createOutput(String name, IOContext context) { + return NoopIndexOutput.INSTANCE; + } + + @Override + public IndexInput openInput(String name, IOContext context) throws IOException { + final BlobStoreIndexShardSnapshot.FileInfo fileInfo = fileInfoMap().get(name); + + // Virtual files are contained entirely in the metadata hash field + if (fileInfo.name().startsWith("v__")) { + return new ByteArrayIndexInput(name, fileInfo.metadata().hash().bytes); + } + + try (InputStream is = blobContainer().readBlob(fileInfo.name())) { + return new ByteArrayIndexInput(name, is.readAllBytes()); + } + } + + @Override + public void close() throws IOException {} + + @Override + public long fileLength(String name) throws IOException { + initializeIfNecessary(); + return fileInfoMap.get().get(name).length(); + } + + @Override + public Set getPendingDeletions() throws IOException { + return Collections.emptySet(); + } + + @Override + public IndexOutput createTempOutput(String prefix, String suffix, IOContext context) { + throw new UnsupportedOperationException(); + } + + @Override + public void sync(Collection names) throws IOException {} + + @Override + public void syncMetaData() {} + + @Override + public void rename(String source, String dest) throws IOException {} + + @Override + public Lock obtainLock(String name) throws IOException { + return NoLockFactory.INSTANCE.obtainLock(null, null); + } + + static class NoopIndexOutput extends IndexOutput { + + final static NoopIndexOutput INSTANCE = new NoopIndexOutput(); + + NoopIndexOutput() { + super("noop", "noop"); + } + + @Override + public void close() throws IOException { + + } + + @Override + public long getFilePointer() { + return 0; + } + + @Override + public long getChecksum() throws IOException { + return 0; + } + + @Override + public void writeByte(byte b) throws IOException { + + } + + @Override + public void writeBytes(byte[] b, int offset, int length) throws IOException { + + } + } + + private BlobContainer blobContainer() { + initializeIfNecessary(); + return blobContainer.get(); + } + + private Map fileInfoMap() { + initializeIfNecessary(); + return fileInfoMap.get(); + } + 
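+    // Note on the read path: openInput() copies each blob fully onto the heap via
+    // ByteArrayIndexInput (virtual "v__" files are served straight from the metadata
+    // hash), which is what makes this directory in-memory and demonstration-only; the
+    // issue linked in the class javadoc tracks the block-based replacement.
+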
+ /** + * Bit of a hack to lazily initialize the blob store to avoid running afoul + * of the assertion in {@code BlobStoreRepository#assertSnapshotOrGenericThread}. + */ + private void initializeIfNecessary() { + if (blobContainer.get() == null || fileInfoMap.get() == null) { + blobContainer.set(blobStoreRepository.blobStore().blobContainer(blobPath)); + final BlobStoreIndexShardSnapshot snapshot = blobStoreRepository.loadShardSnapshot(blobContainer.get(), snapshotId); + fileInfoMap.set( + snapshot.indexFiles().stream().collect(Collectors.toMap(BlobStoreIndexShardSnapshot.FileInfo::physicalName, f -> f)) + ); + } + } +} diff --git a/server/src/main/java/org/opensearch/index/store/RemoteSnapshotDirectoryFactory.java b/server/src/main/java/org/opensearch/index/store/RemoteSnapshotDirectoryFactory.java new file mode 100644 index 0000000000000..bf7806b836b65 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/store/RemoteSnapshotDirectoryFactory.java @@ -0,0 +1,49 @@ +/* + * Copyright OpenSearch Contributors + * SPDX-License-Identifier: Apache-2.0 + */ + +package org.opensearch.index.store; + +import java.io.IOException; +import java.util.function.Supplier; + +import org.apache.lucene.store.Directory; +import org.opensearch.common.blobstore.BlobPath; +import org.opensearch.index.IndexSettings; +import org.opensearch.index.shard.ShardPath; +import org.opensearch.plugins.IndexStorePlugin; +import org.opensearch.repositories.RepositoriesService; +import org.opensearch.repositories.Repository; +import org.opensearch.repositories.blobstore.BlobStoreRepository; +import org.opensearch.snapshots.SnapshotId; + +/** + * Factory for a Directory implementation that can read directly from index + * data stored remotely in a repository. + * + * @opensearch.internal + */ +public final class RemoteSnapshotDirectoryFactory implements IndexStorePlugin.DirectoryFactory { + private final Supplier repositoriesService; + + public RemoteSnapshotDirectoryFactory(Supplier repositoriesService) { + this.repositoriesService = repositoriesService; + } + + @Override + public Directory newDirectory(IndexSettings indexSettings, ShardPath shardPath) throws IOException { + final String repositoryName = IndexSettings.SEARCHABLE_SNAPSHOT_REPOSITORY.get(indexSettings.getSettings()); + final Repository repository = repositoriesService.get().repository(repositoryName); + assert repository instanceof BlobStoreRepository : "repository should be instance of BlobStoreRepository"; + final BlobStoreRepository blobStoreRepository = (BlobStoreRepository) repository; + final BlobPath blobPath = new BlobPath().add("indices") + .add(IndexSettings.SEARCHABLE_SNAPSHOT_INDEX_ID.get(indexSettings.getSettings())) + .add(Integer.toString(shardPath.getShardId().getId())); + final SnapshotId snapshotId = new SnapshotId( + IndexSettings.SEARCHABLE_SNAPSHOT_ID_NAME.get(indexSettings.getSettings()), + IndexSettings.SEARCHABLE_SNAPSHOT_ID_UUID.get(indexSettings.getSettings()) + ); + return new InMemoryRemoteSnapshotDirectory(blobStoreRepository, blobPath, snapshotId); + } +} diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index 6808803ee0988..b2f48ccdd389c 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -40,10 +40,8 @@ import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.util.CollectionUtil; import 
org.apache.lucene.util.RamUsageEstimator; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; import org.opensearch.ResourceAlreadyExistsException; -import org.opensearch.Version; import org.opensearch.action.admin.indices.stats.CommonStats; import org.opensearch.action.admin.indices.stats.CommonStatsFlags; import org.opensearch.action.admin.indices.stats.CommonStatsFlags.Flag; @@ -111,6 +109,7 @@ import org.opensearch.index.engine.InternalEngineFactory; import org.opensearch.index.engine.NRTReplicationEngineFactory; import org.opensearch.index.engine.NoOpEngine; +import org.opensearch.index.engine.ReadOnlyEngine; import org.opensearch.index.fielddata.IndexFieldDataCache; import org.opensearch.index.flush.FlushStats; import org.opensearch.index.get.GetStats; @@ -132,6 +131,7 @@ import org.opensearch.index.shard.IndexingOperationListener; import org.opensearch.index.shard.IndexingStats; import org.opensearch.index.shard.ShardId; +import org.opensearch.index.translog.TranslogStats; import org.opensearch.indices.breaker.CircuitBreakerService; import org.opensearch.indices.cluster.IndicesClusterStateService; import org.opensearch.indices.fielddata.cache.IndicesFieldDataCache; @@ -338,13 +338,6 @@ public void onRemoval(ShardId shardId, String fieldName, boolean wasEvicted, lon this.metaStateService = metaStateService; this.engineFactoryProviders = engineFactoryProviders; - // do not allow any plugin-provided index store type to conflict with a built-in type - for (final String indexStoreType : directoryFactories.keySet()) { - if (IndexModule.isBuiltinType(indexStoreType)) { - throw new IllegalStateException("registered index store type [" + indexStoreType + "] conflicts with a built-in type"); - } - } - this.directoryFactories = directoryFactories; this.recoveryStateFactories = recoveryStateFactories; // doClose() is called when shutting down a node, yet there might still be ongoing requests @@ -699,8 +692,7 @@ private synchronized IndexService createIndexService( IndexingOperationListener... indexingOperationListeners ) throws IOException { final IndexSettings idxSettings = new IndexSettings(indexMetadata, settings, indexScopedSettings); - if (idxSettings.getIndexVersionCreated().onOrAfter(LegacyESVersion.V_7_0_0) - && EngineConfig.INDEX_OPTIMIZE_AUTO_GENERATED_IDS.exists(idxSettings.getSettings())) { + if (EngineConfig.INDEX_OPTIMIZE_AUTO_GENERATED_IDS.exists(idxSettings.getSettings())) { throw new IllegalArgumentException( "Setting [" + EngineConfig.INDEX_OPTIMIZE_AUTO_GENERATED_IDS.getKey() + "] was removed in version 7.0.0" ); @@ -772,6 +764,9 @@ private EngineFactory getEngineFactory(final IndexSettings idxSettings) { if (idxSettings.isSegRepEnabled()) { return new NRTReplicationEngineFactory(); } + if (IndexModule.Type.REMOTE_SNAPSHOT.match(idxSettings)) { + return config -> new ReadOnlyEngine(config, new SeqNoStats(0, 0, 0), new TranslogStats(), true, Function.identity(), false); + } return new InternalEngineFactory(); } else if (engineFactories.size() == 1) { assert engineFactories.get(0).isPresent(); @@ -1710,8 +1705,8 @@ public Function> getFieldFilter() { /** * Returns true if the provided field is a registered metadata field (including ones registered via plugins), false otherwise. 
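 * <p>For example (illustrative): {@code isMetadataField("_id")} returns {@code true}, while
 * {@code isMetadataField("title")} returns {@code false} for an ordinary mapped field.</p>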
*/ - public boolean isMetadataField(Version indexCreatedVersion, String field) { - return mapperRegistry.isMetadataField(indexCreatedVersion, field); + public boolean isMetadataField(String field) { + return mapperRegistry.isMetadataField(field); } /** diff --git a/server/src/main/java/org/opensearch/indices/analysis/AnalysisModule.java b/server/src/main/java/org/opensearch/indices/analysis/AnalysisModule.java index cc87c982a684d..22be07dd90f94 100644 --- a/server/src/main/java/org/opensearch/indices/analysis/AnalysisModule.java +++ b/server/src/main/java/org/opensearch/indices/analysis/AnalysisModule.java @@ -33,7 +33,6 @@ package org.opensearch.indices.analysis; import org.apache.lucene.analysis.LowerCaseFilter; -import org.apache.lucene.analysis.TokenStream; import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.cluster.metadata.IndexMetadata; @@ -42,7 +41,6 @@ import org.opensearch.common.settings.Settings; import org.opensearch.env.Environment; import org.opensearch.index.IndexSettings; -import org.opensearch.index.analysis.AbstractTokenFilterFactory; import org.opensearch.index.analysis.AnalysisRegistry; import org.opensearch.index.analysis.AnalyzerProvider; import org.opensearch.index.analysis.CharFilterFactory; @@ -152,20 +150,7 @@ private NamedRegistry> setupTokenFilters( tokenFilters.register("standard", new AnalysisProvider() { @Override public TokenFilterFactory get(IndexSettings indexSettings, Environment environment, String name, Settings settings) { - if (indexSettings.getIndexVersionCreated().before(LegacyESVersion.V_7_0_0)) { - deprecationLogger.deprecate( - "standard_deprecation", - "The [standard] token filter name is deprecated and will be removed in a future version." - ); - } else { - throw new IllegalArgumentException("The [standard] token filter has been removed."); - } - return new AbstractTokenFilterFactory(indexSettings, name, settings) { - @Override - public TokenStream create(TokenStream tokenStream) { - return tokenStream; - } - }; + throw new IllegalArgumentException("The [standard] token filter has been removed."); } @Override diff --git a/server/src/main/java/org/opensearch/indices/mapper/MapperRegistry.java b/server/src/main/java/org/opensearch/indices/mapper/MapperRegistry.java index 3a1d7b1ebb1e3..c26428309aec5 100644 --- a/server/src/main/java/org/opensearch/indices/mapper/MapperRegistry.java +++ b/server/src/main/java/org/opensearch/indices/mapper/MapperRegistry.java @@ -32,10 +32,8 @@ package org.opensearch.indices.mapper; -import org.opensearch.Version; import org.opensearch.index.mapper.Mapper; import org.opensearch.index.mapper.MetadataFieldMapper; -import org.opensearch.index.mapper.NestedPathFieldMapper; import org.opensearch.plugins.MapperPlugin; import java.util.Collections; @@ -53,7 +51,6 @@ public final class MapperRegistry { private final Map mapperParsers; private final Map metadataMapperParsers; - private final Map metadataMapperParsersPre20; private final Function> fieldFilter; public MapperRegistry( @@ -63,9 +60,6 @@ public MapperRegistry( ) { this.mapperParsers = Collections.unmodifiableMap(new LinkedHashMap<>(mapperParsers)); this.metadataMapperParsers = Collections.unmodifiableMap(new LinkedHashMap<>(metadataMapperParsers)); - Map tempPre20 = new LinkedHashMap<>(metadataMapperParsers); - tempPre20.remove(NestedPathFieldMapper.NAME); - this.metadataMapperParsersPre20 = Collections.unmodifiableMap(tempPre20); this.fieldFilter = fieldFilter; } @@ -81,15 +75,15 @@ public Map getMapperParsers() { * 
Return a map of the meta mappers that have been registered. The * returned map uses the name of the field as a key. */ - public Map getMetadataMapperParsers(Version indexCreatedVersion) { - return indexCreatedVersion.onOrAfter(Version.V_2_0_0) ? metadataMapperParsers : metadataMapperParsersPre20; + public Map getMetadataMapperParsers() { + return metadataMapperParsers; } /** * Returns true if the provided field is a registered metadata field, false otherwise */ - public boolean isMetadataField(Version indexCreatedVersion, String field) { - return getMetadataMapperParsers(indexCreatedVersion).containsKey(field); + public boolean isMetadataField(String field) { + return getMetadataMapperParsers().containsKey(field); } /** diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryCleanFilesRequest.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryCleanFilesRequest.java index 6c597fcd086c4..d346ec5c975f4 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryCleanFilesRequest.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoveryCleanFilesRequest.java @@ -32,10 +32,8 @@ package org.opensearch.indices.recovery; -import org.opensearch.LegacyESVersion; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; -import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.shard.ShardId; import org.opensearch.index.store.Store; @@ -76,11 +74,7 @@ public RecoveryCleanFilesRequest( shardId = new ShardId(in); snapshotFiles = new Store.MetadataSnapshot(in); totalTranslogOps = in.readVInt(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - globalCheckpoint = in.readZLong(); - } else { - globalCheckpoint = SequenceNumbers.UNASSIGNED_SEQ_NO; - } + globalCheckpoint = in.readZLong(); } @Override @@ -90,9 +84,7 @@ public void writeTo(StreamOutput out) throws IOException { shardId.writeTo(out); snapshotFiles.writeTo(out); out.writeVInt(totalTranslogOps); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - out.writeZLong(globalCheckpoint); - } + out.writeZLong(globalCheckpoint); } public Store.MetadataSnapshot sourceMetaSnapshot() { diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryFinalizeRecoveryRequest.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryFinalizeRecoveryRequest.java index a7334fba15664..446fb78958db4 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryFinalizeRecoveryRequest.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoveryFinalizeRecoveryRequest.java @@ -32,10 +32,8 @@ package org.opensearch.indices.recovery; -import org.opensearch.LegacyESVersion; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; -import org.opensearch.index.seqno.SequenceNumbers; import org.opensearch.index.shard.ShardId; import java.io.IOException; @@ -57,11 +55,7 @@ final class RecoveryFinalizeRecoveryRequest extends RecoveryTransportRequest { recoveryId = in.readLong(); shardId = new ShardId(in); globalCheckpoint = in.readZLong(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - trimAboveSeqNo = in.readZLong(); - } else { - trimAboveSeqNo = SequenceNumbers.UNASSIGNED_SEQ_NO; - } + trimAboveSeqNo = in.readZLong(); } RecoveryFinalizeRecoveryRequest( @@ -100,9 +94,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeLong(recoveryId); shardId.writeTo(out); out.writeZLong(globalCheckpoint); - if 
(out.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - out.writeZLong(trimAboveSeqNo); - } + out.writeZLong(trimAboveSeqNo); } } diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java index bdacb0b724884..68979fa4b69bc 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoveryPrepareForTranslogOperationsRequest.java @@ -32,7 +32,6 @@ package org.opensearch.indices.recovery; -import org.opensearch.LegacyESVersion; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.index.shard.ShardId; @@ -62,9 +61,6 @@ class RecoveryPrepareForTranslogOperationsRequest extends RecoveryTransportReque recoveryId = in.readLong(); shardId = new ShardId(in); totalTranslogOps = in.readVInt(); - if (in.getVersion().before(LegacyESVersion.V_7_4_0)) { - in.readBoolean(); // was fileBasedRecovery - } } public long recoveryId() { @@ -85,8 +81,5 @@ public void writeTo(StreamOutput out) throws IOException { out.writeLong(recoveryId); shardId.writeTo(out); out.writeVInt(totalTranslogOps); - if (out.getVersion().before(LegacyESVersion.V_7_4_0)) { - out.writeBoolean(true); // was fileBasedRecovery - } } } diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandler.java b/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandler.java index 665e79722770e..505d3c7adfb3f 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandler.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoverySourceHandler.java @@ -40,7 +40,6 @@ import org.apache.lucene.store.RateLimiter; import org.apache.lucene.util.ArrayUtil; import org.apache.lucene.util.SetOnce; -import org.opensearch.LegacyESVersion; import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRunnable; import org.opensearch.action.StepListener; @@ -720,8 +719,7 @@ void createRetentionLease(final long startingSeqNo, ActionListener addRetentionLeaseStep = new StepListener<>(); final long estimatedGlobalCheckpoint = startingSeqNo - 1; final RetentionLease newLease = shard.addPeerRecoveryRetentionLease( diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryState.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryState.java index 57208ab029bf4..de2ee1b8512b4 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryState.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoveryState.java @@ -32,7 +32,6 @@ package org.opensearch.indices.recovery; -import org.opensearch.LegacyESVersion; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.RecoverySource; import org.opensearch.cluster.routing.ShardRouting; @@ -430,9 +429,7 @@ public Translog(StreamInput in) throws IOException { recovered = in.readVInt(); total = in.readVInt(); totalOnStart = in.readVInt(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - totalLocal = in.readVInt(); - } + totalLocal = in.readVInt(); } @Override @@ -441,9 +438,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVInt(recovered); out.writeVInt(total); out.writeVInt(totalOnStart); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - 
out.writeVInt(totalLocal); - } + out.writeVInt(totalLocal); } public synchronized void reset() { diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryTranslogOperationsRequest.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryTranslogOperationsRequest.java index eacfb87ecc732..32560bc211669 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryTranslogOperationsRequest.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoveryTranslogOperationsRequest.java @@ -32,7 +32,6 @@ package org.opensearch.indices.recovery; -import org.opensearch.LegacyESVersion; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.index.seqno.RetentionLeases; @@ -139,8 +138,6 @@ public void writeTo(StreamOutput out) throws IOException { out.writeZLong(maxSeenAutoIdTimestampOnPrimary); out.writeZLong(maxSeqNoOfUpdatesOrDeletesOnPrimary); retentionLeases.writeTo(out); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - out.writeVLong(mappingVersionOnPrimary); - } + out.writeVLong(mappingVersionOnPrimary); } } diff --git a/server/src/main/java/org/opensearch/indices/store/TransportNodesListShardStoreMetadata.java b/server/src/main/java/org/opensearch/indices/store/TransportNodesListShardStoreMetadata.java index b49cdcd127962..6189c983e3c8a 100644 --- a/server/src/main/java/org/opensearch/indices/store/TransportNodesListShardStoreMetadata.java +++ b/server/src/main/java/org/opensearch/indices/store/TransportNodesListShardStoreMetadata.java @@ -39,7 +39,6 @@ import org.opensearch.action.ActionType; import org.opensearch.action.FailedNodeException; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.nodes.BaseNodeRequest; import org.opensearch.action.support.nodes.BaseNodeResponse; import org.opensearch.action.support.nodes.BaseNodesRequest; import org.opensearch.action.support.nodes.BaseNodesResponse; @@ -68,6 +67,7 @@ import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.indices.IndicesService; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportService; import java.io.IOException; @@ -253,20 +253,14 @@ public StoreFilesMetadata( public StoreFilesMetadata(StreamInput in) throws IOException { this.shardId = new ShardId(in); this.metadataSnapshot = new Store.MetadataSnapshot(in); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_5_0)) { - this.peerRecoveryRetentionLeases = in.readList(RetentionLease::new); - } else { - this.peerRecoveryRetentionLeases = Collections.emptyList(); - } + this.peerRecoveryRetentionLeases = in.readList(RetentionLease::new); } @Override public void writeTo(StreamOutput out) throws IOException { shardId.writeTo(out); metadataSnapshot.writeTo(out); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_5_0)) { - out.writeList(peerRecoveryRetentionLeases); - } + out.writeList(peerRecoveryRetentionLeases); } public ShardId shardId() { @@ -410,7 +404,7 @@ protected void writeNodesTo(StreamOutput out, List nodes * * @opensearch.internal */ - public static class NodeRequest extends BaseNodeRequest { + public static class NodeRequest extends TransportRequest { private final ShardId shardId; @Nullable diff --git a/server/src/main/java/org/opensearch/monitor/jvm/JvmInfo.java b/server/src/main/java/org/opensearch/monitor/jvm/JvmInfo.java index 819d67cb8621a..426551ab50f18 100644 --- 
a/server/src/main/java/org/opensearch/monitor/jvm/JvmInfo.java +++ b/server/src/main/java/org/opensearch/monitor/jvm/JvmInfo.java @@ -33,7 +33,6 @@ package org.opensearch.monitor.jvm; import org.apache.lucene.util.Constants; -import org.opensearch.LegacyESVersion; import org.opensearch.common.Booleans; import org.opensearch.common.SuppressForbidden; import org.opensearch.common.io.PathUtils; @@ -305,13 +304,8 @@ public JvmInfo(StreamInput in) throws IOException { vmName = in.readString(); vmVersion = in.readString(); vmVendor = in.readString(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - bundledJdk = in.readBoolean(); - usingBundledJdk = in.readOptionalBoolean(); - } else { - bundledJdk = false; - usingBundledJdk = null; - } + bundledJdk = in.readBoolean(); + usingBundledJdk = in.readOptionalBoolean(); startTime = in.readLong(); inputArguments = new String[in.readInt()]; for (int i = 0; i < inputArguments.length; i++) { @@ -341,10 +335,8 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(vmName); out.writeString(vmVersion); out.writeString(vmVendor); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - out.writeBoolean(bundledJdk); - out.writeOptionalBoolean(usingBundledJdk); - } + out.writeBoolean(bundledJdk); + out.writeOptionalBoolean(usingBundledJdk); out.writeLong(startTime); out.writeInt(inputArguments.length); for (String inputArgument : inputArguments) { diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index 24300c884d194..504550378e14f 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -38,6 +38,7 @@ import org.apache.lucene.util.SetOnce; import org.opensearch.common.util.FeatureFlags; import org.opensearch.cluster.routing.allocation.AwarenessReplicaBalance; +import org.opensearch.index.IndexModule; import org.opensearch.index.IndexingPressureService; import org.opensearch.indices.replication.SegmentReplicationSourceFactory; import org.opensearch.indices.replication.SegmentReplicationTargetService; @@ -213,6 +214,7 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; +import java.util.HashMap; import java.util.LinkedHashSet; import java.util.List; import java.util.Map; @@ -604,11 +606,23 @@ protected Node( .map(plugin -> (Function>) plugin::getEngineFactory) .collect(Collectors.toList()); - final Map indexStoreFactories = pluginsService.filterPlugins(IndexStorePlugin.class) + final Map builtInDirectoryFactories = IndexModule.createBuiltInDirectoryFactories( + repositoriesServiceReference::get + ); + final Map directoryFactories = new HashMap<>(); + pluginsService.filterPlugins(IndexStorePlugin.class) .stream() .map(IndexStorePlugin::getDirectoryFactories) .flatMap(m -> m.entrySet().stream()) - .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)) + .forEach((k, v) -> { + // do not allow any plugin-provided index store type to conflict with a built-in type + if (builtInDirectoryFactories.containsKey(k)) { + throw new IllegalStateException("registered index store type [" + k + "] conflicts with a built-in type"); + } + directoryFactories.put(k, v); + }); + directoryFactories.putAll(builtInDirectoryFactories); final Map recoveryStateFactories = pluginsService.filterPlugins( IndexStorePlugin.class @@ -653,7 +667,7 @@ protected Node( client, metaStateService, 
engineFactoryProviders, - indexStoreFactories, + Map.copyOf(directoryFactories), searchModule.getValuesSourceRegistry(), recoveryStateFactories, remoteDirectoryFactory diff --git a/server/src/main/java/org/opensearch/repositories/FilterRepository.java b/server/src/main/java/org/opensearch/repositories/FilterRepository.java index aaa021a0e8b93..a6a649fa2cd44 100644 --- a/server/src/main/java/org/opensearch/repositories/FilterRepository.java +++ b/server/src/main/java/org/opensearch/repositories/FilterRepository.java @@ -52,7 +52,6 @@ import java.io.IOException; import java.util.Collection; -import java.util.List; import java.util.Map; import java.util.function.Consumer; import java.util.function.Function; @@ -95,11 +94,6 @@ public void getRepositoryData(ActionListener listener) { in.getRepositoryData(listener); } - @Override - public void initializeSnapshot(SnapshotId snapshotId, List indices, Metadata metadata) { - in.initializeSnapshot(snapshotId, indices, metadata); - } - @Override public void finalizeSnapshot( ShardGenerations shardGenerations, diff --git a/server/src/main/java/org/opensearch/repositories/Repository.java b/server/src/main/java/org/opensearch/repositories/Repository.java index a16e0e8d441bc..1826fe1aa51da 100644 --- a/server/src/main/java/org/opensearch/repositories/Repository.java +++ b/server/src/main/java/org/opensearch/repositories/Repository.java @@ -53,7 +53,6 @@ import java.io.IOException; import java.util.Collection; -import java.util.List; import java.util.Map; import java.util.function.Consumer; import java.util.function.Function; @@ -129,19 +128,6 @@ default Repository create(RepositoryMetadata metadata, Function listener); - /** - * Starts snapshotting process - * - * @param snapshotId snapshot id - * @param indices list of indices to be snapshotted - * @param metadata cluster metadata - * - * @deprecated this method is only used when taking snapshots in a mixed version cluster where a cluster-manager node older than - * {@link org.opensearch.snapshots.SnapshotsService#NO_REPO_INITIALIZE_VERSION} is present. - */ - @Deprecated - void initializeSnapshot(SnapshotId snapshotId, List indices, Metadata metadata); - /** * Finalizes snapshotting process *
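With initializeSnapshot removed from the Repository interface, a delegating implementation
has one less method to forward. A minimal sketch under that assumption (MyRepository is
hypothetical and not part of this change):

    // Hypothetical sketch: a delegating repository after the removal. There is no
    // initializeSnapshot() left to override; snapshot creation proceeds directly to
    // snapshotShard(...) on the data nodes and a single finalizeSnapshot(...) call.
    public class MyRepository extends FilterRepository {
        public MyRepository(Repository in) {
            super(in);
        }
        // finalizeSnapshot, snapshotShard, getRepositoryData, etc. are inherited unchanged.
    }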

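With `initializeSnapshot` gone from both `FilterRepository` and the `Repository` interface, the pre-7.5 "write metadata blobs before any shard data" round-trip no longer exists. For orientation, a condensed view of the call site this PR deletes from `SnapshotsService` further down in this diff, and what takes its place:

```java
// Before: on a mixed-version cluster the cluster-manager pre-wrote global and
// per-index metadata so an older cluster-manager could still finalize the snapshot.
if (clusterState.nodes().getMinNodeVersion().onOrAfter(SnapshotsService.NO_REPO_INITIALIZE_VERSION) == false) {
    repository.initializeSnapshot(
        snapshot.snapshot().getSnapshotId(),
        snapshot.indices(),
        metadataForSnapshot(snapshot, clusterState.metadata())
    );
}
// After: no repository call happens before shard snapshotting begins; global and
// per-index metadata are written exactly once, when Repository#finalizeSnapshot runs.
```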
    diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java index c36d92abcf498..bf06191bdc8d3 100644 --- a/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/opensearch/repositories/blobstore/BlobStoreRepository.java @@ -123,7 +123,6 @@ import org.opensearch.repositories.RepositoryVerificationException; import org.opensearch.repositories.ShardGenerations; import org.opensearch.snapshots.AbortedSnapshotException; -import org.opensearch.snapshots.SnapshotCreationException; import org.opensearch.snapshots.SnapshotException; import org.opensearch.snapshots.SnapshotId; import org.opensearch.snapshots.SnapshotInfo; @@ -713,21 +712,6 @@ public RepositoryStats stats() { return new RepositoryStats(store.stats()); } - @Override - public void initializeSnapshot(SnapshotId snapshotId, List indices, Metadata clusterMetadata) { - try { - // Write Global Metadata - GLOBAL_METADATA_FORMAT.write(clusterMetadata, blobContainer(), snapshotId.getUUID(), compress); - - // write the index metadata for each index in the snapshot - for (IndexId index : indices) { - INDEX_METADATA_FORMAT.write(clusterMetadata.index(index.getName()), indexContainer(index), snapshotId.getUUID(), compress); - } - } catch (IOException ex) { - throw new SnapshotCreationException(metadata.name(), snapshotId, ex); - } - } - @Override public void deleteSnapshots( Collection snapshotIds, diff --git a/server/src/main/java/org/opensearch/repositories/blobstore/package-info.java b/server/src/main/java/org/opensearch/repositories/blobstore/package-info.java index aacd386cd4bd7..b13a63ef77f6a 100644 --- a/server/src/main/java/org/opensearch/repositories/blobstore/package-info.java +++ b/server/src/main/java/org/opensearch/repositories/blobstore/package-info.java @@ -163,23 +163,9 @@ * *

- * <p>Creating a snapshot in the repository happens in the three steps described in detail below.</p>
+ * <p>Creating a snapshot in the repository happens in the two steps described in detail below.</p>
 *
- * <h3>Initializing a Snapshot in the Repository (Mixed Version Clusters only)</h3>
- *
- * <p>In mixed version clusters that contain a node older than
- * {@link org.opensearch.snapshots.SnapshotsService#NO_REPO_INITIALIZE_VERSION}, creating a snapshot in the repository starts with a
- * call to {@link org.opensearch.repositories.Repository#initializeSnapshot} which the blob store repository implements via the
- * following actions:</p>
- * <ol>
- * <li>Verify that no snapshot by the requested name exists.</li>
- * <li>Write a blob containing the cluster metadata to the root of the blob store repository at {@code /meta-${snapshot-uuid}.dat}</li>
- * <li>Write the metadata for each index to a blob in that index's directory at
- *     {@code /indices/${index-snapshot-uuid}/meta-${snapshot-uuid}.dat}</li>
- * </ol>
- * TODO: Remove this section once BwC logic it references is removed
 *
 * <h3>Writing Shard Data (Segments)</h3>
 *
- * <p>Once all the metadata has been written by the snapshot initialization, the snapshot process moves on to writing the actual shard data
+ * <p>The snapshot process writes the actual shard data
 * to the repository by invoking {@link org.opensearch.repositories.Repository#snapshotShard} on the data-nodes that hold the primaries
 * for the shards in the current snapshot. It is implemented as follows:</p>

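The Javadoc above now describes snapshot creation without the initialization step. From a caller's perspective the API keeps its shape; a minimal client-level sketch, assuming a `Client client` and a `Logger logger` that are not part of this diff:

```java
import org.opensearch.action.ActionListener;
import org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotRequest;

// Take a snapshot; shard writes start without a separate repository "initialize" call.
CreateSnapshotRequest request = new CreateSnapshotRequest("my-repo", "snap-1")
    .indices("logs-*")
    .waitForCompletion(true);

client.admin().cluster().createSnapshot(request, ActionListener.wrap(
    response -> logger.info("snapshot finished in state [{}]", response.getSnapshotInfo().state()),
    e -> logger.error("snapshot failed", e)
));
```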
    * diff --git a/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestDeleteDecommissionStateAction.java b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestDeleteDecommissionStateAction.java new file mode 100644 index 0000000000000..9fd7ae2248c30 --- /dev/null +++ b/server/src/main/java/org/opensearch/rest/action/admin/cluster/RestDeleteDecommissionStateAction.java @@ -0,0 +1,52 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.rest.action.admin.cluster; + +import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateRequest; +import org.opensearch.client.Requests; +import org.opensearch.client.node.NodeClient; +import org.opensearch.rest.BaseRestHandler; +import org.opensearch.rest.RestRequest; +import org.opensearch.rest.action.RestToXContentListener; + +import java.io.IOException; +import java.util.List; + +import static java.util.Collections.singletonList; +import static org.opensearch.rest.RestRequest.Method.DELETE; + +/** + * Clears the decommission metadata. + * + * @opensearch.api + */ +public class RestDeleteDecommissionStateAction extends BaseRestHandler { + + @Override + public List routes() { + return singletonList(new Route(DELETE, "/_cluster/decommission/awareness")); + } + + @Override + public String getName() { + return "delete_decommission_state_action"; + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + DeleteDecommissionStateRequest deleteDecommissionStateRequest = createRequest(); + return channel -> client.admin() + .cluster() + .deleteDecommissionState(deleteDecommissionStateRequest, new RestToXContentListener<>(channel)); + } + + DeleteDecommissionStateRequest createRequest() { + return Requests.deleteDecommissionStateRequest(); + } +} diff --git a/server/src/main/java/org/opensearch/rest/action/document/RestIndexAction.java b/server/src/main/java/org/opensearch/rest/action/document/RestIndexAction.java index e7b1da91aba8f..bd2c11cf71ff1 100644 --- a/server/src/main/java/org/opensearch/rest/action/document/RestIndexAction.java +++ b/server/src/main/java/org/opensearch/rest/action/document/RestIndexAction.java @@ -32,7 +32,6 @@ package org.opensearch.rest.action.document; -import org.opensearch.LegacyESVersion; import org.opensearch.action.DocWriteRequest; import org.opensearch.action.index.IndexRequest; import org.opensearch.action.support.ActiveShardCount; @@ -128,7 +127,7 @@ public List routes() { @Override public RestChannelConsumer prepareRequest(RestRequest request, final NodeClient client) throws IOException { assert request.params().get("id") == null : "non-null id: " + request.params().get("id"); - if (request.params().get("op_type") == null && nodesInCluster.get().getMinNodeVersion().onOrAfter(LegacyESVersion.V_7_5_0)) { + if (request.params().get("op_type") == null) { // default to op_type create request.params().put("op_type", "create"); } diff --git a/server/src/main/java/org/opensearch/script/ScriptStats.java b/server/src/main/java/org/opensearch/script/ScriptStats.java index 34d868f1d6046..9c8f1157cb718 100644 --- a/server/src/main/java/org/opensearch/script/ScriptStats.java +++ b/server/src/main/java/org/opensearch/script/ScriptStats.java @@ -89,7 +89,7 @@ public ScriptStats(ScriptContextStats context) { public 
ScriptStats(StreamInput in) throws IOException { compilations = in.readVLong(); cacheEvictions = in.readVLong(); - compilationLimitTriggered = in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0) ? in.readVLong() : 0; + compilationLimitTriggered = in.readVLong(); contextStats = in.getVersion().onOrAfter(LegacyESVersion.V_7_9_0) ? in.readList(ScriptContextStats::new) : Collections.emptyList(); } @@ -97,9 +97,7 @@ public ScriptStats(StreamInput in) throws IOException { public void writeTo(StreamOutput out) throws IOException { out.writeVLong(compilations); out.writeVLong(cacheEvictions); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - out.writeVLong(compilationLimitTriggered); - } + out.writeVLong(compilationLimitTriggered); if (out.getVersion().onOrAfter(LegacyESVersion.V_7_9_0)) { out.writeList(contextStats); } diff --git a/server/src/main/java/org/opensearch/search/DocValueFormat.java b/server/src/main/java/org/opensearch/search/DocValueFormat.java index 84c46e400543a..4b592303ee253 100644 --- a/server/src/main/java/org/opensearch/search/DocValueFormat.java +++ b/server/src/main/java/org/opensearch/search/DocValueFormat.java @@ -34,7 +34,6 @@ import org.apache.lucene.document.InetAddressPoint; import org.apache.lucene.util.BytesRef; -import org.opensearch.LegacyESVersion; import org.opensearch.common.io.stream.NamedWriteable; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; @@ -44,7 +43,6 @@ import org.opensearch.common.network.NetworkAddress; import org.opensearch.common.time.DateFormatter; import org.opensearch.common.time.DateMathParser; -import org.opensearch.common.time.DateUtils; import org.opensearch.geometry.utils.Geohash; import org.opensearch.index.mapper.DateFieldMapper; import org.opensearch.search.aggregations.bucket.GeoTileUtils; @@ -224,34 +222,12 @@ public DateTime(DateFormatter formatter, ZoneId timeZone, DateFieldMapper.Resolu public DateTime(StreamInput in) throws IOException { String datePattern = in.readString(); - String zoneId = in.readString(); - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - this.timeZone = DateUtils.of(zoneId); - this.resolution = DateFieldMapper.Resolution.MILLISECONDS; - } else { - this.timeZone = ZoneId.of(zoneId); - this.resolution = DateFieldMapper.Resolution.ofOrdinal(in.readVInt()); - } - final boolean isJoda; - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_7_0)) { - // if stream is from 7.7 Node it will have a flag indicating if format is joda - isJoda = in.readBoolean(); - } else { - /* - When received a stream from 6.0-6.latest Node it can be java if starts with 8 otherwise joda. - - If a stream is from [7.0 - 7.7) the boolean indicating that this is joda is not present. - This means that if an index was created in 6.x using joda pattern and then cluster was upgraded to - 7.x but earlier then 7.0, there is no information that can tell that the index is using joda style pattern. - It will be assumed that clusters upgrading from [7.0 - 7.7) are using java style patterns. - */ - isJoda = Joda.isJodaPattern(in.getVersion(), datePattern); - } + this.timeZone = ZoneId.of(zoneId); + this.resolution = DateFieldMapper.Resolution.ofOrdinal(in.readVInt()); + final boolean isJoda = in.readBoolean(); this.formatter = isJoda ? 
Joda.forPattern(datePattern) : DateFormatter.forPattern(datePattern); - this.parser = formatter.toDateMathParser(); - } @Override @@ -262,16 +238,10 @@ public String getWriteableName() { @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(formatter.pattern()); - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeString(DateUtils.zoneIdToDateTimeZone(timeZone).getID()); - } else { - out.writeString(timeZone.getId()); - out.writeVInt(resolution.ordinal()); - } - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_7_0)) { - // in order not to loose information if the formatter is a joda we send a flag - out.writeBoolean(formatter instanceof JodaDateFormatter);// todo pg consider refactor to isJoda method.. - } + out.writeString(timeZone.getId()); + out.writeVInt(resolution.ordinal()); + // in order not to loose information if the formatter is a joda we send a flag + out.writeBoolean(formatter instanceof JodaDateFormatter);// todo pg consider refactor to isJoda method.. } public DateMathParser getDateMathParser() { diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesSourceParserHelper.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesSourceParserHelper.java index d8526e684f391..6ca64c2186cb8 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesSourceParserHelper.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/composite/CompositeValuesSourceParserHelper.java @@ -32,7 +32,6 @@ package org.opensearch.search.aggregations.bucket.composite; -import org.opensearch.LegacyESVersion; import org.opensearch.common.ParseField; import org.opensearch.common.ParsingException; import org.opensearch.common.io.stream.StreamInput; @@ -103,15 +102,6 @@ public static void writeTo(CompositeValuesSourceBuilder builder, StreamOutput aggregationType = BUILDER_CLASS_TO_AGGREGATION_TYPE.get(builder.getClass()); if (BUILDER_CLASS_TO_BYTE_CODE.containsKey(builder.getClass())) { code = BUILDER_CLASS_TO_BYTE_CODE.get(builder.getClass()); - if (code == 3 && out.getVersion().before(LegacyESVersion.V_7_5_0)) { - throw new IOException( - "Attempting to serialize [" - + builder.getClass().getSimpleName() - + "] to a node with unsupported version [" - + out.getVersion() - + "]" - ); - } } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java index 6b6d33717bab4..f7e3de4eb988f 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java @@ -32,7 +32,6 @@ package org.opensearch.search.aggregations.bucket.histogram; -import org.opensearch.LegacyESVersion; import org.opensearch.common.ParseField; import org.opensearch.common.Rounding; import org.opensearch.common.io.stream.StreamInput; @@ -150,17 +149,13 @@ public AutoDateHistogramAggregationBuilder(String name) { public AutoDateHistogramAggregationBuilder(StreamInput in) throws IOException { super(in); numBuckets = in.readVInt(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { - minimumIntervalExpression = in.readOptionalString(); - } + minimumIntervalExpression = in.readOptionalString(); } 
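Every wire-format change in this batch is the same move: a read or write gated on a `LegacyESVersion` constant becomes unconditional, because a 2.x node can no longer share the wire with a pre-7.x node, and missing fields no longer need fabricated defaults. A minimal sketch of the pattern; `ExampleStats` and its fields are hypothetical, while the `StreamInput`/`StreamOutput` calls are the real ones used throughout these hunks:

```java
import java.io.IOException;

import org.opensearch.common.io.stream.StreamInput;
import org.opensearch.common.io.stream.StreamOutput;
import org.opensearch.common.io.stream.Writeable;

// Hypothetical Writeable showing the before/after of this PR's serialization edits.
public class ExampleStats implements Writeable {
    private final long evictions;
    private final String unit;

    public ExampleStats(StreamInput in) throws IOException {
        evictions = in.readVLong();
        // Before: unit = in.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)
        //     ? in.readString() : "second";  // fabricated default for old peers
        unit = in.readString();               // After: the field is always on the wire
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeVLong(evictions);
        out.writeString(unit); // After: written unconditionally, no version gate
    }
}
```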
@Override protected void innerWriteTo(StreamOutput out) throws IOException { out.writeVInt(numBuckets); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { - out.writeOptionalString(minimumIntervalExpression); - } + out.writeOptionalString(minimumIntervalExpression); } protected AutoDateHistogramAggregationBuilder( @@ -321,17 +316,7 @@ public RoundingInfo(StreamInput in) throws IOException { roughEstimateDurationMillis = in.readVLong(); innerIntervals = in.readIntArray(); unitAbbreviation = in.readString(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { - dateTimeUnit = in.readString(); - } else { - /* - * This *should* be safe because we only deserialize RoundingInfo - * when reading result and results don't actually use this at all. - * We just set it to something non-null to line up with the normal - * ctor. "seconds" is the smallest unit anyway. - */ - dateTimeUnit = "second"; - } + dateTimeUnit = in.readString(); } @Override @@ -340,9 +325,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(roughEstimateDurationMillis); out.writeIntArray(innerIntervals); out.writeString(unitAbbreviation); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_3_0)) { - out.writeString(dateTimeUnit); - } + out.writeString(dateTimeUnit); } public int getMaximumInnerInterval() { diff --git a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java index d378bb2ec1bd2..f8a4c4ffd9cba 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java +++ b/server/src/main/java/org/opensearch/search/aggregations/bucket/histogram/DateIntervalWrapper.java @@ -32,7 +32,6 @@ package org.opensearch.search.aggregations.bucket.histogram; -import org.opensearch.LegacyESVersion; import org.opensearch.common.ParseField; import org.opensearch.common.Rounding; import org.opensearch.common.Rounding.DateTimeUnit; @@ -143,21 +142,8 @@ public static void declareIntervalFields(Object public DateIntervalWrapper() {} public DateIntervalWrapper(StreamInput in) throws IOException { - if (in.getVersion().before(LegacyESVersion.V_7_2_0)) { - long interval = in.readLong(); - DateHistogramInterval histoInterval = in.readOptionalWriteable(DateHistogramInterval::new); - - if (histoInterval != null) { - dateHistogramInterval = histoInterval; - intervalType = IntervalTypeEnum.LEGACY_DATE_HISTO; - } else { - dateHistogramInterval = new DateHistogramInterval(interval + "ms"); - intervalType = IntervalTypeEnum.LEGACY_INTERVAL; - } - } else { - dateHistogramInterval = in.readOptionalWriteable(DateHistogramInterval::new); - intervalType = IntervalTypeEnum.fromStream(in); - } + dateHistogramInterval = in.readOptionalWriteable(DateHistogramInterval::new); + intervalType = IntervalTypeEnum.fromStream(in); } public IntervalTypeEnum getIntervalType() { @@ -402,20 +388,8 @@ public boolean isEmpty() { @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getVersion().before(LegacyESVersion.V_7_2_0)) { - if (intervalType.equals(IntervalTypeEnum.LEGACY_INTERVAL)) { - out.writeLong( - TimeValue.parseTimeValue(dateHistogramInterval.toString(), DateHistogramAggregationBuilder.NAME + ".innerWriteTo") - .getMillis() - ); - } else { - out.writeLong(0L); - } - out.writeOptionalWriteable(dateHistogramInterval); - } else { - out.writeOptionalWriteable(dateHistogramInterval); - intervalType.writeTo(out); - } 
+ out.writeOptionalWriteable(dateHistogramInterval); + intervalType.writeTo(out); } @Override diff --git a/server/src/main/java/org/opensearch/search/aggregations/metrics/InternalGeoCentroid.java b/server/src/main/java/org/opensearch/search/aggregations/metrics/InternalGeoCentroid.java index 96e7541de25d9..35449a28c9087 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/metrics/InternalGeoCentroid.java +++ b/server/src/main/java/org/opensearch/search/aggregations/metrics/InternalGeoCentroid.java @@ -33,7 +33,6 @@ package org.opensearch.search.aggregations.metrics; import org.apache.lucene.geo.GeoEncodingUtils; -import org.opensearch.LegacyESVersion; import org.opensearch.common.ParseField; import org.opensearch.common.geo.GeoPoint; import org.opensearch.common.io.stream.StreamInput; @@ -84,13 +83,7 @@ public InternalGeoCentroid(StreamInput in) throws IOException { super(in); count = in.readVLong(); if (in.readBoolean()) { - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - centroid = new GeoPoint(in.readDouble(), in.readDouble()); - } else { - final long hash = in.readLong(); - centroid = new GeoPoint(decodeLatitude(hash), decodeLongitude(hash)); - } - + centroid = new GeoPoint(in.readDouble(), in.readDouble()); } else { centroid = null; } @@ -101,12 +94,8 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeVLong(count); if (centroid != null) { out.writeBoolean(true); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - out.writeDouble(centroid.lat()); - out.writeDouble(centroid.lon()); - } else { - out.writeLong(encodeLatLon(centroid.lat(), centroid.lon())); - } + out.writeDouble(centroid.lat()); + out.writeDouble(centroid.lon()); } else { out.writeBoolean(false); } diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/InternalPercentilesBucket.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/InternalPercentilesBucket.java index 40c7a5791454c..7c30656505663 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/InternalPercentilesBucket.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/InternalPercentilesBucket.java @@ -32,7 +32,6 @@ package org.opensearch.search.aggregations.pipeline; -import org.opensearch.LegacyESVersion; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.xcontent.XContentBuilder; @@ -99,11 +98,7 @@ public InternalPercentilesBucket(StreamInput in) throws IOException { format = in.readNamedWriteable(DocValueFormat.class); percentiles = in.readDoubleArray(); percents = in.readDoubleArray(); - - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - keyed = in.readBoolean(); - } - + keyed = in.readBoolean(); computeLookup(); } @@ -112,10 +107,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeNamedWriteable(format); out.writeDoubleArray(percentiles); out.writeDoubleArray(percents); - - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - out.writeBoolean(keyed); - } + out.writeBoolean(keyed); } @Override diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovFnPipelineAggregationBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovFnPipelineAggregationBuilder.java index 33c09e04bd4b0..501f1af63b3d9 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovFnPipelineAggregationBuilder.java +++ 
b/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovFnPipelineAggregationBuilder.java @@ -32,7 +32,6 @@ package org.opensearch.search.aggregations.pipeline; -import org.opensearch.LegacyESVersion; import org.opensearch.common.ParseField; import org.opensearch.common.Strings; import org.opensearch.common.io.stream.StreamInput; @@ -114,11 +113,7 @@ public MovFnPipelineAggregationBuilder(StreamInput in) throws IOException { format = in.readOptionalString(); gapPolicy = GapPolicy.readFrom(in); window = in.readInt(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - shift = in.readInt(); - } else { - shift = 0; - } + shift = in.readInt(); } @Override @@ -128,9 +123,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeOptionalString(format); gapPolicy.writeTo(out); out.writeInt(window); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - out.writeInt(shift); - } + out.writeInt(shift); } /** diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovFnPipelineAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovFnPipelineAggregator.java index 70652e7ddce44..7b20a796b8134 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovFnPipelineAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/MovFnPipelineAggregator.java @@ -32,7 +32,6 @@ package org.opensearch.search.aggregations.pipeline; -import org.opensearch.LegacyESVersion; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.script.Script; @@ -106,11 +105,7 @@ public MovFnPipelineAggregator(StreamInput in) throws IOException { gapPolicy = BucketHelpers.GapPolicy.readFrom(in); bucketsPath = in.readString(); window = in.readInt(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - shift = in.readInt(); - } else { - shift = 0; - } + shift = in.readInt(); } @Override @@ -120,9 +115,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { gapPolicy.writeTo(out); out.writeString(bucketsPath); out.writeInt(window); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_4_0)) { - out.writeInt(shift); - } + out.writeInt(shift); } @Override diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/PercentilesBucketPipelineAggregationBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/PercentilesBucketPipelineAggregationBuilder.java index bef97bbbaa83a..8e68e62b04766 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/PercentilesBucketPipelineAggregationBuilder.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/PercentilesBucketPipelineAggregationBuilder.java @@ -34,7 +34,6 @@ import com.carrotsearch.hppc.DoubleArrayList; -import org.opensearch.LegacyESVersion; import org.opensearch.common.ParseField; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; @@ -70,19 +69,13 @@ public PercentilesBucketPipelineAggregationBuilder(String name, String bucketsPa public PercentilesBucketPipelineAggregationBuilder(StreamInput in) throws IOException { super(in, NAME); percents = in.readDoubleArray(); - - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - keyed = in.readBoolean(); - } + keyed = in.readBoolean(); } @Override protected void innerWriteTo(StreamOutput out) throws IOException { out.writeDoubleArray(percents); - - if 
(out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - out.writeBoolean(keyed); - } + out.writeBoolean(keyed); } /** diff --git a/server/src/main/java/org/opensearch/search/aggregations/pipeline/PercentilesBucketPipelineAggregator.java b/server/src/main/java/org/opensearch/search/aggregations/pipeline/PercentilesBucketPipelineAggregator.java index bd838fe23da8b..7fad7e233c424 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/pipeline/PercentilesBucketPipelineAggregator.java +++ b/server/src/main/java/org/opensearch/search/aggregations/pipeline/PercentilesBucketPipelineAggregator.java @@ -32,7 +32,6 @@ package org.opensearch.search.aggregations.pipeline; -import org.opensearch.LegacyESVersion; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.search.DocValueFormat; @@ -76,19 +75,13 @@ public class PercentilesBucketPipelineAggregator extends BucketMetricsPipelineAg public PercentilesBucketPipelineAggregator(StreamInput in) throws IOException { super(in); percents = in.readDoubleArray(); - - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - keyed = in.readBoolean(); - } + keyed = in.readBoolean(); } @Override public void innerWriteTo(StreamOutput out) throws IOException { out.writeDoubleArray(percents); - - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - out.writeBoolean(keyed); - } + out.writeBoolean(keyed); } @Override diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/BaseMultiValuesSourceFieldConfig.java b/server/src/main/java/org/opensearch/search/aggregations/support/BaseMultiValuesSourceFieldConfig.java index b7e157b6050fc..ae76fd0a3aa3f 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/BaseMultiValuesSourceFieldConfig.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/BaseMultiValuesSourceFieldConfig.java @@ -15,7 +15,6 @@ import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.io.stream.Writeable; -import org.opensearch.common.time.DateUtils; import org.opensearch.common.xcontent.ObjectParser; import org.opensearch.common.xcontent.ToXContentObject; import org.opensearch.common.xcontent.XContentBuilder; @@ -86,11 +85,7 @@ public BaseMultiValuesSourceFieldConfig(StreamInput in) throws IOException { } this.missing = in.readGenericValue(); this.script = in.readOptionalWriteable(Script::new); - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - this.timeZone = DateUtils.dateTimeZoneToZoneId(in.readOptionalTimeZone()); - } else { - this.timeZone = in.readOptionalZoneId(); - } + this.timeZone = in.readOptionalZoneId(); } @Override @@ -102,11 +97,7 @@ public void writeTo(StreamOutput out) throws IOException { } out.writeGenericValue(missing); out.writeOptionalWriteable(script); - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeOptionalTimeZone(DateUtils.zoneIdToDateTimeZone(timeZone)); - } else { - out.writeOptionalZoneId(timeZone); - } + out.writeOptionalZoneId(timeZone); doWriteTo(out); } diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceAggregationBuilder.java b/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceAggregationBuilder.java index 52afc6435d562..b492d9cadb975 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceAggregationBuilder.java +++ 
b/server/src/main/java/org/opensearch/search/aggregations/support/ValuesSourceAggregationBuilder.java @@ -31,12 +31,10 @@ package org.opensearch.search.aggregations.support; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.common.ParseField; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; -import org.opensearch.common.time.DateUtils; import org.opensearch.common.xcontent.AbstractObjectParser; import org.opensearch.common.xcontent.ObjectParser; import org.opensearch.common.xcontent.XContentBuilder; @@ -233,11 +231,7 @@ private void read(StreamInput in) throws IOException { } format = in.readOptionalString(); missing = in.readGenericValue(); - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - timeZone = DateUtils.dateTimeZoneToZoneId(in.readOptionalTimeZone()); - } else { - timeZone = in.readOptionalZoneId(); - } + timeZone = in.readOptionalZoneId(); } @Override @@ -259,11 +253,7 @@ protected final void doWriteTo(StreamOutput out) throws IOException { } out.writeOptionalString(format); out.writeGenericValue(missing); - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeOptionalTimeZone(DateUtils.zoneIdToDateTimeZone(timeZone)); - } else { - out.writeOptionalZoneId(timeZone); - } + out.writeOptionalZoneId(timeZone); innerWriteTo(out); } diff --git a/server/src/main/java/org/opensearch/search/builder/SearchSourceBuilder.java b/server/src/main/java/org/opensearch/search/builder/SearchSourceBuilder.java index d3b131cf9f792..565932f1bca13 100644 --- a/server/src/main/java/org/opensearch/search/builder/SearchSourceBuilder.java +++ b/server/src/main/java/org/opensearch/search/builder/SearchSourceBuilder.java @@ -260,11 +260,7 @@ public SearchSourceBuilder(StreamInput in) throws IOException { searchAfterBuilder = in.readOptionalWriteable(SearchAfterBuilder::new); sliceBuilder = in.readOptionalWriteable(SliceBuilder::new); collapse = in.readOptionalWriteable(CollapseBuilder::new); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - trackTotalHitsUpTo = in.readOptionalInt(); - } else { - trackTotalHitsUpTo = in.readBoolean() ? TRACK_TOTAL_HITS_ACCURATE : TRACK_TOTAL_HITS_DISABLED; - } + trackTotalHitsUpTo = in.readOptionalInt(); if (in.getVersion().onOrAfter(LegacyESVersion.V_7_10_0)) { if (in.readBoolean()) { fetchFields = in.readList(FieldAndFormat::new); @@ -326,11 +322,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalWriteable(searchAfterBuilder); out.writeOptionalWriteable(sliceBuilder); out.writeOptionalWriteable(collapse); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - out.writeOptionalInt(trackTotalHitsUpTo); - } else { - out.writeBoolean(trackTotalHitsUpTo == null ? 
true : trackTotalHitsUpTo > SearchContext.TRACK_TOTAL_HITS_DISABLED); - } + out.writeOptionalInt(trackTotalHitsUpTo); if (out.getVersion().onOrAfter(LegacyESVersion.V_7_10_0)) { out.writeBoolean(fetchFields != null); if (fetchFields != null) { diff --git a/server/src/main/java/org/opensearch/search/dfs/DfsSearchResult.java b/server/src/main/java/org/opensearch/search/dfs/DfsSearchResult.java index 90cc547f62a95..4a82b8eba653f 100644 --- a/server/src/main/java/org/opensearch/search/dfs/DfsSearchResult.java +++ b/server/src/main/java/org/opensearch/search/dfs/DfsSearchResult.java @@ -147,16 +147,10 @@ public static void writeFieldStats(StreamOutput out, ObjectObjectHashMap= 0; out.writeVLong(statistics.maxDoc()); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - // stats are always positive numbers - out.writeVLong(statistics.docCount()); - out.writeVLong(statistics.sumTotalTermFreq()); - out.writeVLong(statistics.sumDocFreq()); - } else { - out.writeVLong(addOne(statistics.docCount())); - out.writeVLong(addOne(statistics.sumTotalTermFreq())); - out.writeVLong(addOne(statistics.sumDocFreq())); - } + // stats are always positive numbers + out.writeVLong(statistics.docCount()); + out.writeVLong(statistics.sumTotalTermFreq()); + out.writeVLong(statistics.sumDocFreq()); } } @@ -188,16 +182,10 @@ static ObjectObjectHashMap readFieldStats(StreamIn final long docCount; final long sumTotalTermFreq; final long sumDocFreq; - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - // stats are always positive numbers - docCount = in.readVLong(); - sumTotalTermFreq = in.readVLong(); - sumDocFreq = in.readVLong(); - } else { - docCount = subOne(in.readVLong()); - sumTotalTermFreq = subOne(in.readVLong()); - sumDocFreq = subOne(in.readVLong()); - } + // stats are always positive numbers + docCount = in.readVLong(); + sumTotalTermFreq = in.readVLong(); + sumDocFreq = in.readVLong(); CollectionStatistics stats = new CollectionStatistics(field, maxDoc, docCount, sumTotalTermFreq, sumDocFreq); fieldStatistics.put(field, stats); } diff --git a/server/src/main/java/org/opensearch/search/internal/ShardSearchRequest.java b/server/src/main/java/org/opensearch/search/internal/ShardSearchRequest.java index 828c2f8c78d69..006a0627c337d 100644 --- a/server/src/main/java/org/opensearch/search/internal/ShardSearchRequest.java +++ b/server/src/main/java/org/opensearch/search/internal/ShardSearchRequest.java @@ -258,11 +258,7 @@ public ShardSearchRequest(StreamInput in) throws IOException { outboundNetworkTime = in.readVLong(); } clusterAlias = in.readOptionalString(); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - allowPartialSearchResults = in.readBoolean(); - } else { - allowPartialSearchResults = false; - } + allowPartialSearchResults = in.readBoolean(); indexRoutings = in.readStringArray(); preference = in.readOptionalString(); if (in.getVersion().onOrAfter(LegacyESVersion.V_7_7_0)) { @@ -336,9 +332,7 @@ protected final void innerWriteTo(StreamOutput out, boolean asKey) throws IOExce out.writeVLong(outboundNetworkTime); } out.writeOptionalString(clusterAlias); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - out.writeBoolean(allowPartialSearchResults); - } + out.writeBoolean(allowPartialSearchResults); if (asKey == false) { out.writeStringArray(indexRoutings); out.writeOptionalString(preference); diff --git a/server/src/main/java/org/opensearch/search/query/QuerySearchResult.java b/server/src/main/java/org/opensearch/search/query/QuerySearchResult.java index 
dceacd57d623e..31fdc5c9d9e9d 100644 --- a/server/src/main/java/org/opensearch/search/query/QuerySearchResult.java +++ b/server/src/main/java/org/opensearch/search/query/QuerySearchResult.java @@ -45,7 +45,6 @@ import org.opensearch.search.SearchShardTarget; import org.opensearch.search.aggregations.InternalAggregation; import org.opensearch.search.aggregations.InternalAggregations; -import org.opensearch.search.aggregations.pipeline.PipelineAggregator; import org.opensearch.search.internal.ShardSearchContextId; import org.opensearch.search.internal.ShardSearchRequest; import org.opensearch.search.profile.NetworkTime; @@ -54,7 +53,6 @@ import java.io.IOException; -import static java.util.Collections.emptyList; import static org.opensearch.common.lucene.Lucene.readTopDocs; import static org.opensearch.common.lucene.Lucene.writeTopDocs; @@ -361,10 +359,6 @@ public void readFromWithId(ShardSearchContextId id, StreamInput in) throws IOExc if (hasAggs = in.readBoolean()) { aggregations = DelayableWriteable.referencing(InternalAggregations.readFrom(in)); } - if (in.getVersion().before(LegacyESVersion.V_7_2_0)) { - // The list of PipelineAggregators is sent by old versions. We don't need it anyway. - in.readNamedWriteableList(PipelineAggregator.class); - } } else { if (hasAggs = in.readBoolean()) { aggregations = DelayableWriteable.delayed(InternalAggregations::readFrom, in); @@ -410,35 +404,11 @@ public void writeToNoId(StreamOutput out) throws IOException { writeTopDocs(out, topDocsAndMaxScore); if (aggregations == null) { out.writeBoolean(false); - if (out.getVersion().before(LegacyESVersion.V_7_2_0)) { - /* - * Earlier versions expect sibling pipeline aggs separately - * as they used to be set to QuerySearchResult directly, while - * later versions expect them in InternalAggregations. Note - * that despite serializing sibling pipeline aggs as part of - * InternalAggregations is supported since 6.7.0, the shards - * set sibling pipeline aggs to InternalAggregations only from - * 7.1 on. - */ - out.writeNamedWriteableList(emptyList()); - } } else { out.writeBoolean(true); if (out.getVersion().before(LegacyESVersion.V_7_7_0)) { InternalAggregations aggs = aggregations.expand(); aggs.writeTo(out); - if (out.getVersion().before(LegacyESVersion.V_7_2_0)) { - /* - * Earlier versions expect sibling pipeline aggs separately - * as they used to be set to QuerySearchResult directly, while - * later versions expect them in InternalAggregations. Note - * that despite serializing sibling pipeline aggs as part of - * InternalAggregations is supported since 6.7.0, the shards - * set sibling pipeline aggs to InternalAggregations only from - * 7.1 on. 
- */ - out.writeNamedWriteableList(aggs.getTopLevelPipelineAggregators()); - } } else { aggregations.writeTo(out); } diff --git a/server/src/main/java/org/opensearch/search/slice/SliceBuilder.java b/server/src/main/java/org/opensearch/search/slice/SliceBuilder.java index 21e8a5646b9a5..1fd94eaddb2dd 100644 --- a/server/src/main/java/org/opensearch/search/slice/SliceBuilder.java +++ b/server/src/main/java/org/opensearch/search/slice/SliceBuilder.java @@ -35,7 +35,6 @@ import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.routing.GroupShardsIterator; @@ -260,16 +259,7 @@ public Query toFilter(ClusterService clusterService, ShardSearchRequest request, String field = this.field; boolean useTermQuery = false; if ("_uid".equals(field)) { - // on new indices, the _id acts as a _uid - field = IdFieldMapper.NAME; - if (context.getIndexSettings().getIndexVersionCreated().onOrAfter(LegacyESVersion.V_7_0_0)) { - throw new IllegalArgumentException("Computing slices on the [_uid] field is illegal for 7.x indices, use [_id] instead"); - } - DEPRECATION_LOG.deprecate( - "slice_on_uid", - "Computing slices on the [_uid] field is deprecated for 6.x indices, use [_id] instead" - ); - useTermQuery = true; + throw new IllegalArgumentException("Computing slices on the [_uid] field is illegal for 7.x indices, use [_id] instead"); } else if (IdFieldMapper.NAME.equals(field)) { useTermQuery = true; } else if (type.hasDocValues() == false) { diff --git a/server/src/main/java/org/opensearch/search/sort/FieldSortBuilder.java b/server/src/main/java/org/opensearch/search/sort/FieldSortBuilder.java index b4a93ec9869e6..395ebaf2523e7 100644 --- a/server/src/main/java/org/opensearch/search/sort/FieldSortBuilder.java +++ b/server/src/main/java/org/opensearch/search/sort/FieldSortBuilder.java @@ -38,7 +38,6 @@ import org.apache.lucene.index.PointValues; import org.apache.lucene.index.Terms; import org.apache.lucene.search.SortField; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchParseException; import org.opensearch.common.ParseField; import org.opensearch.common.ParsingException; @@ -161,9 +160,7 @@ public FieldSortBuilder(StreamInput in) throws IOException { sortMode = in.readOptionalWriteable(SortMode::readFromStream); unmappedType = in.readOptionalString(); nestedSort = in.readOptionalWriteable(NestedSortBuilder::new); - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - numericType = in.readOptionalString(); - } + numericType = in.readOptionalString(); } @Override @@ -176,9 +173,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalWriteable(sortMode); out.writeOptionalString(unmappedType); out.writeOptionalWriteable(nestedSort); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - out.writeOptionalString(numericType); - } + out.writeOptionalString(numericType); } /** Returns the document field this sort should be based on. 
*/ diff --git a/server/src/main/java/org/opensearch/search/suggest/Suggest.java b/server/src/main/java/org/opensearch/search/suggest/Suggest.java index 90cc382ee4126..0aa881e2a3c9e 100644 --- a/server/src/main/java/org/opensearch/search/suggest/Suggest.java +++ b/server/src/main/java/org/opensearch/search/suggest/Suggest.java @@ -33,7 +33,6 @@ import org.apache.lucene.util.CollectionUtil; import org.apache.lucene.util.SetOnce; -import org.opensearch.LegacyESVersion; import org.opensearch.common.CheckedFunction; import org.opensearch.common.ParseField; import org.opensearch.common.ParsingException; @@ -53,8 +52,6 @@ import org.opensearch.search.suggest.Suggest.Suggestion.Entry; import org.opensearch.search.suggest.Suggest.Suggestion.Entry.Option; import org.opensearch.search.suggest.completion.CompletionSuggestion; -import org.opensearch.search.suggest.phrase.PhraseSuggestion; -import org.opensearch.search.suggest.term.TermSuggestion; import java.io.IOException; import java.util.ArrayList; @@ -101,36 +98,11 @@ public Suggest(List>> suggestions) } public Suggest(StreamInput in) throws IOException { - // in older versions, Suggestion types were serialized as Streamable - if (in.getVersion().before(LegacyESVersion.V_7_0_0)) { - final int size = in.readVInt(); - suggestions = new ArrayList<>(size); - for (int i = 0; i < size; i++) { - Suggestion> suggestion; - final int type = in.readVInt(); - switch (type) { - case TermSuggestion.TYPE: - suggestion = new TermSuggestion(in); - break; - case CompletionSuggestion.TYPE: - suggestion = new CompletionSuggestion(in); - break; - case PhraseSuggestion.TYPE: - suggestion = new PhraseSuggestion(in); - break; - default: - throw new IllegalArgumentException("Unknown suggestion type with ordinal " + type); - } - suggestions.add(suggestion); - } - } else { - int suggestionCount = in.readVInt(); - suggestions = new ArrayList<>(suggestionCount); - for (int i = 0; i < suggestionCount; i++) { - suggestions.add(in.readNamedWriteable(Suggestion.class)); - } + int suggestionCount = in.readVInt(); + suggestions = new ArrayList<>(suggestionCount); + for (int i = 0; i < suggestionCount; i++) { + suggestions.add(in.readNamedWriteable(Suggestion.class)); } - hasScoreDocs = filter(CompletionSuggestion.class).stream().anyMatch(CompletionSuggestion::hasScoreDocs); } @@ -169,18 +141,9 @@ public boolean hasScoreDocs() { @Override public void writeTo(StreamOutput out) throws IOException { - // in older versions, Suggestion types were serialized as Streamable - if (out.getVersion().before(LegacyESVersion.V_7_0_0)) { - out.writeVInt(suggestions.size()); - for (Suggestion command : suggestions) { - out.writeVInt(command.getWriteableType()); - command.writeTo(out); - } - } else { - out.writeVInt(suggestions.size()); - for (Suggestion> suggestion : suggestions) { - out.writeNamedWriteable(suggestion); - } + out.writeVInt(suggestions.size()); + for (Suggestion> suggestion : suggestions) { + out.writeNamedWriteable(suggestion); } } @@ -284,13 +247,6 @@ public Suggestion(String name, int size) { public Suggestion(StreamInput in) throws IOException { name = in.readString(); size = in.readVInt(); - - // this is a hack to work around slightly different serialization order of earlier versions of TermSuggestion - if (in.getVersion().before(LegacyESVersion.V_7_0_0) && this instanceof TermSuggestion) { - TermSuggestion t = (TermSuggestion) this; - t.setSort(SortBy.readFromStream(in)); - } - int entriesCount = in.readVInt(); entries.clear(); for (int i = 0; i < entriesCount; i++) { @@ 
-398,13 +354,6 @@ public void trim() { public void writeTo(StreamOutput out) throws IOException { out.writeString(name); out.writeVInt(size); - - // this is a hack to work around slightly different serialization order in older versions of TermSuggestion - if (out.getVersion().before(LegacyESVersion.V_7_0_0) && this instanceof TermSuggestion) { - TermSuggestion termSuggestion = (TermSuggestion) this; - termSuggestion.getSort().writeTo(out); - } - out.writeVInt(entries.size()); for (Entry entry : entries) { entry.writeTo(out); diff --git a/server/src/main/java/org/opensearch/search/suggest/completion/context/GeoContextMapping.java b/server/src/main/java/org/opensearch/search/suggest/completion/context/GeoContextMapping.java index e3b809dc57b83..bf9598a3110ad 100644 --- a/server/src/main/java/org/opensearch/search/suggest/completion/context/GeoContextMapping.java +++ b/server/src/main/java/org/opensearch/search/suggest/completion/context/GeoContextMapping.java @@ -37,7 +37,6 @@ import org.apache.lucene.document.StringField; import org.apache.lucene.index.DocValuesType; import org.apache.lucene.index.IndexableField; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchParseException; import org.opensearch.Version; import org.opensearch.common.geo.GeoPoint; @@ -312,37 +311,14 @@ public void validateReferences(Version indexVersionCreated, Function sortComparator() { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - sort.writeTo(out); - } + sort.writeTo(out); } @Override diff --git a/server/src/main/java/org/opensearch/snapshots/RestoreService.java b/server/src/main/java/org/opensearch/snapshots/RestoreService.java index 417498467622a..60c01d0b04639 100644 --- a/server/src/main/java/org/opensearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/opensearch/snapshots/RestoreService.java @@ -84,7 +84,9 @@ import org.opensearch.common.settings.ClusterSettings; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.Index; +import org.opensearch.index.IndexModule; import org.opensearch.index.IndexSettings; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.ShardId; @@ -391,16 +393,6 @@ public void restoreSnapshot(final RestoreSnapshotRequest request, final ActionLi @Override public ClusterState execute(ClusterState currentState) { RestoreInProgress restoreInProgress = currentState.custom(RestoreInProgress.TYPE, RestoreInProgress.EMPTY); - if (currentState.getNodes().getMinNodeVersion().before(LegacyESVersion.V_7_0_0)) { - // Check if another restore process is already running - cannot run two restore processes at the - // same time in versions prior to 7.0 - if (restoreInProgress.isEmpty() == false) { - throw new ConcurrentSnapshotExecutionException( - snapshot, - "Restore process is already running in this cluster" - ); - } - } // Check if the snapshot to restore is currently being deleted SnapshotDeletionsInProgress deletionsInProgress = currentState.custom( SnapshotDeletionsInProgress.TYPE, @@ -431,20 +423,30 @@ public ClusterState execute(ClusterState currentState) { .getMaxNodeVersion() .minimumIndexCompatibilityVersion(); for (Map.Entry indexEntry : indices.entrySet()) { + String renamedIndexName = indexEntry.getKey(); String index = indexEntry.getValue(); boolean partial = checkPartial(index); - SnapshotRecoverySource 
recoverySource = new SnapshotRecoverySource( + + IndexMetadata snapshotIndexMetadata = updateIndexSettings( + metadata.index(index), + request.indexSettings(), + request.ignoreIndexSettings() + ); + final boolean isSearchableSnapshot = FeatureFlags.isEnabled(FeatureFlags.SEARCHABLE_SNAPSHOT) + && IndexModule.Type.REMOTE_SNAPSHOT.getSettingsKey().equals(request.storageType().toString()); + if (isSearchableSnapshot) { + snapshotIndexMetadata = addSnapshotToIndexSettings( + snapshotIndexMetadata, + snapshot, + repositoryData.resolveIndexId(index) + ); + } + final SnapshotRecoverySource recoverySource = new SnapshotRecoverySource( restoreUUID, snapshot, snapshotInfo.version(), - repositoryData.resolveIndexId(index) - ); - String renamedIndexName = indexEntry.getKey(); - IndexMetadata snapshotIndexMetadata = metadata.index(index); - snapshotIndexMetadata = updateIndexSettings( - snapshotIndexMetadata, - request.indexSettings(), - request.ignoreIndexSettings() + repositoryData.resolveIndexId(index), + isSearchableSnapshot ); try { snapshotIndexMetadata = metadataIndexUpgradeService.upgradeIndexMetadata( @@ -1217,4 +1219,16 @@ public void applyClusterState(ClusterChangedEvent event) { logger.warn("Failed to update restore state ", t); } } + + private static IndexMetadata addSnapshotToIndexSettings(IndexMetadata metadata, Snapshot snapshot, IndexId indexId) { + final Settings newSettings = Settings.builder() + .put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), IndexModule.Type.REMOTE_SNAPSHOT.getSettingsKey()) + .put(IndexSettings.SEARCHABLE_SNAPSHOT_REPOSITORY.getKey(), snapshot.getRepository()) + .put(IndexSettings.SEARCHABLE_SNAPSHOT_ID_UUID.getKey(), snapshot.getSnapshotId().getUUID()) + .put(IndexSettings.SEARCHABLE_SNAPSHOT_ID_NAME.getKey(), snapshot.getSnapshotId().getName()) + .put(IndexSettings.SEARCHABLE_SNAPSHOT_INDEX_ID.getKey(), indexId.getId()) + .put(metadata.getSettings()) + .build(); + return IndexMetadata.builder(metadata).settings(newSettings).build(); + } } diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotInfo.java b/server/src/main/java/org/opensearch/snapshots/SnapshotInfo.java index d7ebba721a52c..38d9df0e960e0 100644 --- a/server/src/main/java/org/opensearch/snapshots/SnapshotInfo.java +++ b/server/src/main/java/org/opensearch/snapshots/SnapshotInfo.java @@ -73,7 +73,6 @@ public final class SnapshotInfo implements Comparable, ToXContent, public static final String CONTEXT_MODE_PARAM = "context_mode"; public static final String CONTEXT_MODE_SNAPSHOT = "SNAPSHOT"; - public static final Version METADATA_FIELD_INTRODUCED = LegacyESVersion.V_7_3_0; private static final DateFormatter DATE_TIME_FORMATTER = DateFormatter.forPattern("strict_date_optional_time"); private static final String SNAPSHOT = "snapshot"; private static final String UUID = "uuid"; @@ -401,11 +400,7 @@ public SnapshotInfo(final StreamInput in) throws IOException { shardFailures = Collections.unmodifiableList(in.readList(SnapshotShardFailure::new)); version = in.readBoolean() ? 
Version.readVersion(in) : null; includeGlobalState = in.readOptionalBoolean(); - if (in.getVersion().onOrAfter(METADATA_FIELD_INTRODUCED)) { - userMetadata = in.readMap(); - } else { - userMetadata = null; - } + userMetadata = in.readMap(); if (in.getVersion().onOrAfter(DATA_STREAMS_IN_SNAPSHOT)) { dataStreams = in.readStringList(); } else { @@ -840,11 +835,9 @@ public void writeTo(final StreamOutput out) throws IOException { out.writeBoolean(false); } out.writeOptionalBoolean(includeGlobalState); - if (out.getVersion().onOrAfter(METADATA_FIELD_INTRODUCED)) { - out.writeMap(userMetadata); - if (out.getVersion().onOrAfter(DATA_STREAMS_IN_SNAPSHOT)) { - out.writeStringCollection(dataStreams); - } + out.writeMap(userMetadata); + if (out.getVersion().onOrAfter(DATA_STREAMS_IN_SNAPSHOT)) { + out.writeStringCollection(dataStreams); } } diff --git a/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java b/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java index 4f672c9813d64..e53c2889f88e6 100644 --- a/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/opensearch/snapshots/SnapshotsService.java @@ -90,7 +90,6 @@ import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.index.Index; import org.opensearch.index.shard.ShardId; import org.opensearch.repositories.IndexId; @@ -142,12 +141,6 @@ */ public class SnapshotsService extends AbstractLifecycleComponent implements ClusterStateApplier { - /** - * Minimum node version which does not use {@link Repository#initializeSnapshot(SnapshotId, List, Metadata)} to write snapshot metadata - * when starting a snapshot. - */ - public static final Version NO_REPO_INITIALIZE_VERSION = LegacyESVersion.V_7_5_0; - public static final Version FULL_CONCURRENCY_VERSION = LegacyESVersion.V_7_9_0; public static final Version CLONE_SNAPSHOT_VERSION = LegacyESVersion.V_7_10_0; @@ -156,7 +149,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus public static final Version INDEX_GEN_IN_REPO_DATA_VERSION = LegacyESVersion.V_7_9_0; - public static final Version OLD_SNAPSHOT_FORMAT = LegacyESVersion.V_7_5_0; + public static final Version OLD_SNAPSHOT_FORMAT = LegacyESVersion.fromId(7050099); public static final Version MULTI_DELETE_VERSION = LegacyESVersion.V_7_8_0; @@ -244,144 +237,6 @@ public SnapshotsService( } } - /** - * Same as {@link #createSnapshot(CreateSnapshotRequest, ActionListener)} but invokes its callback on completion of - * the snapshot. - * Note: This method is only used in clusters that contain a node older than {@link #NO_REPO_INITIALIZE_VERSION} to ensure a backwards - * compatible path for initializing the snapshot in the repository is executed. - * - * @param request snapshot request - * @param listener snapshot completion listener - */ - public void executeSnapshotLegacy(final CreateSnapshotRequest request, final ActionListener listener) { - createSnapshotLegacy( - request, - ActionListener.wrap(snapshot -> addListener(snapshot, ActionListener.map(listener, Tuple::v2)), listener::onFailure) - ); - } - - /** - * Initializes the snapshotting process. - *

    - * This method is used by clients to start snapshot. It makes sure that there is no snapshots are currently running and - * creates a snapshot record in cluster state metadata. - * Note: This method is only used in clusters that contain a node older than {@link #NO_REPO_INITIALIZE_VERSION} to ensure a backwards - * compatible path for initializing the snapshot in the repository is executed. - * - * @param request snapshot request - * @param listener snapshot creation listener - */ - public void createSnapshotLegacy(final CreateSnapshotRequest request, final ActionListener listener) { - final String repositoryName = request.repository(); - final String snapshotName = indexNameExpressionResolver.resolveDateMathExpression(request.snapshot()); - validate(repositoryName, snapshotName); - final SnapshotId snapshotId = new SnapshotId(snapshotName, UUIDs.randomBase64UUID()); // new UUID for the snapshot - Repository repository = repositoriesService.repository(request.repository()); - final Map userMeta = repository.adaptUserMetadata(request.userMetadata()); - clusterService.submitStateUpdateTask("create_snapshot [" + snapshotName + ']', new ClusterStateUpdateTask() { - - private List indices; - - private SnapshotsInProgress.Entry newEntry; - - @Override - public ClusterState execute(ClusterState currentState) { - validate(repositoryName, snapshotName, currentState); - SnapshotDeletionsInProgress deletionsInProgress = currentState.custom(SnapshotDeletionsInProgress.TYPE); - if (deletionsInProgress != null && deletionsInProgress.hasDeletionsInProgress()) { - throw new ConcurrentSnapshotExecutionException( - repositoryName, - snapshotName, - "cannot snapshot while a snapshot deletion is in-progress in [" + deletionsInProgress + "]" - ); - } - final RepositoryCleanupInProgress repositoryCleanupInProgress = currentState.custom(RepositoryCleanupInProgress.TYPE); - if (repositoryCleanupInProgress != null && repositoryCleanupInProgress.hasCleanupInProgress()) { - throw new ConcurrentSnapshotExecutionException( - repositoryName, - snapshotName, - "cannot snapshot while a repository cleanup is in-progress in [" + repositoryCleanupInProgress + "]" - ); - } - SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE); - // Fail if there are any concurrently running snapshots. The only exception to this being a snapshot in INIT state from a - // previous cluster-manager that we can simply ignore and remove from the cluster state because we would clean it up from - // the cluster state anyway in #applyClusterState. 
- if (snapshots != null - && snapshots.entries() - .stream() - .anyMatch( - entry -> (entry.state() == State.INIT && initializingSnapshots.contains(entry.snapshot()) == false) == false - )) { - throw new ConcurrentSnapshotExecutionException(repositoryName, snapshotName, " a snapshot is already running"); - } - // Store newSnapshot here to be processed in clusterStateProcessed - indices = Arrays.asList(indexNameExpressionResolver.concreteIndexNames(currentState, request)); - - final List dataStreams = indexNameExpressionResolver.dataStreamNames( - currentState, - request.indicesOptions(), - request.indices() - ); - - logger.trace("[{}][{}] creating snapshot for indices [{}]", repositoryName, snapshotName, indices); - newEntry = new SnapshotsInProgress.Entry( - new Snapshot(repositoryName, snapshotId), - request.includeGlobalState(), - request.partial(), - State.INIT, - Collections.emptyList(), // We'll resolve the list of indices when moving to the STARTED state in #beginSnapshot - dataStreams, - threadPool.absoluteTimeInMillis(), - RepositoryData.UNKNOWN_REPO_GEN, - ImmutableOpenMap.of(), - userMeta, - Version.CURRENT - ); - initializingSnapshots.add(newEntry.snapshot()); - snapshots = SnapshotsInProgress.of(Collections.singletonList(newEntry)); - return ClusterState.builder(currentState).putCustom(SnapshotsInProgress.TYPE, snapshots).build(); - } - - @Override - public void onFailure(String source, Exception e) { - logger.warn(() -> new ParameterizedMessage("[{}][{}] failed to create snapshot", repositoryName, snapshotName), e); - if (newEntry != null) { - initializingSnapshots.remove(newEntry.snapshot()); - } - newEntry = null; - listener.onFailure(e); - } - - @Override - public void clusterStateProcessed(String source, ClusterState oldState, final ClusterState newState) { - if (newEntry != null) { - final Snapshot current = newEntry.snapshot(); - assert initializingSnapshots.contains(current); - assert indices != null; - beginSnapshot(newState, newEntry, request.partial(), indices, repository, new ActionListener() { - @Override - public void onResponse(final Snapshot snapshot) { - initializingSnapshots.remove(snapshot); - listener.onResponse(snapshot); - } - - @Override - public void onFailure(final Exception e) { - initializingSnapshots.remove(current); - listener.onFailure(e); - } - }); - } - } - - @Override - public TimeValue timeout() { - return request.clusterManagerNodeTimeout(); - } - }); - } - /** * Same as {@link #createSnapshot(CreateSnapshotRequest, ActionListener)} but invokes its callback on completion of * the snapshot. @@ -946,227 +801,6 @@ private static void validate(final String repositoryName, final String snapshotN } } - /** - * Starts snapshot. - *

    - * Creates snapshot in repository and updates snapshot metadata record with list of shards that needs to be processed. - * Note: This method is only used in clusters that contain a node older than {@link #NO_REPO_INITIALIZE_VERSION} to ensure a backwards - * compatible path for initializing the snapshot in the repository is executed. - * - * @param clusterState cluster state - * @param snapshot snapshot meta data - * @param partial allow partial snapshots - * @param userCreateSnapshotListener listener - */ - private void beginSnapshot( - final ClusterState clusterState, - final SnapshotsInProgress.Entry snapshot, - final boolean partial, - final List indices, - final Repository repository, - final ActionListener userCreateSnapshotListener - ) { - threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(new AbstractRunnable() { - - boolean hadAbortedInitializations; - - @Override - protected void doRun() { - assert initializingSnapshots.contains(snapshot.snapshot()); - if (repository.isReadOnly()) { - throw new RepositoryException(repository.getMetadata().name(), "cannot create snapshot in a readonly repository"); - } - final String snapshotName = snapshot.snapshot().getSnapshotId().getName(); - final StepListener repositoryDataListener = new StepListener<>(); - repository.getRepositoryData(repositoryDataListener); - repositoryDataListener.whenComplete(repositoryData -> { - // check if the snapshot name already exists in the repository - if (repositoryData.getSnapshotIds().stream().anyMatch(s -> s.getName().equals(snapshotName))) { - throw new InvalidSnapshotNameException( - repository.getMetadata().name(), - snapshotName, - "snapshot with the same name already exists" - ); - } - if (clusterState.nodes().getMinNodeVersion().onOrAfter(NO_REPO_INITIALIZE_VERSION) == false) { - // In mixed version clusters we initialize the snapshot in the repository so that in case of a cluster-manager - // failover to an - // older version cluster-manager node snapshot finalization (that assumes initializeSnapshot was called) produces a - // valid - // snapshot. 
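The removed method body below is dense, so a condensed sketch of its control flow may help (a paraphrase of the code in this hunk, not a drop-in replacement):

    // Runs on the SNAPSHOT thread pool inside an AbstractRunnable:
    // 1. refuse read-only repositories;
    // 2. fetch RepositoryData asynchronously through a StepListener;
    // 3. reject a snapshot name that already exists in the repository;
    // 4. on mixed-version clusters (any node older than NO_REPO_INITIALIZE_VERSION),
    //    write the legacy repository-side marker via repository.initializeSnapshot;
    // 5. with no indices to snapshot, finish immediately; otherwise submit an
    //    "update_snapshot" cluster-state task that moves the INIT entry to STARTED,
    //    or to FAILED when partial == false and shards are missing or closed.
    final StepListener<RepositoryData> repositoryDataListener = new StepListener<>();
    repository.getRepositoryData(repositoryDataListener);
    repositoryDataListener.whenComplete(repositoryData -> { /* steps 3 to 5 */ }, e -> { /* failure path, see onFailure below */ });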
- repository.initializeSnapshot( - snapshot.snapshot().getSnapshotId(), - snapshot.indices(), - metadataForSnapshot(snapshot, clusterState.metadata()) - ); - } - - logger.info("snapshot [{}] started", snapshot.snapshot()); - final Version version = minCompatibleVersion(clusterState.nodes().getMinNodeVersion(), repositoryData, null); - if (indices.isEmpty()) { - // No indices in this snapshot - we are done - userCreateSnapshotListener.onResponse(snapshot.snapshot()); - endSnapshot( - SnapshotsInProgress.startedEntry( - snapshot.snapshot(), - snapshot.includeGlobalState(), - snapshot.partial(), - Collections.emptyList(), - Collections.emptyList(), - threadPool.absoluteTimeInMillis(), - repositoryData.getGenId(), - ImmutableOpenMap.of(), - snapshot.userMetadata(), - version - ), - clusterState.metadata(), - repositoryData - ); - return; - } - clusterService.submitStateUpdateTask("update_snapshot [" + snapshot.snapshot() + "]", new ClusterStateUpdateTask() { - - @Override - public ClusterState execute(ClusterState currentState) { - SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE); - List entries = new ArrayList<>(); - for (SnapshotsInProgress.Entry entry : snapshots.entries()) { - if (entry.snapshot().equals(snapshot.snapshot()) == false) { - entries.add(entry); - continue; - } - - if (entry.state() == State.ABORTED) { - entries.add(entry); - assert entry.shards().isEmpty(); - hadAbortedInitializations = true; - } else { - final List indexIds = repositoryData.resolveNewIndices(indices, Collections.emptyMap()); - // Replace the snapshot that was just initialized - ImmutableOpenMap shards = shards( - snapshots, - currentState.custom(SnapshotDeletionsInProgress.TYPE, SnapshotDeletionsInProgress.EMPTY), - currentState.metadata(), - currentState.routingTable(), - indexIds, - useShardGenerations(version), - repositoryData, - entry.repository() - ); - if (!partial) { - Tuple, Set> indicesWithMissingShards = indicesWithMissingShards( - shards, - currentState.metadata() - ); - Set missing = indicesWithMissingShards.v1(); - Set closed = indicesWithMissingShards.v2(); - if (missing.isEmpty() == false || closed.isEmpty() == false) { - final StringBuilder failureMessage = new StringBuilder(); - if (missing.isEmpty() == false) { - failureMessage.append("Indices don't have primary shards "); - failureMessage.append(missing); - } - if (closed.isEmpty() == false) { - if (failureMessage.length() > 0) { - failureMessage.append("; "); - } - failureMessage.append("Indices are closed "); - failureMessage.append(closed); - } - entries.add( - new SnapshotsInProgress.Entry( - entry, - State.FAILED, - indexIds, - repositoryData.getGenId(), - shards, - version, - failureMessage.toString() - ) - ); - continue; - } - } - entries.add( - new SnapshotsInProgress.Entry( - entry, - State.STARTED, - indexIds, - repositoryData.getGenId(), - shards, - version, - null - ) - ); - } - } - return ClusterState.builder(currentState) - .putCustom(SnapshotsInProgress.TYPE, SnapshotsInProgress.of(unmodifiableList(entries))) - .build(); - } - - @Override - public void onFailure(String source, Exception e) { - logger.warn( - () -> new ParameterizedMessage("[{}] failed to create snapshot", snapshot.snapshot().getSnapshotId()), - e - ); - removeFailedSnapshotFromClusterState( - snapshot.snapshot(), - e, - null, - new CleanupAfterErrorListener(userCreateSnapshotListener, e) - ); - } - - @Override - public void onNoLongerClusterManager(String source) { - // We are not longer a cluster-manager - we shouldn't try to 
do any cleanup - // The new cluster-manager will take care of it - logger.warn( - "[{}] failed to create snapshot - no longer a cluster-manager", - snapshot.snapshot().getSnapshotId() - ); - userCreateSnapshotListener.onFailure( - new SnapshotException(snapshot.snapshot(), "cluster-manager changed during snapshot initialization") - ); - } - - @Override - public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - // The userCreateSnapshotListener.onResponse() notifies caller that the snapshot was accepted - // for processing. If client wants to wait for the snapshot completion, it can register snapshot - // completion listener in this method. For the snapshot completion to work properly, the snapshot - // should still exist when listener is registered. - userCreateSnapshotListener.onResponse(snapshot.snapshot()); - - if (hadAbortedInitializations) { - final SnapshotsInProgress snapshotsInProgress = newState.custom(SnapshotsInProgress.TYPE); - assert snapshotsInProgress != null; - final SnapshotsInProgress.Entry entry = snapshotsInProgress.snapshot(snapshot.snapshot()); - assert entry != null; - endSnapshot(entry, newState.metadata(), repositoryData); - } else { - endCompletedSnapshots(newState); - } - } - }); - }, this::onFailure); - } - - @Override - public void onFailure(Exception e) { - logger.warn(() -> new ParameterizedMessage("failed to create snapshot [{}]", snapshot.snapshot().getSnapshotId()), e); - removeFailedSnapshotFromClusterState( - snapshot.snapshot(), - e, - null, - new CleanupAfterErrorListener(userCreateSnapshotListener, e) - ); - } - }); - } - private static class CleanupAfterErrorListener { private final ActionListener userCreateSnapshotListener; diff --git a/server/src/main/java/org/opensearch/transport/RemoteConnectionInfo.java b/server/src/main/java/org/opensearch/transport/RemoteConnectionInfo.java index ac88eae624813..92c0d482a848f 100644 --- a/server/src/main/java/org/opensearch/transport/RemoteConnectionInfo.java +++ b/server/src/main/java/org/opensearch/transport/RemoteConnectionInfo.java @@ -36,20 +36,14 @@ import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; import org.opensearch.common.io.stream.Writeable; -import org.opensearch.common.transport.TransportAddress; import org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.ToXContentFragment; import org.opensearch.common.xcontent.XContentBuilder; import java.io.IOException; -import java.net.InetAddress; -import java.net.UnknownHostException; import java.util.Arrays; import java.util.List; import java.util.Objects; -import java.util.stream.Collectors; - -import static java.util.Collections.emptyList; /** * This class encapsulates all remote cluster information to be rendered on @@ -79,26 +73,7 @@ public RemoteConnectionInfo(StreamInput input) throws IOException { clusterAlias = input.readString(); skipUnavailable = input.readBoolean(); } else { - List seedNodes; - if (input.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - seedNodes = Arrays.asList(input.readStringArray()); - } else { - // versions prior to 7.0.0 sent the resolved transport address of the seed nodes - final List transportAddresses = input.readList(TransportAddress::new); - seedNodes = transportAddresses.stream() - .map(a -> a.address().getHostString() + ":" + a.address().getPort()) - .collect(Collectors.toList()); - /* - * Versions before 7.0 sent the HTTP addresses of all nodes in the - * remote cluster here but it was 
expensive to fetch and we - * ultimately figured out how to do without it. So we removed it. - * - * We just throw any HTTP addresses received here on the floor - * because we don't need to do anything with them. - */ - input.readList(TransportAddress::new); - } - + List seedNodes = Arrays.asList(input.readStringArray()); int connectionsPerCluster = input.readVInt(); initialConnectionTimeout = input.readTimeValue(); int numNodesConnected = input.readVInt(); @@ -137,52 +112,12 @@ public void writeTo(StreamOutput out) throws IOException { } else { if (modeInfo.modeType() == RemoteConnectionStrategy.ConnectionStrategy.SNIFF) { SniffConnectionStrategy.SniffModeInfo sniffInfo = (SniffConnectionStrategy.SniffModeInfo) this.modeInfo; - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - out.writeStringArray(sniffInfo.seedNodes.toArray(new String[0])); - } else { - // versions prior to 7.0.0 received the resolved transport address of the seed nodes - out.writeList(sniffInfo.seedNodes.stream().map(s -> { - final String host = RemoteConnectionStrategy.parseHost(s); - final int port = RemoteConnectionStrategy.parsePort(s); - try { - return new TransportAddress(InetAddress.getByAddress(host, TransportAddress.META_ADDRESS.getAddress()), port); - } catch (final UnknownHostException e) { - throw new AssertionError(e); - } - }).collect(Collectors.toList())); - /* - * Versions before 7.0 sent the HTTP addresses of all nodes in the - * remote cluster here but it was expensive to fetch and we - * ultimately figured out how to do without it. So we removed it. - * - * When sending this request to a node that expects HTTP addresses - * here we pretend that we didn't find any. This *should* be fine - * because, after all, we haven't been using this information for - * a while. - */ - out.writeList(emptyList()); - } + out.writeStringArray(sniffInfo.seedNodes.toArray(new String[0])); out.writeVInt(sniffInfo.maxConnectionsPerCluster); out.writeTimeValue(initialConnectionTimeout); out.writeVInt(sniffInfo.numNodesConnected); } else { - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_0_0)) { - out.writeStringArray(new String[0]); - } else { - // versions prior to 7.0.0 received the resolved transport address of the seed nodes - out.writeList(emptyList()); - /* - * Versions before 7.0 sent the HTTP addresses of all nodes in the - * remote cluster here but it was expensive to fetch and we - * ultimately figured out how to do without it. So we removed it. - * - * When sending this request to a node that expects HTTP addresses - * here we pretend that we didn't find any. This *should* be fine - * because, after all, we haven't been using this information for - * a while. 
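With these legacy branches gone, the sniff-mode payload becomes a fixed, version-independent layout. For orientation, the write side kept by this change and the read side in the constructor mirror each other field for field (condensed from the added lines in this file, shown only to make the symmetry visible):

    // writeTo(StreamOutput out), SNIFF mode:
    out.writeStringArray(sniffInfo.seedNodes.toArray(new String[0]));
    out.writeVInt(sniffInfo.maxConnectionsPerCluster);
    out.writeTimeValue(initialConnectionTimeout);
    out.writeVInt(sniffInfo.numNodesConnected);

    // RemoteConnectionInfo(StreamInput input), same order:
    List<String> seedNodes = Arrays.asList(input.readStringArray());
    int connectionsPerCluster = input.readVInt();
    initialConnectionTimeout = input.readTimeValue();
    int numNodesConnected = input.readVInt();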
- */ - out.writeList(emptyList()); - } + out.writeStringArray(new String[0]); out.writeVInt(0); out.writeTimeValue(initialConnectionTimeout); out.writeVInt(0); diff --git a/server/src/main/resources/org/opensearch/bootstrap/test-framework.policy b/server/src/main/resources/org/opensearch/bootstrap/test-framework.policy index bcf0b704374c9..60b704dc12f0c 100644 --- a/server/src/main/resources/org/opensearch/bootstrap/test-framework.policy +++ b/server/src/main/resources/org/opensearch/bootstrap/test-framework.policy @@ -106,6 +106,11 @@ grant codeBase "${codebase.opensearch-rest-client}" { permission java.net.NetPermission "getProxySelector"; }; +grant codeBase "${codebase.httpcore5}" { + // httpcore makes socket connections for rest tests + permission java.net.SocketPermission "*", "connect"; +}; + grant codeBase "${codebase.httpcore-nio}" { // httpcore makes socket connections for rest tests permission java.net.SocketPermission "*", "connect"; @@ -118,6 +123,7 @@ grant codeBase "${codebase.httpasyncclient}" { permission java.net.NetPermission "getProxySelector"; }; + grant codeBase "${codebase.junit-rt.jar}" { // allows IntelliJ IDEA JUnit test runner to control number of test iterations permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; diff --git a/server/src/test/java/org/opensearch/BuildTests.java b/server/src/test/java/org/opensearch/BuildTests.java index eeb6890699fdc..6e6a91419b762 100644 --- a/server/src/test/java/org/opensearch/BuildTests.java +++ b/server/src/test/java/org/opensearch/BuildTests.java @@ -299,26 +299,17 @@ public void testSerializationBWC() throws IOException { new Build(Build.Type.DOCKER, randomAlphaOfLength(6), randomAlphaOfLength(6), randomBoolean(), randomAlphaOfLength(6), "other") ); - final List versions = Version.getDeclaredVersions(LegacyESVersion.class); - final Version post70Version = randomFrom( - versions.stream().filter(v -> v.onOrAfter(LegacyESVersion.V_7_0_0)).collect(Collectors.toList()) - ); + final List versions = Version.getDeclaredVersions(Version.class); + final Version post10OpenSearchVersion = randomFrom( versions.stream().filter(v -> v.onOrAfter(Version.V_1_0_0)).collect(Collectors.toList()) ); - - final WriteableBuild post70 = copyWriteable(dockerBuild, writableRegistry(), WriteableBuild::new, post70Version); final WriteableBuild post10OpenSearch = copyWriteable( dockerBuild, writableRegistry(), WriteableBuild::new, post10OpenSearchVersion ); - - assertThat(post70.build.type(), equalTo(dockerBuild.build.type())); - - assertThat(post70.build.getQualifiedVersion(), equalTo(dockerBuild.build.getQualifiedVersion())); - assertThat(post70.build.getDistribution(), equalTo(dockerBuild.build.getDistribution())); assertThat(post10OpenSearch.build.getQualifiedVersion(), equalTo(dockerBuild.build.getQualifiedVersion())); assertThat(post10OpenSearch.build.getDistribution(), equalTo(dockerBuild.build.getDistribution())); } diff --git a/server/src/test/java/org/opensearch/ExceptionSerializationTests.java b/server/src/test/java/org/opensearch/ExceptionSerializationTests.java index ff2bb77531486..0ca4cdd780f94 100644 --- a/server/src/test/java/org/opensearch/ExceptionSerializationTests.java +++ b/server/src/test/java/org/opensearch/ExceptionSerializationTests.java @@ -393,7 +393,7 @@ public void testSearchContextMissingException() throws IOException { public void testCircuitBreakingException() throws IOException { CircuitBreakingException ex = serialize( new CircuitBreakingException("Too large", 0, 100, 
CircuitBreaker.Durability.TRANSIENT),
-            LegacyESVersion.V_7_0_0
+            Version.V_2_0_0
         );
         assertEquals("Too large", ex.getMessage());
         assertEquals(100, ex.getByteLimit());
diff --git a/server/src/test/java/org/opensearch/VersionTests.java b/server/src/test/java/org/opensearch/VersionTests.java
index 5b3213ded1c02..70bcf343e4c1e 100644
--- a/server/src/test/java/org/opensearch/VersionTests.java
+++ b/server/src/test/java/org/opensearch/VersionTests.java
@@ -415,7 +415,7 @@ public static void assertUnknownVersion(Version version) {
     public void testIsCompatible() {
         assertTrue(isCompatible(Version.CURRENT, Version.CURRENT.minimumCompatibilityVersion()));
-        assertFalse(isCompatible(Version.fromId(2000099), LegacyESVersion.V_7_0_0));
+        assertFalse(isCompatible(Version.fromId(2000099), LegacyESVersion.fromId(7000099)));
         assertFalse(isCompatible(Version.fromId(2000099), LegacyESVersion.fromId(6050099)));
         int currentMajorID = Version.computeID(Version.CURRENT.major, 0, 0, 99);
diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/decommission/awareness/DeleteDecommissionStateRequestTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/decommission/awareness/DeleteDecommissionStateRequestTests.java
new file mode 100644
index 0000000000000..1a95b77cc1024
--- /dev/null
+++ b/server/src/test/java/org/opensearch/action/admin/cluster/decommission/awareness/DeleteDecommissionStateRequestTests.java
@@ -0,0 +1,32 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.action.admin.cluster.decommission.awareness;
+
+import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateRequest;
+import org.opensearch.common.io.stream.BytesStreamOutput;
+import org.opensearch.common.io.stream.StreamInput;
+import org.opensearch.test.OpenSearchTestCase;
+
+import java.io.IOException;
+
+public class DeleteDecommissionStateRequestTests extends OpenSearchTestCase {
+
+    public void testSerialization() throws IOException {
+        final DeleteDecommissionStateRequest originalRequest = new DeleteDecommissionStateRequest();
+
+        final DeleteDecommissionStateRequest cloneRequest;
+        try (BytesStreamOutput out = new BytesStreamOutput()) {
+            originalRequest.writeTo(out);
+            try (StreamInput in = out.bytes().streamInput()) {
+                cloneRequest = new DeleteDecommissionStateRequest(in);
+            }
+        }
+        assertEquals(originalRequest.clusterManagerNodeTimeout(), cloneRequest.clusterManagerNodeTimeout());
+    }
+}
diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/decommission/awareness/DeleteDecommissionStateResponseTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/decommission/awareness/DeleteDecommissionStateResponseTests.java
new file mode 100644
index 0000000000000..085eda3e9d0e7
--- /dev/null
+++ b/server/src/test/java/org/opensearch/action/admin/cluster/decommission/awareness/DeleteDecommissionStateResponseTests.java
@@ -0,0 +1,29 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.action.admin.cluster.decommission.awareness;
+
+import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateResponse;
+import org.opensearch.test.OpenSearchTestCase;
+
+import java.io.IOException;
+
+public class DeleteDecommissionStateResponseTests extends OpenSearchTestCase {
+
+    public void testSerialization() throws IOException {
+        final DeleteDecommissionStateResponse originalResponse = new DeleteDecommissionStateResponse(true);
+
+        final DeleteDecommissionStateResponse deserialized = copyWriteable(
+            originalResponse,
+            writableRegistry(),
+            DeleteDecommissionStateResponse::new
+        );
+        assertEquals(originalResponse, deserialized);
+
+    }
+}
diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/CancellableTasksTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/CancellableTasksTests.java
index 5b2b4f361083b..ffd3a66ad1d48 100644
--- a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/CancellableTasksTests.java
+++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/CancellableTasksTests.java
@@ -40,7 +40,6 @@
 import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksRequest;
 import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
 import org.opensearch.action.support.ActionTestUtils;
-import org.opensearch.action.support.nodes.BaseNodeRequest;
 import org.opensearch.action.support.nodes.BaseNodesRequest;
 import org.opensearch.action.support.replication.ClusterStateCreationUtils;
 import org.opensearch.cluster.node.DiscoveryNode;
@@ -55,6 +54,7 @@
 import org.opensearch.tasks.TaskInfo;
 import org.opensearch.tasks.TaskManager;
 import org.opensearch.threadpool.ThreadPool;
+import org.opensearch.transport.TransportRequest;
 import org.opensearch.transport.TransportService;
 import java.io.IOException;
 import java.util.ArrayList;
@@ -78,7 +78,7 @@ public class CancellableTasksTests extends TaskManagerTestCase {
 
-    public static class CancellableNodeRequest extends BaseNodeRequest {
+    public static class CancellableNodeRequest extends TransportRequest {
         protected String requestName;
 
         public CancellableNodeRequest() {
diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/ResourceAwareTasksTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/ResourceAwareTasksTests.java
index 5d947a743385f..d49dd14492327 100644
--- a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/ResourceAwareTasksTests.java
+++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/ResourceAwareTasksTests.java
@@ -16,7 +16,6 @@
 import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksRequest;
 import org.opensearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
 import org.opensearch.action.support.ActionTestUtils;
-import org.opensearch.action.support.nodes.BaseNodeRequest;
 import org.opensearch.action.support.nodes.BaseNodesRequest;
 import org.opensearch.cluster.service.ClusterService;
 import org.opensearch.common.SuppressForbidden;
@@ -32,6 +31,7 @@
 import org.opensearch.test.tasks.MockTaskManager;
 import org.opensearch.test.tasks.MockTaskManagerListener;
 import org.opensearch.threadpool.ThreadPool;
+import org.opensearch.transport.TransportRequest;
 import org.opensearch.transport.TransportService;
 import java.io.IOException;
@@ -56,7 +56,7 @@ public class ResourceAwareTasksTests extends TaskManagerTestCase {
     private static final
ThreadMXBean threadMXBean = (ThreadMXBean) ManagementFactory.getThreadMXBean(); - public static class ResourceAwareNodeRequest extends BaseNodeRequest { + public static class ResourceAwareNodeRequest extends TransportRequest { protected String requestName; public ResourceAwareNodeRequest() { diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java index 68cf69e30f8a6..8b0c2187d05af 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java @@ -37,7 +37,6 @@ import org.opensearch.action.admin.cluster.node.tasks.cancel.TransportCancelTasksAction; import org.opensearch.action.admin.cluster.node.tasks.list.TransportListTasksAction; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.nodes.BaseNodeRequest; import org.opensearch.action.support.nodes.BaseNodeResponse; import org.opensearch.action.support.nodes.BaseNodesRequest; import org.opensearch.action.support.nodes.BaseNodesResponse; @@ -66,6 +65,7 @@ import org.opensearch.threadpool.RunnableTaskExecutionListener; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportService; import org.opensearch.transport.nio.MockNioTransport; import org.junit.After; @@ -156,8 +156,8 @@ public int failureCount() { /** * Simulates node-based task that can be used to block node tasks so they are guaranteed to be registered by task manager */ - abstract class AbstractTestNodesAction, NodeRequest extends BaseNodeRequest> extends - TransportNodesAction { + abstract class AbstractTestNodesAction, NodeRequest extends TransportRequest> + extends TransportNodesAction { AbstractTestNodesAction( String actionName, diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TestTaskPlugin.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TestTaskPlugin.java index 3bb1957c69fb4..aa0e9511f86ce 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TestTaskPlugin.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TestTaskPlugin.java @@ -41,7 +41,6 @@ import org.opensearch.action.ActionType; import org.opensearch.action.TaskOperationFailure; import org.opensearch.action.support.ActionFilters; -import org.opensearch.action.support.nodes.BaseNodeRequest; import org.opensearch.action.support.nodes.BaseNodeResponse; import org.opensearch.action.support.nodes.BaseNodesRequest; import org.opensearch.action.support.nodes.BaseNodesResponse; @@ -182,7 +181,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } } - public static class NodeRequest extends BaseNodeRequest { + public static class NodeRequest extends TransportRequest { protected final String requestName; protected final boolean shouldBlock; diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java index 7590bf88eeca0..97a045872477d 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java +++ 
b/server/src/test/java/org/opensearch/action/admin/cluster/node/tasks/TransportTasksActionTests.java @@ -44,7 +44,6 @@ import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.ActionTestUtils; import org.opensearch.action.support.PlainActionFuture; -import org.opensearch.action.support.nodes.BaseNodeRequest; import org.opensearch.action.support.nodes.BaseNodesRequest; import org.opensearch.action.support.tasks.BaseTasksRequest; import org.opensearch.action.support.tasks.BaseTasksResponse; @@ -67,6 +66,7 @@ import org.opensearch.tasks.TaskInfo; import org.opensearch.test.tasks.MockTaskManager; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportService; import java.io.IOException; @@ -91,7 +91,7 @@ public class TransportTasksActionTests extends TaskManagerTestCase { - public static class NodeRequest extends BaseNodeRequest { + public static class NodeRequest extends TransportRequest { protected String requestName; public NodeRequest(StreamInput in) throws IOException { diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/state/ClusterStateRequestTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/state/ClusterStateRequestTests.java index 7127e0001592f..672e5ace8b5ae 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/state/ClusterStateRequestTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/state/ClusterStateRequestTests.java @@ -32,7 +32,6 @@ package org.opensearch.action.admin.cluster.state; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.action.support.IndicesOptions; import org.opensearch.common.io.stream.BytesStreamOutput; @@ -64,14 +63,12 @@ public void testSerialization() throws Exception { Version.CURRENT.minimumCompatibilityVersion(), Version.CURRENT ); - // TODO: change version to V_6_6_0 after backporting: - if (testVersion.onOrAfter(LegacyESVersion.V_7_0_0)) { - if (randomBoolean()) { - clusterStateRequest.waitForMetadataVersion(randomLongBetween(1, Long.MAX_VALUE)); - } - if (randomBoolean()) { - clusterStateRequest.waitForTimeout(new TimeValue(randomNonNegativeLong())); - } + + if (randomBoolean()) { + clusterStateRequest.waitForMetadataVersion(randomLongBetween(1, Long.MAX_VALUE)); + } + if (randomBoolean()) { + clusterStateRequest.waitForTimeout(new TimeValue(randomNonNegativeLong())); } BytesStreamOutput output = new BytesStreamOutput(); diff --git a/server/src/test/java/org/opensearch/action/admin/indices/close/CloseIndexRequestTests.java b/server/src/test/java/org/opensearch/action/admin/indices/close/CloseIndexRequestTests.java index 4a90a23cbd2f0..07246f144d95b 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/close/CloseIndexRequestTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/close/CloseIndexRequestTests.java @@ -82,11 +82,7 @@ public void testBwcSerialization() throws Exception { if (out.getVersion().onOrAfter(LegacyESVersion.V_7_7_0) || request.indicesOptions().expandWildcardsHidden()) { assertEquals(request.indicesOptions(), indicesOptions); } - if (in.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - assertEquals(request.waitForActiveShards(), ActiveShardCount.readFrom(in)); - } else { - assertEquals(0, in.available()); - } + assertEquals(request.waitForActiveShards(), ActiveShardCount.readFrom(in)); } } } @@ -100,9 +96,7 @@ public void testBwcSerialization() throws Exception { 
out.writeTimeValue(sample.timeout()); out.writeStringArray(sample.indices()); sample.indicesOptions().writeIndicesOptions(out); - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - sample.waitForActiveShards().writeTo(out); - } + sample.waitForActiveShards().writeTo(out); final CloseIndexRequest deserializedRequest; try (StreamInput in = out.bytes().streamInput()) { @@ -119,11 +113,7 @@ public void testBwcSerialization() throws Exception { if (out.getVersion().onOrAfter(LegacyESVersion.V_7_7_0) || sample.indicesOptions().expandWildcardsHidden()) { assertEquals(sample.indicesOptions(), deserializedRequest.indicesOptions()); } - if (out.getVersion().onOrAfter(LegacyESVersion.V_7_2_0)) { - assertEquals(sample.waitForActiveShards(), deserializedRequest.waitForActiveShards()); - } else { - assertEquals(ActiveShardCount.NONE, deserializedRequest.waitForActiveShards()); - } + assertEquals(sample.waitForActiveShards(), deserializedRequest.waitForActiveShards()); } } } diff --git a/server/src/test/java/org/opensearch/action/search/SearchRequestTests.java b/server/src/test/java/org/opensearch/action/search/SearchRequestTests.java index 19544af63944c..3ffa6d6910548 100644 --- a/server/src/test/java/org/opensearch/action/search/SearchRequestTests.java +++ b/server/src/test/java/org/opensearch/action/search/SearchRequestTests.java @@ -32,7 +32,6 @@ package org.opensearch.action.search; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.support.IndicesOptions; @@ -107,11 +106,7 @@ public void testRandomVersionSerialization() throws IOException { SearchRequest searchRequest = createSearchRequest(); Version version = VersionUtils.randomVersion(random()); SearchRequest deserializedRequest = copyWriteable(searchRequest, namedWriteableRegistry, SearchRequest::new, version); - if (version.before(LegacyESVersion.V_7_0_0)) { - assertTrue(deserializedRequest.isCcsMinimizeRoundtrips()); - } else { - assertEquals(searchRequest.isCcsMinimizeRoundtrips(), deserializedRequest.isCcsMinimizeRoundtrips()); - } + assertEquals(searchRequest.isCcsMinimizeRoundtrips(), deserializedRequest.isCcsMinimizeRoundtrips()); assertEquals(searchRequest.getLocalClusterAlias(), deserializedRequest.getLocalClusterAlias()); assertEquals(searchRequest.getAbsoluteStartMillis(), deserializedRequest.getAbsoluteStartMillis()); assertEquals(searchRequest.isFinalReduce(), deserializedRequest.isFinalReduce()); diff --git a/server/src/test/java/org/opensearch/action/support/nodes/TransportNodesActionTests.java b/server/src/test/java/org/opensearch/action/support/nodes/TransportNodesActionTests.java index 86657a98d9a1d..76142efc60b7d 100644 --- a/server/src/test/java/org/opensearch/action/support/nodes/TransportNodesActionTests.java +++ b/server/src/test/java/org/opensearch/action/support/nodes/TransportNodesActionTests.java @@ -50,6 +50,7 @@ import org.opensearch.test.transport.CapturingTransport; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportRequest; import org.opensearch.transport.TransportService; import org.junit.After; import org.junit.AfterClass; @@ -378,7 +379,7 @@ protected void writeNodesTo(StreamOutput out, List nodes) thro } } - private static class TestNodeRequest extends BaseNodeRequest { + private static class TestNodeRequest extends TransportRequest { TestNodeRequest() {} TestNodeRequest(StreamInput in) throws IOException { 
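The same mechanical migration repeats across CancellableTasksTests, ResourceAwareTasksTests, TaskManagerTestCase, TestTaskPlugin, TransportTasksActionTests and TransportNodesActionTests above: node-level request classes stop extending the removed BaseNodeRequest and extend TransportRequest directly. A minimal sketch of the resulting shape (the class name and field are illustrative, not taken from the patch):

    import org.opensearch.common.io.stream.StreamInput;
    import org.opensearch.common.io.stream.StreamOutput;
    import org.opensearch.transport.TransportRequest;

    import java.io.IOException;

    public class ExampleNodeRequest extends TransportRequest {
        private final String requestName;

        public ExampleNodeRequest(String requestName) {
            this.requestName = requestName;
        }

        public ExampleNodeRequest(StreamInput in) throws IOException {
            super(in); // TransportRequest restores the parent task id
            this.requestName = in.readString();
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            super.writeTo(out); // keep the superclass wire contract on both paths
            out.writeString(requestName);
        }
    }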
diff --git a/server/src/test/java/org/opensearch/action/termvectors/TermVectorsUnitTests.java b/server/src/test/java/org/opensearch/action/termvectors/TermVectorsUnitTests.java index 089dfcaf65517..bf33d7a45f4fb 100644 --- a/server/src/test/java/org/opensearch/action/termvectors/TermVectorsUnitTests.java +++ b/server/src/test/java/org/opensearch/action/termvectors/TermVectorsUnitTests.java @@ -286,7 +286,7 @@ public void testStreamRequestLegacyVersion() throws IOException { // write using older version which contains types ByteArrayOutputStream outBuffer = new ByteArrayOutputStream(); OutputStreamStreamOutput out = new OutputStreamStreamOutput(outBuffer); - out.setVersion(LegacyESVersion.V_7_2_0); + out.setVersion(LegacyESVersion.fromId(7000099)); request.writeTo(out); // First check the type on the stream was written as "_doc" by manually parsing the stream until the type @@ -302,7 +302,7 @@ public void testStreamRequestLegacyVersion() throws IOException { // now read the stream as normal to check it is parsed correct if received from an older node opensearchInBuffer = new ByteArrayInputStream(outBuffer.toByteArray()); opensearchBuffer = new InputStreamStreamInput(opensearchInBuffer); - opensearchBuffer.setVersion(LegacyESVersion.V_7_2_0); + opensearchBuffer.setVersion(LegacyESVersion.fromId(7000099)); TermVectorsRequest req2 = new TermVectorsRequest(opensearchBuffer); assertThat(request.offsets(), equalTo(req2.offsets())); diff --git a/server/src/test/java/org/opensearch/cluster/coordination/CoordinatorTests.java b/server/src/test/java/org/opensearch/cluster/coordination/CoordinatorTests.java index d96c972bc6021..74c5d0fcccbed 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/CoordinatorTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/CoordinatorTests.java @@ -87,6 +87,7 @@ import static org.opensearch.cluster.coordination.NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_SETTING; import static org.opensearch.cluster.coordination.NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_WRITES; import static org.opensearch.cluster.coordination.Reconfigurator.CLUSTER_AUTO_SHRINK_VOTING_CONFIGURATION; +import static org.opensearch.discovery.PeerFinder.DISCOVERY_FIND_PEERS_INTERVAL_DURING_DECOMMISSION_SETTING; import static org.opensearch.discovery.PeerFinder.DISCOVERY_FIND_PEERS_INTERVAL_SETTING; import static org.opensearch.monitor.StatusInfo.Status.HEALTHY; import static org.opensearch.monitor.StatusInfo.Status.UNHEALTHY; @@ -1780,6 +1781,48 @@ public void testImproveConfigurationPerformsVotingConfigExclusionStateCheck() { } } + public void testLocalNodeAlwaysCommissionedWithoutDecommissionedException() { + try (Cluster cluster = new Cluster(randomIntBetween(1, 5))) { + cluster.runRandomly(); + cluster.stabilise(); + for (ClusterNode node : cluster.clusterNodes) { + assertTrue(node.coordinator.localNodeCommissioned()); + } + } + } + + public void testClusterStabilisesForPreviouslyDecommissionedNode() { + try (Cluster cluster = new Cluster(randomIntBetween(1, 5))) { + cluster.runRandomly(); + cluster.stabilise(); + for (ClusterNode node : cluster.clusterNodes) { + assertTrue(node.coordinator.localNodeCommissioned()); + } + final ClusterNode leader = cluster.getAnyLeader(); + + ClusterNode decommissionedNode = cluster.new ClusterNode( + nextNodeIndex.getAndIncrement(), true, leader.nodeSettings, () -> new StatusInfo(HEALTHY, "healthy-info") + ); + decommissionedNode.coordinator.onNodeCommissionStatusChange(false); + 
cluster.clusterNodes.add(decommissionedNode); + + assertFalse(decommissionedNode.coordinator.localNodeCommissioned()); + + cluster.stabilise( + // Interval is updated to decommissioned find peer interval + defaultMillis(DISCOVERY_FIND_PEERS_INTERVAL_DURING_DECOMMISSION_SETTING) + // One message delay to send a join + + DEFAULT_DELAY_VARIABILITY + // Commit a new cluster state with the new node(s). Might be split into multiple commits, and each might need a + // followup reconfiguration + + 3 * 2 * DEFAULT_CLUSTER_STATE_UPDATE_DELAY + ); + + // once cluster stabilises the node joins and would be commissioned + assertTrue(decommissionedNode.coordinator.localNodeCommissioned()); + } + } + private ClusterState buildNewClusterStateWithVotingConfigExclusion( ClusterState currentState, Set newVotingConfigExclusion diff --git a/server/src/test/java/org/opensearch/cluster/coordination/JoinHelperTests.java b/server/src/test/java/org/opensearch/cluster/coordination/JoinHelperTests.java index a3c945cdbac3a..7b21042b2ed4a 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/JoinHelperTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/JoinHelperTests.java @@ -90,7 +90,8 @@ public void testJoinDeduplication() { startJoinRequest -> { throw new AssertionError(); }, Collections.emptyList(), (s, p, r) -> {}, - () -> new StatusInfo(HEALTHY, "info") + () -> new StatusInfo(HEALTHY, "info"), + nodeCommissioned -> {} ); transportService.start(); @@ -230,7 +231,8 @@ private void assertJoinValidationRejectsMismatchedClusterUUID(String actionName, startJoinRequest -> { throw new AssertionError(); }, Collections.emptyList(), (s, p, r) -> {}, - null + null, + nodeCommissioned -> {} ); // registers request handler transportService.start(); transportService.acceptIncomingRequests(); @@ -284,7 +286,8 @@ public void testJoinFailureOnUnhealthyNodes() { startJoinRequest -> { throw new AssertionError(); }, Collections.emptyList(), (s, p, r) -> {}, - () -> nodeHealthServiceStatus.get() + () -> nodeHealthServiceStatus.get(), + nodeCommissioned -> {} ); transportService.start(); diff --git a/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java b/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java index 5aa582a5e73f6..66a3b00f2979d 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/JoinTaskExecutorTests.java @@ -111,18 +111,16 @@ public void testPreventJoinClusterWithUnsupportedNodeVersions() { final Version maxNodeVersion = nodes.getMaxNodeVersion(); final Version minNodeVersion = nodes.getMinNodeVersion(); - if (maxNodeVersion.onOrAfter(LegacyESVersion.V_7_0_0)) { - final Version tooLow = LegacyESVersion.fromString("6.7.0"); - expectThrows(IllegalStateException.class, () -> { - if (randomBoolean()) { - JoinTaskExecutor.ensureNodesCompatibility(tooLow, nodes); - } else { - JoinTaskExecutor.ensureNodesCompatibility(tooLow, minNodeVersion, maxNodeVersion); - } - }); - } + final Version tooLow = LegacyESVersion.fromString("6.7.0"); + expectThrows(IllegalStateException.class, () -> { + if (randomBoolean()) { + JoinTaskExecutor.ensureNodesCompatibility(tooLow, nodes); + } else { + JoinTaskExecutor.ensureNodesCompatibility(tooLow, minNodeVersion, maxNodeVersion); + } + }); - if (minNodeVersion.onOrAfter(LegacyESVersion.V_7_0_0) && minNodeVersion.before(Version.V_3_0_0)) { + if (minNodeVersion.before(Version.V_3_0_0)) { 
Version oldMajor = minNodeVersion.minimumCompatibilityVersion(); expectThrows(IllegalStateException.class, () -> JoinTaskExecutor.ensureMajorVersionBarrier(oldMajor, minNodeVersion)); } @@ -263,6 +261,51 @@ public void testJoinClusterWithDifferentDecommission() { JoinTaskExecutor.ensureNodeCommissioned(discoveryNode, metadata); } + public void testJoinFailedForDecommissionedNode() throws Exception { + final AllocationService allocationService = mock(AllocationService.class); + when(allocationService.adaptAutoExpandReplicas(any())).then(invocationOnMock -> invocationOnMock.getArguments()[0]); + final RerouteService rerouteService = (reason, priority, listener) -> listener.onResponse(null); + + final JoinTaskExecutor joinTaskExecutor = new JoinTaskExecutor(Settings.EMPTY, allocationService, logger, rerouteService, null); + + final DiscoveryNode clusterManagerNode = new DiscoveryNode(UUIDs.base64UUID(), buildNewFakeTransportAddress(), Version.CURRENT); + + DecommissionAttribute decommissionAttribute = new DecommissionAttribute("zone", "zone1"); + DecommissionAttributeMetadata decommissionAttributeMetadata = new DecommissionAttributeMetadata( + decommissionAttribute, + DecommissionStatus.SUCCESSFUL + ); + final ClusterState clusterManagerClusterState = ClusterState.builder(ClusterName.DEFAULT) + .nodes( + DiscoveryNodes.builder() + .add(clusterManagerNode) + .localNodeId(clusterManagerNode.getId()) + .clusterManagerNodeId(clusterManagerNode.getId()) + ) + .metadata(Metadata.builder().decommissionAttributeMetadata(decommissionAttributeMetadata)) + .build(); + + final DiscoveryNode decommissionedNode = new DiscoveryNode( + UUIDs.base64UUID(), + buildNewFakeTransportAddress(), + Collections.singletonMap("zone", "zone1"), + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + + String decommissionedNodeID = decommissionedNode.getId(); + + final ClusterStateTaskExecutor.ClusterTasksResult result = joinTaskExecutor.execute( + clusterManagerClusterState, + List.of(new JoinTaskExecutor.Task(decommissionedNode, "test")) + ); + assertThat(result.executionResults.entrySet(), hasSize(1)); + final ClusterStateTaskExecutor.TaskResult taskResult = result.executionResults.values().iterator().next(); + assertFalse(taskResult.isSuccess()); + assertTrue(taskResult.getFailure() instanceof NodeDecommissionedException); + assertFalse(result.resultingState.getNodes().nodeExists(decommissionedNodeID)); + } + public void testJoinClusterWithDecommissionFailed() { Settings.builder().build(); DecommissionAttribute decommissionAttribute = new DecommissionAttribute("zone", "zone-1"); diff --git a/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java b/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java index c77baba5fe167..18a7b892a424c 100644 --- a/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java +++ b/server/src/test/java/org/opensearch/cluster/coordination/NodeJoinTests.java @@ -39,6 +39,10 @@ import org.opensearch.cluster.OpenSearchAllocationTestCase; import org.opensearch.cluster.block.ClusterBlocks; import org.opensearch.cluster.coordination.CoordinationMetadata.VotingConfiguration; +import org.opensearch.cluster.decommission.DecommissionAttribute; +import org.opensearch.cluster.decommission.DecommissionAttributeMetadata; +import org.opensearch.cluster.decommission.DecommissionStatus; +import org.opensearch.cluster.decommission.NodeDecommissionedException; import org.opensearch.cluster.metadata.Metadata; import 
org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodeRole; @@ -775,6 +779,60 @@ public void testJoinElectedLeaderWithDeprecatedMasterRole() { assertTrue(clusterStateHasNode(node1)); } + public void testJoinFailsWhenDecommissioned() { + DiscoveryNode node0 = newNode(0, true); + DiscoveryNode node1 = newNode(1, true); + long initialTerm = randomLongBetween(1, 10); + long initialVersion = randomLongBetween(1, 10); + setupFakeClusterManagerServiceAndCoordinator( + initialTerm, + initialStateWithDecommissionedAttribute( + initialState(node0, initialTerm, initialVersion, VotingConfiguration.of(node0)), + new DecommissionAttribute("zone", "zone1") + ), + () -> new StatusInfo(HEALTHY, "healthy-info") + ); + assertFalse(isLocalNodeElectedMaster()); + long newTerm = initialTerm + randomLongBetween(1, 10); + joinNodeAndRun(new JoinRequest(node0, newTerm, Optional.of(new Join(node0, node0, newTerm, initialTerm, initialVersion)))); + assertTrue(isLocalNodeElectedMaster()); + assertFalse(clusterStateHasNode(node1)); + joinNodeAndRun(new JoinRequest(node1, newTerm, Optional.of(new Join(node1, node0, newTerm, initialTerm, initialVersion)))); + assertTrue(isLocalNodeElectedMaster()); + assertTrue(clusterStateHasNode(node1)); + DiscoveryNode decommissionedNode = new DiscoveryNode( + "data_2", + 2 + "", + buildNewFakeTransportAddress(), + Collections.singletonMap("zone", "zone1"), + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + long anotherTerm = newTerm + randomLongBetween(1, 10); + + assertThat( + expectThrows( + NodeDecommissionedException.class, + () -> joinNodeAndRun(new JoinRequest(decommissionedNode, anotherTerm, Optional.empty())) + ).getMessage(), + containsString("with current status of decommissioning") + ); + assertFalse(clusterStateHasNode(decommissionedNode)); + + DiscoveryNode node3 = new DiscoveryNode( + "data_3", + 3 + "", + buildNewFakeTransportAddress(), + Collections.singletonMap("zone", "zone2"), + DiscoveryNodeRole.BUILT_IN_ROLES, + Version.CURRENT + ); + long termForNode3 = anotherTerm + randomLongBetween(1, 10); + + joinNodeAndRun(new JoinRequest(node3, termForNode3, Optional.empty())); + assertTrue(clusterStateHasNode(node3)); + } + private boolean isLocalNodeElectedMaster() { return MasterServiceTests.discoveryState(clusterManagerService).nodes().isLocalNodeElectedMaster(); } @@ -782,4 +840,17 @@ private boolean isLocalNodeElectedMaster() { private boolean clusterStateHasNode(DiscoveryNode node) { return node.equals(MasterServiceTests.discoveryState(clusterManagerService).nodes().get(node.getId())); } + + private static ClusterState initialStateWithDecommissionedAttribute( + ClusterState clusterState, + DecommissionAttribute decommissionAttribute + ) { + DecommissionAttributeMetadata decommissionAttributeMetadata = new DecommissionAttributeMetadata( + decommissionAttribute, + DecommissionStatus.SUCCESSFUL + ); + return ClusterState.builder(clusterState) + .metadata(Metadata.builder(clusterState.metadata()).decommissionAttributeMetadata(decommissionAttributeMetadata)) + .build(); + } } diff --git a/server/src/test/java/org/opensearch/cluster/decommission/DecommissionServiceTests.java b/server/src/test/java/org/opensearch/cluster/decommission/DecommissionServiceTests.java index 840ce1634a68e..7dee51b7713f9 100644 --- a/server/src/test/java/org/opensearch/cluster/decommission/DecommissionServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/decommission/DecommissionServiceTests.java @@ -15,9 +15,9 @@ import 
org.mockito.Mockito; import org.opensearch.Version; import org.opensearch.action.ActionListener; +import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateResponse; import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionResponse; import org.opensearch.action.admin.cluster.configuration.ClearVotingConfigExclusionsRequest; -import org.opensearch.action.support.master.AcknowledgedResponse; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.coordination.CoordinationMetadata; @@ -217,9 +217,9 @@ public void testClearClusterDecommissionState() throws InterruptedException { .metadata(Metadata.builder().putCustom(DecommissionAttributeMetadata.TYPE, decommissionAttributeMetadata).build()) .build(); - ActionListener listener = new ActionListener() { + ActionListener listener = new ActionListener<>() { @Override - public void onResponse(AcknowledgedResponse decommissionResponse) { + public void onResponse(DeleteDecommissionStateResponse decommissionResponse) { DecommissionAttributeMetadata metadata = clusterService.state().metadata().custom(DecommissionAttributeMetadata.TYPE); assertNull(metadata); countDownLatch.countDown(); @@ -268,9 +268,9 @@ public void testDeleteDecommissionAttributeClearVotingExclusion() { public void testClusterUpdateTaskForDeletingDecommission() throws InterruptedException { final CountDownLatch countDownLatch = new CountDownLatch(1); - ActionListener listener = new ActionListener<>() { + ActionListener listener = new ActionListener<>() { @Override - public void onResponse(AcknowledgedResponse response) { + public void onResponse(DeleteDecommissionStateResponse response) { assertTrue(response.isAcknowledged()); assertNull(clusterService.state().metadata().decommissionAttributeMetadata()); countDownLatch.countDown(); diff --git a/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexStateServiceTests.java b/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexStateServiceTests.java index fb01a493ff7c3..72b22e0efc09b 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexStateServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/MetadataIndexStateServiceTests.java @@ -32,7 +32,6 @@ package org.opensearch.cluster.metadata; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.action.admin.indices.close.CloseIndexClusterStateUpdateRequest; import org.opensearch.action.admin.indices.close.CloseIndexResponse; @@ -44,9 +43,6 @@ import org.opensearch.cluster.SnapshotsInProgress; import org.opensearch.cluster.block.ClusterBlock; import org.opensearch.cluster.block.ClusterBlocks; -import org.opensearch.cluster.node.DiscoveryNode; -import org.opensearch.cluster.node.DiscoveryNodeRole; -import org.opensearch.cluster.node.DiscoveryNodes; import org.opensearch.cluster.routing.IndexRoutingTable; import org.opensearch.cluster.routing.IndexShardRoutingTable; import org.opensearch.cluster.routing.RoutingTable; @@ -97,7 +93,6 @@ import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; -import static org.hamcrest.Matchers.nullValue; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -181,91 +176,6 @@ public void testCloseRoutingTableWithSnapshottedIndex() { assertThat(updatedState.blocks().hasIndexBlockWithId(index.getName(), INDEX_CLOSED_BLOCK_ID), 
is(true)); } - public void testCloseRoutingTableRemovesRoutingTable() { - final Set nonBlockedIndices = new HashSet<>(); - final Map blockedIndices = new HashMap<>(); - final Map results = new HashMap<>(); - final ClusterBlock closingBlock = MetadataIndexStateService.createIndexClosingBlock(); - - ClusterState state = ClusterState.builder(new ClusterName("testCloseRoutingTableRemovesRoutingTable")).build(); - for (int i = 0; i < randomIntBetween(1, 25); i++) { - final String indexName = "index-" + i; - - if (randomBoolean()) { - state = addOpenedIndex(indexName, randomIntBetween(1, 5), randomIntBetween(0, 5), state); - nonBlockedIndices.add(state.metadata().index(indexName).getIndex()); - } else { - state = addBlockedIndex(indexName, randomIntBetween(1, 5), randomIntBetween(0, 5), state, closingBlock); - final Index index = state.metadata().index(indexName).getIndex(); - blockedIndices.put(index, closingBlock); - if (randomBoolean()) { - results.put(index, new CloseIndexResponse.IndexResult(index)); - } else { - results.put(index, new CloseIndexResponse.IndexResult(index, new Exception("test"))); - } - } - } - - state = ClusterState.builder(state) - .nodes( - DiscoveryNodes.builder(state.nodes()) - .add( - new DiscoveryNode( - "old_node", - buildNewFakeTransportAddress(), - emptyMap(), - new HashSet<>(DiscoveryNodeRole.BUILT_IN_ROLES), - LegacyESVersion.V_7_0_0 - ) - ) - .add( - new DiscoveryNode( - "new_node", - buildNewFakeTransportAddress(), - emptyMap(), - new HashSet<>(DiscoveryNodeRole.BUILT_IN_ROLES), - LegacyESVersion.V_7_2_0 - ) - ) - ) - .build(); - - state = MetadataIndexStateService.closeRoutingTable(state, blockedIndices, results).v1(); - assertThat(state.metadata().indices().size(), equalTo(nonBlockedIndices.size() + blockedIndices.size())); - - for (Index nonBlockedIndex : nonBlockedIndices) { - assertIsOpened(nonBlockedIndex.getName(), state); - assertThat(state.blocks().hasIndexBlockWithId(nonBlockedIndex.getName(), INDEX_CLOSED_BLOCK_ID), is(false)); - } - for (Index blockedIndex : blockedIndices.keySet()) { - if (results.get(blockedIndex).hasFailures() == false) { - IndexMetadata indexMetadata = state.metadata().index(blockedIndex); - assertThat(indexMetadata.getState(), is(IndexMetadata.State.CLOSE)); - Settings indexSettings = indexMetadata.getSettings(); - assertThat(indexSettings.hasValue(MetadataIndexStateService.VERIFIED_BEFORE_CLOSE_SETTING.getKey()), is(false)); - assertThat(state.blocks().hasIndexBlock(blockedIndex.getName(), MetadataIndexStateService.INDEX_CLOSED_BLOCK), is(true)); - assertThat( - "Index must have only 1 block with [id=" + MetadataIndexStateService.INDEX_CLOSED_BLOCK_ID + "]", - state.blocks() - .indices() - .getOrDefault(blockedIndex.getName(), emptySet()) - .stream() - .filter(clusterBlock -> clusterBlock.id() == MetadataIndexStateService.INDEX_CLOSED_BLOCK_ID) - .count(), - equalTo(1L) - ); - assertThat( - "Index routing table should have been removed when closing the index on mixed cluster version", - state.routingTable().index(blockedIndex), - nullValue() - ); - } else { - assertIsOpened(blockedIndex.getName(), state); - assertThat(state.blocks().hasIndexBlock(blockedIndex.getName(), closingBlock), is(true)); - } - } - } - public void testAddIndexClosedBlocks() { final ClusterState initialState = ClusterState.builder(new ClusterName("testAddIndexClosedBlocks")).build(); { diff --git a/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeTests.java b/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeTests.java index 
70a64fc60bdb4..8c30a8ff19c89 100644 --- a/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeTests.java +++ b/server/src/test/java/org/opensearch/cluster/node/DiscoveryNodeTests.java @@ -39,6 +39,7 @@ import org.opensearch.common.settings.Setting; import org.opensearch.common.settings.Settings; import org.opensearch.common.transport.TransportAddress; +import org.opensearch.test.NodeRoles; import org.opensearch.test.OpenSearchTestCase; import java.net.InetAddress; @@ -204,4 +205,10 @@ public void testGetRoleFromRoleNameIsCaseInsensitive() { assertEquals(dynamicRoleName.toLowerCase(Locale.ROOT), dynamicNodeRole.roleName()); assertEquals(dynamicRoleName.toLowerCase(Locale.ROOT), dynamicNodeRole.roleNameAbbreviation()); } + + public void testDiscoveryNodeIsSearchNode() { + final Settings settingWithSearchRole = NodeRoles.onlyRole(DiscoveryNodeRole.SEARCH_ROLE); + final DiscoveryNode node = DiscoveryNode.createLocal(settingWithSearchRole, buildNewFakeTransportAddress(), "node"); + assertThat(node.isSearchNode(), equalTo(true)); + } } diff --git a/server/src/test/java/org/opensearch/cluster/routing/allocation/DiskThresholdSettingsTests.java b/server/src/test/java/org/opensearch/cluster/routing/allocation/DiskThresholdSettingsTests.java index 363484777fe66..5184ca7fe887d 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/allocation/DiskThresholdSettingsTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/allocation/DiskThresholdSettingsTests.java @@ -60,7 +60,6 @@ public void testDefaults() { assertTrue(diskThresholdSettings.includeRelocations()); assertEquals(zeroBytes, diskThresholdSettings.getFreeBytesThresholdFloodStage()); assertEquals(5.0D, diskThresholdSettings.getFreeDiskThresholdFloodStage(), 0.0D); - assertTrue(diskThresholdSettings.isAutoReleaseIndexEnabled()); } public void testUpdate() { diff --git a/server/src/test/java/org/opensearch/common/settings/SettingsTests.java b/server/src/test/java/org/opensearch/common/settings/SettingsTests.java index 6848cd2bbc773..a1b6cd763476a 100644 --- a/server/src/test/java/org/opensearch/common/settings/SettingsTests.java +++ b/server/src/test/java/org/opensearch/common/settings/SettingsTests.java @@ -32,7 +32,6 @@ package org.opensearch.common.settings; -import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchParseException; import org.opensearch.Version; import org.opensearch.common.Strings; @@ -634,7 +633,7 @@ public void testMissingValue() throws Exception { public void testReadWriteArray() throws IOException { BytesStreamOutput output = new BytesStreamOutput(); - output.setVersion(randomFrom(Version.CURRENT, LegacyESVersion.V_7_0_0)); + output.setVersion(randomFrom(Version.CURRENT, Version.V_2_0_0)); Settings settings = Settings.builder().putList("foo.bar", "0", "1", "2", "3").put("foo.bar.baz", "baz").build(); Settings.writeSettingsToStream(settings, output); StreamInput in = StreamInput.wrap(BytesReference.toBytes(output.bytes())); diff --git a/server/src/test/java/org/opensearch/discovery/PeerFinderTests.java b/server/src/test/java/org/opensearch/discovery/PeerFinderTests.java index 5e7dede0309c6..7e7bb2f0a2911 100644 --- a/server/src/test/java/org/opensearch/discovery/PeerFinderTests.java +++ b/server/src/test/java/org/opensearch/discovery/PeerFinderTests.java @@ -807,6 +807,42 @@ public void testReconnectsToDisconnectedNodes() { assertFoundPeers(rebootedOtherNode); } + public void testConnectionAttemptDuringDecommissioning() { + boolean localNodeCommissioned = randomBoolean(); + 
peerFinder.onNodeCommissionStatusChange(localNodeCommissioned); + + long findPeersInterval = peerFinder.getFindPeersInterval().millis(); + + final DiscoveryNode otherNode = newDiscoveryNode("node-1"); + providedAddresses.add(otherNode.getAddress()); + transportAddressConnector.addReachableNode(otherNode); + + peerFinder.activate(lastAcceptedNodes); + runAllRunnableTasks(); + assertFoundPeers(otherNode); + + transportAddressConnector.reachableNodes.clear(); + final DiscoveryNode newNode = new DiscoveryNode("new-node", otherNode.getAddress(), Version.CURRENT); + transportAddressConnector.addReachableNode(newNode); + + connectedNodes.remove(otherNode); + disconnectedNodes.add(otherNode); + + // peer discovery will be delayed now + if (localNodeCommissioned == false) { + deterministicTaskQueue.advanceTime(); + runAllRunnableTasks(); + assertPeersNotDiscovered(newNode); + } + + final long expectedTime = CONNECTION_TIMEOUT_MILLIS + findPeersInterval; + while (deterministicTaskQueue.getCurrentTimeMillis() < expectedTime) { + deterministicTaskQueue.advanceTime(); + runAllRunnableTasks(); + } + assertFoundPeers(newNode); + } + private void respondToRequests(Function responseFactory) { final CapturedRequest[] capturedRequests = capturingTransport.getCapturedRequestsAndClear(); for (final CapturedRequest capturedRequest : capturedRequests) { @@ -828,6 +864,16 @@ private void assertFoundPeers(DiscoveryNode... expectedNodesArray) { assertNotifiedOfAllUpdates(); } + private void assertPeersNotDiscovered(DiscoveryNode... undiscoveredNodesArray) { + final Set undiscoveredNodes = Arrays.stream(undiscoveredNodesArray).collect(Collectors.toSet()); + final List actualNodesList = StreamSupport.stream(peerFinder.getFoundPeers().spliterator(), false) + .collect(Collectors.toList()); + final HashSet actualNodesSet = new HashSet<>(actualNodesList); + Set intersection = new HashSet<>(actualNodesSet); + intersection.retainAll(undiscoveredNodes); + assertEquals(intersection.size(), 0); + } + private void assertNotifiedOfAllUpdates() { final Stream actualNodes = StreamSupport.stream(peerFinder.getFoundPeers().spliterator(), false); final Stream notifiedNodes = StreamSupport.stream(foundPeersFromNotification.spliterator(), false); diff --git a/server/src/test/java/org/opensearch/index/query/GeoShapeQueryBuilderGeoShapeTests.java b/server/src/test/java/org/opensearch/index/query/GeoShapeQueryBuilderGeoShapeTests.java index f588767d5336d..f8f512e5aefc6 100644 --- a/server/src/test/java/org/opensearch/index/query/GeoShapeQueryBuilderGeoShapeTests.java +++ b/server/src/test/java/org/opensearch/index/query/GeoShapeQueryBuilderGeoShapeTests.java @@ -31,7 +31,6 @@ package org.opensearch.index.query; -import org.opensearch.LegacyESVersion; import org.opensearch.common.geo.ShapeRelation; import org.opensearch.common.geo.builders.ShapeBuilder; import org.opensearch.test.geo.RandomShapeGenerator; @@ -73,21 +72,12 @@ protected GeoShapeQueryBuilder doCreateTestQueryBuilder(boolean indexedShape) { } } if (randomBoolean()) { - QueryShardContext context = createShardContext(); - if (context.indexVersionCreated().onOrAfter(LegacyESVersion.V_7_5_0)) { // CONTAINS is only supported from version 7.5 - if (shapeType == RandomShapeGenerator.ShapeType.LINESTRING || shapeType == RandomShapeGenerator.ShapeType.MULTILINESTRING) { - builder.relation(randomFrom(ShapeRelation.DISJOINT, ShapeRelation.INTERSECTS, ShapeRelation.CONTAINS)); - } else { - builder.relation( - randomFrom(ShapeRelation.DISJOINT, ShapeRelation.INTERSECTS, 
ShapeRelation.WITHIN, ShapeRelation.CONTAINS) - ); - } + if (shapeType == RandomShapeGenerator.ShapeType.LINESTRING || shapeType == RandomShapeGenerator.ShapeType.MULTILINESTRING) { + builder.relation(randomFrom(ShapeRelation.DISJOINT, ShapeRelation.INTERSECTS, ShapeRelation.CONTAINS)); } else { - if (shapeType == RandomShapeGenerator.ShapeType.LINESTRING || shapeType == RandomShapeGenerator.ShapeType.MULTILINESTRING) { - builder.relation(randomFrom(ShapeRelation.DISJOINT, ShapeRelation.INTERSECTS)); - } else { - builder.relation(randomFrom(ShapeRelation.DISJOINT, ShapeRelation.INTERSECTS, ShapeRelation.WITHIN)); - } + builder.relation( + randomFrom(ShapeRelation.DISJOINT, ShapeRelation.INTERSECTS, ShapeRelation.WITHIN, ShapeRelation.CONTAINS) + ); } } diff --git a/server/src/test/java/org/opensearch/index/query/RangeQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/RangeQueryBuilderTests.java index 0f451fda7b9fb..22c10844028a9 100644 --- a/server/src/test/java/org/opensearch/index/query/RangeQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/RangeQueryBuilderTests.java @@ -94,7 +94,7 @@ protected RangeQueryBuilder doCreateTestQueryBuilder() { if (createShardContext().getMapperService().fieldType(DATE_FIELD_NAME) != null) { if (randomBoolean()) { // drawing a truly random zoneId here can rarely fail under the following conditions: - // - index versionCreated before V_7_0_0 + // - index versionCreated before legacy V_7_0_0 - // - no "forced" date parser through a format parameter - // - one of the SystemV* time zones that Joda's DateTimeZone parser doesn't know about - // that's why we exclude it here (see #58431) diff --git a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java index 27c0437236f63..470eeea771f2b 100644 --- a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java @@ -327,7 +327,8 @@ public void testShardStateMetaHashCodeEquals() { ShardStateMetadata meta = new ShardStateMetadata( randomBoolean(), randomRealisticUnicodeOfCodepointLengthBetween(1, 10), - allocationId + allocationId, + randomFrom(ShardStateMetadata.IndexDataLocation.values()) ); assertEquals(meta, new ShardStateMetadata(meta.primary, meta.indexUUID, meta.allocationId)); @@ -339,7 +340,12 @@ public void testShardStateMetaHashCodeEquals() { Set<Integer> hashCodes = new HashSet<>(); for (int i = 0; i < 30; i++) { // just a sanity check that we impl hashcode allocationId = randomBoolean() ?
null : randomAllocationId(); - meta = new ShardStateMetadata(randomBoolean(), randomRealisticUnicodeOfCodepointLengthBetween(1, 10), allocationId); + meta = new ShardStateMetadata( + randomBoolean(), + randomRealisticUnicodeOfCodepointLengthBetween(1, 10), + allocationId, + randomFrom(ShardStateMetadata.IndexDataLocation.values()) + ); hashCodes.add(meta.hashCode()); } assertTrue("more than one unique hashcode expected but got: " + hashCodes.size(), hashCodes.size() > 1); diff --git a/server/src/test/java/org/opensearch/index/shard/ShardPathTests.java b/server/src/test/java/org/opensearch/index/shard/ShardPathTests.java index beda468b45fb0..25ec7c7987855 100644 --- a/server/src/test/java/org/opensearch/index/shard/ShardPathTests.java +++ b/server/src/test/java/org/opensearch/index/shard/ShardPathTests.java @@ -35,6 +35,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.env.Environment; import org.opensearch.env.NodeEnvironment; +import org.opensearch.gateway.WriteStateException; import org.opensearch.index.Index; import org.opensearch.test.OpenSearchTestCase; @@ -50,7 +51,7 @@ public void testLoadShardPath() throws IOException { ShardId shardId = new ShardId("foo", "0xDEADBEEF", 0); Path[] paths = env.availableShardPaths(shardId); Path path = randomFrom(paths); - ShardStateMetadata.FORMAT.writeAndCleanup(new ShardStateMetadata(true, "0xDEADBEEF", AllocationId.newInitializing()), path); + writeShardStateMetadata("0xDEADBEEF", path); ShardPath shardPath = ShardPath.loadShardPath(logger, env, shardId, ""); assertEquals(path, shardPath.getDataPath()); assertEquals("0xDEADBEEF", shardPath.getShardId().getIndex().getUUID()); @@ -66,7 +67,7 @@ public void testFailLoadShardPathOnMultiState() throws IOException { ShardId shardId = new ShardId("foo", indexUUID, 0); Path[] paths = env.availableShardPaths(shardId); assumeTrue("This test tests multi data.path but we only got one", paths.length > 1); - ShardStateMetadata.FORMAT.writeAndCleanup(new ShardStateMetadata(true, indexUUID, AllocationId.newInitializing()), paths); + writeShardStateMetadata(indexUUID, paths); Exception e = expectThrows(IllegalStateException.class, () -> ShardPath.loadShardPath(logger, env, shardId, "")); assertThat(e.getMessage(), containsString("more than one shard state found")); } @@ -77,7 +78,7 @@ public void testFailLoadShardPathIndexUUIDMissmatch() throws IOException { ShardId shardId = new ShardId("foo", "foobar", 0); Path[] paths = env.availableShardPaths(shardId); Path path = randomFrom(paths); - ShardStateMetadata.FORMAT.writeAndCleanup(new ShardStateMetadata(true, "0xDEADBEEF", AllocationId.newInitializing()), path); + writeShardStateMetadata("0xDEADBEEF", path); Exception e = expectThrows(IllegalStateException.class, () -> ShardPath.loadShardPath(logger, env, shardId, "")); assertThat(e.getMessage(), containsString("expected: foobar on shard path")); } @@ -121,7 +122,7 @@ public void testGetRootPaths() throws IOException { ShardId shardId = new ShardId("foo", indexUUID, 0); Path[] paths = env.availableShardPaths(shardId); Path path = randomFrom(paths); - ShardStateMetadata.FORMAT.writeAndCleanup(new ShardStateMetadata(true, indexUUID, AllocationId.newInitializing()), path); + writeShardStateMetadata(indexUUID, path); ShardPath shardPath = ShardPath.loadShardPath(logger, env, shardId, customDataPath); boolean found = false; for (Path p : env.nodeDataPaths()) { @@ -148,4 +149,10 @@ public void testGetRootPaths() throws IOException { } } + private static void writeShardStateMetadata(String 
indexUUID, Path... paths) throws WriteStateException { + ShardStateMetadata.FORMAT.writeAndCleanup( + new ShardStateMetadata(true, indexUUID, AllocationId.newInitializing(), ShardStateMetadata.IndexDataLocation.LOCAL), + paths + ); + } } diff --git a/server/src/test/java/org/opensearch/index/similarity/SimilarityServiceTests.java b/server/src/test/java/org/opensearch/index/similarity/SimilarityServiceTests.java index eb666f1206c26..1d7b749433c65 100644 --- a/server/src/test/java/org/opensearch/index/similarity/SimilarityServiceTests.java +++ b/server/src/test/java/org/opensearch/index/similarity/SimilarityServiceTests.java @@ -37,7 +37,7 @@ import org.apache.lucene.search.TermStatistics; import org.apache.lucene.search.similarities.BooleanSimilarity; import org.apache.lucene.search.similarities.Similarity; -import org.opensearch.LegacyESVersion; +import org.opensearch.Version; import org.opensearch.common.settings.Settings; import org.opensearch.index.IndexSettings; import org.opensearch.test.OpenSearchTestCase; @@ -97,7 +97,7 @@ public float score(float freq, long norm) { }; IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> SimilarityService.validateSimilarity(LegacyESVersion.V_7_0_0, negativeScoresSim) + () -> SimilarityService.validateSimilarity(Version.V_2_0_0, negativeScoresSim) ); assertThat(e.getMessage(), Matchers.containsString("Similarities should not return negative scores")); @@ -122,7 +122,7 @@ public float score(float freq, long norm) { }; e = expectThrows( IllegalArgumentException.class, - () -> SimilarityService.validateSimilarity(LegacyESVersion.V_7_0_0, decreasingScoresWithFreqSim) + () -> SimilarityService.validateSimilarity(Version.V_2_0_0, decreasingScoresWithFreqSim) ); assertThat(e.getMessage(), Matchers.containsString("Similarity scores should not decrease when term frequency increases")); @@ -147,7 +147,7 @@ public float score(float freq, long norm) { }; e = expectThrows( IllegalArgumentException.class, - () -> SimilarityService.validateSimilarity(LegacyESVersion.V_7_0_0, increasingScoresWithNormSim) + () -> SimilarityService.validateSimilarity(Version.V_2_0_0, increasingScoresWithNormSim) ); assertThat(e.getMessage(), Matchers.containsString("Similarity scores should not increase when norm increases")); } diff --git a/server/src/test/java/org/opensearch/index/store/FsDirectoryFactoryTests.java b/server/src/test/java/org/opensearch/index/store/FsDirectoryFactoryTests.java index cf8d6677b4227..ce40de0e9aa71 100644 --- a/server/src/test/java/org/opensearch/index/store/FsDirectoryFactoryTests.java +++ b/server/src/test/java/org/opensearch/index/store/FsDirectoryFactoryTests.java @@ -57,6 +57,8 @@ import java.util.Arrays; import java.util.Locale; +import static org.opensearch.test.store.MockFSDirectoryFactory.FILE_SYSTEM_BASED_STORE_TYPES; + public class FsDirectoryFactoryTests extends OpenSearchTestCase { public void testPreload() throws IOException { @@ -170,7 +172,7 @@ public void testStoreDirectory() throws IOException { // default doTestStoreDirectory(tempDir, null, IndexModule.Type.FS); // explicit directory impls - for (IndexModule.Type type : IndexModule.Type.values()) { + for (IndexModule.Type type : FILE_SYSTEM_BASED_STORE_TYPES) { doTestStoreDirectory(tempDir, type.name().toLowerCase(Locale.ROOT), type); } } diff --git a/server/src/test/java/org/opensearch/indices/IndicesModuleTests.java b/server/src/test/java/org/opensearch/indices/IndicesModuleTests.java index ec7ab06ac86a6..9c8ad3917c23f 100644 --- 
a/server/src/test/java/org/opensearch/indices/IndicesModuleTests.java +++ b/server/src/test/java/org/opensearch/indices/IndicesModuleTests.java @@ -32,7 +32,6 @@ package org.opensearch.indices; -import org.opensearch.Version; import org.opensearch.index.mapper.DocCountFieldMapper; import org.opensearch.index.mapper.DataStreamFieldMapper; import org.opensearch.index.mapper.FieldNamesFieldMapper; @@ -51,7 +50,6 @@ import org.opensearch.indices.mapper.MapperRegistry; import org.opensearch.plugins.MapperPlugin; import org.opensearch.test.OpenSearchTestCase; -import org.opensearch.test.VersionUtils; import java.util.ArrayList; import java.util.Arrays; @@ -105,11 +103,9 @@ public Map getMetadataMappers() { public void testBuiltinMappers() { IndicesModule module = new IndicesModule(Collections.emptyList()); { - Version version = VersionUtils.randomVersionBetween(random(), Version.V_2_0_0, Version.CURRENT); assertFalse(module.getMapperRegistry().getMapperParsers().isEmpty()); - assertFalse(module.getMapperRegistry().getMetadataMapperParsers(version).isEmpty()); - Map metadataMapperParsers = module.getMapperRegistry() - .getMetadataMapperParsers(version); + assertFalse(module.getMapperRegistry().getMetadataMapperParsers().isEmpty()); + Map metadataMapperParsers = module.getMapperRegistry().getMetadataMapperParsers(); assertEquals(EXPECTED_METADATA_FIELDS.length, metadataMapperParsers.size()); int i = 0; for (String field : metadataMapperParsers.keySet()) { @@ -117,12 +113,7 @@ public void testBuiltinMappers() { } } { - Version version = VersionUtils.randomVersionBetween( - random(), - Version.V_1_0_0, - VersionUtils.getPreviousVersion(Version.V_2_0_0) - ); - assertEquals(EXPECTED_METADATA_FIELDS.length - 1, module.getMapperRegistry().getMetadataMapperParsers(version).size()); + assertEquals(EXPECTED_METADATA_FIELDS.length, module.getMapperRegistry().getMetadataMapperParsers().size()); } } @@ -132,11 +123,10 @@ public void testBuiltinWithPlugins() { MapperRegistry registry = module.getMapperRegistry(); assertThat(registry.getMapperParsers().size(), greaterThan(noPluginsModule.getMapperRegistry().getMapperParsers().size())); assertThat( - registry.getMetadataMapperParsers(Version.CURRENT).size(), - greaterThan(noPluginsModule.getMapperRegistry().getMetadataMapperParsers(Version.CURRENT).size()) + registry.getMetadataMapperParsers().size(), + greaterThan(noPluginsModule.getMapperRegistry().getMetadataMapperParsers().size()) ); - Map metadataMapperParsers = module.getMapperRegistry() - .getMetadataMapperParsers(Version.CURRENT); + Map metadataMapperParsers = module.getMapperRegistry().getMetadataMapperParsers(); Iterator iterator = metadataMapperParsers.keySet().iterator(); assertEquals(IgnoredFieldMapper.NAME, iterator.next()); String last = null; @@ -213,15 +203,13 @@ public Map getMetadataMappers() { public void testFieldNamesIsLast() { IndicesModule module = new IndicesModule(Collections.emptyList()); - Version version = VersionUtils.randomCompatibleVersion(random(), Version.CURRENT); - List fieldNames = new ArrayList<>(module.getMapperRegistry().getMetadataMapperParsers(version).keySet()); + List fieldNames = new ArrayList<>(module.getMapperRegistry().getMetadataMapperParsers().keySet()); assertEquals(FieldNamesFieldMapper.NAME, fieldNames.get(fieldNames.size() - 1)); } public void testFieldNamesIsLastWithPlugins() { IndicesModule module = new IndicesModule(fakePlugins); - Version version = VersionUtils.randomCompatibleVersion(random(), Version.CURRENT); - List fieldNames = new 
ArrayList<>(module.getMapperRegistry().getMetadataMapperParsers(version).keySet()); + List fieldNames = new ArrayList<>(module.getMapperRegistry().getMetadataMapperParsers().keySet()); assertEquals(FieldNamesFieldMapper.NAME, fieldNames.get(fieldNames.size() - 1)); } diff --git a/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java b/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java index e481384c3d6f3..c39af60650657 100644 --- a/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java +++ b/server/src/test/java/org/opensearch/indices/IndicesServiceTests.java @@ -66,7 +66,6 @@ import org.opensearch.index.mapper.KeywordFieldMapper; import org.opensearch.index.mapper.Mapper; import org.opensearch.index.mapper.MapperService; -import org.opensearch.index.mapper.NestedPathFieldMapper; import org.opensearch.index.shard.IllegalIndexShardStateException; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardState; @@ -79,7 +78,6 @@ import org.opensearch.plugins.Plugin; import org.opensearch.test.OpenSearchSingleNodeTestCase; import org.opensearch.test.IndexSettingsModule; -import org.opensearch.test.VersionUtils; import org.opensearch.test.hamcrest.RegexMatcher; import java.io.IOException; @@ -565,16 +563,9 @@ public void testStatsByShardDoesNotDieFromExpectedExceptions() { public void testIsMetadataField() { IndicesService indicesService = getIndicesService(); - final Version randVersion = VersionUtils.randomIndexCompatibleVersion(random()); - assertFalse(indicesService.isMetadataField(randVersion, randomAlphaOfLengthBetween(10, 15))); + assertFalse(indicesService.isMetadataField(randomAlphaOfLengthBetween(10, 15))); for (String builtIn : IndicesModule.getBuiltInMetadataFields()) { - if (NestedPathFieldMapper.NAME.equals(builtIn) && randVersion.before(Version.V_2_0_0)) { - continue; // nested field mapper does not exist prior to 2.0 - } - assertTrue( - "Expected " + builtIn + " to be a metadata field for version " + randVersion, - indicesService.isMetadataField(randVersion, builtIn) - ); + assertTrue(indicesService.isMetadataField(builtIn)); } } diff --git a/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java b/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java index 6a8999a205be2..da44643de98a5 100644 --- a/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java +++ b/server/src/test/java/org/opensearch/repositories/RepositoriesServiceTests.java @@ -246,11 +246,6 @@ public void getRepositoryData(ActionListener listener) { listener.onResponse(null); } - @Override - public void initializeSnapshot(SnapshotId snapshotId, List indices, Metadata metadata) { - - } - @Override public void finalizeSnapshot( ShardGenerations shardGenerations, diff --git a/server/src/test/java/org/opensearch/rest/action/admin/cluster/RestDeleteDecommissionStateActionTests.java b/server/src/test/java/org/opensearch/rest/action/admin/cluster/RestDeleteDecommissionStateActionTests.java new file mode 100644 index 0000000000000..01f988efdf6eb --- /dev/null +++ b/server/src/test/java/org/opensearch/rest/action/admin/cluster/RestDeleteDecommissionStateActionTests.java @@ -0,0 +1,40 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
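The IndicesModule and IndicesService hunks above make the same mechanical change: with the pre-2.0 version special-casing removed, MapperRegistry#getMetadataMapperParsers and IndicesService#isMetadataField no longer take a Version argument. A minimal sketch of the simplified call site, with generic type parameters restored for readability (this diff's rendering strips them):

```java
// Inside a test, with imports as in IndicesModuleTests.
IndicesModule module = new IndicesModule(Collections.emptyList());
Map<String, MetadataFieldMapper.TypeParser> parsers = module.getMapperRegistry().getMetadataMapperParsers();
// _field_names is still expected to be the last metadata mapper.
List<String> fieldNames = new ArrayList<>(parsers.keySet());
assertEquals(FieldNamesFieldMapper.NAME, fieldNames.get(fieldNames.size() - 1));
```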
+ */ + +package org.opensearch.rest.action.admin.cluster; + +import org.junit.Before; +import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateRequest; +import org.opensearch.rest.RestHandler; +import org.opensearch.rest.RestRequest; +import org.opensearch.test.rest.RestActionTestCase; + +import java.util.List; + +public class RestDeleteDecommissionStateActionTests extends RestActionTestCase { + + private RestDeleteDecommissionStateAction action; + + @Before + public void setupAction() { + action = new RestDeleteDecommissionStateAction(); + controller().registerHandler(action); + } + + public void testRoutes() { + List routes = action.routes(); + RestHandler.Route route = routes.get(0); + assertEquals(route.getMethod(), RestRequest.Method.DELETE); + assertEquals("/_cluster/decommission/awareness", route.getPath()); + } + + public void testCreateRequest() { + DeleteDecommissionStateRequest request = action.createRequest(); + assertNotNull(request); + } +} diff --git a/server/src/test/java/org/opensearch/search/query/QuerySearchResultTests.java b/server/src/test/java/org/opensearch/search/query/QuerySearchResultTests.java index 6b89eb92065b1..1cdc2f166224f 100644 --- a/server/src/test/java/org/opensearch/search/query/QuerySearchResultTests.java +++ b/server/src/test/java/org/opensearch/search/query/QuerySearchResultTests.java @@ -35,16 +35,13 @@ import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.TotalHits; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.action.OriginalIndices; import org.opensearch.action.OriginalIndicesTests; import org.opensearch.action.search.SearchRequest; import org.opensearch.common.Strings; import org.opensearch.common.UUIDs; -import org.opensearch.common.io.stream.NamedWriteableAwareStreamInput; import org.opensearch.common.io.stream.NamedWriteableRegistry; -import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.lucene.search.TopDocsAndMaxScore; import org.opensearch.common.settings.Settings; import org.opensearch.index.shard.ShardId; @@ -52,7 +49,6 @@ import org.opensearch.search.SearchModule; import org.opensearch.search.SearchShardTarget; import org.opensearch.search.aggregations.Aggregations; -import org.opensearch.search.aggregations.InternalAggregations; import org.opensearch.search.aggregations.InternalAggregationsTests; import org.opensearch.search.internal.AliasFilter; import org.opensearch.search.internal.ShardSearchContextId; @@ -60,9 +56,6 @@ import org.opensearch.search.suggest.SuggestTests; import org.opensearch.test.OpenSearchTestCase; -import java.io.IOException; -import java.util.Base64; - import static java.util.Collections.emptyList; public class QuerySearchResultTests extends OpenSearchTestCase { @@ -127,32 +120,6 @@ public void testSerialization() throws Exception { assertEquals(querySearchResult.terminatedEarly(), deserialized.terminatedEarly()); } - public void testReadFromPre_7_1_0() throws IOException { - String message = "AAAAAAAAAGQAAAEAAAB/wAAAAAEBBnN0ZXJtcwVJblhNRgoDBVNhdWpvAAVrS3l3cwVHSVVZaAAFZXRUbEUFZGN0WVoABXhzYnVrAAEDAfoN" - + "A3JhdwUBAAJRAAAAAAAAA30DBnN0ZXJtcwVNdVVFRwoAAAEDAfoNA3JhdwUBAAdDAAAAAAAAA30AAApQVkFhaUxSdHh5TAAAAAAAAAN9AAAKTVRUeUxnd1hyd" - + "y0AAAAAAAADfQAACnZRQXZ3cWp0SmwPAAAAAAAAA30AAApmYXNyUUhNVWZBCwAAAAAAAAN9AAAKT3FIQ2RMZ1JZUwUAAAAAAAADfQAACm9jT05aZmZ4ZmUmAA" - + 
"AAAAAAA30AAApvb0tJTkdvbHdzBnN0ZXJtcwVtRmlmZAoAAAEDAfoNA3JhdwUBAARXAAAAAAAAA30AAApZd3BwQlpBZEhpMQAAAAAAAAN9AAAKREZ3UVpTSXh" - + "DSE4AAAAAAAADfQAAClVMZW1YZGtkSHUUAAAAAAAAA30AAApBUVdKVk1kTlF1BnN0ZXJtcwVxbkJGVgoAAAEDAfoNA3JhdwUBAAYJAAAAAAAAA30AAApBS2NL" - + "U1ZVS25EIQAAAAAAAAN9AAAKWGpCbXZBZmduRhsAAAAAAAADfQAACk54TkJEV3pLRmI7AAAAAAAAA30AAApydkdaZnJycXhWSAAAAAAAAAN9AAAKSURVZ3JhQ" - + "lFHSy4AAAAAAAADfQAACmJmZ0x5YlFlVksAClRJZHJlSkpVc1Y4AAAAAAAAA30DBnN0ZXJtcwVNdVVFRwoAAAEDAfoNA3JhdwUBAAdDAAAAAAAAA30AAApQVk" - + "FhaUxSdHh5TAAAAAAAAAN9AAAKTVRUeUxnd1hydy0AAAAAAAADfQAACnZRQXZ3cWp0SmwPAAAAAAAAA30AAApmYXNyUUhNVWZBCwAAAAAAAAN9AAAKT3FIQ2R" - + "MZ1JZUwUAAAAAAAADfQAACm9jT05aZmZ4ZmUmAAAAAAAAA30AAApvb0tJTkdvbHdzBnN0ZXJtcwVtRmlmZAoAAAEDAfoNA3JhdwUBAARXAAAAAAAAA30AAApZ" - + "d3BwQlpBZEhpMQAAAAAAAAN9AAAKREZ3UVpTSXhDSE4AAAAAAAADfQAAClVMZW1YZGtkSHUUAAAAAAAAA30AAApBUVdKVk1kTlF1BnN0ZXJtcwVxbkJGVgoAA" - + "AEDAfoNA3JhdwUBAAYJAAAAAAAAA30AAApBS2NLU1ZVS25EIQAAAAAAAAN9AAAKWGpCbXZBZmduRhsAAAAAAAADfQAACk54TkJEV3pLRmI7AAAAAAAAA30AAA" - + "pydkdaZnJycXhWSAAAAAAAAAN9AAAKSURVZ3JhQlFHSy4AAAAAAAADfQAACmJmZ0x5YlFlVksACm5rdExLUHp3cGgBCm1heF9idWNrZXQFbmFtZTEBB2J1Y2t" - + "ldDH/A3JhdwEBCm1heF9idWNrZXQFbmFtZTEBB2J1Y2tldDH/A3JhdwEAAAIAAf////8AAAAAAAAAAAAAAAAAAAAAAAAAAAAA"; - byte[] bytes = Base64.getDecoder().decode(message); - try (NamedWriteableAwareStreamInput in = new NamedWriteableAwareStreamInput(StreamInput.wrap(bytes), namedWriteableRegistry)) { - in.setVersion(LegacyESVersion.V_7_0_0); - QuerySearchResult querySearchResult = new QuerySearchResult(in); - assertEquals(100, querySearchResult.getContextId().getId()); - assertTrue(querySearchResult.hasAggs()); - InternalAggregations aggs = querySearchResult.consumeAggs().expand(); - assertEquals(1, aggs.asList().size()); - // We deserialize and throw away top level pipeline aggs - } - } - public void testNullResponse() throws Exception { QuerySearchResult querySearchResult = QuerySearchResult.nullInstance(); QuerySearchResult deserialized = copyWriteable(querySearchResult, namedWriteableRegistry, QuerySearchResult::new, Version.CURRENT); diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle index 40e3a1dc0587d..d01db6376db42 100644 --- a/test/fixtures/hdfs-fixture/build.gradle +++ b/test/fixtures/hdfs-fixture/build.gradle @@ -40,11 +40,11 @@ dependencies { api "commons-codec:commons-codec:${versions.commonscodec}" api "org.apache.logging.log4j:log4j-core:${versions.log4j}" api "io.netty:netty-all:${versions.netty}" - api 'com.google.code.gson:gson:2.9.0' + api 'com.google.code.gson:gson:2.9.1' api "org.bouncycastle:bcpkix-jdk15on:${versions.bouncycastle}" api "com.fasterxml.jackson.jaxrs:jackson-jaxrs-json-provider:${versions.jackson}" api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" api 'net.minidev:json-smart:2.4.8' api "org.mockito:mockito-core:${versions.mockito}" - api "com.google.protobuf:protobuf-java:3.21.2" + api "com.google.protobuf:protobuf-java:3.21.7" } diff --git a/test/framework/src/main/java/org/opensearch/client/RestClientBuilderTestCase.java b/test/framework/src/main/java/org/opensearch/client/RestClientBuilderTestCase.java index 69710b8e5c848..eb5177bc0f39b 100644 --- a/test/framework/src/main/java/org/opensearch/client/RestClientBuilderTestCase.java +++ b/test/framework/src/main/java/org/opensearch/client/RestClientBuilderTestCase.java @@ -36,7 +36,8 @@ import java.util.Map; import joptsimple.internal.Strings; -import org.apache.http.Header; + +import org.apache.hc.core5.http.Header; 
import org.opensearch.test.OpenSearchTestCase; /** diff --git a/test/framework/src/main/java/org/opensearch/index/shard/RestoreOnlyRepository.java b/test/framework/src/main/java/org/opensearch/index/shard/RestoreOnlyRepository.java index 5ed85fedc8cea..2a85fffa8699a 100644 --- a/test/framework/src/main/java/org/opensearch/index/shard/RestoreOnlyRepository.java +++ b/test/framework/src/main/java/org/opensearch/index/shard/RestoreOnlyRepository.java @@ -55,7 +55,6 @@ import java.util.Collection; import java.util.Collections; -import java.util.List; import java.util.Map; import java.util.function.Consumer; import java.util.function.Function; @@ -116,9 +115,6 @@ public void getRepositoryData(ActionListener listener) { ); } - @Override - public void initializeSnapshot(SnapshotId snapshotId, List indices, Metadata metadata) {} - @Override public void finalizeSnapshot( ShardGenerations shardGenerations, diff --git a/test/framework/src/main/java/org/opensearch/repositories/blobstore/AbstractBlobContainerRetriesTestCase.java b/test/framework/src/main/java/org/opensearch/repositories/blobstore/AbstractBlobContainerRetriesTestCase.java index d7b057e5479eb..adaf95ae67a8e 100644 --- a/test/framework/src/main/java/org/opensearch/repositories/blobstore/AbstractBlobContainerRetriesTestCase.java +++ b/test/framework/src/main/java/org/opensearch/repositories/blobstore/AbstractBlobContainerRetriesTestCase.java @@ -34,8 +34,6 @@ import com.sun.net.httpserver.HttpExchange; import com.sun.net.httpserver.HttpServer; -import org.apache.http.ConnectionClosedException; -import org.apache.http.HttpStatus; import org.opensearch.common.Nullable; import org.opensearch.common.SuppressForbidden; import org.opensearch.common.blobstore.BlobContainer; @@ -46,6 +44,8 @@ import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.concurrent.CountDown; import org.opensearch.test.OpenSearchTestCase; +import org.apache.hc.core5.http.ConnectionClosedException; +import org.apache.hc.core5.http.HttpStatus; import org.junit.After; import org.junit.Before; diff --git a/test/framework/src/main/java/org/opensearch/repositories/blobstore/OpenSearchMockAPIBasedRepositoryIntegTestCase.java b/test/framework/src/main/java/org/opensearch/repositories/blobstore/OpenSearchMockAPIBasedRepositoryIntegTestCase.java index f082c7a45a207..28dbcf478eb86 100644 --- a/test/framework/src/main/java/org/opensearch/repositories/blobstore/OpenSearchMockAPIBasedRepositoryIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/repositories/blobstore/OpenSearchMockAPIBasedRepositoryIntegTestCase.java @@ -35,7 +35,8 @@ import com.sun.net.httpserver.HttpExchange; import com.sun.net.httpserver.HttpHandler; import com.sun.net.httpserver.HttpServer; -import org.apache.http.HttpStatus; + +import org.apache.hc.core5.http.HttpStatus; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; diff --git a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java index 1ab7785b17f5e..b20154fff9256 100644 --- a/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/OpenSearchIntegTestCase.java @@ -36,7 +36,8 @@ import com.carrotsearch.randomizedtesting.annotations.TestGroup; import com.carrotsearch.randomizedtesting.generators.RandomNumbers; import 
com.carrotsearch.randomizedtesting.generators.RandomPicks; -import org.apache.http.HttpHost; + +import org.apache.hc.core5.http.HttpHost; import org.apache.lucene.search.Sort; import org.apache.lucene.search.TotalHits; import org.apache.lucene.tests.util.LuceneTestCase; @@ -2344,7 +2345,7 @@ protected static RestClient createRestClient( if (node.getInfo(HttpInfo.class) != null) { TransportAddress publishAddress = node.getInfo(HttpInfo.class).address().publishAddress(); InetSocketAddress address = publishAddress.address(); - hosts.add(new HttpHost(NetworkAddress.format(address.getAddress()), address.getPort(), protocol)); + hosts.add(new HttpHost(protocol, NetworkAddress.format(address.getAddress()), address.getPort())); } } RestClientBuilder builder = RestClient.builder(hosts.toArray(new HttpHost[hosts.size()])); diff --git a/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java b/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java index f2b68b6fdaca0..348ea0a924b70 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/OpenSearchRestTestCase.java @@ -32,15 +32,19 @@ package org.opensearch.test.rest; -import org.apache.http.Header; -import org.apache.http.HttpHost; -import org.apache.http.HttpStatus; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.client.methods.HttpPut; -import org.apache.http.message.BasicHeader; -import org.apache.http.nio.conn.ssl.SSLIOSessionStrategy; -import org.apache.http.ssl.SSLContexts; -import org.apache.http.util.EntityUtils; +import org.apache.hc.client5.http.classic.methods.HttpPost; +import org.apache.hc.client5.http.classic.methods.HttpPut; +import org.apache.hc.client5.http.impl.nio.PoolingAsyncClientConnectionManager; +import org.apache.hc.client5.http.impl.nio.PoolingAsyncClientConnectionManagerBuilder; +import org.apache.hc.client5.http.ssl.ClientTlsStrategyBuilder; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.HttpStatus; +import org.apache.hc.core5.http.io.entity.EntityUtils; +import org.apache.hc.core5.http.message.BasicHeader; +import org.apache.hc.core5.http.nio.ssl.TlsStrategy; +import org.apache.hc.core5.ssl.SSLContexts; +import org.apache.hc.core5.util.Timeout; import org.apache.lucene.util.SetOnce; import org.opensearch.LegacyESVersion; import org.opensearch.Version; @@ -136,7 +140,7 @@ public abstract class OpenSearchRestTestCase extends OpenSearchTestCase { * Convert the entity from a {@link Response} into a map of maps. */ public static Map entityAsMap(Response response) throws IOException { - XContentType xContentType = XContentType.fromMediaType(response.getEntity().getContentType().getValue()); + XContentType xContentType = XContentType.fromMediaType(response.getEntity().getContentType()); // EMPTY and THROW are fine here because `.map` doesn't use named x content or deprecation try ( XContentParser parser = xContentType.xContent() @@ -154,7 +158,7 @@ public static Map entityAsMap(Response response) throws IOExcept * Convert the entity from a {@link Response} into a list of maps. 
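Two 5.x behavior changes recur throughout the REST test hunks: HttpHost's constructor now takes the scheme first, and HttpEntity#getContentType() returns the content-type value directly instead of a Header. A hedged sketch; the host, port, and `response` below are placeholders, not values from this diff:

```java
// 4.x: new HttpHost(hostname, port, scheme)  ->  5.x: scheme comes first
HttpHost host = new HttpHost("https", "127.0.0.1", 9200);

// 4.x: response.getEntity().getContentType().getValue()
// 5.x: the media type string is returned directly, no Header unwrapping
String mediaType = response.getEntity().getContentType();
```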
*/ public static List entityAsList(Response response) throws IOException { - XContentType xContentType = XContentType.fromMediaType(response.getEntity().getContentType().getValue()); + XContentType xContentType = XContentType.fromMediaType(response.getEntity().getContentType()); // EMPTY and THROW are fine here because `.map` doesn't use named x content or deprecation try ( XContentParser parser = xContentType.xContent() @@ -344,7 +348,7 @@ public boolean warningsShouldFailRequest(List warnings) { * Construct an HttpHost from the given host and port */ protected HttpHost buildHttpHost(String host, int port) { - return new HttpHost(host, port, getProtocol()); + return new HttpHost(getProtocol(), host, port); } /** @@ -531,8 +535,8 @@ protected boolean waitForAllSnapshotsWiped() { private void wipeCluster() throws Exception { // Clean up SLM policies before trying to wipe snapshots so that no new ones get started by SLM after wiping - if (nodeVersions.first().onOrAfter(LegacyESVersion.V_7_4_0) && nodeVersions.first().before(Version.V_1_0_0)) { // SLM was introduced - // in version 7.4 + if (nodeVersions.first().before(Version.V_1_0_0)) { // SLM was introduced + // in version 7.4 if (preserveSLMPoliciesUponCompletion() == false) { // Clean up SLM policies before trying to wipe snapshots so that no new ones get started by SLM after wiping deleteAllSLMPolicies(); @@ -845,9 +849,16 @@ protected static void configureClient(RestClientBuilder builder, Settings settin try (InputStream is = Files.newInputStream(path)) { keyStore.load(is, keystorePass.toCharArray()); } - SSLContext sslcontext = SSLContexts.custom().loadTrustMaterial(keyStore, null).build(); - SSLIOSessionStrategy sessionStrategy = new SSLIOSessionStrategy(sslcontext); - builder.setHttpClientConfigCallback(httpClientBuilder -> httpClientBuilder.setSSLStrategy(sessionStrategy)); + final SSLContext sslcontext = SSLContexts.custom().loadTrustMaterial(keyStore, null).build(); + builder.setHttpClientConfigCallback(httpClientBuilder -> { + final TlsStrategy tlsStrategy = ClientTlsStrategyBuilder.create().setSslContext(sslcontext).build(); + + final PoolingAsyncClientConnectionManager connectionManager = PoolingAsyncClientConnectionManagerBuilder.create() + .setTlsStrategy(tlsStrategy) + .build(); + + return httpClientBuilder.setConnectionManager(connectionManager); + }); } catch (KeyStoreException | NoSuchAlgorithmException | KeyManagementException | CertificateException e) { throw new RuntimeException("Error setting up ssl", e); } @@ -864,7 +875,9 @@ protected static void configureClient(RestClientBuilder builder, Settings settin socketTimeoutString == null ? 
"60s" : socketTimeoutString, CLIENT_SOCKET_TIMEOUT ); - builder.setRequestConfigCallback(conf -> conf.setSocketTimeout(Math.toIntExact(socketTimeout.getMillis()))); + builder.setRequestConfigCallback( + conf -> conf.setResponseTimeout(Timeout.ofMilliseconds(Math.toIntExact(socketTimeout.getMillis()))) + ); if (settings.hasValue(CLIENT_PATH_PREFIX)) { builder.setPathPrefix(settings.get(CLIENT_PATH_PREFIX)); } @@ -1082,7 +1095,7 @@ protected static Map getAsMap(final String endpoint) throws IOEx } protected static Map responseAsMap(Response response) throws IOException { - XContentType entityContentType = XContentType.fromMediaType(response.getEntity().getContentType().getValue()); + XContentType entityContentType = XContentType.fromMediaType(response.getEntity().getContentType()); Map responseEntity = XContentHelper.convertToMap( entityContentType.xContent(), response.getEntity().getContent(), diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlDocsTestClient.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlDocsTestClient.java index cd5f1fe168b12..da71d0e078dc0 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlDocsTestClient.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlDocsTestClient.java @@ -32,8 +32,8 @@ package org.opensearch.test.rest.yaml; -import org.apache.http.HttpEntity; -import org.apache.http.HttpHost; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.HttpHost; import org.opensearch.Version; import org.opensearch.client.NodeSelector; import org.opensearch.client.Request; diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestClient.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestClient.java index 56ccb91dc3331..13ede9d44f1ad 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestClient.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestClient.java @@ -32,11 +32,13 @@ package org.opensearch.test.rest.yaml; import com.carrotsearch.randomizedtesting.RandomizedTest; -import org.apache.http.HttpEntity; -import org.apache.http.HttpHost; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.entity.ContentType; -import org.apache.http.util.EntityUtils; + +import org.apache.hc.client5.http.classic.methods.HttpGet; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.HttpHost; +import org.apache.hc.core5.http.ParseException; +import org.apache.hc.core5.http.io.entity.EntityUtils; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.Version; @@ -188,16 +190,20 @@ public ClientYamlTestResponse callApi( if (false == restApi.isBodySupported()) { throw new IllegalArgumentException("body is not supported by [" + restApi.getName() + "] api"); } - String contentType = entity.getContentType().getValue(); + String contentType = entity.getContentType(); // randomly test the GET with source param instead of GET/POST with body - if (sendBodyAsSourceParam(supportedMethods, contentType, entity.getContentLength())) { - logger.debug("sending the request body as source param with GET method"); - queryStringParams.put("source", EntityUtils.toString(entity)); - queryStringParams.put("source_content_type", contentType); - requestMethod = HttpGet.METHOD_NAME; - entity = null; - } else { - 
requestMethod = RandomizedTest.randomFrom(supportedMethods); + try { + if (sendBodyAsSourceParam(supportedMethods, contentType, entity.getContentLength())) { + logger.debug("sending the request body as source param with GET method"); + queryStringParams.put("source", EntityUtils.toString(entity)); + queryStringParams.put("source_content_type", contentType); + requestMethod = HttpGet.METHOD_NAME; + entity = null; + } else { + requestMethod = RandomizedTest.randomFrom(supportedMethods); + } + } catch (final ParseException ex) { + throw new IOException(ex); } } else { if (restApi.isBodyRequired()) { diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestExecutionContext.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestExecutionContext.java index 78818aefe44cc..780c43b6ccc11 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestExecutionContext.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestExecutionContext.java @@ -33,9 +33,9 @@ import com.carrotsearch.randomizedtesting.RandomizedTest; -import org.apache.http.HttpEntity; -import org.apache.http.entity.ByteArrayEntity; -import org.apache.http.entity.ContentType; +import org.apache.hc.core5.http.ContentType; +import org.apache.hc.core5.http.HttpEntity; +import org.apache.hc.core5.http.io.entity.ByteArrayEntity; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.util.BytesRef; diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestResponse.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestResponse.java index 8fc0554e2b31e..1e441e01c5a69 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestResponse.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/ClientYamlTestResponse.java @@ -31,9 +31,9 @@ package org.opensearch.test.rest.yaml; -import org.apache.http.Header; -import org.apache.http.client.methods.HttpHead; -import org.apache.http.util.EntityUtils; +import org.apache.hc.client5.http.classic.methods.HttpHead; +import org.apache.hc.core5.http.Header; +import org.apache.hc.core5.http.io.entity.EntityUtils; import org.opensearch.client.Response; import org.opensearch.common.Strings; import org.opensearch.common.bytes.BytesArray; diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/ObjectPath.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/ObjectPath.java index 473511825ef60..aa70f7883c4b8 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/ObjectPath.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/ObjectPath.java @@ -31,7 +31,7 @@ package org.opensearch.test.rest.yaml; -import org.apache.http.util.EntityUtils; +import org.apache.hc.core5.http.io.entity.EntityUtils; import org.opensearch.client.Response; import org.opensearch.common.bytes.BytesArray; import org.opensearch.common.bytes.BytesReference; diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/OpenSearchClientYamlSuiteTestCase.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/OpenSearchClientYamlSuiteTestCase.java index f228c87186afd..b5449480e38ff 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/OpenSearchClientYamlSuiteTestCase.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/OpenSearchClientYamlSuiteTestCase.java @@ -34,7 +34,8 @@ 
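The configureClient hunk in OpenSearchRestTestCase above replaces the 4.x SSLIOSessionStrategy and socket timeout with a TlsStrategy on a pooling async connection manager and a response timeout. Condensed into one sketch (`sslContext` and the 60s value stand in for the hunk's own variables):

```java
builder.setHttpClientConfigCallback(httpClientBuilder -> {
    // TLS is now supplied through a strategy on the async connection manager.
    final TlsStrategy tlsStrategy = ClientTlsStrategyBuilder.create().setSslContext(sslContext).build();
    final PoolingAsyncClientConnectionManager connectionManager = PoolingAsyncClientConnectionManagerBuilder.create()
        .setTlsStrategy(tlsStrategy)
        .build();
    return httpClientBuilder.setConnectionManager(connectionManager);
});
// The former socket timeout becomes a response timeout on the request config.
builder.setRequestConfigCallback(conf -> conf.setResponseTimeout(Timeout.ofMilliseconds(60_000)));
```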
import com.carrotsearch.randomizedtesting.RandomizedTest; import com.carrotsearch.randomizedtesting.annotations.TimeoutSuite; -import org.apache.http.HttpHost; + +import org.apache.hc.core5.http.HttpHost; import org.apache.lucene.tests.util.TimeUnits; import org.opensearch.Version; import org.opensearch.client.Node; diff --git a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/DoSection.java b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/DoSection.java index f71c67ce456bc..22f4fcc1fde3f 100644 --- a/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/DoSection.java +++ b/test/framework/src/main/java/org/opensearch/test/rest/yaml/section/DoSection.java @@ -34,7 +34,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.client.HasAttributeNodeSelector; import org.opensearch.client.Node; @@ -367,18 +366,6 @@ void checkWarningHeaders(final List warningHeaders, final Version cluste final boolean matches = matcher.matches(); if (matches) { final String message = HeaderWarning.extractWarningValueFromWarningHeader(header, true); - if (clusterManagerVersion.before(LegacyESVersion.V_7_0_0) - && message.equals( - "the default number of shards will change from [5] to [1] in 7.0.0; " - + "if you wish to continue using the default of [5] shards, " - + "you must manage this on the create index request or with an index template" - )) { - /* - * This warning header will come back in the vast majority of our tests that create an index when running against an - * older cluster-manager. Rather than rewrite our tests to assert this warning header, we assume that it is expected. - */ - continue; - } if (message.startsWith("[types removal]")) { // We skip warnings related to types deprecation because they are *everywhere*. 
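One more 5.x wrinkle, visible in the ClientYamlTestClient hunk above: EntityUtils.toString now declares a checked ParseException, which callers that only surface IOException simply rewrap. As a standalone helper sketch:

```java
private static String bodyAsString(HttpEntity entity) throws IOException {
    try {
        return EntityUtils.toString(entity);
    } catch (final ParseException ex) {
        throw new IOException(ex); // same wrapping as the hunk above
    }
}
```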
continue; diff --git a/test/framework/src/main/java/org/opensearch/test/store/MockFSDirectoryFactory.java b/test/framework/src/main/java/org/opensearch/test/store/MockFSDirectoryFactory.java index 47952af1cd06c..e38b62c419334 100644 --- a/test/framework/src/main/java/org/opensearch/test/store/MockFSDirectoryFactory.java +++ b/test/framework/src/main/java/org/opensearch/test/store/MockFSDirectoryFactory.java @@ -63,10 +63,15 @@ import java.io.PrintStream; import java.nio.charset.StandardCharsets; import java.util.Arrays; +import java.util.List; import java.util.Random; import java.util.Set; +import java.util.stream.Collectors; public class MockFSDirectoryFactory implements IndexStorePlugin.DirectoryFactory { + public static final List FILE_SYSTEM_BASED_STORE_TYPES = Arrays.stream(IndexModule.Type.values()) + .filter(t -> (t == IndexModule.Type.REMOTE_SNAPSHOT) == false) + .collect(Collectors.toUnmodifiableList()); public static final Setting RANDOM_IO_EXCEPTION_RATE_ON_OPEN_SETTING = Setting.doubleSetting( "index.store.mock.random.io_exception_rate_on_open", @@ -168,7 +173,7 @@ private Directory randomDirectoryService(Random random, IndexSettings indexSetti .put(indexSettings.getIndexMetadata().getSettings()) .put( IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), - RandomPicks.randomFrom(random, IndexModule.Type.values()).getSettingsKey() + RandomPicks.randomFrom(random, FILE_SYSTEM_BASED_STORE_TYPES).getSettingsKey() ) ) .build(); diff --git a/test/framework/src/main/java/org/opensearch/upgrades/AbstractFullClusterRestartTestCase.java b/test/framework/src/main/java/org/opensearch/upgrades/AbstractFullClusterRestartTestCase.java index d79e1730e16f6..9fb693efa9f8b 100644 --- a/test/framework/src/main/java/org/opensearch/upgrades/AbstractFullClusterRestartTestCase.java +++ b/test/framework/src/main/java/org/opensearch/upgrades/AbstractFullClusterRestartTestCase.java @@ -32,7 +32,6 @@ package org.opensearch.upgrades; -import org.opensearch.LegacyESVersion; import org.opensearch.Version; import org.opensearch.common.Booleans; import org.opensearch.common.xcontent.support.XContentMapValues; @@ -52,10 +51,10 @@ public static boolean isRunningAgainstOldCluster() { /** * @return true if test is running against an old cluster before that last major, in this case - * when System.getProperty("tests.is_old_cluster" == true) and oldClusterVersion is before {@link LegacyESVersion#V_7_0_0} + * when System.getProperty("tests.is_old_cluster" == true) and oldClusterVersion is before {@link Version#V_2_0_0} */ protected final boolean isRunningAgainstAncientCluster() { - return isRunningAgainstOldCluster() && oldClusterVersion.before(LegacyESVersion.V_7_0_0); + return isRunningAgainstOldCluster() && oldClusterVersion.before(Version.V_2_0_0); } public static Version getOldClusterVersion() { diff --git a/test/framework/src/test/java/org/opensearch/test/rest/yaml/ClientYamlTestExecutionContextTests.java b/test/framework/src/test/java/org/opensearch/test/rest/yaml/ClientYamlTestExecutionContextTests.java index 36186ea330021..1947982f19247 100644 --- a/test/framework/src/test/java/org/opensearch/test/rest/yaml/ClientYamlTestExecutionContextTests.java +++ b/test/framework/src/test/java/org/opensearch/test/rest/yaml/ClientYamlTestExecutionContextTests.java @@ -32,7 +32,7 @@ package org.opensearch.test.rest.yaml; -import org.apache.http.HttpEntity; +import org.apache.hc.core5.http.HttpEntity; import org.opensearch.Version; import org.opensearch.client.NodeSelector; import org.opensearch.test.OpenSearchTestCase; diff 
--git a/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/DoSectionTests.java b/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/DoSectionTests.java index 1fb08934c8b8b..eceb78a832710 100644 --- a/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/DoSectionTests.java +++ b/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/DoSectionTests.java @@ -32,7 +32,6 @@ package org.opensearch.test.rest.yaml.section; -import org.apache.http.HttpHost; import org.opensearch.Version; import org.opensearch.client.Node; import org.opensearch.client.NodeSelector; @@ -43,6 +42,7 @@ import org.opensearch.common.xcontent.yaml.YamlXContent; import org.opensearch.test.rest.yaml.ClientYamlTestExecutionContext; import org.opensearch.test.rest.yaml.ClientYamlTestResponse; +import org.apache.hc.core5.http.HttpHost; import org.hamcrest.MatcherAssert; import java.io.IOException;
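Finally, the MockFSDirectoryFactory hunk above narrows random store-type selection so tests never draw the new remote-snapshot store. The constant, with generic type parameters restored for readability (this diff's rendering strips them):

```java
// Every store type except REMOTE_SNAPSHOT counts as file-system based,
// so only those are eligible for random selection in tests.
public static final List<IndexModule.Type> FILE_SYSTEM_BASED_STORE_TYPES = Arrays.stream(IndexModule.Type.values())
    .filter(t -> (t == IndexModule.Type.REMOTE_SNAPSHOT) == false)
    .collect(Collectors.toUnmodifiableList());
```

FsDirectoryFactoryTests and randomDirectoryService then iterate or sample from this list instead of IndexModule.Type.values().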