diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 8b63b291a8a54..3affbbd820774 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,3 +1 @@ -# This should match the owning team set up in https://github.com/orgs/opensearch-project/teams -* @opensearch-project/opensearch-core @reta - +* @reta @anasalkouz @andrross @Bukhtawar @CEHENKLE @dblock @setiah @kartg @kotwanikunal @mch2 @nknize @owaiskazi19 @adnapibar @Rishikesh1159 @ryanbogan @saratvemulapalli @shwetathareja @dreamer-89 @tlfeng @VachaShah @xuezhou25 diff --git a/.github/workflows/gradle-check.yml b/.github/workflows/gradle-check.yml index 5435da8419f5e..9567bcd63bc2e 100644 --- a/.github/workflows/gradle-check.yml +++ b/.github/workflows/gradle-check.yml @@ -8,8 +8,15 @@ on: pull_request_target: types: [opened, synchronize, reopened] +permissions: + contents: read # to fetch code (actions/checkout) + jobs: gradle-check: + permissions: + contents: read # to fetch code (actions/checkout) + pull-requests: write # to create or update comment (peter-evans/create-or-update-comment) + runs-on: ubuntu-latest timeout-minutes: 130 steps: diff --git a/.github/workflows/links.yml b/.github/workflows/links.yml index ca05aee8be378..ac94f5ef5ec5e 100644 --- a/.github/workflows/links.yml +++ b/.github/workflows/links.yml @@ -2,6 +2,8 @@ name: Link Checker on: schedule: - cron: '0 0 * * *' +permissions: + contents: read # to fetch code (actions/checkout) jobs: linkchecker: if: github.repository == 'opensearch-project/OpenSearch' diff --git a/.github/workflows/version.yml b/.github/workflows/version.yml index 42c2d21d106ce..d1b5e90484ec4 100644 --- a/.github/workflows/version.yml +++ b/.github/workflows/version.yml @@ -5,6 +5,7 @@ on: tags: - '*.*.*' +permissions: {} jobs: build: runs-on: ubuntu-latest diff --git a/CHANGELOG.md b/CHANGELOG.md index 7fd16ff6af292..04f3fbeb4b068 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ## [Unreleased 3.0] ### Added +- Hardened token permissions in GitHub workflows ([#4587](https://github.com/opensearch-project/OpenSearch/pull/4587)) - Support for HTTP/2 (server-side) ([#3847](https://github.com/opensearch-project/OpenSearch/pull/3847)) - Add getter for path field in NestedQueryBuilder ([#4636](https://github.com/opensearch-project/OpenSearch/pull/4636)) - Apply reproducible builds configuration for OpenSearch plugins through gradle plugin ([#4746](https://github.com/opensearch-project/OpenSearch/pull/4746)) @@ -13,6 +14,8 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - [Test] Add IAE test for deprecated edgeNGram analyzer name ([#5040](https://github.com/opensearch-project/OpenSearch/pull/5040)) - Allow mmap to use new JDK-19 preview APIs in Apache Lucene 9.4+ ([#5151](https://github.com/opensearch-project/OpenSearch/pull/5151)) - Add feature flag for extensions ([#5211](https://github.com/opensearch-project/OpenSearch/pull/5211)) +- Added jackson dependency to server ([#5366](https://github.com/opensearch-project/OpenSearch/pull/5366)) +- Added experimental extensions to main ([#5347](https://github.com/opensearch-project/OpenSearch/pull/5347)) ### Dependencies - Bumps `log4j-core` from 2.18.0 to 2.19.0 @@ -55,6 +58,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Migrate client transports to Apache HttpClient / Core 5.x ([#4459](https://github.com/opensearch-project/OpenSearch/pull/4459)) - Support remote translog
transfer for request level durability ([#4480](https://github.com/opensearch-project/OpenSearch/pull/4480)) - Changed http code on create index API with bad input raising NotXContentException from 500 to 400 ([#4773](https://github.com/opensearch-project/OpenSearch/pull/4773)) +- Change http code for DecommissioningFailedException from 500 to 400 ([#5283](https://github.com/opensearch-project/OpenSearch/pull/5283)) ### Deprecated @@ -77,12 +81,14 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), - Fix 'org.apache.hc.core5.http.ParseException: Invalid protocol version' under JDK 16+ ([#4827](https://github.com/opensearch-project/OpenSearch/pull/4827)) - Fixed compression support for h2c protocol ([#4944](https://github.com/opensearch-project/OpenSearch/pull/4944)) - Reject bulk requests with invalid actions ([#5299](https://github.com/opensearch-project/OpenSearch/issues/5299)) +- Support OpenSSL Provider with default Netty allocator ([#5460](https://github.com/opensearch-project/OpenSearch/pull/5460)) ### Security ## [Unreleased 2.x] ### Added - Prevent deletion of snapshots that are backing searchable snapshot indexes ([#5069](https://github.com/opensearch-project/OpenSearch/pull/5069)) +- Add max_shard_size parameter for shrink API ([#5229](https://github.com/opensearch-project/OpenSearch/pull/5229)) ### Dependencies - Bumps `bcpg-fips` from 1.0.5.1 to 1.0.7.1 @@ -95,6 +101,8 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), ### Deprecated ### Removed ### Fixed +- Fix 1.x compatibility bug with stored Tasks ([#5412](https://github.com/opensearch-project/OpenSearch/pull/5412)) +- Fix case sensitivity for wildcard queries ([#5462](https://github.com/opensearch-project/OpenSearch/pull/5462)) ### Security [Unreleased 3.0]: https://github.com/opensearch-project/OpenSearch/compare/2.4...HEAD diff --git a/MAINTAINERS.md b/MAINTAINERS.md index 2f54656b2ab59..789e250e10d19 100644 --- a/MAINTAINERS.md +++ b/MAINTAINERS.md @@ -5,7 +5,6 @@ | Maintainer | GitHub ID | Affiliation | | --------------- | --------- | ----------- | -| Abbas Hussain | [abbashus](https://github.com/abbashus) | Amazon | | Anas Alkouz | [anasalkouz](https://github.com/anasalkouz) | Amazon | | Andrew Ross | [andrross](https://github.com/andrross)| Amazon | | Andriy Redko | [reta](https://github.com/reta) | Aiven | @@ -22,8 +21,8 @@ | Rishikesh Pasham | [Rishikesh1159](https://github.com/Rishikesh1159) | Amazon| | Ryan Bogan | [ryanbogan](https://github.com/ryanbogan) | Amazon | | Sarat Vemulapalli | [saratvemulapalli](https://github.com/saratvemulapalli) | Amazon | -| Shweta Thareja |[shwetathareja](https://github.com/shwetathareja) | Amazon | -| Suraj Singh |[dreamer-89](https://github.com/dreamer-89) | Amazon | +| Shweta Thareja | [shwetathareja](https://github.com/shwetathareja) | Amazon | +| Suraj Singh | [dreamer-89](https://github.com/dreamer-89) | Amazon | | Tianli Feng | [tlfeng](https://github.com/tlfeng) | Amazon | | Vacha Shah | [VachaShah](https://github.com/VachaShah) | Amazon | | Xue Zhou | [xuezhou25](https://github.com/xuezhou25) | Amazon | @@ -32,6 +31,7 @@ | Maintainer | GitHub ID | Affiliation | | --------------- | --------- | ----------- | +| Abbas Hussain | [abbashus](https://github.com/abbashus) | Amazon | | Megha Sai Kavikondala | [meghasaik](https://github.com/meghasaik) | Amazon | [This document](https://github.com/opensearch-project/.github/blob/main/MAINTAINERS.md) explains what maintainers do in this repo, and how they should be doing
it. If you're interested in contributing, see [CONTRIBUTING](CONTRIBUTING.md). diff --git a/client/rest-high-level/src/main/java/org/opensearch/client/indices/ResizeRequest.java b/client/rest-high-level/src/main/java/org/opensearch/client/indices/ResizeRequest.java index 2a22c8d7d19e9..ebbd813c9fe15 100644 --- a/client/rest-high-level/src/main/java/org/opensearch/client/indices/ResizeRequest.java +++ b/client/rest-high-level/src/main/java/org/opensearch/client/indices/ResizeRequest.java @@ -39,6 +39,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.xcontent.ToXContentObject; import org.opensearch.common.xcontent.XContentBuilder; +import org.opensearch.common.unit.ByteSizeValue; import java.io.IOException; import java.util.Collections; @@ -58,6 +59,7 @@ public class ResizeRequest extends TimedRequest implements Validatable, ToXConte private final String targetIndex; private Settings settings = Settings.EMPTY; private Set<Alias> aliases = new HashSet<>(); + private ByteSizeValue maxShardSize; /** * Creates a new resize request @@ -155,6 +157,24 @@ public ActiveShardCount getWaitForActiveShards() { return waitForActiveShards; } + /** + * Sets the maximum size of a primary shard in the new shrunken index. + * This parameter can be used to calculate the lowest factor of the source index's shards number + * which satisfies the maximum shard size requirement. + * + * @param maxShardSize the maximum size of a primary shard in the new shrunken index + */ + public void setMaxShardSize(ByteSizeValue maxShardSize) { + this.maxShardSize = maxShardSize; + } + + /** + * Returns the maximum size of a primary shard in the new shrunken index. + */ + public ByteSizeValue getMaxShardSize() { + return maxShardSize; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); diff --git a/client/rest-high-level/src/test/java/org/opensearch/client/IndicesRequestConvertersTests.java b/client/rest-high-level/src/test/java/org/opensearch/client/IndicesRequestConvertersTests.java index 512cc058a64a7..7ed06129dc893 100644 --- a/client/rest-high-level/src/test/java/org/opensearch/client/IndicesRequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/opensearch/client/IndicesRequestConvertersTests.java @@ -79,6 +79,7 @@ import org.opensearch.common.xcontent.XContentType; import org.opensearch.test.OpenSearchTestCase; import org.junit.Assert; +import org.opensearch.common.unit.ByteSizeValue; import java.io.IOException; import java.util.Arrays; @@ -701,6 +702,8 @@ private void resizeTest(ResizeType resizeType, CheckedFunction<ResizeRequest, Request, IOException> function) [...] diff --git a/modules/transport-netty4/src/main/java/org/opensearch/transport/Netty4NioServerSocketChannel.java b/modules/transport-netty4/src/main/java/org/opensearch/transport/Netty4NioServerSocketChannel.java new file mode 100644 [...] +public class Netty4NioServerSocketChannel extends NioServerSocketChannel { [...] + @Override + protected int doReadMessages(List<Object> buf) throws Exception { + SocketChannel ch = SocketUtils.accept(javaChannel()); + + try { + if (ch != null) { + buf.add(new Netty4NioSocketChannel(this, ch)); + return 1; + } + } catch (Throwable t) { + logger.warn("Failed to create a new channel from an accepted socket.", t); + + try { + ch.close(); + } catch (Throwable t2) { + logger.warn("Failed to close a socket.", t2); + } + } + + return 0; + } +} diff --git a/modules/transport-netty4/src/main/java/org/opensearch/transport/NettyAllocator.java b/modules/transport-netty4/src/main/java/org/opensearch/transport/NettyAllocator.java index e25853d864813..f2f6538d305d9 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/transport/NettyAllocator.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/transport/NettyAllocator.java @@ -39,7 +39,6 @@ import io.netty.buffer.UnpooledByteBufAllocator; import
io.netty.channel.Channel; import io.netty.channel.ServerChannel; -import io.netty.channel.socket.nio.NioServerSocketChannel; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.common.Booleans; @@ -181,7 +180,7 @@ public static Class<? extends ServerChannel> getServerChannelType() { if (ALLOCATOR instanceof NoDirectBuffers) { return CopyBytesServerSocketChannel.class; } else { - return NioServerSocketChannel.class; + return Netty4NioServerSocketChannel.class; } } diff --git a/plugins/discovery-ec2/build.gradle b/plugins/discovery-ec2/build.gradle index 1766aa14ea9e9..8a7e48fc671ff 100644 --- a/plugins/discovery-ec2/build.gradle +++ b/plugins/discovery-ec2/build.gradle @@ -46,8 +46,6 @@ dependencies { api "commons-logging:commons-logging:${versions.commonslogging}" api "org.apache.logging.log4j:log4j-1.2-api:${versions.log4j}" api "commons-codec:commons-codec:${versions.commonscodec}" - api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" - api "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" } restResources { diff --git a/plugins/discovery-ec2/licenses/jackson-LICENSE b/plugins/discovery-ec2/licenses/jackson-LICENSE deleted file mode 100644 index f5f45d26a49d6..0000000000000 --- a/plugins/discovery-ec2/licenses/jackson-LICENSE +++ /dev/null @@ -1,8 +0,0 @@ -This copy of Jackson JSON processor streaming parser/generator is licensed under the -Apache (Software) License, version 2.0 ("the License"). -See the License for details about distribution rights, and the -specific rights regarding derivate works. - -You may obtain a copy of the License at: - -http://www.apache.org/licenses/LICENSE-2.0 diff --git a/plugins/discovery-ec2/licenses/jackson-NOTICE b/plugins/discovery-ec2/licenses/jackson-NOTICE deleted file mode 100644 index 4c976b7b4cc58..0000000000000 --- a/plugins/discovery-ec2/licenses/jackson-NOTICE +++ /dev/null @@ -1,20 +0,0 @@ -# Jackson JSON processor - -Jackson is a high-performance, Free/Open Source JSON processing library. -It was originally written by Tatu Saloranta (tatu.saloranta@iki.fi), and has -been in development since 2007. -It is currently developed by a community of developers, as well as supported -commercially by FasterXML.com. - -## Licensing - -Jackson core and extension components may licensed under different licenses. -To find the details that apply to this artifact see the accompanying LICENSE file. -For more information, including possible other licensing options, contact -FasterXML.com (http://fasterxml.com). - -## Credits - -A list of contributors may be found from CREDITS file, which is included -in some artifacts (usually source distributions); but is always available -from the source code management (SCM) system project uses.
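Aside: the setMaxShardSize javadoc in the client ResizeRequest above describes picking the lowest factor of the source index's shard count that keeps every shrunken shard at or under max_shard_size. A minimal sketch of that calculation, under stated assumptions: the helper class and method below are illustrative only and not part of this change set; ByteSizeValue and its getBytes() accessor are real OpenSearch API.

    import org.opensearch.common.unit.ByteSizeValue;

    final class ShrinkFactorSketch {
        // Returns the smallest shard count that (a) divides the source shard
        // count evenly, as shrink requires, and (b) keeps the estimated
        // per-shard size at or under maxShardSize. Falls back to the source
        // shard count when no smaller factor fits.
        static int targetShards(int sourceShards, long sourceSizeInBytes, ByteSizeValue maxShardSize) {
            for (int target = 1; target <= sourceShards; target++) {
                if (sourceShards % target == 0 && sourceSizeInBytes / target <= maxShardSize.getBytes()) {
                    return target;
                }
            }
            return sourceShards;
        }
    }

This fallback matches what the ShrinkIndexIT test later in this diff pins down: a 1-byte max_shard_size yields no reduction, so the target index keeps the source's shard count.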
diff --git a/plugins/discovery-ec2/licenses/jackson-annotations-2.14.1.jar.sha1 b/plugins/discovery-ec2/licenses/jackson-annotations-2.14.1.jar.sha1 deleted file mode 100644 index e43faef9e23ff..0000000000000 --- a/plugins/discovery-ec2/licenses/jackson-annotations-2.14.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2a6ad504d591a7903ffdec76b5b7252819a2d162 \ No newline at end of file diff --git a/plugins/discovery-ec2/licenses/jackson-databind-2.14.1.jar.sha1 b/plugins/discovery-ec2/licenses/jackson-databind-2.14.1.jar.sha1 deleted file mode 100644 index 0e6726927ebac..0000000000000 --- a/plugins/discovery-ec2/licenses/jackson-databind-2.14.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -268524b9056cae1211b9f1f52560ef19347f4d17 \ No newline at end of file diff --git a/plugins/ingest-attachment/build.gradle b/plugins/ingest-attachment/build.gradle index f42b44b56ccb8..0380b5f229838 100644 --- a/plugins/ingest-attachment/build.gradle +++ b/plugins/ingest-attachment/build.gradle @@ -38,10 +38,10 @@ opensearchplugin { } versions << [ - 'tika' : '2.5.0', - 'pdfbox': '2.0.25', - 'poi' : '5.2.2', - 'mime4j': '0.8.3' + 'tika' : '2.6.0', + 'pdfbox': '2.0.27', + 'poi' : '5.2.3', + 'mime4j': '0.8.8' ] dependencies { diff --git a/plugins/ingest-attachment/licenses/apache-mime4j-core-0.8.3.jar.sha1 b/plugins/ingest-attachment/licenses/apache-mime4j-core-0.8.3.jar.sha1 deleted file mode 100644 index 464a34dd97643..0000000000000 --- a/plugins/ingest-attachment/licenses/apache-mime4j-core-0.8.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1179b56c9919c1a8e20d3a528ee4c6cee19bcbe0 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/apache-mime4j-core-0.8.8.jar.sha1 b/plugins/ingest-attachment/licenses/apache-mime4j-core-0.8.8.jar.sha1 new file mode 100644 index 0000000000000..77c36691d36b5 --- /dev/null +++ b/plugins/ingest-attachment/licenses/apache-mime4j-core-0.8.8.jar.sha1 @@ -0,0 +1 @@ +7330de23c52f71617cbec7f1d2760dae32e687cd \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/apache-mime4j-dom-0.8.3.jar.sha1 b/plugins/ingest-attachment/licenses/apache-mime4j-dom-0.8.3.jar.sha1 deleted file mode 100644 index 4f98753aa0af4..0000000000000 --- a/plugins/ingest-attachment/licenses/apache-mime4j-dom-0.8.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e80733714eb6a70895bfc74a9528c658504c2c83 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/apache-mime4j-dom-0.8.8.jar.sha1 b/plugins/ingest-attachment/licenses/apache-mime4j-dom-0.8.8.jar.sha1 new file mode 100644 index 0000000000000..fb9c5fed27162 --- /dev/null +++ b/plugins/ingest-attachment/licenses/apache-mime4j-dom-0.8.8.jar.sha1 @@ -0,0 +1 @@ +e76715563a6bd150f84ccb0adb920aec8faf4779 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/fontbox-2.0.25.jar.sha1 b/plugins/ingest-attachment/licenses/fontbox-2.0.25.jar.sha1 deleted file mode 100644 index 3191976e949f8..0000000000000 --- a/plugins/ingest-attachment/licenses/fontbox-2.0.25.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f6644a1eb1d165eded719a88bf7bdcff91740b98 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/fontbox-2.0.27.jar.sha1 b/plugins/ingest-attachment/licenses/fontbox-2.0.27.jar.sha1 new file mode 100644 index 0000000000000..d578dffbfa3f6 --- /dev/null +++ b/plugins/ingest-attachment/licenses/fontbox-2.0.27.jar.sha1 @@ -0,0 +1 @@ +d08c064d18b2b149da937d15c0d1708cba03f29d \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/pdfbox-2.0.25.jar.sha1 
b/plugins/ingest-attachment/licenses/pdfbox-2.0.25.jar.sha1 deleted file mode 100644 index 165b3649e80bf..0000000000000 --- a/plugins/ingest-attachment/licenses/pdfbox-2.0.25.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c18cd03ff3a2dfc3c4a30d3a35173bd2690bcb92 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/pdfbox-2.0.27.jar.sha1 b/plugins/ingest-attachment/licenses/pdfbox-2.0.27.jar.sha1 new file mode 100644 index 0000000000000..4f670b7f95e8c --- /dev/null +++ b/plugins/ingest-attachment/licenses/pdfbox-2.0.27.jar.sha1 @@ -0,0 +1 @@ +416a9dfce3714116bfdf793b15368df04266845f \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/poi-5.2.2.jar.sha1 b/plugins/ingest-attachment/licenses/poi-5.2.2.jar.sha1 deleted file mode 100644 index d9f58e72c9200..0000000000000 --- a/plugins/ingest-attachment/licenses/poi-5.2.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5513d31545085c33809c4b6553c2009fd19a6016 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/poi-5.2.3.jar.sha1 b/plugins/ingest-attachment/licenses/poi-5.2.3.jar.sha1 new file mode 100644 index 0000000000000..3d8b3daf606ad --- /dev/null +++ b/plugins/ingest-attachment/licenses/poi-5.2.3.jar.sha1 @@ -0,0 +1 @@ +2fb22ae74ad5aea6af1a9c64b9542f2ccf348604 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/poi-ooxml-5.2.2.jar.sha1 b/plugins/ingest-attachment/licenses/poi-ooxml-5.2.2.jar.sha1 deleted file mode 100644 index 7b3abffc1abd5..0000000000000 --- a/plugins/ingest-attachment/licenses/poi-ooxml-5.2.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a201b5bdc92c0fae4bed4b8e5546388c4c2f9eb0 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/poi-ooxml-5.2.3.jar.sha1 b/plugins/ingest-attachment/licenses/poi-ooxml-5.2.3.jar.sha1 new file mode 100644 index 0000000000000..8371593cf0841 --- /dev/null +++ b/plugins/ingest-attachment/licenses/poi-ooxml-5.2.3.jar.sha1 @@ -0,0 +1 @@ +02efd11c940adb18c03eb9ce7ad88fc40ee6a196 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/poi-ooxml-lite-5.2.2.jar.sha1 b/plugins/ingest-attachment/licenses/poi-ooxml-lite-5.2.2.jar.sha1 deleted file mode 100644 index f5137b1e5223e..0000000000000 --- a/plugins/ingest-attachment/licenses/poi-ooxml-lite-5.2.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5df31b69375131fc2163a5557093cb112be90ce1 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/poi-ooxml-lite-5.2.3.jar.sha1 b/plugins/ingest-attachment/licenses/poi-ooxml-lite-5.2.3.jar.sha1 new file mode 100644 index 0000000000000..5c6365876b7be --- /dev/null +++ b/plugins/ingest-attachment/licenses/poi-ooxml-lite-5.2.3.jar.sha1 @@ -0,0 +1 @@ +db113c8e9051b0ff967f4911fa20336c8325a7c5 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/poi-scratchpad-5.2.2.jar.sha1 b/plugins/ingest-attachment/licenses/poi-scratchpad-5.2.2.jar.sha1 deleted file mode 100644 index 568dde5125c3f..0000000000000 --- a/plugins/ingest-attachment/licenses/poi-scratchpad-5.2.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8c5cd5f1b3e7b3656ab983b73bbbf8bf5f14f793 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/poi-scratchpad-5.2.3.jar.sha1 b/plugins/ingest-attachment/licenses/poi-scratchpad-5.2.3.jar.sha1 new file mode 100644 index 0000000000000..3c8f92498f1a4 --- /dev/null +++ b/plugins/ingest-attachment/licenses/poi-scratchpad-5.2.3.jar.sha1 @@ -0,0 +1 @@ +2a7fce47e22b7fedb1b277347ff4fe36d6eda50d \ No newline at end of file diff --git 
a/plugins/ingest-attachment/licenses/tika-core-2.5.0.jar.sha1 b/plugins/ingest-attachment/licenses/tika-core-2.5.0.jar.sha1 deleted file mode 100644 index 419f01c631375..0000000000000 --- a/plugins/ingest-attachment/licenses/tika-core-2.5.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7f9f35e4827726b062ac2b0ad0fd361837a50ac9 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-core-2.6.0.jar.sha1 b/plugins/ingest-attachment/licenses/tika-core-2.6.0.jar.sha1 new file mode 100644 index 0000000000000..c66c2f3f39401 --- /dev/null +++ b/plugins/ingest-attachment/licenses/tika-core-2.6.0.jar.sha1 @@ -0,0 +1 @@ +f6ed6356dd4a9bd269d873f65494376685e6192e \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-langdetect-optimaize-2.5.0.jar.sha1 b/plugins/ingest-attachment/licenses/tika-langdetect-optimaize-2.5.0.jar.sha1 deleted file mode 100644 index a9e47ff8a8a86..0000000000000 --- a/plugins/ingest-attachment/licenses/tika-langdetect-optimaize-2.5.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -649574dca8f19d991ac25894c40284446dc5cf50 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-langdetect-optimaize-2.6.0.jar.sha1 b/plugins/ingest-attachment/licenses/tika-langdetect-optimaize-2.6.0.jar.sha1 new file mode 100644 index 0000000000000..e7bc59bb5ae49 --- /dev/null +++ b/plugins/ingest-attachment/licenses/tika-langdetect-optimaize-2.6.0.jar.sha1 @@ -0,0 +1 @@ +72b784a7bdab0ffde005fa64d15e3f077331d6fc \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-parsers-standard-package-2.5.0.jar.sha1 b/plugins/ingest-attachment/licenses/tika-parsers-standard-package-2.5.0.jar.sha1 deleted file mode 100644 index d648183868034..0000000000000 --- a/plugins/ingest-attachment/licenses/tika-parsers-standard-package-2.5.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2b9268511c34d8a1098f0565438cb8077fcf845d \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-parsers-standard-package-2.6.0.jar.sha1 b/plugins/ingest-attachment/licenses/tika-parsers-standard-package-2.6.0.jar.sha1 new file mode 100644 index 0000000000000..83c0777fcbe8a --- /dev/null +++ b/plugins/ingest-attachment/licenses/tika-parsers-standard-package-2.6.0.jar.sha1 @@ -0,0 +1 @@ +00980e70b1df13c1236b750f0ca1462edd5d7417 \ No newline at end of file diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index 9dbfd5d3fb822..d1f83806607bd 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -61,8 +61,6 @@ dependencies { api 'io.projectreactor.netty:reactor-netty-core:1.0.24' api 'io.projectreactor.netty:reactor-netty-http:1.0.24' api "org.slf4j:slf4j-api:${versions.slf4j}" - api "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" - api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" api "com.fasterxml.jackson.datatype:jackson-datatype-jsr310:${versions.jackson}" api "com.fasterxml.jackson.dataformat:jackson-dataformat-xml:${versions.jackson}" api "com.fasterxml.jackson.module:jackson-module-jaxb-annotations:${versions.jackson}" diff --git a/plugins/repository-azure/licenses/jackson-LICENSE b/plugins/repository-azure/licenses/jackson-LICENSE deleted file mode 100644 index f5f45d26a49d6..0000000000000 --- a/plugins/repository-azure/licenses/jackson-LICENSE +++ /dev/null @@ -1,8 +0,0 @@ -This copy of Jackson JSON processor streaming parser/generator is licensed under the -Apache (Software) License, version 2.0 
("the License"). -See the License for details about distribution rights, and the -specific rights regarding derivate works. - -You may obtain a copy of the License at: - -http://www.apache.org/licenses/LICENSE-2.0 diff --git a/distribution/tools/upgrade-cli/licenses/jackson-LICENSE b/plugins/repository-azure/licenses/jackson-LICENSE.txt similarity index 100% rename from distribution/tools/upgrade-cli/licenses/jackson-LICENSE rename to plugins/repository-azure/licenses/jackson-LICENSE.txt diff --git a/plugins/repository-azure/licenses/jackson-NOTICE b/plugins/repository-azure/licenses/jackson-NOTICE deleted file mode 100644 index 4c976b7b4cc58..0000000000000 --- a/plugins/repository-azure/licenses/jackson-NOTICE +++ /dev/null @@ -1,20 +0,0 @@ -# Jackson JSON processor - -Jackson is a high-performance, Free/Open Source JSON processing library. -It was originally written by Tatu Saloranta (tatu.saloranta@iki.fi), and has -been in development since 2007. -It is currently developed by a community of developers, as well as supported -commercially by FasterXML.com. - -## Licensing - -Jackson core and extension components may licensed under different licenses. -To find the details that apply to this artifact see the accompanying LICENSE file. -For more information, including possible other licensing options, contact -FasterXML.com (http://fasterxml.com). - -## Credits - -A list of contributors may be found from CREDITS file, which is included -in some artifacts (usually source distributions); but is always available -from the source code management (SCM) system project uses. diff --git a/distribution/tools/upgrade-cli/licenses/jackson-NOTICE b/plugins/repository-azure/licenses/jackson-NOTICE.txt similarity index 100% rename from distribution/tools/upgrade-cli/licenses/jackson-NOTICE rename to plugins/repository-azure/licenses/jackson-NOTICE.txt diff --git a/plugins/repository-azure/licenses/jackson-annotations-2.14.1.jar.sha1 b/plugins/repository-azure/licenses/jackson-annotations-2.14.1.jar.sha1 deleted file mode 100644 index e43faef9e23ff..0000000000000 --- a/plugins/repository-azure/licenses/jackson-annotations-2.14.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2a6ad504d591a7903ffdec76b5b7252819a2d162 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/jackson-databind-2.14.1.jar.sha1 b/plugins/repository-azure/licenses/jackson-databind-2.14.1.jar.sha1 deleted file mode 100644 index 0e6726927ebac..0000000000000 --- a/plugins/repository-azure/licenses/jackson-databind-2.14.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -268524b9056cae1211b9f1f52560ef19347f4d17 \ No newline at end of file diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index e5d65c9451c1f..2ff0b4e3765b0 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -66,7 +66,6 @@ dependencies { api 'org.apache.htrace:htrace-core4:4.2.0-incubating' api "org.apache.logging.log4j:log4j-core:${versions.log4j}" api 'org.apache.avro:avro:1.11.1' - api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" api 'com.google.code.gson:gson:2.10' runtimeOnly 'com.google.guava:guava:31.1-jre' api 'com.google.protobuf:protobuf-java:3.21.9' diff --git a/plugins/repository-hdfs/licenses/jackson-databind-2.14.1.jar.sha1 b/plugins/repository-hdfs/licenses/jackson-databind-2.14.1.jar.sha1 deleted file mode 100644 index 0e6726927ebac..0000000000000 --- a/plugins/repository-hdfs/licenses/jackson-databind-2.14.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ 
-268524b9056cae1211b9f1f52560ef19347f4d17 \ No newline at end of file diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle index e207b472ee665..591eb2502b1d8 100644 --- a/plugins/repository-s3/build.gradle +++ b/plugins/repository-s3/build.gradle @@ -54,9 +54,6 @@ dependencies { api "commons-logging:commons-logging:${versions.commonslogging}" api "org.apache.logging.log4j:log4j-1.2-api:${versions.log4j}" api "commons-codec:commons-codec:${versions.commonscodec}" - api "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" - api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" - api "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" api "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor:${versions.jackson}" api "joda-time:joda-time:${versions.joda}" @@ -173,9 +170,9 @@ if (!s3EC2Bucket && !s3EC2BasePath && !s3ECSBucket && !s3ECSBasePath && !s3EKSBu processYamlRestTestResources { Map expansions = [ 'permanent_bucket': s3PermanentBucket, - 'permanent_base_path': s3PermanentBasePath + "_integration_tests", + 'permanent_base_path': s3PermanentBasePath + "_integration_tests_" + BuildParams.testSeed, 'temporary_bucket': s3TemporaryBucket, - 'temporary_base_path': s3TemporaryBasePath + "_integration_tests", + 'temporary_base_path': s3TemporaryBasePath + "_integration_tests_" + BuildParams.testSeed, 'ec2_bucket': s3EC2Bucket, 'ec2_base_path': s3EC2BasePath, 'ecs_bucket': s3ECSBucket, diff --git a/plugins/repository-s3/licenses/jackson-LICENSE b/plugins/repository-s3/licenses/jackson-LICENSE deleted file mode 100644 index f5f45d26a49d6..0000000000000 --- a/plugins/repository-s3/licenses/jackson-LICENSE +++ /dev/null @@ -1,8 +0,0 @@ -This copy of Jackson JSON processor streaming parser/generator is licensed under the -Apache (Software) License, version 2.0 ("the License"). -See the License for details about distribution rights, and the -specific rights regarding derivate works. - -You may obtain a copy of the License at: - -http://www.apache.org/licenses/LICENSE-2.0 diff --git a/plugins/repository-s3/licenses/jackson-NOTICE b/plugins/repository-s3/licenses/jackson-NOTICE deleted file mode 100644 index 4c976b7b4cc58..0000000000000 --- a/plugins/repository-s3/licenses/jackson-NOTICE +++ /dev/null @@ -1,20 +0,0 @@ -# Jackson JSON processor - -Jackson is a high-performance, Free/Open Source JSON processing library. -It was originally written by Tatu Saloranta (tatu.saloranta@iki.fi), and has -been in development since 2007. -It is currently developed by a community of developers, as well as supported -commercially by FasterXML.com. - -## Licensing - -Jackson core and extension components may licensed under different licenses. -To find the details that apply to this artifact see the accompanying LICENSE file. -For more information, including possible other licensing options, contact -FasterXML.com (http://fasterxml.com). - -## Credits - -A list of contributors may be found from CREDITS file, which is included -in some artifacts (usually source distributions); but is always available -from the source code management (SCM) system project uses. 
diff --git a/plugins/repository-s3/licenses/jackson-annotations-2.14.1.jar.sha1 b/plugins/repository-s3/licenses/jackson-annotations-2.14.1.jar.sha1 deleted file mode 100644 index e43faef9e23ff..0000000000000 --- a/plugins/repository-s3/licenses/jackson-annotations-2.14.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2a6ad504d591a7903ffdec76b5b7252819a2d162 \ No newline at end of file diff --git a/plugins/repository-s3/licenses/jackson-databind-2.14.1.jar.sha1 b/plugins/repository-s3/licenses/jackson-databind-2.14.1.jar.sha1 deleted file mode 100644 index 0e6726927ebac..0000000000000 --- a/plugins/repository-s3/licenses/jackson-databind-2.14.1.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -268524b9056cae1211b9f1f52560ef19347f4d17 \ No newline at end of file diff --git a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/SystemIndicesUpgradeIT.java b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/SystemIndicesUpgradeIT.java index 8bebb3881e3fd..2d26238763a09 100644 --- a/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/SystemIndicesUpgradeIT.java +++ b/qa/rolling-upgrade/src/test/java/org/opensearch/upgrades/SystemIndicesUpgradeIT.java @@ -32,14 +32,16 @@ package org.opensearch.upgrades; -import org.opensearch.LegacyESVersion; -import org.opensearch.Version; +import org.hamcrest.MatcherAssert; import org.opensearch.client.Request; +import org.opensearch.client.Response; import org.opensearch.client.ResponseException; import org.opensearch.test.XContentTestUtils.JsonMapView; +import java.io.IOException; import java.util.Map; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; @@ -62,25 +64,7 @@ public void testSystemIndicesUpgrades() throws Exception { "{\"f1\": \"v1\", \"f2\": \"v2\"}\n"); client().performRequest(bulk); - // start a async reindex job - Request reindex = new Request("POST", "/_reindex"); - reindex.setJsonEntity( - "{\n" + - " \"source\":{\n" + - " \"index\":\"test_index_old\"\n" + - " },\n" + - " \"dest\":{\n" + - " \"index\":\"test_index_reindex\"\n" + - " }\n" + - "}"); - reindex.addParameter("wait_for_completion", "false"); - Map<String, Object> response = entityAsMap(client().performRequest(reindex)); - String taskId = (String) response.get("task"); - - // wait for task - Request getTask = new Request("GET", "/_tasks/" + taskId); - getTask.addParameter("wait_for_completion", "true"); - client().performRequest(getTask); + createAndVerifyStoredTask(); // make sure .tasks index exists Request getTasksIndex = new Request("GET", "/.tasks"); @@ -97,6 +81,8 @@ public void testSystemIndicesUpgrades() throws Exception { } }); } else if (CLUSTER_TYPE == ClusterType.UPGRADED) { + createAndVerifyStoredTask(); + assertBusy(() -> { Request clusterStateRequest = new Request("GET", "/_cluster/state/metadata"); Map<String, Object> indices = new JsonMapView(entityAsMap(client().performRequest(clusterStateRequest))) @@ -115,4 +101,29 @@ public void testSystemIndicesUpgrades() throws Exception { }); } } + + /** + * Completed tasks get persisted into the .tasks index, so this method waits + * until the task is completed in order to verify that it has been successfully + * written to the index and can be retrieved.
+ */ + private static void createAndVerifyStoredTask() throws Exception { + // Use update by query to create an async task + final Request updateByQueryRequest = new Request("POST", "/test_index_old/_update_by_query"); + updateByQueryRequest.addParameter("wait_for_completion", "false"); + final Response updateByQueryResponse = client().performRequest(updateByQueryRequest); + MatcherAssert.assertThat(updateByQueryResponse.getStatusLine().getStatusCode(), equalTo(200)); + final String taskId = (String) entityAsMap(updateByQueryResponse).get("task"); + + // wait for task to complete + waitUntil(() -> { + try { + final Response getTaskResponse = client().performRequest(new Request("GET", "/_tasks/" + taskId)); + MatcherAssert.assertThat(getTaskResponse.getStatusLine().getStatusCode(), equalTo(200)); + return (Boolean) entityAsMap(getTaskResponse).get("completed"); + } catch (IOException e) { + throw new AssertionError(e); + } + }); + } } diff --git a/qa/smoke-test-http/src/test/java/org/opensearch/http/AwarenessAttributeDecommissionRestIT.java b/qa/smoke-test-http/src/test/java/org/opensearch/http/AwarenessAttributeDecommissionRestIT.java new file mode 100644 index 0000000000000..4d9115b8962ea --- /dev/null +++ b/qa/smoke-test-http/src/test/java/org/opensearch/http/AwarenessAttributeDecommissionRestIT.java @@ -0,0 +1,101 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.http; + +import org.opensearch.action.admin.cluster.shards.routing.weighted.put.ClusterPutWeightedRoutingResponse; +import org.opensearch.client.Request; +import org.opensearch.client.Response; +import org.opensearch.client.ResponseException; +import org.opensearch.cluster.node.DiscoveryNodeRole; +import org.opensearch.cluster.routing.WeightedRouting; +import org.opensearch.common.settings.Settings; +import org.opensearch.rest.RestStatus; +import org.opensearch.test.OpenSearchIntegTestCase; + +import java.io.IOException; +import java.util.List; +import java.util.Map; + +import static org.opensearch.test.NodeRoles.onlyRole; + +@OpenSearchIntegTestCase.ClusterScope(scope = OpenSearchIntegTestCase.Scope.TEST, numDataNodes = 0) +public class AwarenessAttributeDecommissionRestIT extends HttpSmokeTestCase { + + public void testRestStatusForDecommissioningFailedException() { + internalCluster().startNodes(3); + Request request = new Request("PUT", "/_cluster/decommission/awareness/zone/zone-1"); + ResponseException exception = expectThrows( + ResponseException.class, + () -> getRestClient().performRequest(request) + ); + assertEquals(exception.getResponse().getStatusLine().getStatusCode(), RestStatus.BAD_REQUEST.getStatus()); + assertTrue(exception.getMessage().contains("invalid awareness attribute requested for decommissioning")); + } + + public void testRestStatusForAcknowledgedDecommission() throws IOException { + Settings commonSettings = Settings.builder() + .put("cluster.routing.allocation.awareness.attributes", "zone") + .put("cluster.routing.allocation.awareness.force.zone.values", "a,b,c") + .build(); + + logger.info("--> start 3 cluster manager nodes on zones 'a' & 'b' & 'c'"); + List<String> clusterManagerNodes = internalCluster().startNodes( + Settings.builder() + .put(commonSettings) + .put("node.attr.zone", "a") + .put(onlyRole(commonSettings, DiscoveryNodeRole.CLUSTER_MANAGER_ROLE)) + .build(), + Settings.builder() + .put(commonSettings)
.put("node.attr.zone", "b") + .put(onlyRole(commonSettings, DiscoveryNodeRole.CLUSTER_MANAGER_ROLE)) + .build(), + Settings.builder() + .put(commonSettings) + .put("node.attr.zone", "c") + .put(onlyRole(commonSettings, DiscoveryNodeRole.CLUSTER_MANAGER_ROLE)) + .build() + ); + + logger.info("--> start 3 data nodes on zones 'a' & 'b' & 'c'"); + List dataNodes = internalCluster().startNodes( + Settings.builder() + .put(commonSettings) + .put("node.attr.zone", "a") + .put(onlyRole(commonSettings, DiscoveryNodeRole.DATA_ROLE)) + .build(), + Settings.builder() + .put(commonSettings) + .put("node.attr.zone", "b") + .put(onlyRole(commonSettings, DiscoveryNodeRole.DATA_ROLE)) + .build(), + Settings.builder() + .put(commonSettings) + .put("node.attr.zone", "c") + .put(onlyRole(commonSettings, DiscoveryNodeRole.DATA_ROLE)) + .build() + ); + + ensureStableCluster(6); + logger.info("--> setting shard routing weights for weighted round robin"); + Map weights = Map.of("a", 1.0, "b", 1.0, "c", 0.0); + WeightedRouting weightedRouting = new WeightedRouting("zone", weights); + + ClusterPutWeightedRoutingResponse weightedRoutingResponse = client().admin() + .cluster() + .prepareWeightedRouting() + .setWeightedRouting(weightedRouting) + .get(); + assertTrue(weightedRoutingResponse.isAcknowledged()); + + Request request = new Request("PUT", "/_cluster/decommission/awareness/zone/c"); + Response response = getRestClient().performRequest(request); + assertEquals(response.getStatusLine().getStatusCode(), RestStatus.OK.getStatus()); + } +} diff --git a/release-notes/opensearch.release-notes-1.3.7.md b/release-notes/opensearch.release-notes-1.3.7.md new file mode 100644 index 0000000000000..b8330b5bfcd7d --- /dev/null +++ b/release-notes/opensearch.release-notes-1.3.7.md @@ -0,0 +1,10 @@ +## 2022-11-30 Version 1.3.7 Release Notes + +### Upgrades +* Upgrade netty to 4.1.84.Final ([#4919](https://github.com/opensearch-project/OpenSearch/pull/4919)) +* OpenJDK Update (October 2022 Patch releases) ([#5016](https://github.com/opensearch-project/OpenSearch/pull/5016)) +* Upgrade com.netflix.nebula:nebula-publishing-plugin to 4.6.0 and gradle-docker-compose-plugin to 0.14.12 ([#5316](https://github.com/opensearch-project/OpenSearch/pull/5136)) +* Updated Jackson to 2.14.1 ([#5356](https://github.com/opensearch-project/OpenSearch/pull/5356)) + +### Bug Fixes +* Fixed error handling while reading analyzer mapping rules ([#5149](https://github.com/opensearch-project/OpenSearch/pull/5149)) diff --git a/release-notes/opensearch.release-notes-2.4.1.md b/release-notes/opensearch.release-notes-2.4.1.md new file mode 100644 index 0000000000000..cc4278ecf041e --- /dev/null +++ b/release-notes/opensearch.release-notes-2.4.1.md @@ -0,0 +1,22 @@ +## 2022-12-07 Version 2.4.1 Release Notes + +### Bug Fixes +* Fix 1.x compatibility bug with stored Tasks ([#5412](https://github.com/opensearch-project/opensearch/pull/5412)) ([#5440](https://github.com/opensearch-project/opensearch/pull/5440)) +* Use BuildParams.isCi() instead of checking env var ([#5368](https://github.com/opensearch-project/opensearch/pull/5368)) ([#5373](https://github.com/opensearch-project/opensearch/pull/5373)) +* [BUG] org.opensearch.repositories.s3.RepositoryS3ClientYamlTestSuiteIT/test {yaml=repository_s3/20_repository_permanent_credentials/Snapshot and Restore with repository-s3 using permanent credentials} flaky ([#5325](https://github.com/opensearch-project/opensearch/pull/5325)) ([#5336](https://github.com/opensearch-project/opensearch/pull/5336)) +* [BUG] 
Gradle Check Failed on Windows due to JDK19 pulling by gradle ([#5188](https://github.com/opensearch-project/opensearch/pull/5188)) ([#5192](https://github.com/opensearch-project/opensearch/pull/5192)) +* Fix test to use a file from another temp directory ([#5158](https://github.com/opensearch-project/opensearch/pull/5158)) ([#5163](https://github.com/opensearch-project/opensearch/pull/5163)) +* Fix boundary condition in indexing pressure test ([#5168](https://github.com/opensearch-project/opensearch/pull/5168)) ([#5182](https://github.com/opensearch-project/opensearch/pull/5182)) +* [Backport 2.x] Fix: org.opensearch.clustermanager.ClusterManagerTaskThrottlingIT is flaky. ([#5153](https://github.com/opensearch-project/opensearch/pull/5153)) ([#5171](https://github.com/opensearch-project/opensearch/pull/5171)) +* [Backport 2.4] Raise error on malformed CSV ([#5141](https://github.com/opensearch-project/opensearch/pull/5141)) + +### Features/Enhancements +* Change the output error message back to use OpenSearchException in the cause chain ([#5081](https://github.com/opensearch-project/opensearch/pull/5081)) ([#5085](https://github.com/opensearch-project/opensearch/pull/5085)) +* Revert changes in AbstractPointGeometryFieldMapper ([#5250](https://github.com/opensearch-project/opensearch/pull/5250)) +* Add support for skipping changelog ([#5088](https://github.com/opensearch-project/opensearch/pull/5088)) ([#5160](https://github.com/opensearch-project/opensearch/pull/5160)) +* [Backport 2.4] Revert "Cluster manager task throttling feature [Final PR] ([#5071](https://github.com/opensearch-project/opensearch/pull/5071)) ([#5203](https://github.com/opensearch-project/opensearch/pull/5203)) + +### Maintenance +* Update Apache Lucene to 9.4.2 ([#5354](https://github.com/opensearch-project/opensearch/pull/5354)) ([#5361](https://github.com/opensearch-project/opensearch/pull/5361)) +* Update Jackson to 2.14.1 ([#5346](https://github.com/opensearch-project/opensearch/pull/5346)) ([#5358](https://github.com/opensearch-project/opensearch/pull/5358)) +* Bump nebula-publishing-plugin from v4.4.0 to v4.6.0. ([#5127](https://github.com/opensearch-project/opensearch/pull/5127)) ([#5131](https://github.com/opensearch-project/opensearch/pull/5131)) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/40_max_shard_size.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/40_max_shard_size.yml new file mode 100644 index 0000000000000..b5fe43edcb003 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.shrink/40_max_shard_size.yml @@ -0,0 +1,82 @@ +--- +"Shrink index with max_shard_size": + # shrink index with max_shard_size parameter, which is used to generate an optimum + # number_of_shards for the target index.
+ + - skip: + version: " - 2.9.99" + reason: "only available in 3.0+" + features: allowed_warnings + + - do: + nodes.info: + node_id: data:true + - set: + nodes._arbitrary_key_: node_id + + - do: + indices.create: + index: source + wait_for_active_shards: 1 + body: + settings: + # ensure everything is allocated on the same data node + index.routing.allocation.include._id: $node_id + index.number_of_shards: 3 + index.number_of_replicas: 0 + - do: + index: + index: source + id: "1" + body: { "foo": "hello world" } + + - do: + get: + index: source + id: "1" + + - match: { _index: source } + - match: { _id: "1" } + - match: { _source: { foo: "hello world" } } + + # make it read-only + - do: + indices.put_settings: + index: source + body: + index.blocks.write: true + index.number_of_replicas: 0 + + - do: + cluster.health: + wait_for_status: green + index: source + + # shrink with max_shard_size + - do: + allowed_warnings: + - "Parameter [master_timeout] is deprecated and will be removed in 3.0. To support inclusive language, please use [cluster_manager_timeout] instead." + indices.shrink: + index: "source" + target: "new_shrunken_index" + wait_for_active_shards: 1 + master_timeout: 10s + body: + settings: + index.number_of_replicas: 0 + max_shard_size: "10gb" + + - do: + cluster.health: + wait_for_status: green + + - do: + get: + index: "new_shrunken_index" + id: "1" + + - do: + indices.get_settings: + index: "new_shrunken_index" + + - match: { new_shrunken_index.settings.index.number_of_shards: "1" } diff --git a/scripts/build.sh b/scripts/build.sh index 16906bf39fbc7..a0917776507be 100755 --- a/scripts/build.sh +++ b/scripts/build.sh @@ -139,7 +139,7 @@ esac echo "Building OpenSearch for $PLATFORM-$DISTRIBUTION-$ARCHITECTURE" -./gradlew :distribution:$TYPE:$TARGET:assemble :distribution:$TYPE:no-jdk-$TARGET:assemble -Dbuild.snapshot=$SNAPSHOT -Dbuild.version_qualifier=$QUALIFIER +./gradlew :distribution:$TYPE:$TARGET:assemble -Dbuild.snapshot=$SNAPSHOT -Dbuild.version_qualifier=$QUALIFIER # Copy artifact to dist folder in bundle build output [[ "$SNAPSHOT" == "true" ]] && IDENTIFIER="-SNAPSHOT" diff --git a/server/build.gradle b/server/build.gradle index 9c33199f99d4d..ae79ea2dabc29 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -135,6 +135,10 @@ dependencies { // jna api "net.java.dev.jna:jna:${versions.jna}" + // jackson + api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" + api "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" + testImplementation(project(":test:framework")) { // tests use the locally compiled version of server exclude group: 'org.opensearch', module: 'server' @@ -208,31 +212,12 @@ tasks.named("processResources").configure { tasks.named("thirdPartyAudit").configure { ignoreMissingClasses( - // from com.fasterxml.jackson.dataformat.yaml.YAMLMapper (jackson-dataformat-yaml) - 'com.fasterxml.jackson.databind.ObjectMapper', // from log4j 'com.conversantmedia.util.concurrent.SpinPolicy', - 'com.fasterxml.jackson.annotation.JsonInclude$Include', - 'com.fasterxml.jackson.databind.DeserializationContext', - 'com.fasterxml.jackson.databind.DeserializationFeature', - 'com.fasterxml.jackson.databind.JsonMappingException', - 'com.fasterxml.jackson.databind.JsonNode', - 'com.fasterxml.jackson.databind.Module$SetupContext', - 'com.fasterxml.jackson.databind.ObjectReader', - 'com.fasterxml.jackson.databind.ObjectWriter', - 'com.fasterxml.jackson.databind.SerializerProvider', - 
'com.fasterxml.jackson.databind.deser.std.StdDeserializer', - 'com.fasterxml.jackson.databind.deser.std.StdScalarDeserializer', - 'com.fasterxml.jackson.databind.module.SimpleModule', - 'com.fasterxml.jackson.databind.ser.impl.SimpleBeanPropertyFilter', - 'com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider', - 'com.fasterxml.jackson.databind.ser.std.StdScalarSerializer', - 'com.fasterxml.jackson.databind.ser.std.StdSerializer', 'com.fasterxml.jackson.dataformat.xml.JacksonXmlModule', 'com.fasterxml.jackson.dataformat.xml.XmlMapper', 'com.fasterxml.jackson.dataformat.xml.util.DefaultXmlPrettyPrinter', - 'com.fasterxml.jackson.databind.node.ObjectNode', 'org.fusesource.jansi.Ansi', 'org.fusesource.jansi.AnsiRenderer$Code', 'com.lmax.disruptor.EventFactory', @@ -292,9 +277,6 @@ tasks.named("thirdPartyAudit").configure { 'org.noggit.JSONParser', // from lucene-spatial - 'com.fasterxml.jackson.databind.JsonSerializer', - 'com.fasterxml.jackson.databind.JsonDeserializer', - 'com.fasterxml.jackson.databind.node.ArrayNode', 'com.google.common.geometry.S2Cell', 'com.google.common.geometry.S2CellId', 'com.google.common.geometry.S2Projections', diff --git a/distribution/tools/upgrade-cli/licenses/jackson-annotations-2.14.1.jar.sha1 b/server/licenses/jackson-annotations-2.14.1.jar.sha1 similarity index 100% rename from distribution/tools/upgrade-cli/licenses/jackson-annotations-2.14.1.jar.sha1 rename to server/licenses/jackson-annotations-2.14.1.jar.sha1 diff --git a/modules/ingest-geoip/licenses/jackson-annotations-LICENSE b/server/licenses/jackson-annotations-LICENSE.txt similarity index 100% rename from modules/ingest-geoip/licenses/jackson-annotations-LICENSE rename to server/licenses/jackson-annotations-LICENSE.txt diff --git a/modules/ingest-geoip/licenses/jackson-annotations-NOTICE b/server/licenses/jackson-annotations-NOTICE.txt similarity index 100% rename from modules/ingest-geoip/licenses/jackson-annotations-NOTICE rename to server/licenses/jackson-annotations-NOTICE.txt diff --git a/distribution/tools/upgrade-cli/licenses/jackson-databind-2.14.1.jar.sha1 b/server/licenses/jackson-databind-2.14.1.jar.sha1 similarity index 100% rename from distribution/tools/upgrade-cli/licenses/jackson-databind-2.14.1.jar.sha1 rename to server/licenses/jackson-databind-2.14.1.jar.sha1 diff --git a/plugins/repository-hdfs/licenses/jackson-databind-LICENSE.txt b/server/licenses/jackson-databind-LICENSE.txt similarity index 100% rename from plugins/repository-hdfs/licenses/jackson-databind-LICENSE.txt rename to server/licenses/jackson-databind-LICENSE.txt diff --git a/plugins/repository-hdfs/licenses/jackson-databind-NOTICE.txt b/server/licenses/jackson-databind-NOTICE.txt similarity index 100% rename from plugins/repository-hdfs/licenses/jackson-databind-NOTICE.txt rename to server/licenses/jackson-databind-NOTICE.txt diff --git a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/ShrinkIndexIT.java b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/ShrinkIndexIT.java index daa124fab2220..3420074a0f60b 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/ShrinkIndexIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/admin/indices/create/ShrinkIndexIT.java @@ -66,6 +66,7 @@ import org.opensearch.common.Priority; import org.opensearch.common.collect.ImmutableOpenMap; import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.ByteSizeValue; import 
org.opensearch.common.unit.TimeValue; import org.opensearch.common.xcontent.XContentType; import org.opensearch.index.Index; @@ -75,8 +76,8 @@ import org.opensearch.index.seqno.SeqNoStats; import org.opensearch.index.shard.IndexShard; import org.opensearch.indices.IndicesService; -import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.InternalTestCluster; +import org.opensearch.test.OpenSearchIntegTestCase; import org.opensearch.test.VersionUtils; import java.util.Arrays; @@ -760,4 +761,72 @@ public void testShrinkThenSplitWithFailedNode() throws Exception { ); ensureGreen("splitagain"); } + + public void testCreateShrinkIndexWithMaxShardSize() { + internalCluster().ensureAtLeastNumDataNodes(2); + final String shrinkNode = internalCluster().startDataOnlyNode(); + + final int shardCount = between(2, 5); + prepareCreate("source").setSettings( + Settings.builder() + .put(indexSettings()) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, shardCount) + ).get(); + for (int i = 0; i < 20; i++) { + client().prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get(); + } + client().admin().indices().prepareFlush("source").get(); + ensureGreen(); + + client().admin() + .indices() + .prepareUpdateSettings("source") + .setSettings( + Settings.builder() + .put(IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getConcreteSettingForNamespace("_name").getKey(), shrinkNode) + .put(IndexMetadata.SETTING_BLOCKS_WRITE, true) + ) + .get(); + ensureGreen(); + + // Cannot set max_shard_size and index.number_of_shards at the same time + IllegalArgumentException exc = expectThrows( + IllegalArgumentException.class, + () -> client().admin() + .indices() + .prepareResizeIndex("source", "target") + .setSettings( + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .build() + ) + .setMaxShardSize(new ByteSizeValue(1)) + .setResizeType(ResizeType.SHRINK) + .get() + ); + assertEquals(exc.getMessage(), "Cannot set max_shard_size and index.number_of_shards at the same time!"); + + // use max_shard_size to calculate the target index's shards number + // set max_shard_size to 1 then the target index's shards number will be same with the source index's + assertAcked( + client().admin() + .indices() + .prepareResizeIndex("source", "target") + .setSettings( + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .putNull(IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getConcreteSettingForNamespace("_name").getKey()) + .build() + ) + .setMaxShardSize(new ByteSizeValue(1)) + .setResizeType(ResizeType.SHRINK) + .get() + ); + ensureGreen(); + + GetSettingsResponse target = client().admin().indices().prepareGetSettings("target").get(); + assertEquals(String.valueOf(shardCount), target.getIndexToSettings().get("target").get("index.number_of_shards")); + } } diff --git a/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkIntegrationIT.java b/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkIntegrationIT.java index e2a1363f163da..8236e6e90afc5 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkIntegrationIT.java @@ -193,34 +193,29 @@ public void testDeleteIndexWhileIndexing() throws Exception { String index = "deleted_while_indexing"; createIndex(index); AtomicBoolean stopped = 
new AtomicBoolean(); - Thread[] threads = new Thread[between(1, 4)]; AtomicInteger docID = new AtomicInteger(); - for (int i = 0; i < threads.length; i++) { - threads[i] = new Thread(() -> { - while (stopped.get() == false && docID.get() < 5000) { - String id = Integer.toString(docID.incrementAndGet()); - try { - IndexResponse response = client().prepareIndex(index) - .setId(id) - .setSource(Collections.singletonMap("f" + randomIntBetween(1, 10), randomNonNegativeLong()), XContentType.JSON) - .get(); - assertThat(response.getResult(), is(oneOf(CREATED, UPDATED))); - logger.info("--> index id={} seq_no={}", response.getId(), response.getSeqNo()); - } catch (OpenSearchException ignore) { - logger.info("--> fail to index id={}", id); - } + Thread thread = new Thread(() -> { + while (stopped.get() == false && docID.get() < 5000) { + String id = Integer.toString(docID.incrementAndGet()); + try { + IndexResponse response = client().prepareIndex(index) + .setId(id) + .setSource(Collections.singletonMap("f" + randomIntBetween(1, 10), randomNonNegativeLong()), XContentType.JSON) + .get(); + assertThat(response.getResult(), is(oneOf(CREATED, UPDATED))); + logger.info("--> index id={} seq_no={}", response.getId(), response.getSeqNo()); + } catch (OpenSearchException ignore) { + logger.info("--> fail to index id={}", id); } - }); - threads[i].start(); - } + } + }); + thread.start(); ensureGreen(index); assertBusy(() -> assertThat(docID.get(), greaterThanOrEqualTo(1))); assertAcked(client().admin().indices().prepareDelete(index)); stopped.set(true); - for (Thread thread : threads) { - thread.join(ReplicationRequest.DEFAULT_TIMEOUT.millis() / 2); - assertFalse(thread.isAlive()); - } + thread.join(ReplicationRequest.DEFAULT_TIMEOUT.millis() / 2); + assertFalse(thread.isAlive()); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/query/QueryStringIT.java b/server/src/internalClusterTest/java/org/opensearch/search/query/QueryStringIT.java index 5c7e53fda3f23..9837c86cd8608 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/query/QueryStringIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/query/QueryStringIT.java @@ -216,6 +216,39 @@ public void testKeywordWithWhitespace() throws Exception { assertHitCount(resp, 3L); } + public void testRegexCaseInsensitivity() throws Exception { + createIndex("messages"); + List<IndexRequestBuilder> indexRequests = new ArrayList<>(); + indexRequests.add(client().prepareIndex("messages").setId("1").setSource("message", "message: this is a TLS handshake")); + indexRequests.add(client().prepareIndex("messages").setId("2").setSource("message", "message: this is a tcp handshake")); + indexRandom(true, false, indexRequests); + + SearchResponse response = client().prepareSearch("messages").setQuery(queryStringQuery("/TLS/").defaultField("message")).get(); + assertNoFailures(response); + assertHitCount(response, 1); + assertHits(response.getHits(), "1"); + + response = client().prepareSearch("messages").setQuery(queryStringQuery("/tls/").defaultField("message")).get(); + assertNoFailures(response); + assertHitCount(response, 1); + assertHits(response.getHits(), "1"); + + response = client().prepareSearch("messages").setQuery(queryStringQuery("/TCP/").defaultField("message")).get(); + assertNoFailures(response); + assertHitCount(response, 1); + assertHits(response.getHits(), "2"); + + response = client().prepareSearch("messages").setQuery(queryStringQuery("/tcp/").defaultField("message")).get(); + assertNoFailures(response); +
assertHitCount(response, 1); + assertHits(response.getHits(), "2"); + + response = client().prepareSearch("messages").setQuery(queryStringQuery("/HANDSHAKE/").defaultField("message")).get(); + assertNoFailures(response); + assertHitCount(response, 2); + assertHits(response.getHits(), "1", "2"); + } + public void testAllFields() throws Exception { String indexBody = copyToStringFromClasspath("/org/opensearch/search/query/all-query-index.json"); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java b/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java index c51043f02174d..e90d4e8e12c10 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java @@ -89,12 +89,15 @@ import java.time.format.DateTimeFormatter; import java.util.Arrays; import java.util.Collection; +import java.util.HashSet; import java.util.Map; import java.util.Random; +import java.util.Set; import java.util.concurrent.ExecutionException; import java.util.regex.Pattern; import static java.util.Collections.singletonMap; +import static org.hamcrest.Matchers.containsInAnyOrder; import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; @@ -2089,8 +2092,14 @@ public void testWildcardQueryNormalizationOnTextField() { refresh(); { + // test default case insensitivity: false WildcardQueryBuilder wildCardQuery = wildcardQuery("field1", "Bb*"); SearchResponse searchResponse = client().prepareSearch().setQuery(wildCardQuery).get(); + assertHitCount(searchResponse, 0L); + + // test case insensitivity set to true + wildCardQuery = wildcardQuery("field1", "Bb*").caseInsensitive(true); + searchResponse = client().prepareSearch().setQuery(wildCardQuery).get(); assertHitCount(searchResponse, 1L); wildCardQuery = wildcardQuery("field1", "bb*"); @@ -2099,6 +2108,24 @@ public void testWildcardQueryNormalizationOnTextField() { } } + /** tests wildcard case sensitivity */ + public void testWildcardCaseSensitivity() { + assertAcked(prepareCreate("test").setMapping("field", "type=text")); + client().prepareIndex("test").setId("1").setSource("field", "lowercase text").get(); + refresh(); + + // test case sensitive + SearchResponse response = client().prepareSearch("test").setQuery(wildcardQuery("field", "Text").caseInsensitive(false)).get(); + assertNoFailures(response); + assertHitCount(response, 0); + + // test case insensitive + response = client().prepareSearch("test").setQuery(wildcardQuery("field", "Text").caseInsensitive(true)).get(); + assertNoFailures(response); + assertHitCount(response, 1); + assertHits(response.getHits(), "1"); + } + /** * Reserved characters should be excluded when the normalization is applied for keyword fields. * See https://github.com/elastic/elasticsearch/issues/46300 for details. @@ -2175,4 +2202,16 @@ public void testIssueFuzzyInsideSpanMulti() { SearchResponse response = client().prepareSearch("test").setQuery(query).get(); assertHitCount(response, 1); } + + /** + * asserts the search response hits include the expected ids + */ + private void assertHits(SearchHits hits, String... 
ids) { + assertThat(hits.getTotalHits().value, equalTo((long) ids.length)); + Set hitIds = new HashSet<>(); + for (SearchHit hit : hits.getHits()) { + hitIds.add(hit.getId()); + } + assertThat(hitIds, containsInAnyOrder(ids)); + } } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateResponse.java index d2d7d843e19db..f65d15c5c64aa 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateResponse.java @@ -96,6 +96,11 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(waitForTimedOut); } + @Override + public String toString() { + return "ClusterStateResponse{" + "clusterState=" + clusterState + '}'; + } + @Override public boolean equals(Object o) { if (this == o) return true; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequest.java index f5d9528422b58..9f9c85933f394 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequest.java @@ -31,6 +31,7 @@ package org.opensearch.action.admin.indices.shrink; +import org.opensearch.Version; import org.opensearch.action.ActionRequestValidationException; import org.opensearch.action.IndicesRequest; import org.opensearch.action.admin.indices.alias.Alias; @@ -46,6 +47,7 @@ import org.opensearch.common.xcontent.ToXContentObject; import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentParser; +import org.opensearch.common.unit.ByteSizeValue; import java.io.IOException; import java.util.Objects; @@ -60,6 +62,8 @@ public class ResizeRequest extends AcknowledgedRequest implements IndicesRequest, ToXContentObject { public static final ObjectParser PARSER = new ObjectParser<>("resize_request"); + private static final ParseField MAX_SHARD_SIZE = new ParseField("max_shard_size"); + static { PARSER.declareField( (parser, request, context) -> request.getTargetIndexRequest().settings(parser.map()), @@ -71,12 +75,19 @@ public class ResizeRequest extends AcknowledgedRequest implements new ParseField("aliases"), ObjectParser.ValueType.OBJECT ); + PARSER.declareField( + ResizeRequest::setMaxShardSize, + (p, c) -> ByteSizeValue.parseBytesSizeValue(p.text(), MAX_SHARD_SIZE.getPreferredName()), + MAX_SHARD_SIZE, + ObjectParser.ValueType.STRING + ); } private CreateIndexRequest targetIndexRequest; private String sourceIndex; private ResizeType type = ResizeType.SHRINK; private Boolean copySettings = true; + private ByteSizeValue maxShardSize; public ResizeRequest(StreamInput in) throws IOException { super(in); @@ -84,6 +95,9 @@ public ResizeRequest(StreamInput in) throws IOException { sourceIndex = in.readString(); type = in.readEnum(ResizeType.class); copySettings = in.readOptionalBoolean(); + if (in.getVersion().onOrAfter(Version.V_2_5_0)) { + maxShardSize = in.readOptionalWriteable(ByteSizeValue::new); + } } ResizeRequest() {} @@ -108,6 +122,9 @@ public ActionRequestValidationException validate() { if (type == ResizeType.SPLIT && IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.exists(targetIndexRequest.settings()) == false) { validationException = addValidationError("index.number_of_shards is required for split 
operations", validationException); } + if (maxShardSize != null && maxShardSize.getBytes() <= 0) { + validationException = addValidationError("max_shard_size must be greater than 0", validationException); + } assert copySettings == null || copySettings; return validationException; } @@ -123,6 +140,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(sourceIndex); out.writeEnum(type); out.writeOptionalBoolean(copySettings); + if (out.getVersion().onOrAfter(Version.V_2_5_0)) { + out.writeOptionalWriteable(maxShardSize); + } } @Override @@ -205,6 +225,24 @@ public Boolean getCopySettings() { return copySettings; } + /** + * Sets the maximum size of a primary shard in the new shrunken index. + * This parameter can be used to calculate the lowest factor of the source index's shards number + * which satisfies the maximum shard size requirement. + * + * @param maxShardSize the maximum size of a primary shard in the new shrunken index + */ + public void setMaxShardSize(ByteSizeValue maxShardSize) { + this.maxShardSize = maxShardSize; + } + + /** + * Returns the maximum size of a primary shard in the new shrunken index. + */ + public ByteSizeValue getMaxShardSize() { + return maxShardSize; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); @@ -221,6 +259,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws } } builder.endObject(); + if (maxShardSize != null) { + builder.field(MAX_SHARD_SIZE.getPreferredName(), maxShardSize); + } } builder.endObject(); return builder; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequestBuilder.java b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequestBuilder.java index 418e83a5431ec..eb05c0a69b78b 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequestBuilder.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequestBuilder.java @@ -37,6 +37,7 @@ import org.opensearch.action.support.master.AcknowledgedRequestBuilder; import org.opensearch.client.OpenSearchClient; import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.ByteSizeValue; /** * Transport request builder for resizing an index @@ -95,4 +96,12 @@ public ResizeRequestBuilder setResizeType(ResizeType type) { this.request.setResizeType(type); return this; } + + /** + * Sets the maximum size of a primary shard in the new shrunken index. 
+ */ + public ResizeRequestBuilder setMaxShardSize(ByteSizeValue maxShardSize) { + this.request.setMaxShardSize(maxShardSize); + return this; + } } diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shrink/TransportResizeAction.java b/server/src/main/java/org/opensearch/action/admin/indices/shrink/TransportResizeAction.java index ba079aeb03921..7f55e5efe801b 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/shrink/TransportResizeAction.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/shrink/TransportResizeAction.java @@ -57,6 +57,8 @@ import org.opensearch.index.shard.ShardId; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportService; +import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.index.store.StoreStats; import java.io.IOException; import java.util.Locale; @@ -141,11 +143,12 @@ protected void clusterManagerOperation( .prepareStats(sourceIndex) .clear() .setDocs(true) + .setStore(true) .execute(ActionListener.delegateFailure(listener, (delegatedListener, indicesStatsResponse) -> { CreateIndexClusterStateUpdateRequest updateRequest = prepareCreateIndexRequest(resizeRequest, state, i -> { IndexShardStats shard = indicesStatsResponse.getIndex(sourceIndex).getIndexShards().get(i); return shard == null ? null : shard.getPrimary().getDocs(); - }, sourceIndex, targetIndex); + }, indicesStatsResponse.getPrimaries().store, sourceIndex, targetIndex); createIndexService.createIndex( updateRequest, ActionListener.map( @@ -162,6 +165,7 @@ static CreateIndexClusterStateUpdateRequest prepareCreateIndexRequest( final ResizeRequest resizeRequest, final ClusterState state, final IntFunction<DocsStats> perShardDocStats, + final StoreStats primaryShardsStoreStats, String sourceIndexName, String targetIndexName ) { @@ -176,12 +180,22 @@ static CreateIndexClusterStateUpdateRequest prepareCreateIndexRequest( targetIndexSettingsBuilder.remove(IndexMetadata.SETTING_HISTORY_UUID); final Settings targetIndexSettings = targetIndexSettingsBuilder.build(); final int numShards; + + // max_shard_size is only supported for shrink + ByteSizeValue maxShardSize = resizeRequest.getMaxShardSize(); + if (resizeRequest.getResizeType() != ResizeType.SHRINK && maxShardSize != null) { + throw new IllegalArgumentException("Unsupported parameter [max_shard_size]"); + } + if (IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.exists(targetIndexSettings)) { numShards = IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.get(targetIndexSettings); + if (resizeRequest.getResizeType() == ResizeType.SHRINK && maxShardSize != null) { + throw new IllegalArgumentException("Cannot set max_shard_size and index.number_of_shards at the same time!"); + } } else { assert resizeRequest.getResizeType() != ResizeType.SPLIT : "split must specify the number of shards explicitly"; if (resizeRequest.getResizeType() == ResizeType.SHRINK) { - numShards = 1; + numShards = calculateTargetIndexShardsNum(maxShardSize, primaryShardsStoreStats, metadata); } else { assert resizeRequest.getResizeType() == ResizeType.CLONE; numShards = metadata.getNumberOfShards(); @@ -250,6 +264,46 @@ static CreateIndexClusterStateUpdateRequest prepareCreateIndexRequest( .copySettings(resizeRequest.getCopySettings() == null ? false : resizeRequest.getCopySettings()); } + /** + * Calculate target index's shards count according to max_shard_size and the source index's storage (only primary shards included) + * for shrink.
Target index's shards count is the lowest factor of the source index's primary shards count which satisfies the + * maximum shard size requirement. If max_shard_size is less than the source index's single shard size, then target index's shards count + * will be equal to the source index's shards count. + * @param maxShardSize the maximum size of a primary shard in the target index + * @param sourceIndexShardStoreStats primary shards' store stats of the source index + * @param sourceIndexMetaData source index's metadata + * @return target index's shards number + */ + protected static int calculateTargetIndexShardsNum( + ByteSizeValue maxShardSize, + StoreStats sourceIndexShardStoreStats, + IndexMetadata sourceIndexMetaData + ) { + if (maxShardSize == null + || sourceIndexShardStoreStats == null + || maxShardSize.getBytes() == 0 + || sourceIndexShardStoreStats.getSizeInBytes() == 0) { + return 1; + } + + int sourceIndexShardsNum = sourceIndexMetaData.getNumberOfShards(); + // calculate the minimum shards count according to source index's storage, ceiling ensures that the minimum shards count is never + // less than 1 + int minValue = (int) Math.ceil((double) sourceIndexShardStoreStats.getSizeInBytes() / maxShardSize.getBytes()); + // if minimum shards count is greater than the source index's shards count, then the source index's shards count will be returned + if (minValue >= sourceIndexShardsNum) { + return sourceIndexShardsNum; + } + + // find the lowest factor of the source index's shards count here, because minimum shards count may not be a factor + for (int i = minValue; i < sourceIndexShardsNum; i++) { + if (sourceIndexShardsNum % i == 0) { + return i; + } + } + return sourceIndexShardsNum; + } + @Override protected String getClusterManagerActionName(DiscoveryNode node) { return super.getClusterManagerActionName(node); diff --git a/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java b/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java index 56fb688290002..59f9042ec4a85 100644 --- a/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java +++ b/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java @@ -94,6 +94,8 @@ import java.util.function.Function; import java.util.function.LongSupplier; +import org.opensearch.action.support.replication.ReplicationMode; + /** * Performs shard-level bulk (index, delete or update) operations * @@ -193,6 +195,14 @@ protected long primaryOperationSize(BulkShardRequest request) { return request.ramBytesUsed(); } + @Override + protected ReplicationMode getReplicationMode(IndexShard indexShard) { + if (indexShard.isRemoteTranslogEnabled()) { + return ReplicationMode.PRIMARY_TERM_VALIDATION; + } + return super.getReplicationMode(indexShard); + } + public static void performOnPrimary( BulkShardRequest request, IndexShard primary, diff --git a/server/src/main/java/org/opensearch/action/support/replication/FanoutReplicationProxy.java b/server/src/main/java/org/opensearch/action/support/replication/FanoutReplicationProxy.java new file mode 100644 index 0000000000000..2980df4c1c0af --- /dev/null +++ b/server/src/main/java/org/opensearch/action/support/replication/FanoutReplicationProxy.java @@ -0,0 +1,25 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
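+ */

To make the factor search in calculateTargetIndexShardsNum above concrete, here is a self-contained sketch of the same arithmetic, with the null/zero guard clauses omitted and hypothetical sizes: with 8 source shards, 100gb of primary store and a max_shard_size of 30gb, ceil(100/30) = 4, and 4 divides 8, so the target index gets 4 shards of roughly 25gb each.

    // Standalone sketch of the core loop of calculateTargetIndexShardsNum (assumed inputs).
    static int targetShardCount(long primaryStoreBytes, long maxShardBytes, int sourceShards) {
        // smallest shard count that keeps every target shard at or under maxShardBytes
        int minCount = (int) Math.ceil((double) primaryStoreBytes / maxShardBytes);
        if (minCount >= sourceShards) {
            return sourceShards; // the target can never have more shards than the source
        }
        for (int i = Math.max(minCount, 1); i < sourceShards; i++) {
            if (sourceShards % i == 0) {
                return i; // lowest factor of sourceShards that is >= minCount
            }
        }
        return sourceShards;
    }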
+ +package org.opensearch.action.support.replication; + +import org.opensearch.cluster.routing.ShardRouting; + +/** + * This implementation of {@link ReplicationProxy} fans out the replication request to the current shard routing if + * it is not the primary, using {@link ReplicationMode#FULL_REPLICATION}. + * + * @opensearch.internal + */ +public class FanoutReplicationProxy<ReplicaRequest extends ReplicationRequest<ReplicaRequest>> extends ReplicationProxy<ReplicaRequest> { + + @Override + ReplicationMode determineReplicationMode(ShardRouting shardRouting, ShardRouting primaryRouting) { + return shardRouting.isSameAllocation(primaryRouting) == false ? ReplicationMode.FULL_REPLICATION : ReplicationMode.NO_REPLICATION; + } +} diff --git a/server/src/main/java/org/opensearch/action/support/replication/ReplicationMode.java b/server/src/main/java/org/opensearch/action/support/replication/ReplicationMode.java new file mode 100644 index 0000000000000..f9b85cc4bd7aa --- /dev/null +++ b/server/src/main/java/org/opensearch/action/support/replication/ReplicationMode.java @@ -0,0 +1,32 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.support.replication; + +/** + * The type of replication used for inter-node replication. + * + * @opensearch.internal + */ +public enum ReplicationMode { + /** + * In this mode, a {@code TransportReplicationAction} is fanned out to the underlying concerned shard and is replicated logically. + * In short, this mode would replicate the {@link ReplicationRequest} to + * the replica shard along with primary term validation. + */ + FULL_REPLICATION, + /** + * In this mode, a {@code TransportReplicationAction} is fanned out to the underlying concerned shard and used for + * primary term validation only. The request is not replicated logically. + */ + PRIMARY_TERM_VALIDATION, + /** + * In this mode, a {@code TransportReplicationAction} does not fan out to the underlying concerned shard. + */ + NO_REPLICATION; } diff --git a/server/src/main/java/org/opensearch/action/support/replication/ReplicationModeAwareProxy.java b/server/src/main/java/org/opensearch/action/support/replication/ReplicationModeAwareProxy.java new file mode 100644 index 0000000000000..fa28e99d5696f --- /dev/null +++ b/server/src/main/java/org/opensearch/action/support/replication/ReplicationModeAwareProxy.java @@ -0,0 +1,44 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.support.replication; + +import org.opensearch.cluster.routing.ShardRouting; + +import java.util.Objects; + +/** + * This implementation of {@link ReplicationProxy} fans out the replication request to the current shard routing based on + * the shard routing's replication mode and the replication override policy.
+ * + * @opensearch.internal + */ +public class ReplicationModeAwareProxy<ReplicaRequest extends ReplicationRequest<ReplicaRequest>> extends ReplicationProxy<ReplicaRequest> { + + private final ReplicationMode replicationModeOverride; + + public ReplicationModeAwareProxy(ReplicationMode replicationModeOverride) { + assert Objects.nonNull(replicationModeOverride); + this.replicationModeOverride = replicationModeOverride; + } + + @Override + ReplicationMode determineReplicationMode(ShardRouting shardRouting, ShardRouting primaryRouting) { + + // If the current routing is the primary, then it does not need to be replicated + if (shardRouting.isSameAllocation(primaryRouting)) { + return ReplicationMode.NO_REPLICATION; + } + + if (primaryRouting.relocating() && shardRouting.isSameAllocation(primaryRouting.getTargetRelocatingShard())) { + return ReplicationMode.FULL_REPLICATION; + } + + return replicationModeOverride; + } +} diff --git a/server/src/main/java/org/opensearch/action/support/replication/ReplicationOperation.java b/server/src/main/java/org/opensearch/action/support/replication/ReplicationOperation.java index da37eee88a4e0..1a6a5a9245eb2 100644 --- a/server/src/main/java/org/opensearch/action/support/replication/ReplicationOperation.java +++ b/server/src/main/java/org/opensearch/action/support/replication/ReplicationOperation.java @@ -35,13 +35,14 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.store.AlreadyClosedException; import org.opensearch.Assertions; -import org.opensearch.OpenSearchException; import org.opensearch.ExceptionsHelper; +import org.opensearch.OpenSearchException; import org.opensearch.action.ActionListener; import org.opensearch.action.UnavailableShardsException; import org.opensearch.action.support.ActiveShardCount; import org.opensearch.action.support.RetryableAction; import org.opensearch.action.support.TransportActions; +import org.opensearch.action.support.replication.ReplicationProxyRequest.Builder; import org.opensearch.cluster.action.shard.ShardStateAction; import org.opensearch.cluster.routing.IndexShardRoutingTable; import org.opensearch.cluster.routing.ShardRouting; @@ -99,6 +100,7 @@ public class ReplicationOperation< private final TimeValue initialRetryBackoffBound; private final TimeValue retryTimeout; private final long primaryTerm; + private final ReplicationProxy<ReplicaRequest> replicationProxy; // exposed for tests private final ActionListener resultListener; @@ -117,7 +119,8 @@ public ReplicationOperation( String opType, long primaryTerm, TimeValue initialRetryBackoffBound, - TimeValue retryTimeout + TimeValue retryTimeout, + ReplicationProxy<ReplicaRequest> replicationProxy ) { this.replicasProxy = replicas; this.primary = primary; @@ -129,6 +132,7 @@ public ReplicationOperation( this.primaryTerm = primaryTerm; this.initialRetryBackoffBound = initialRetryBackoffBound; this.retryTimeout = retryTimeout; + this.replicationProxy = replicationProxy; } public void execute() throws Exception { @@ -226,20 +230,26 @@ private void performOnReplicas( final ShardRouting primaryRouting = primary.routingEntry(); - for (final ShardRouting shard : replicationGroup.getReplicationTargets()) { - if (shard.isSameAllocation(primaryRouting) == false) { - performOnReplica(shard, replicaRequest, globalCheckpoint, maxSeqNoOfUpdatesOrDeletes, pendingReplicationActions); - } + for (final ShardRouting shardRouting : replicationGroup.getReplicationTargets()) { + ReplicationProxyRequest<ReplicaRequest> proxyRequest = new Builder<ReplicaRequest>( + shardRouting, + primaryRouting, + globalCheckpoint, + maxSeqNoOfUpdatesOrDeletes, + pendingReplicationActions,
replicaRequest + ).build(); + replicationProxy.performOnReplicaProxy(proxyRequest, this::performOnReplica); } } - private void performOnReplica( - final ShardRouting shard, - final ReplicaRequest replicaRequest, - final long globalCheckpoint, - final long maxSeqNoOfUpdatesOrDeletes, - final PendingReplicationActions pendingReplicationActions - ) { + private void performOnReplica(final ReplicationProxyRequest<ReplicaRequest> replicationProxyRequest) { + final ShardRouting shard = replicationProxyRequest.getShardRouting(); + final ReplicaRequest replicaRequest = replicationProxyRequest.getReplicaRequest(); + final long globalCheckpoint = replicationProxyRequest.getGlobalCheckpoint(); + final long maxSeqNoOfUpdatesOrDeletes = replicationProxyRequest.getMaxSeqNoOfUpdatesOrDeletes(); + final PendingReplicationActions pendingReplicationActions = replicationProxyRequest.getPendingReplicationActions(); + if (logger.isTraceEnabled()) { logger.trace("[{}] sending op [{}] to replica {} for request [{}]", shard.shardId(), opType, shard, replicaRequest); } diff --git a/server/src/main/java/org/opensearch/action/support/replication/ReplicationProxy.java b/server/src/main/java/org/opensearch/action/support/replication/ReplicationProxy.java new file mode 100644 index 0000000000000..e098ea1aed960 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/support/replication/ReplicationProxy.java @@ -0,0 +1,51 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.support.replication; + +import org.opensearch.cluster.routing.ShardRouting; + +import java.util.function.Consumer; + +/** + * Used for performing any replication operation on replicas. Depending on the implementation, the replication call + * can fan out or stop here. + * + * @opensearch.internal + */ +public abstract class ReplicationProxy<ReplicaRequest> { + + /** + * Depending on the actual implementation and the passed {@link ReplicationMode}, the replication + * mode is determined, and that mode decides whether the replication request is performed on the replica or not. + * + * @param proxyRequest replication proxy request + * @param originalPerformOnReplicaConsumer original performOnReplica method passed as consumer + */ + public void performOnReplicaProxy( + ReplicationProxyRequest<ReplicaRequest> proxyRequest, + Consumer<ReplicationProxyRequest<ReplicaRequest>> originalPerformOnReplicaConsumer + ) { + ReplicationMode replicationMode = determineReplicationMode(proxyRequest.getShardRouting(), proxyRequest.getPrimaryRouting()); + // If the replication mode is FULL_REPLICATION or PRIMARY_TERM_VALIDATION, we let the call get performed on the + // replica shard. + if (replicationMode == ReplicationMode.FULL_REPLICATION || replicationMode == ReplicationMode.PRIMARY_TERM_VALIDATION) { + originalPerformOnReplicaConsumer.accept(proxyRequest); + } + } + + /** + * Determines the replication mode based on the constructor arguments of the implementation and the current + * replication mode aware shard routing. + * + * @param shardRouting replication mode aware ShardRouting + * @param primaryRouting primary ShardRouting + * @return the determined replication mode.
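As a concrete reading of this contract, the two implementations in this change resolve it as follows: FanoutReplicationProxy returns FULL_REPLICATION for every allocation other than the primary's, while ReplicationModeAwareProxy returns NO_REPLICATION for the primary itself, FULL_REPLICATION for the primary's relocation target, and the configured override for all other replicas. A hedged wiring sketch, with indexShard and proxyRequest assumed to be in scope:

    // Sketch: obtain the proxy for this shard and dispatch one replica request through it.
    // With a remote translog this yields a ReplicationModeAwareProxy; otherwise a FanoutReplicationProxy.
    ReplicationProxy<BulkShardRequest> proxy = ReplicationProxyFactory.create(
        indexShard,
        ReplicationMode.PRIMARY_TERM_VALIDATION
    );
    // The consumer is invoked only when the resolved mode is FULL_REPLICATION or PRIMARY_TERM_VALIDATION.
    proxy.performOnReplicaProxy(proxyRequest, this::performOnReplica);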
+ */ + abstract ReplicationMode determineReplicationMode(final ShardRouting shardRouting, final ShardRouting primaryRouting); +} diff --git a/server/src/main/java/org/opensearch/action/support/replication/ReplicationProxyFactory.java b/server/src/main/java/org/opensearch/action/support/replication/ReplicationProxyFactory.java new file mode 100644 index 0000000000000..a2bbf58fb9100 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/support/replication/ReplicationProxyFactory.java @@ -0,0 +1,29 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.support.replication; + +import org.opensearch.index.shard.IndexShard; + +/** + * Factory that returns the {@link ReplicationProxy} instance based on the {@link ReplicationMode}. + * + * @opensearch.internal + */ +public class ReplicationProxyFactory { + + public static <ReplicaRequest extends ReplicationRequest<ReplicaRequest>> ReplicationProxy<ReplicaRequest> create( + final IndexShard indexShard, + final ReplicationMode replicationModeOverride + ) { + if (indexShard.isRemoteTranslogEnabled()) { + return new ReplicationModeAwareProxy<>(replicationModeOverride); + } + return new FanoutReplicationProxy<>(); + } } diff --git a/server/src/main/java/org/opensearch/action/support/replication/ReplicationProxyRequest.java b/server/src/main/java/org/opensearch/action/support/replication/ReplicationProxyRequest.java new file mode 100644 index 0000000000000..180efd6f423c3 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/support/replication/ReplicationProxyRequest.java @@ -0,0 +1,116 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.support.replication; + +import org.opensearch.cluster.routing.ShardRouting; + +import java.util.Objects; + +/** + * This is a proxy wrapper over the replication request; instances are created through the Builder present inside.
+ * + * @opensearch.internal + */ +public class ReplicationProxyRequest<ReplicaRequest extends ReplicationRequest<ReplicaRequest>> { + + private final ShardRouting shardRouting; + + private final ShardRouting primaryRouting; + + private final long globalCheckpoint; + + private final long maxSeqNoOfUpdatesOrDeletes; + + private final PendingReplicationActions pendingReplicationActions; + + private final ReplicaRequest replicaRequest; + + private ReplicationProxyRequest( + ShardRouting shardRouting, + ShardRouting primaryRouting, + long globalCheckpoint, + long maxSeqNoOfUpdatesOrDeletes, + PendingReplicationActions pendingReplicationActions, + ReplicaRequest replicaRequest + ) { + this.shardRouting = Objects.requireNonNull(shardRouting); + this.primaryRouting = Objects.requireNonNull(primaryRouting); + this.globalCheckpoint = globalCheckpoint; + this.maxSeqNoOfUpdatesOrDeletes = maxSeqNoOfUpdatesOrDeletes; + this.pendingReplicationActions = Objects.requireNonNull(pendingReplicationActions); + this.replicaRequest = Objects.requireNonNull(replicaRequest); + } + + public ShardRouting getShardRouting() { + return shardRouting; + } + + public ShardRouting getPrimaryRouting() { + return primaryRouting; + } + + public long getGlobalCheckpoint() { + return globalCheckpoint; + } + + public long getMaxSeqNoOfUpdatesOrDeletes() { + return maxSeqNoOfUpdatesOrDeletes; + } + + public PendingReplicationActions getPendingReplicationActions() { + return pendingReplicationActions; + } + + public ReplicaRequest getReplicaRequest() { + return replicaRequest; + } + + /** + * Builder of ReplicationProxyRequest. + * + * @opensearch.internal + */ + public static class Builder<ReplicaRequest extends ReplicationRequest<ReplicaRequest>> { + + private final ShardRouting shardRouting; + private final ShardRouting primaryRouting; + private final long globalCheckpoint; + private final long maxSeqNoOfUpdatesOrDeletes; + private final PendingReplicationActions pendingReplicationActions; + private final ReplicaRequest replicaRequest; + + public Builder( + ShardRouting shardRouting, + ShardRouting primaryRouting, + long globalCheckpoint, + long maxSeqNoOfUpdatesOrDeletes, + PendingReplicationActions pendingReplicationActions, + ReplicaRequest replicaRequest + ) { + this.shardRouting = shardRouting; + this.primaryRouting = primaryRouting; + this.globalCheckpoint = globalCheckpoint; + this.maxSeqNoOfUpdatesOrDeletes = maxSeqNoOfUpdatesOrDeletes; + this.pendingReplicationActions = pendingReplicationActions; + this.replicaRequest = replicaRequest; + } + + public ReplicationProxyRequest<ReplicaRequest> build() { + return new ReplicationProxyRequest<>( + shardRouting, + primaryRouting, + globalCheckpoint, + maxSeqNoOfUpdatesOrDeletes, + pendingReplicationActions, + replicaRequest + ); + } + + } +} diff --git a/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java b/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java index 9d3ee8e49e8c2..0a0904a1b3aaa 100644 --- a/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java +++ b/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java @@ -258,6 +258,19 @@ protected ReplicationOperation.Replicas<ReplicaRequest> newReplicasProxy() { return new ReplicasProxy(); } + /** + * This method is used for defining the {@link ReplicationMode} override per {@link TransportReplicationAction}. + * + * @param indexShard index shard used to determine the policy. + * @return the overridden replication mode.
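The override hook is exercised by TransportShardBulkAction earlier in this change; a subclass that only needs primary term validation for shards backed by a remote translog would look like the following sketch (the same pattern as the bulk action):

    // Sketch: per-action override of the replication mode.
    @Override
    protected ReplicationMode getReplicationMode(IndexShard indexShard) {
        if (indexShard.isRemoteTranslogEnabled()) {
            return ReplicationMode.PRIMARY_TERM_VALIDATION; // validate the primary term only, skip logical replication
        }
        return super.getReplicationMode(indexShard); // FULL_REPLICATION for non-remote-translog shards
    }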
+ */ + protected ReplicationMode getReplicationMode(IndexShard indexShard) { + if (indexShard.isRemoteTranslogEnabled()) { + return ReplicationMode.NO_REPLICATION; + } + return ReplicationMode.FULL_REPLICATION; + } + protected abstract Response newResponseInstance(StreamInput in) throws IOException; /** @@ -533,7 +546,11 @@ public void handleException(TransportException exp) { actionName, primaryRequest.getPrimaryTerm(), initialRetryBackoffBound, - retryTimeout + retryTimeout, + ReplicationProxyFactory.create( + primaryShardReference.indexShard, + getReplicationMode(primaryShardReference.indexShard) + ) ).execute(); } } catch (Exception e) { diff --git a/server/src/main/java/org/opensearch/bootstrap/Security.java b/server/src/main/java/org/opensearch/bootstrap/Security.java index 749c146de4f16..0eab6f9cbcbf1 100644 --- a/server/src/main/java/org/opensearch/bootstrap/Security.java +++ b/server/src/main/java/org/opensearch/bootstrap/Security.java @@ -36,6 +36,7 @@ import org.opensearch.common.SuppressForbidden; import org.opensearch.common.io.PathUtils; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.env.Environment; import org.opensearch.http.HttpTransportSettings; import org.opensearch.plugins.PluginInfo; @@ -316,6 +317,9 @@ static void addFilePermissions(Permissions policy, Environment environment) thro addDirectoryPath(policy, Environment.PATH_HOME_SETTING.getKey(), environment.libDir(), "read,readlink", false); addDirectoryPath(policy, Environment.PATH_HOME_SETTING.getKey(), environment.modulesDir(), "read,readlink", false); addDirectoryPath(policy, Environment.PATH_HOME_SETTING.getKey(), environment.pluginsDir(), "read,readlink", false); + if (FeatureFlags.isEnabled(FeatureFlags.EXTENSIONS)) { + addDirectoryPath(policy, Environment.PATH_HOME_SETTING.getKey(), environment.extensionDir(), "read,readlink", false); + } addDirectoryPath(policy, "path.conf'", environment.configDir(), "read,readlink", false); // read-write dirs addDirectoryPath(policy, "java.io.tmpdir", environment.tmpDir(), "read,readlink,write,delete", false); diff --git a/server/src/main/java/org/opensearch/cluster/ClusterSettingsResponse.java b/server/src/main/java/org/opensearch/cluster/ClusterSettingsResponse.java new file mode 100644 index 0000000000000..e84c2c902abdd --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/ClusterSettingsResponse.java @@ -0,0 +1,60 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.cluster; + +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.settings.Settings; +import org.opensearch.transport.TransportResponse; + +import java.io.IOException; +import java.util.Objects; + +/** + * ClusterSettings Response for Extensibility + * + * @opensearch.internal + */ +public class ClusterSettingsResponse extends TransportResponse { + private final Settings clusterSettings; + + public ClusterSettingsResponse(ClusterService clusterService) { + this.clusterSettings = clusterService.getSettings(); + } + + public ClusterSettingsResponse(StreamInput in) throws IOException { + super(in); + this.clusterSettings = Settings.readSettingsFromStream(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + Settings.writeSettingsToStream(clusterSettings, out); + } + + @Override + public String toString() { + return "ClusterSettingsResponse{" + "clusterSettings=" + clusterSettings + '}'; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ClusterSettingsResponse that = (ClusterSettingsResponse) o; + return Objects.equals(clusterSettings, that.clusterSettings); + } + + @Override + public int hashCode() { + return Objects.hash(clusterSettings); + } + +} diff --git a/server/src/main/java/org/opensearch/cluster/LocalNodeResponse.java b/server/src/main/java/org/opensearch/cluster/LocalNodeResponse.java new file mode 100644 index 0000000000000..ef1ef4a49ad62 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/LocalNodeResponse.java @@ -0,0 +1,60 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
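Responses like ClusterSettingsResponse above are plain wire-serializable objects; a minimal round-trip sketch, assuming a test-fixture ClusterService and OpenSearch's BytesStreamOutput stream helper:

    // Sketch: write the response to a buffer and read it back (test-style round trip).
    ClusterSettingsResponse original = new ClusterSettingsResponse(clusterService);
    try (BytesStreamOutput out = new BytesStreamOutput()) {
        original.writeTo(out);
        try (StreamInput in = out.bytes().streamInput()) {
            ClusterSettingsResponse copy = new ClusterSettingsResponse(in);
            assert original.equals(copy); // equality is defined on the settings payload
        }
    }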
+ */ + +package org.opensearch.cluster; + +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.transport.TransportResponse; + +import java.io.IOException; +import java.util.Objects; + +/** + * LocalNode Response for Extensibility + * + * @opensearch.internal + */ +public class LocalNodeResponse extends TransportResponse { + private final DiscoveryNode localNode; + + public LocalNodeResponse(ClusterService clusterService) { + this.localNode = clusterService.localNode(); + } + + public LocalNodeResponse(StreamInput in) throws IOException { + super(in); + this.localNode = new DiscoveryNode(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + this.localNode.writeTo(out); + } + + @Override + public String toString() { + return "LocalNodeResponse{" + "localNode=" + localNode + '}'; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + LocalNodeResponse that = (LocalNodeResponse) o; + return Objects.equals(localNode, that.localNode); + } + + @Override + public int hashCode() { + return Objects.hash(localNode); + } + +} diff --git a/server/src/main/java/org/opensearch/cluster/decommission/DecommissioningFailedException.java b/server/src/main/java/org/opensearch/cluster/decommission/DecommissioningFailedException.java index fe1b9368ac712..9d1325ccf4912 100644 --- a/server/src/main/java/org/opensearch/cluster/decommission/DecommissioningFailedException.java +++ b/server/src/main/java/org/opensearch/cluster/decommission/DecommissioningFailedException.java @@ -11,6 +11,7 @@ import org.opensearch.OpenSearchException; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.rest.RestStatus; import java.io.IOException; @@ -52,4 +53,9 @@ public void writeTo(StreamOutput out) throws IOException { public DecommissionAttribute decommissionAttribute() { return decommissionAttribute; } + + @Override + public RestStatus status() { + return RestStatus.BAD_REQUEST; + } } diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterManagerTaskThrottler.java b/server/src/main/java/org/opensearch/cluster/service/ClusterManagerTaskThrottler.java index 0503db713258d..249b4ff5316d9 100644 --- a/server/src/main/java/org/opensearch/cluster/service/ClusterManagerTaskThrottler.java +++ b/server/src/main/java/org/opensearch/cluster/service/ClusterManagerTaskThrottler.java @@ -108,10 +108,12 @@ public boolean isThrottlingEnabled() { } void validateSetting(final Settings settings) { - if (minNodeVersionSupplier.get().compareTo(Version.V_2_4_0) < 0) { - throw new IllegalArgumentException("All the nodes in cluster should be on version later than or equal to 2.4.0"); - } Map groups = settings.getAsGroups(); + if (groups.size() > 0) { + if (minNodeVersionSupplier.get().compareTo(Version.V_2_4_0) < 0) { + throw new IllegalArgumentException("All the nodes in cluster should be on version later than or equal to 2.4.0"); + } + } for (String key : groups.keySet()) { if (!THROTTLING_TASK_KEYS.containsKey(key)) { throw new IllegalArgumentException("Cluster manager task throttling is not configured for given task type: " + key); diff --git a/server/src/main/java/org/opensearch/common/bytes/BytesReference.java 
b/server/src/main/java/org/opensearch/common/bytes/BytesReference.java index 85dcf949d479e..97100f905315b 100644 --- a/server/src/main/java/org/opensearch/common/bytes/BytesReference.java +++ b/server/src/main/java/org/opensearch/common/bytes/BytesReference.java @@ -122,8 +122,13 @@ static BytesReference fromByteBuffers(ByteBuffer[] buffers) { * Returns BytesReference composed of the provided ByteBuffer. */ static BytesReference fromByteBuffer(ByteBuffer buffer) { - assert buffer.hasArray(); - return new BytesArray(buffer.array(), buffer.arrayOffset() + buffer.position(), buffer.remaining()); + if (buffer.hasArray()) { + return new BytesArray(buffer.array(), buffer.arrayOffset() + buffer.position(), buffer.remaining()); + } else { + final byte[] array = new byte[buffer.remaining()]; + buffer.asReadOnlyBuffer().get(array, 0, buffer.remaining()); + return new BytesArray(array); + } } /** diff --git a/server/src/main/java/org/opensearch/discovery/PluginRequest.java b/server/src/main/java/org/opensearch/discovery/PluginRequest.java new file mode 100644 index 0000000000000..7992de4342d86 --- /dev/null +++ b/server/src/main/java/org/opensearch/discovery/PluginRequest.java @@ -0,0 +1,76 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.discovery; + +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.extensions.DiscoveryExtensionNode; +import org.opensearch.transport.TransportRequest; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; + +/** + * PluginRequest to initialize plugin + * + * @opensearch.internal + */ +public class PluginRequest extends TransportRequest { + private final DiscoveryNode sourceNode; + /* + * TODO change DiscoveryNode to Extension information + */ + private final List<DiscoveryExtensionNode> extensions; + + public PluginRequest(DiscoveryNode sourceNode, List<DiscoveryExtensionNode> extensions) { + this.sourceNode = sourceNode; + this.extensions = extensions; + } + + public PluginRequest(StreamInput in) throws IOException { + super(in); + sourceNode = new DiscoveryNode(in); + extensions = in.readList(DiscoveryExtensionNode::new); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + sourceNode.writeTo(out); + out.writeList(extensions); + } + + public List<DiscoveryExtensionNode> getExtensions() { + return extensions; + } + + public DiscoveryNode getSourceNode() { + return sourceNode; + } + + @Override + public String toString() { + return "PluginRequest{" + "sourceNode=" + sourceNode + ", extensions=" + extensions + '}'; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + PluginRequest that = (PluginRequest) o; + return Objects.equals(sourceNode, that.sourceNode) && Objects.equals(extensions, that.extensions); + } + + @Override + public int hashCode() { + return Objects.hash(sourceNode, extensions); + } +} diff --git a/server/src/main/java/org/opensearch/discovery/PluginResponse.java b/server/src/main/java/org/opensearch/discovery/PluginResponse.java new file mode 100644 index 0000000000000..f8f20214e5846 --- /dev/null +++ b/server/src/main/java/org/opensearch/discovery/PluginResponse.java @@ -0,0 +1,88 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The
OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package org.opensearch.discovery; + +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.transport.TransportResponse; + +import java.io.IOException; +import java.util.Objects; + +/** + * PluginResponse to initialize plugin + * + * @opensearch.internal + */ +public class PluginResponse extends TransportResponse { + private String name; + + public PluginResponse(String name) { + this.name = name; + } + + public PluginResponse(StreamInput in) throws IOException { + name = in.readString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(name); + } + + /** + * @return the name of the extension that sent this response.
+ */ + + public String getName() { + return this.name; + } + + @Override + public String toString() { + return "PluginResponse{" + "name=" + name + "}"; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + PluginResponse that = (PluginResponse) o; + return Objects.equals(name, that.name); + } + + @Override + public int hashCode() { + return Objects.hash(name); + } +} diff --git a/server/src/main/java/org/opensearch/env/Environment.java b/server/src/main/java/org/opensearch/env/Environment.java index c9e75bcbb616f..938bca58c7081 100644 --- a/server/src/main/java/org/opensearch/env/Environment.java +++ b/server/src/main/java/org/opensearch/env/Environment.java @@ -93,6 +93,8 @@ public class Environment { private final Path pluginsDir; + private final Path extensionsDir; + private final Path modulesDir; private final Path sharedDataDir; @@ -137,6 +139,7 @@ public Environment(final Settings settings, final Path configPath, final boolean tmpDir = Objects.requireNonNull(tmpPath); pluginsDir = homeFile.resolve("plugins"); + extensionsDir = homeFile.resolve("extensions"); List<String> dataPaths = PATH_DATA_SETTING.get(settings); if (nodeLocalStorage) { @@ -308,6 +311,10 @@ public Path pluginsDir() { return pluginsDir; } + public Path extensionDir() { + return extensionsDir; + } + public Path binDir() { return binDir; } diff --git a/server/src/main/java/org/opensearch/extensions/DiscoveryExtensionNode.java b/server/src/main/java/org/opensearch/extensions/DiscoveryExtensionNode.java new file mode 100644 index 0000000000000..e4fa0d74f78f0 --- /dev/null +++ b/server/src/main/java/org/opensearch/extensions/DiscoveryExtensionNode.java @@ -0,0 +1,70 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.extensions; + +import org.opensearch.Version; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodeRole; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.io.stream.Writeable; +import org.opensearch.common.transport.TransportAddress; +import org.opensearch.common.xcontent.ToXContentFragment; +import org.opensearch.common.xcontent.XContentBuilder; +import org.opensearch.plugins.PluginInfo; + +import java.io.IOException; +import java.util.Map; + +/** + * Discover extensions running independently or in a separate process + * + * @opensearch.internal + */ +public class DiscoveryExtensionNode extends DiscoveryNode implements Writeable, ToXContentFragment { + + private final PluginInfo pluginInfo; + + public DiscoveryExtensionNode( + String name, + String id, + String ephemeralId, + String hostName, + String hostAddress, + TransportAddress address, + Map<String, String> attributes, + Version version, + PluginInfo pluginInfo + ) { + super(name, id, ephemeralId, hostName, hostAddress, address, attributes, DiscoveryNodeRole.BUILT_IN_ROLES, version); + this.pluginInfo = pluginInfo; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + pluginInfo.writeTo(out); + } + + /** + * Construct DiscoveryExtensionNode from a stream.
+ * + * @param in the stream + * @throws IOException if an I/O exception occurred reading the plugin info from the stream + */ + public DiscoveryExtensionNode(final StreamInput in) throws IOException { + super(in); + this.pluginInfo = new PluginInfo(in); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return null; + } +} diff --git a/server/src/main/java/org/opensearch/extensions/ExtensionRequest.java b/server/src/main/java/org/opensearch/extensions/ExtensionRequest.java new file mode 100644 index 0000000000000..924fce49a5dc2 --- /dev/null +++ b/server/src/main/java/org/opensearch/extensions/ExtensionRequest.java @@ -0,0 +1,66 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.extensions; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.transport.TransportRequest; + +import java.io.IOException; +import java.util.Objects; + +/** + * ClusterService Request for Extensibility + * + * @opensearch.internal + */ +public class ExtensionRequest extends TransportRequest { + private static final Logger logger = LogManager.getLogger(ExtensionRequest.class); + private ExtensionsManager.RequestType requestType; + + public ExtensionRequest(ExtensionsManager.RequestType requestType) { + this.requestType = requestType; + } + + public ExtensionRequest(StreamInput in) throws IOException { + super(in); + this.requestType = in.readEnum(ExtensionsManager.RequestType.class); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeEnum(requestType); + } + + public ExtensionsManager.RequestType getRequestType() { + return this.requestType; + } + + public String toString() { + return "ExtensionRequest{" + "requestType=" + requestType + '}'; + } + + @Override + public boolean equals(Object o) { + + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ExtensionRequest that = (ExtensionRequest) o; + return Objects.equals(requestType, that.requestType); + } + + @Override + public int hashCode() { + return Objects.hash(requestType); + } + +} diff --git a/server/src/main/java/org/opensearch/extensions/ExtensionsManager.java b/server/src/main/java/org/opensearch/extensions/ExtensionsManager.java new file mode 100644 index 0000000000000..b809f2e35a483 --- /dev/null +++ b/server/src/main/java/org/opensearch/extensions/ExtensionsManager.java @@ -0,0 +1,440 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.extensions; + +import java.io.IOException; +import java.io.InputStream; +import java.net.InetAddress; +import java.net.UnknownHostException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeUnit; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.Version; +import org.opensearch.action.admin.cluster.state.ClusterStateResponse; +import org.opensearch.cluster.ClusterSettingsResponse; +import org.opensearch.cluster.LocalNodeResponse; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.io.FileSystemUtils; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.transport.TransportAddress; + +import org.opensearch.discovery.PluginRequest; +import org.opensearch.discovery.PluginResponse; +import org.opensearch.extensions.ExtensionsSettings.Extension; +import org.opensearch.index.IndexModule; +import org.opensearch.index.IndexService; +import org.opensearch.index.AcknowledgedResponse; +import org.opensearch.index.IndicesModuleRequest; +import org.opensearch.index.IndicesModuleResponse; +import org.opensearch.index.shard.IndexEventListener; +import org.opensearch.indices.cluster.IndicesClusterStateService; +import org.opensearch.plugins.PluginInfo; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportException; +import org.opensearch.transport.TransportResponse; +import org.opensearch.transport.TransportResponseHandler; +import org.opensearch.transport.TransportService; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.dataformat.yaml.YAMLFactory; + +/** + * The main class for Plugin Extensibility + * + * @opensearch.internal + */ +public class ExtensionsManager { + public static final String REQUEST_EXTENSION_ACTION_NAME = "internal:discovery/extensions"; + public static final String INDICES_EXTENSION_POINT_ACTION_NAME = "indices:internal/extensions"; + public static final String INDICES_EXTENSION_NAME_ACTION_NAME = "indices:internal/name"; + public static final String REQUEST_EXTENSION_CLUSTER_STATE = "internal:discovery/clusterstate"; + public static final String REQUEST_EXTENSION_LOCAL_NODE = "internal:discovery/localnode"; + public static final String REQUEST_EXTENSION_CLUSTER_SETTINGS = "internal:discovery/clustersettings"; + + private static final Logger logger = LogManager.getLogger(ExtensionsManager.class); + + /** + * Enum for Extension Requests + * + * @opensearch.internal + */ + public static enum RequestType { + REQUEST_EXTENSION_CLUSTER_STATE, + REQUEST_EXTENSION_LOCAL_NODE, + REQUEST_EXTENSION_CLUSTER_SETTINGS, + CREATE_COMPONENT, + ON_INDEX_MODULE, + GET_SETTINGS + }; + + private final Path extensionsPath; + private final List uninitializedExtensions; + private List extensions; + private TransportService transportService; + private ClusterService clusterService; + + public ExtensionsManager() { + this.extensionsPath = Path.of(""); + this.uninitializedExtensions = new ArrayList(); + } + + public ExtensionsManager(Settings settings, Path extensionsPath) throws IOException { + logger.info("ExtensionsManager initialized"); + this.extensionsPath 
= extensionsPath; + this.transportService = null; + this.uninitializedExtensions = new ArrayList(); + this.extensions = new ArrayList(); + this.clusterService = null; + + /* + * Now Discover extensions + */ + discover(); + + } + + public void setTransportService(TransportService transportService) { + this.transportService = transportService; + registerRequestHandler(); + } + + public void setClusterService(ClusterService clusterService) { + this.clusterService = clusterService; + } + + private void registerRequestHandler() { + transportService.registerRequestHandler( + REQUEST_EXTENSION_CLUSTER_STATE, + ThreadPool.Names.GENERIC, + false, + false, + ExtensionRequest::new, + ((request, channel, task) -> channel.sendResponse(handleExtensionRequest(request))) + ); + transportService.registerRequestHandler( + REQUEST_EXTENSION_LOCAL_NODE, + ThreadPool.Names.GENERIC, + false, + false, + ExtensionRequest::new, + ((request, channel, task) -> channel.sendResponse(handleExtensionRequest(request))) + ); + transportService.registerRequestHandler( + REQUEST_EXTENSION_CLUSTER_SETTINGS, + ThreadPool.Names.GENERIC, + false, + false, + ExtensionRequest::new, + ((request, channel, task) -> channel.sendResponse(handleExtensionRequest(request))) + ); + } + + /* + * Load and populate all extensions + */ + private void discover() throws IOException { + logger.info("Extensions Config Directory :" + extensionsPath.toString()); + if (!FileSystemUtils.isAccessibleDirectory(extensionsPath, logger)) { + return; + } + + List extensions = new ArrayList(); + if (Files.exists(extensionsPath.resolve("extensions.yml"))) { + try { + extensions = readFromExtensionsYml(extensionsPath.resolve("extensions.yml")).getExtensions(); + } catch (IOException e) { + throw new IOException("Could not read from extensions.yml", e); + } + for (Extension extension : extensions) { + loadExtension(extension); + } + if (!uninitializedExtensions.isEmpty()) { + logger.info("Loaded all extensions"); + } + } else { + logger.info("Extensions.yml file is not present. 
+ + /** + * Loads a single extension + * @param extension The extension to be loaded + */ + private void loadExtension(Extension extension) throws IOException { + uninitializedExtensions.add( + new DiscoveryExtensionNode( + extension.getName(), + extension.getUniqueId(), + // placeholder for ephemeral id, will change with POC discovery + extension.getUniqueId(), + extension.getHostName(), + extension.getHostAddress(), + new TransportAddress(InetAddress.getByName(extension.getHostAddress()), Integer.parseInt(extension.getPort())), + new HashMap<>(), + Version.fromString(extension.getOpensearchVersion()), + new PluginInfo( + extension.getName(), + extension.getDescription(), + extension.getVersion(), + Version.fromString(extension.getOpensearchVersion()), + extension.getJavaVersion(), + extension.getClassName(), + new ArrayList<>(), + Boolean.parseBoolean(extension.hasNativeController()) + ) + ) + ); + logger.info("Loaded extension: {}", extension); + } + + public void initialize() { + for (DiscoveryNode extensionNode : uninitializedExtensions) { + initializeExtension(extensionNode); + } + } + + private void initializeExtension(DiscoveryNode extensionNode) { + + final TransportResponseHandler<PluginResponse> pluginResponseHandler = new TransportResponseHandler<PluginResponse>() { + + @Override + public PluginResponse read(StreamInput in) throws IOException { + return new PluginResponse(in); + } + + @Override + public void handleResponse(PluginResponse response) { + for (DiscoveryExtensionNode extension : uninitializedExtensions) { + if (extension.getName().equals(response.getName())) { + extensions.add(extension); + break; + } + } + } + + @Override + public void handleException(TransportException exp) { + logger.error("Plugin request failed", exp); + } + + @Override + public String executor() { + return ThreadPool.Names.GENERIC; + } + }; + transportService.connectToExtensionNode(extensionNode); + transportService.sendRequest( + extensionNode, + REQUEST_EXTENSION_ACTION_NAME, + new PluginRequest(transportService.getLocalNode(), new ArrayList<>(uninitializedExtensions)), + pluginResponseHandler + ); + } + + TransportResponse handleExtensionRequest(ExtensionRequest extensionRequest) throws Exception { + // Dispatch on the request type + if (extensionRequest.getRequestType() == RequestType.REQUEST_EXTENSION_CLUSTER_STATE) { + ClusterStateResponse clusterStateResponse = new ClusterStateResponse( + clusterService.getClusterName(), + clusterService.state(), + false + ); + return clusterStateResponse; + } else if (extensionRequest.getRequestType() == RequestType.REQUEST_EXTENSION_LOCAL_NODE) { + LocalNodeResponse localNodeResponse = new LocalNodeResponse(clusterService); + return localNodeResponse; + } else if (extensionRequest.getRequestType() == RequestType.REQUEST_EXTENSION_CLUSTER_SETTINGS) { + ClusterSettingsResponse clusterSettingsResponse = new ClusterSettingsResponse(clusterService); + return clusterSettingsResponse; + } + throw new IllegalStateException("Handler not present for the provided request: " + extensionRequest.getRequestType()); + } + + public void onIndexModule(IndexModule indexModule) throws UnknownHostException { + for (DiscoveryNode extensionNode : uninitializedExtensions) { + onIndexModule(indexModule, extensionNode); + } + } + + private void onIndexModule(IndexModule indexModule, DiscoveryNode extensionNode) throws UnknownHostException { + 
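+        // Flow implemented below: send an IndicesModuleRequest to the extension node and block on
+        // inProgressFuture until its IndicesModuleResponse arrives; if that response asks for an
+        // IndexEventListener, beforeIndexRemoved later sends the index name to the extension and
+        // waits on inProgressIndexNameFuture for the extension's AcknowledgedResponse.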
logger.info("onIndexModule index: {}", indexModule.getIndex()); + final CompletableFuture<IndicesModuleResponse> inProgressFuture = new CompletableFuture<>(); + final CompletableFuture<AcknowledgedResponse> inProgressIndexNameFuture = new CompletableFuture<>(); + final TransportResponseHandler<AcknowledgedResponse> acknowledgedResponseHandler = new TransportResponseHandler< + AcknowledgedResponse>() { + @Override + public void handleResponse(AcknowledgedResponse response) { + logger.info("ACK response: {}", response); + inProgressIndexNameFuture.complete(response); + } + + @Override + public void handleException(TransportException exp) { + logger.error("AcknowledgedResponse handling failed", exp); + inProgressIndexNameFuture.completeExceptionally(exp); + } + + @Override + public String executor() { + return ThreadPool.Names.GENERIC; + } + + @Override + public AcknowledgedResponse read(StreamInput in) throws IOException { + return new AcknowledgedResponse(in); + } + + }; + + final TransportResponseHandler<IndicesModuleResponse> indicesModuleResponseHandler = new TransportResponseHandler< + IndicesModuleResponse>() { + + @Override + public IndicesModuleResponse read(StreamInput in) throws IOException { + return new IndicesModuleResponse(in); + } + + @Override + public void handleResponse(IndicesModuleResponse response) { + logger.info("received {}", response); + if (response.getIndexEventListener()) { + indexModule.addIndexEventListener(new IndexEventListener() { + @Override + public void beforeIndexRemoved( + IndexService indexService, + IndicesClusterStateService.AllocatedIndices.IndexRemovalReason reason + ) { + logger.info("Index Event Listener is called"); + String indexName = indexService.index().getName(); + logger.info("Index name: {}", indexName); + try { + logger.info("Sending request of index name to extension"); + transportService.sendRequest( + extensionNode, + INDICES_EXTENSION_NAME_ACTION_NAME, + new IndicesModuleRequest(indexModule), + acknowledgedResponseHandler + ); + /* + * Making async synchronous for now. + */ + inProgressIndexNameFuture.get(100, TimeUnit.SECONDS); + logger.info("Received ack response from Extension"); + } catch (Exception e) { + logger.error("Sending the index name to the extension failed", e); + } + } + }); + } + inProgressFuture.complete(response); + } + + @Override + public void handleException(TransportException exp) { + logger.error("IndicesModuleRequest failed", exp); + inProgressFuture.completeExceptionally(exp); + } + + @Override + public String executor() { + return ThreadPool.Names.GENERIC; + } + }; + + try { + logger.info("Sending request to extension"); + transportService.sendRequest( + extensionNode, + INDICES_EXTENSION_POINT_ACTION_NAME, + new IndicesModuleRequest(indexModule), + indicesModuleResponseHandler + ); + /* + * Making async synchronous for now. 
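+             * The calling thread blocks for up to 100 seconds; a timed-out or exceptionally
+             * completed response surfaces from get() as an exception and is only logged below.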
+ */ + inProgressFuture.get(100, TimeUnit.SECONDS); + logger.info("Received response from Extension"); + } catch (Exception e) { + logger.error("IndicesModuleRequest failed", e); + } + } + + private ExtensionsSettings readFromExtensionsYml(Path filePath) throws IOException { + ObjectMapper objectMapper = new ObjectMapper(new YAMLFactory()); + try (InputStream input = Files.newInputStream(filePath)) { + return objectMapper.readValue(input, ExtensionsSettings.class); + } + } + + public static String getRequestExtensionActionName() { + return REQUEST_EXTENSION_ACTION_NAME; + } + + public static String getIndicesExtensionPointActionName() { + return INDICES_EXTENSION_POINT_ACTION_NAME; + } + + public static String getIndicesExtensionNameActionName() { + return INDICES_EXTENSION_NAME_ACTION_NAME; + } + + public static String getRequestExtensionClusterState() { + return REQUEST_EXTENSION_CLUSTER_STATE; + } + + public static String getRequestExtensionLocalNode() { + return REQUEST_EXTENSION_LOCAL_NODE; + } + + public static String getRequestExtensionClusterSettings() { + return REQUEST_EXTENSION_CLUSTER_SETTINGS; + } + + public static Logger getLogger() { + return logger; + } + + public Path getExtensionsPath() { + return extensionsPath; + } + + public List<DiscoveryExtensionNode> getUninitializedExtensions() { + return uninitializedExtensions; + } + + public List<DiscoveryExtensionNode> getExtensions() { + return extensions; + } + + public TransportService getTransportService() { + return transportService; + } + + public ClusterService getClusterService() { + return clusterService; + } + +} diff --git a/server/src/main/java/org/opensearch/extensions/ExtensionsSettings.java b/server/src/main/java/org/opensearch/extensions/ExtensionsSettings.java new file mode 100644 index 0000000000000..8b6226e578ea3 --- /dev/null +++ b/server/src/main/java/org/opensearch/extensions/ExtensionsSettings.java @@ -0,0 +1,202 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.extensions; + +import java.util.ArrayList; +import java.util.List; + +/** + * List of extension configurations from extensions.yml + * + * @opensearch.internal + */ +public class ExtensionsSettings { + + private List<Extension> extensions; + + public ExtensionsSettings() { + extensions = new ArrayList<>(); + } + + /** + * Extension configuration used for extension discovery + * + * @opensearch.internal + */ + public static class Extension { + + private String name; + private String uniqueId; + private String hostName; + private String hostAddress; + private String port; + private String version; + private String description; + private String opensearchVersion; + private String jvmVersion; + private String className; + private String customFolderName; + private String hasNativeController; + + public Extension() { + name = ""; + uniqueId = ""; + hostName = ""; + hostAddress = ""; + port = ""; + version = ""; + description = ""; + opensearchVersion = ""; + jvmVersion = ""; + className = ""; + customFolderName = ""; + hasNativeController = "false"; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public String getUniqueId() { + return uniqueId; + } + + public void setUniqueId(String uniqueId) { + this.uniqueId = uniqueId; + } + + public String getHostName() { + return hostName; + } + + public void setHostName(String hostName) { + this.hostName = hostName; + } + + public String getHostAddress() { + return hostAddress; + } + + public void setHostAddress(String hostAddress) { + this.hostAddress = hostAddress; + } + + public String getPort() { + return port; + } + + public void setPort(String port) { + this.port = port; + } + + public String getVersion() { + return version; + } + + public void setVersion(String version) { + this.version = version; + } + + @Override + public String toString() { + return "Extension [className=" + + className + + ", customFolderName=" + + customFolderName + + ", description=" + + description + + ", hasNativeController=" + + hasNativeController + + ", hostAddress=" + + hostAddress + + ", hostName=" + + hostName + + ", jvmVersion=" + + jvmVersion + + ", name=" + + name + + ", opensearchVersion=" + + opensearchVersion + + ", port=" + + port + + ", uniqueId=" + + uniqueId + + ", version=" + + version + + "]"; + } + + public String getDescription() { + return description; + } + + public void setDescription(String description) { + this.description = description; + } + + public String getOpensearchVersion() { + return opensearchVersion; + } + + public void setOpensearchVersion(String opensearchVersion) { + this.opensearchVersion = opensearchVersion; + } + + public String getJavaVersion() { + return jvmVersion; + } + + public void setJavaVersion(String jvmVersion) { + this.jvmVersion = jvmVersion; + } + + public String getClassName() { + return className; + } + + public void setClassName(String className) { + this.className = className; + } + + public String getCustomFolderName() { + return customFolderName; + } + + public void setCustomFolderName(String customFolderName) { + this.customFolderName = customFolderName; + } + + public String hasNativeController() { + return hasNativeController; + } + + public void setHasNativeController(String hasNativeController) { + this.hasNativeController = hasNativeController; + } + + } + + public List<Extension> getExtensions() { + return extensions; + } + + public void setExtensions(List<Extension> extensions) { + this.extensions = extensions; + } + + @Override + public 
String toString() { + return "ExtensionsSettings [extensions=" + extensions + "]"; + } + +} diff --git a/server/src/main/java/org/opensearch/extensions/NoopExtensionsManager.java b/server/src/main/java/org/opensearch/extensions/NoopExtensionsManager.java new file mode 100644 index 0000000000000..24f71476dcb1e --- /dev/null +++ b/server/src/main/java/org/opensearch/extensions/NoopExtensionsManager.java @@ -0,0 +1,21 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.extensions; + +/** + * Noop class for ExtensionsManager + * + * @opensearch.internal + */ +public class NoopExtensionsManager extends ExtensionsManager { + + public NoopExtensionsManager() { + super(); + } +} diff --git a/server/src/main/java/org/opensearch/extensions/package-info.java b/server/src/main/java/org/opensearch/extensions/package-info.java new file mode 100644 index 0000000000000..c6efd42499240 --- /dev/null +++ b/server/src/main/java/org/opensearch/extensions/package-info.java @@ -0,0 +1,10 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/** Main OpenSearch extensions package. OpenSearch extensions provide extensibility to OpenSearch.*/ +package org.opensearch.extensions; diff --git a/server/src/main/java/org/opensearch/index/AcknowledgedResponse.java b/server/src/main/java/org/opensearch/index/AcknowledgedResponse.java new file mode 100644 index 0000000000000..5993a81158d30 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/AcknowledgedResponse.java @@ -0,0 +1,42 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index; + +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.transport.TransportResponse; + +import java.io.IOException; + +/** + * Response for index name of onIndexModule extension point + * + * @opensearch.internal + */ +public class AcknowledgedResponse extends TransportResponse { + private boolean requestAck; + + public AcknowledgedResponse(StreamInput in) throws IOException { + this.requestAck = in.readBoolean(); + } + + public AcknowledgedResponse(boolean requestAck) { + this.requestAck = requestAck; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeBoolean(requestAck); + } + +} diff --git a/server/src/main/java/org/opensearch/index/IndicesModuleRequest.java b/server/src/main/java/org/opensearch/index/IndicesModuleRequest.java new file mode 100644 index 0000000000000..0e0fe87df76cd --- /dev/null +++ b/server/src/main/java/org/opensearch/index/IndicesModuleRequest.java @@ -0,0 +1,68 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index; + +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.settings.Settings; +import org.opensearch.transport.TransportRequest; + +import java.io.IOException; +import java.util.Objects; + +/** + * Request for onIndexModule extension point + * + * @opensearch.internal + */ +public class IndicesModuleRequest extends TransportRequest { + private final Index index; + private final Settings indexSettings; + + public IndicesModuleRequest(IndexModule indexModule) { + this.index = indexModule.getIndex(); + this.indexSettings = indexModule.getSettings(); + } + + public IndicesModuleRequest(StreamInput in) throws IOException { + super(in); + this.index = new Index(in); + this.indexSettings = Settings.readSettingsFromStream(in); + } + + public IndicesModuleRequest(Index index, Settings settings) { + this.index = index; + this.indexSettings = settings; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + index.writeTo(out); + Settings.writeSettingsToStream(indexSettings, out); + } + + @Override + public String toString() { + return "IndicesModuleRequest{" + "index=" + index + ", indexSettings=" + indexSettings + '}'; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + IndicesModuleRequest that = (IndicesModuleRequest) o; + return Objects.equals(index, that.index) && Objects.equals(indexSettings, that.indexSettings); + } + + @Override + public int hashCode() { + return Objects.hash(index, indexSettings); + } +} diff --git a/server/src/main/java/org/opensearch/index/IndicesModuleResponse.java b/server/src/main/java/org/opensearch/index/IndicesModuleResponse.java new file mode 100644 index 0000000000000..7b41f629e48ed --- /dev/null +++ b/server/src/main/java/org/opensearch/index/IndicesModuleResponse.java @@ -0,0 +1,89 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index; + +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.transport.TransportResponse; + +import java.io.IOException; +import java.util.Objects; + +/** + * Response for onIndexModule extension point + * + * @opensearch.internal + */ +public class IndicesModuleResponse extends TransportResponse { + private boolean supportsIndexEventListener; + private boolean addIndexOperationListener; + private boolean addSearchOperationListener; + + public IndicesModuleResponse( + boolean supportsIndexEventListener, + boolean addIndexOperationListener, + boolean addSearchOperationListener + ) { + this.supportsIndexEventListener = supportsIndexEventListener; + this.addIndexOperationListener = addIndexOperationListener; + this.addSearchOperationListener = addSearchOperationListener; + } + + public IndicesModuleResponse(StreamInput in) throws IOException { + this.supportsIndexEventListener = in.readBoolean(); + this.addIndexOperationListener = in.readBoolean(); + this.addSearchOperationListener = in.readBoolean(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeBoolean(supportsIndexEventListener); + out.writeBoolean(addIndexOperationListener); + out.writeBoolean(addSearchOperationListener); + } + + public boolean getIndexEventListener() { + return this.supportsIndexEventListener; + } + + public boolean getIndexOperationListener() { + return this.addIndexOperationListener; + } + + public boolean getSearchOperationListener() { + return this.addSearchOperationListener; + } + + @Override + public String toString() { + return "IndicesModuleResponse{" + + "supportsIndexEventListener=" + + supportsIndexEventListener + + ", addIndexOperationListener=" + + addIndexOperationListener + + ", addSearchOperationListener=" + + addSearchOperationListener + + "}"; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + IndicesModuleResponse that = (IndicesModuleResponse) o; + return supportsIndexEventListener == that.supportsIndexEventListener + && addIndexOperationListener == that.addIndexOperationListener + && addSearchOperationListener == that.addSearchOperationListener; + } + + @Override + public int hashCode() { + return Objects.hash(supportsIndexEventListener, addIndexOperationListener, addSearchOperationListener); + } +} diff --git a/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java index 0b85ba0d2ccd8..42069ac165b25 100644 --- a/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java @@ -38,7 +38,10 @@ import org.apache.lucene.document.FieldType; import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.search.MultiTermQuery; +import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; +import org.opensearch.common.Nullable; import org.opensearch.common.lucene.Lucene; import org.opensearch.common.xcontent.XContentParser; import org.opensearch.index.analysis.IndexAnalyzers; @@ -368,6 +371,18 @@ protected BytesRef indexedValueForSearch(Object value) { } return getTextSearchInfo().getSearchAnalyzer().normalize(name(), value.toString()); } + + @Override + public 
Query wildcardQuery( + String value, + @Nullable MultiTermQuery.RewriteMethod method, + boolean caseInsensitive, + QueryShardContext context + ) { + // keyword field types are always normalized, so ignore case sensitivity and force normalize the wildcard + // query text + return super.wildcardQuery(value, method, caseInsensitive, true, context); + } } private final boolean indexed; diff --git a/server/src/main/java/org/opensearch/index/mapper/MappedFieldType.java b/server/src/main/java/org/opensearch/index/mapper/MappedFieldType.java index ead901a25e6fd..0804ad1a524a9 100644 --- a/server/src/main/java/org/opensearch/index/mapper/MappedFieldType.java +++ b/server/src/main/java/org/opensearch/index/mapper/MappedFieldType.java @@ -281,7 +281,7 @@ public Query prefixQuery( ) { throw new QueryShardException( context, - "Can only use prefix queries on keyword, text and wildcard fields - not on [" + name + "] which is of type [" + typeName() + "]" + "Can only use prefix queries on keyword and text fields - not on [" + name + "] which is of type [" + typeName() + "]" ); } @@ -290,6 +290,7 @@ public final Query wildcardQuery(String value, @Nullable MultiTermQuery.RewriteM return wildcardQuery(value, method, false, context); } + /** optionally normalize the wildcard pattern based on the value of {@code caseInsensitive} */ public Query wildcardQuery( String value, @Nullable MultiTermQuery.RewriteMethod method, @@ -298,11 +299,15 @@ public Query wildcardQuery( ) { throw new QueryShardException( context, - "Can only use wildcard queries on keyword, text and wildcard fields - not on [" - + name - + "] which is of type [" - + typeName() - + "]" + "Can only use wildcard queries on keyword and text fields - not on [" + name + "] which is of type [" + typeName() + "]" + ); + } + + /** always normalizes the wildcard pattern to lowercase */ + public Query normalizedWildcardQuery(String value, @Nullable MultiTermQuery.RewriteMethod method, QueryShardContext context) { + throw new QueryShardException( + context, + "Can only use wildcard queries on keyword and text fields - not on [" + name + "] which is of type [" + typeName() + "]" ); } diff --git a/server/src/main/java/org/opensearch/index/mapper/StringFieldType.java b/server/src/main/java/org/opensearch/index/mapper/StringFieldType.java index fa9c02c3cf14e..fbfca44c3062a 100644 --- a/server/src/main/java/org/opensearch/index/mapper/StringFieldType.java +++ b/server/src/main/java/org/opensearch/index/mapper/StringFieldType.java @@ -152,8 +152,34 @@ public static final String normalizeWildcardPattern(String fieldname, String val return sb.toBytesRef().utf8ToString(); } + /** optionally normalize the wildcard pattern based on the value of {@code caseInsensitive} */ @Override public Query wildcardQuery(String value, MultiTermQuery.RewriteMethod method, boolean caseInsensitive, QueryShardContext context) { + return wildcardQuery(value, method, caseInsensitive, false, context); + } + + /** always normalizes the wildcard pattern to lowercase */ + @Override + public Query normalizedWildcardQuery(String value, MultiTermQuery.RewriteMethod method, QueryShardContext context) { + return wildcardQuery(value, method, false, true, context); + } + + /** + * Returns a wildcard query + * + * @param value the pattern + * @param method rewrite method + * @param caseInsensitive should ignore case; note, only used if there is no analyzer, else we use the analyzer rules + * @param normalizeIfAnalyzed force normalize casing if an analyzer is used + * @param context the query shard 
context + */ + public Query wildcardQuery( + String value, + MultiTermQuery.RewriteMethod method, + boolean caseInsensitive, + boolean normalizeIfAnalyzed, + QueryShardContext context + ) { failIfNotIndexed(); if (context.allowExpensiveQueries() == false) { throw new OpenSearchException( @@ -162,7 +188,7 @@ public Query wildcardQuery(String value, MultiTermQuery.RewriteMethod method, bo } Term term; - if (getTextSearchInfo().getSearchAnalyzer() != null) { + if (getTextSearchInfo().getSearchAnalyzer() != null && normalizeIfAnalyzed) { value = normalizeWildcardPattern(name(), value, getTextSearchInfo().getSearchAnalyzer()); term = new Term(name(), value); } else { diff --git a/server/src/main/java/org/opensearch/index/search/QueryStringQueryParser.java b/server/src/main/java/org/opensearch/index/search/QueryStringQueryParser.java index 6d59e861eb32f..9a121fe55a7e7 100644 --- a/server/src/main/java/org/opensearch/index/search/QueryStringQueryParser.java +++ b/server/src/main/java/org/opensearch/index/search/QueryStringQueryParser.java @@ -729,7 +729,8 @@ private Query getWildcardQuerySingle(String field, String termStr) throws ParseE if (getAllowLeadingWildcard() == false && (termStr.startsWith("*") || termStr.startsWith("?"))) { throw new ParseException("'*' or '?' not allowed as first character in WildcardQuery"); } - return currentFieldType.wildcardQuery(termStr, getMultiTermRewriteMethod(), context); + // query string query is always normalized + return currentFieldType.normalizedWildcardQuery(termStr, getMultiTermRewriteMethod(), context); } catch (RuntimeException e) { if (lenient) { return newLenientFieldQuery(field, e); diff --git a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java index 701dec069d946..a40048e7b9781 100644 --- a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java +++ b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java @@ -622,7 +622,9 @@ public synchronized void renewPeerRecoveryRetentionLeases() { * If this shard copy is tracked then we got here here via a rolling upgrade from an older version that doesn't * create peer recovery retention leases for every shard copy. */ - assert checkpoints.get(shardRouting.allocationId().getId()).tracked == false + assert (checkpoints.get(shardRouting.allocationId().getId()).tracked + && checkpoints.get(shardRouting.allocationId().getId()).replicated == false) + || checkpoints.get(shardRouting.allocationId().getId()).tracked == false || hasAllPeerRecoveryRetentionLeases == false; return false; } @@ -680,20 +682,29 @@ public static class CheckpointState implements Writeable { */ long globalCheckpoint; /** - * whether this shard is treated as in-sync and thus contributes to the global checkpoint calculation + * When a shard is in-sync, it is capable of being promoted as the primary during a failover. An in-sync shard + * contributes to global checkpoint calculation on the primary iff {@link CheckpointState#replicated} is true. */ boolean inSync; /** - * whether this shard is tracked in the replication group, i.e., should receive document updates from the primary. + * whether this shard is tracked in the replication group and has localTranslog, i.e., should receive document updates + * from the primary. Tracked shards with localTranslog would have corresponding retention leases on the primary shard's + * {@link ReplicationTracker}. 
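+     * A shard copy can be tracked without being replicated: with a remote translog store only the
+     * primary and its relocation target receive replicated requests (see {@code isReplicated} below).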
*/ boolean tracked; - public CheckpointState(long localCheckpoint, long globalCheckpoint, boolean inSync, boolean tracked) { + /** + * Whether the replication requests to the primary are replicated to the concerned shard or not. + */ + boolean replicated; + + public CheckpointState(long localCheckpoint, long globalCheckpoint, boolean inSync, boolean tracked, boolean replicated) { this.localCheckpoint = localCheckpoint; this.globalCheckpoint = globalCheckpoint; this.inSync = inSync; this.tracked = tracked; + this.replicated = replicated; } public CheckpointState(StreamInput in) throws IOException { @@ -701,6 +712,11 @@ public CheckpointState(StreamInput in) throws IOException { this.globalCheckpoint = in.readZLong(); this.inSync = in.readBoolean(); this.tracked = in.readBoolean(); + if (in.getVersion().onOrAfter(Version.CURRENT)) { + this.replicated = in.readBoolean(); + } else { + this.replicated = true; + } } @Override @@ -709,13 +725,14 @@ public void writeTo(StreamOutput out) throws IOException { out.writeZLong(globalCheckpoint); out.writeBoolean(inSync); out.writeBoolean(tracked); + // mirror the version gate used when reading, so the wire format stays symmetric + if (out.getVersion().onOrAfter(Version.CURRENT)) { + out.writeBoolean(replicated); + } } /** * Returns a full copy of this object */ public CheckpointState copy() { - return new CheckpointState(localCheckpoint, globalCheckpoint, inSync, tracked); + return new CheckpointState(localCheckpoint, globalCheckpoint, inSync, tracked, replicated); } public long getLocalCheckpoint() { @@ -737,6 +754,8 @@ public String toString() { + inSync + ", tracked=" + tracked + + ", replicated=" + + replicated + '}'; } @@ -750,7 +769,8 @@ public boolean equals(Object o) { if (localCheckpoint != that.localCheckpoint) return false; if (globalCheckpoint != that.globalCheckpoint) return false; if (inSync != that.inSync) return false; - return tracked == that.tracked; + if (tracked != that.tracked) return false; + return replicated == that.replicated; } @Override @@ -759,6 +779,7 @@ public int hashCode() { result = 31 * result + Long.hashCode(globalCheckpoint); result = 31 * result + Boolean.hashCode(inSync); result = 31 * result + Boolean.hashCode(tracked); + result = 31 * result + Boolean.hashCode(replicated); return result; } } @@ -774,7 +795,7 @@ public synchronized ObjectLongMap<String> getInSyncGlobalCheckpoints() { final ObjectLongMap<String> globalCheckpoints = new ObjectLongHashMap<>(checkpoints.size()); // upper bound on the size checkpoints.entrySet() .stream() - .filter(e -> e.getValue().inSync) + .filter(e -> e.getValue().inSync && e.getValue().replicated) .forEach(e -> globalCheckpoints.put(e.getKey(), e.getValue().globalCheckpoint)); return globalCheckpoints; } @@ -833,6 +854,9 @@ private boolean invariant() { // the current shard is marked as in-sync when the global checkpoint tracker operates in primary mode assert !primaryMode || checkpoints.get(shardAllocationId).inSync; + // the current shard is marked as tracked when the global checkpoint tracker operates in primary mode + assert !primaryMode || checkpoints.get(shardAllocationId).tracked; + // the routing table and replication group is set when the global checkpoint tracker operates in primary mode assert !primaryMode || (routingTable != null && replicationGroup != null) : "primary mode but routing table is " + routingTable @@ -902,7 +926,8 @@ private boolean invariant() { if (primaryMode && indexSettings.isSoftDeleteEnabled() && hasAllPeerRecoveryRetentionLeases) { // all tracked shard copies have a corresponding peer-recovery retention lease for (final ShardRouting shardRouting : routingTable.assignedShards()) { - if 
(checkpoints.get(shardRouting.allocationId().getId()).tracked && !indexSettings().isRemoteTranslogStoreEnabled()) { + final CheckpointState cps = checkpoints.get(shardRouting.allocationId().getId()); + if (cps.tracked && cps.replicated) { assert retentionLeases.contains(getPeerRecoveryRetentionLeaseId(shardRouting)) : "no retention lease for tracked shard [" + shardRouting + "] in " + retentionLeases; assert PEER_RECOVERY_RETENTION_LEASE_SOURCE.equals( @@ -926,7 +951,11 @@ private static long inSyncCheckpointStates( Function reducer ) { final OptionalLong value = reducer.apply( - checkpoints.values().stream().filter(cps -> cps.inSync).mapToLong(function).filter(v -> v != SequenceNumbers.UNASSIGNED_SEQ_NO) + checkpoints.values() + .stream() + .filter(cps -> cps.inSync && cps.replicated) + .mapToLong(function) + .filter(v -> v != SequenceNumbers.UNASSIGNED_SEQ_NO) ); return value.isPresent() ? value.getAsLong() : SequenceNumbers.UNASSIGNED_SEQ_NO; } @@ -1028,6 +1057,11 @@ private ReplicationGroup calculateReplicationGroup() { } else { newVersion = replicationGroup.getVersion() + 1; } + + assert indexSettings().isRemoteTranslogStoreEnabled() + || checkpoints.entrySet().stream().filter(e -> e.getValue().tracked).allMatch(e -> e.getValue().replicated) + : "In absence of remote translog store, all tracked shards must have replication mode as LOGICAL_REPLICATION"; + return new ReplicationGroup( routingTable, checkpoints.entrySet().stream().filter(e -> e.getValue().inSync).map(Map.Entry::getKey).collect(Collectors.toSet()), @@ -1122,10 +1156,11 @@ public synchronized void activatePrimaryMode(final long localCheckpoint) { } /** - * Creates a peer recovery retention lease for this shard, if one does not already exist and this shard is the sole shard copy in the - * replication group. If one does not already exist and yet there are other shard copies in this group then we must have just done - * a rolling upgrade from a version before {@code LegacyESVersion#V_7_4_0}, in which case the missing leases should be created - * asynchronously by the caller using {@link ReplicationTracker#createMissingPeerRecoveryRetentionLeases(ActionListener)}. + * Creates a peer recovery retention lease for this shard, if one does not already exist and this shard is the sole + * shard copy with local translog in the replication group. If one does not already exist and yet there are other + * shard copies in this group then we must have just done a rolling upgrade from a version before {@code LegacyESVersion#V_7_4_0}, + * in which case the missing leases should be created asynchronously by the caller using + * {@link ReplicationTracker#createMissingPeerRecoveryRetentionLeases(ActionListener)}. 
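+     * When the index uses a remote translog store, the primary's lease is created here even if
+     * other shard copies exist in the replication group.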
*/ private void addPeerRecoveryRetentionLeaseForSolePrimary() { assert primaryMode; @@ -1134,7 +1169,8 @@ private void addPeerRecoveryRetentionLeaseForSolePrimary() { final ShardRouting primaryShard = routingTable.primaryShard(); final String leaseId = getPeerRecoveryRetentionLeaseId(primaryShard); if (retentionLeases.get(leaseId) == null) { - if (replicationGroup.getReplicationTargets().equals(Collections.singletonList(primaryShard))) { + if (replicationGroup.getReplicationTargets().equals(Collections.singletonList(primaryShard)) + || indexSettings().isRemoteTranslogStoreEnabled()) { assert primaryShard.allocationId().getId().equals(shardAllocationId) : routingTable.assignedShards() + " vs " + shardAllocationId; @@ -1197,6 +1233,12 @@ public synchronized void updateFromClusterManager( boolean removedEntries = checkpoints.keySet() .removeIf(aid -> !inSyncAllocationIds.contains(aid) && !initializingAllocationIds.contains(aid)); + final ShardRouting primary = routingTable.primaryShard(); + final String primaryAllocationId = primary.allocationId().getId(); + final String primaryTargetAllocationId = primary.relocating() + ? primary.getTargetRelocatingShard().allocationId().getId() + : null; + if (primaryMode) { // add new initializingIds that are missing locally. These are fresh shard copies - and not in-sync for (String initializingId : initializingAllocationIds) { @@ -1207,7 +1249,16 @@ public synchronized void updateFromClusterManager( + " as in-sync but it does not exist locally"; final long localCheckpoint = SequenceNumbers.UNASSIGNED_SEQ_NO; final long globalCheckpoint = localCheckpoint; - checkpoints.put(initializingId, new CheckpointState(localCheckpoint, globalCheckpoint, inSync, inSync)); + checkpoints.put( + initializingId, + new CheckpointState( + localCheckpoint, + globalCheckpoint, + inSync, + inSync, + isReplicated(initializingId, primaryAllocationId, primaryTargetAllocationId) + ) + ); } } if (removedEntries) { @@ -1217,12 +1268,30 @@ public synchronized void updateFromClusterManager( for (String initializingId : initializingAllocationIds) { final long localCheckpoint = SequenceNumbers.UNASSIGNED_SEQ_NO; final long globalCheckpoint = localCheckpoint; - checkpoints.put(initializingId, new CheckpointState(localCheckpoint, globalCheckpoint, false, false)); + checkpoints.put( + initializingId, + new CheckpointState( + localCheckpoint, + globalCheckpoint, + false, + false, + isReplicated(initializingId, primaryAllocationId, primaryTargetAllocationId) + ) + ); } for (String inSyncId : inSyncAllocationIds) { final long localCheckpoint = SequenceNumbers.UNASSIGNED_SEQ_NO; final long globalCheckpoint = localCheckpoint; - checkpoints.put(inSyncId, new CheckpointState(localCheckpoint, globalCheckpoint, true, true)); + checkpoints.put( + inSyncId, + new CheckpointState( + localCheckpoint, + globalCheckpoint, + true, + true, + isReplicated(inSyncId, primaryAllocationId, primaryTargetAllocationId) + ) + ); } } appliedClusterStateVersion = applyingClusterStateVersion; @@ -1237,6 +1306,26 @@ public synchronized void updateFromClusterManager( assert invariant(); } + /** + * Returns whether the requests are replicated considering the remote translog existence, current/primary/primary target allocation ids. + * + * @param allocationId given allocation id + * @param primaryAllocationId primary allocation id + * @param primaryTargetAllocationId primary target allocation id + * @return the replication mode. 
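+     * With a remote translog store this is {@code true} only for the primary and its relocation
+     * target; with a local translog it is {@code true} for every shard copy.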
+ */ + private boolean isReplicated(String allocationId, String primaryAllocationId, String primaryTargetAllocationId) { + // If remote translog is enabled, requests are replicated only to the primary and its relocation target, + // so return true only if the given allocation id matches one of those. + if (indexSettings().isRemoteTranslogStoreEnabled()) { + return (allocationId.equals(primaryAllocationId) || allocationId.equals(primaryTargetAllocationId)); + } + // Otherwise (local translog), requests are replicated to all shards in the replication group. + return true; + } + /** * Notifies the tracker of the current allocation IDs in the cluster state. * @param applyingClusterStateVersion the cluster state version being applied when updating the allocation IDs from the cluster-manager @@ -1298,13 +1387,14 @@ public synchronized void markAllocationIdAsInSync(final String allocationId, fin updateLocalCheckpoint(allocationId, cps, localCheckpoint); // if it was already in-sync (because of a previously failed recovery attempt), global checkpoint must have been // stuck from advancing - assert !cps.inSync || (cps.localCheckpoint >= getGlobalCheckpoint()) : "shard copy " + assert !cps.inSync || cps.localCheckpoint >= getGlobalCheckpoint() || cps.replicated == false : "shard copy " + allocationId + " that's already in-sync should have a local checkpoint " + cps.localCheckpoint + " that's above the global checkpoint " - + getGlobalCheckpoint(); - if (cps.localCheckpoint < getGlobalCheckpoint()) { + + getGlobalCheckpoint() + + " or it's not replicated"; + if (cps.replicated && cps.localCheckpoint < getGlobalCheckpoint()) { pendingInSync.add(allocationId); try { while (true) { @@ -1375,7 +1465,7 @@ public synchronized void updateLocalCheckpoint(final String allocationId, final logger.trace("marked [{}] as in-sync", allocationId); notifyAllWaiters(); } - if (increasedLocalCheckpoint && pending == false) { + if (cps.replicated && increasedLocalCheckpoint && pending == false) { updateGlobalCheckpointOnPrimary(); } assert invariant(); @@ -1395,7 +1485,7 @@ private static long computeGlobalCheckpoint( return fallback; } for (final CheckpointState cps : localCheckpoints) { - if (cps.inSync) { + if (cps.inSync && cps.replicated) { if (cps.localCheckpoint == SequenceNumbers.UNASSIGNED_SEQ_NO) { // unassigned in-sync replica return fallback; diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index b2f48ccdd389c..204bf4204511e 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -82,6 +82,7 @@ import org.opensearch.common.util.concurrent.OpenSearchExecutors; import org.opensearch.common.util.concurrent.OpenSearchRejectedExecutionException; import org.opensearch.common.util.concurrent.OpenSearchThreadPoolExecutor; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.iterable.Iterables; import org.opensearch.common.util.set.Sets; import org.opensearch.common.xcontent.LoggingDeprecationHandler; @@ -142,6 +143,7 @@ import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.node.Node; import org.opensearch.plugins.IndexStorePlugin; +import org.opensearch.extensions.ExtensionsManager; import 
org.opensearch.plugins.PluginsService; import org.opensearch.repositories.RepositoriesService; import org.opensearch.script.ScriptService; @@ -227,6 +229,7 @@ public class IndicesService extends AbstractLifecycleComponent */ private final Settings settings; private final PluginsService pluginsService; + private final ExtensionsManager extensionsManager; private final NodeEnvironment nodeEnv; private final NamedXContentRegistry xContentRegistry; private final TimeValue shardsClosedTimeout; @@ -299,6 +302,120 @@ public IndicesService( this.settings = settings; this.threadPool = threadPool; this.pluginsService = pluginsService; + this.extensionsManager = null; + this.nodeEnv = nodeEnv; + this.xContentRegistry = xContentRegistry; + this.valuesSourceRegistry = valuesSourceRegistry; + this.shardsClosedTimeout = settings.getAsTime(INDICES_SHARDS_CLOSED_TIMEOUT, new TimeValue(1, TimeUnit.DAYS)); + this.analysisRegistry = analysisRegistry; + this.indexNameExpressionResolver = indexNameExpressionResolver; + this.indicesRequestCache = new IndicesRequestCache(settings); + this.indicesQueryCache = new IndicesQueryCache(settings); + this.mapperRegistry = mapperRegistry; + this.namedWriteableRegistry = namedWriteableRegistry; + indexingMemoryController = new IndexingMemoryController( + settings, + threadPool, + // ensure we pull an iter with new shards - flatten makes a copy + () -> Iterables.flatten(this).iterator() + ); + this.indexScopedSettings = indexScopedSettings; + this.circuitBreakerService = circuitBreakerService; + this.bigArrays = bigArrays; + this.scriptService = scriptService; + this.clusterService = clusterService; + this.client = client; + this.idFieldDataEnabled = INDICES_ID_FIELD_DATA_ENABLED_SETTING.get(clusterService.getSettings()); + clusterService.getClusterSettings().addSettingsUpdateConsumer(INDICES_ID_FIELD_DATA_ENABLED_SETTING, this::setIdFieldDataEnabled); + this.indicesFieldDataCache = new IndicesFieldDataCache(settings, new IndexFieldDataCache.Listener() { + @Override + public void onRemoval(ShardId shardId, String fieldName, boolean wasEvicted, long sizeInBytes) { + assert sizeInBytes >= 0 : "When reducing circuit breaker, it should be adjusted with a number higher or " + + "equal to 0 and not [" + + sizeInBytes + + "]"; + circuitBreakerService.getBreaker(CircuitBreaker.FIELDDATA).addWithoutBreaking(-sizeInBytes); + } + }); + this.cleanInterval = INDICES_CACHE_CLEAN_INTERVAL_SETTING.get(settings); + this.cacheCleaner = new CacheCleaner(indicesFieldDataCache, indicesRequestCache, logger, threadPool, this.cleanInterval); + this.metaStateService = metaStateService; + this.engineFactoryProviders = engineFactoryProviders; + + this.directoryFactories = directoryFactories; + this.recoveryStateFactories = recoveryStateFactories; + // doClose() is called when shutting down a node, yet there might still be ongoing requests + // that we need to wait for before closing some resources such as the caches. 
In order to + // avoid closing these resources while ongoing requests are still being processed, we use a + // ref count which will only close them when both this service and all index services are + // actually closed + indicesRefCount = new AbstractRefCounted("indices") { + @Override + protected void closeInternal() { + try { + IOUtils.close( + analysisRegistry, + indexingMemoryController, + indicesFieldDataCache, + cacheCleaner, + indicesRequestCache, + indicesQueryCache + ); + } catch (IOException e) { + throw new UncheckedIOException(e); + } finally { + closeLatch.countDown(); + } + } + }; + + final String nodeName = Objects.requireNonNull(Node.NODE_NAME_SETTING.get(settings)); + nodeWriteDanglingIndicesInfo = WRITE_DANGLING_INDICES_INFO_SETTING.get(settings); + danglingIndicesThreadPoolExecutor = nodeWriteDanglingIndicesInfo + ? OpenSearchExecutors.newScaling( + nodeName + "/" + DANGLING_INDICES_UPDATE_THREAD_NAME, + 1, + 1, + 0, + TimeUnit.MILLISECONDS, + daemonThreadFactory(nodeName, DANGLING_INDICES_UPDATE_THREAD_NAME), + threadPool.getThreadContext() + ) + : null; + + this.allowExpensiveQueries = ALLOW_EXPENSIVE_QUERIES.get(clusterService.getSettings()); + clusterService.getClusterSettings().addSettingsUpdateConsumer(ALLOW_EXPENSIVE_QUERIES, this::setAllowExpensiveQueries); + this.remoteDirectoryFactory = remoteDirectoryFactory; + } + + public IndicesService( + Settings settings, + PluginsService pluginsService, + ExtensionsManager extensionsManager, + NodeEnvironment nodeEnv, + NamedXContentRegistry xContentRegistry, + AnalysisRegistry analysisRegistry, + IndexNameExpressionResolver indexNameExpressionResolver, + MapperRegistry mapperRegistry, + NamedWriteableRegistry namedWriteableRegistry, + ThreadPool threadPool, + IndexScopedSettings indexScopedSettings, + CircuitBreakerService circuitBreakerService, + BigArrays bigArrays, + ScriptService scriptService, + ClusterService clusterService, + Client client, + MetaStateService metaStateService, + Collection<Function<IndexSettings, Optional<EngineFactory>>> engineFactoryProviders, + Map<String, IndexStorePlugin.DirectoryFactory> directoryFactories, + ValuesSourceRegistry valuesSourceRegistry, + Map<String, IndexStorePlugin.RecoveryStateFactory> recoveryStateFactories, + IndexStorePlugin.RemoteDirectoryFactory remoteDirectoryFactory + ) { + this.settings = settings; + this.threadPool = threadPool; + this.pluginsService = pluginsService; + this.extensionsManager = extensionsManager; this.nodeEnv = nodeEnv; this.xContentRegistry = xContentRegistry; this.valuesSourceRegistry = valuesSourceRegistry; @@ -721,6 +838,9 @@ private synchronized IndexService createIndexService( indexModule.addIndexOperationListener(operationListener); } pluginsService.onIndexModule(indexModule); + if (FeatureFlags.isEnabled(FeatureFlags.EXTENSIONS)) { + extensionsManager.onIndexModule(indexModule); + } for (IndexEventListener listener : builtInListeners) { indexModule.addIndexEventListener(listener); } diff --git a/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java b/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java index cc51082639cdb..d2fc354cf9298 100644 --- a/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java +++ b/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java @@ -40,6 +40,8 @@ import java.io.IOException; import java.util.Objects; +import org.opensearch.action.support.replication.ReplicationMode; + /** * Replication action responsible for publishing checkpoint to a replica shard. 
* @@ -93,6 +95,14 @@ protected void doExecute(Task task, PublishCheckpointRequest request, ActionList assert false : "use PublishCheckpointAction#publish"; } + @Override + protected ReplicationMode getReplicationMode(IndexShard indexShard) { + if (indexShard.isRemoteTranslogEnabled()) { + return ReplicationMode.FULL_REPLICATION; + } + return super.getReplicationMode(indexShard); + } + /** * Publish checkpoint request to shard */ diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index 93de057285012..f204723709965 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -43,6 +43,8 @@ import org.opensearch.indices.replication.SegmentReplicationSourceFactory; import org.opensearch.indices.replication.SegmentReplicationTargetService; import org.opensearch.indices.replication.SegmentReplicationSourceService; +import org.opensearch.extensions.ExtensionsManager; +import org.opensearch.extensions.NoopExtensionsManager; import org.opensearch.search.backpressure.SearchBackpressureService; import org.opensearch.search.backpressure.settings.SearchBackpressureSettings; import org.opensearch.tasks.TaskResourceTrackingService; @@ -340,6 +342,7 @@ public static class DiscoverySettings { private final Environment environment; private final NodeEnvironment nodeEnvironment; private final PluginsService pluginsService; + private final ExtensionsManager extensionsManager; private final NodeClient client; private final Collection pluginLifecycleComponents; private final LocalNodeFactory localNodeFactory; @@ -424,6 +427,13 @@ protected Node( initialEnvironment.pluginsDir(), classpathPlugins ); + + if (FeatureFlags.isEnabled(FeatureFlags.EXTENSIONS)) { + this.extensionsManager = new ExtensionsManager(tmpSettings, initialEnvironment.extensionDir()); + } else { + this.extensionsManager = new NoopExtensionsManager(); + } + final Settings settings = pluginsService.updatedSettings(); final Set additionalRoles = pluginsService.filterPlugins(Plugin.class) @@ -652,29 +662,58 @@ protected Node( repositoriesServiceReference::get ); - final IndicesService indicesService = new IndicesService( - settings, - pluginsService, - nodeEnvironment, - xContentRegistry, - analysisModule.getAnalysisRegistry(), - clusterModule.getIndexNameExpressionResolver(), - indicesModule.getMapperRegistry(), - namedWriteableRegistry, - threadPool, - settingsModule.getIndexScopedSettings(), - circuitBreakerService, - bigArrays, - scriptService, - clusterService, - client, - metaStateService, - engineFactoryProviders, - Map.copyOf(directoryFactories), - searchModule.getValuesSourceRegistry(), - recoveryStateFactories, - remoteDirectoryFactory - ); + final IndicesService indicesService; + + if (FeatureFlags.isEnabled(FeatureFlags.EXTENSIONS)) { + indicesService = new IndicesService( + settings, + pluginsService, + extensionsManager, + nodeEnvironment, + xContentRegistry, + analysisModule.getAnalysisRegistry(), + clusterModule.getIndexNameExpressionResolver(), + indicesModule.getMapperRegistry(), + namedWriteableRegistry, + threadPool, + settingsModule.getIndexScopedSettings(), + circuitBreakerService, + bigArrays, + scriptService, + clusterService, + client, + metaStateService, + engineFactoryProviders, + Map.copyOf(directoryFactories), + searchModule.getValuesSourceRegistry(), + recoveryStateFactories, + remoteDirectoryFactory + ); + } else { + indicesService = new IndicesService( + settings, + pluginsService, + 
nodeEnvironment, + xContentRegistry, + analysisModule.getAnalysisRegistry(), + clusterModule.getIndexNameExpressionResolver(), + indicesModule.getMapperRegistry(), + namedWriteableRegistry, + threadPool, + settingsModule.getIndexScopedSettings(), + circuitBreakerService, + bigArrays, + scriptService, + clusterService, + client, + metaStateService, + engineFactoryProviders, + Map.copyOf(directoryFactories), + searchModule.getValuesSourceRegistry(), + recoveryStateFactories, + remoteDirectoryFactory + ); + } final AliasValidator aliasValidator = new AliasValidator(); @@ -787,6 +826,10 @@ protected Node( settingsModule.getClusterSettings(), taskHeaders ); + if (FeatureFlags.isEnabled(FeatureFlags.EXTENSIONS)) { + this.extensionsManager.setTransportService(transportService); + this.extensionsManager.setClusterService(clusterService); + } final GatewayMetaState gatewayMetaState = new GatewayMetaState(); final ResponseCollectorService responseCollectorService = new ResponseCollectorService(clusterService); final SearchTransportService searchTransportService = new SearchTransportService( @@ -1200,6 +1243,9 @@ public Node start() throws NodeValidationException { assert clusterService.localNode().equals(localNodeFactory.getNode()) : "clusterService has a different local node than the factory provided"; transportService.acceptIncomingRequests(); + if (FeatureFlags.isEnabled(FeatureFlags.EXTENSIONS)) { + extensionsManager.initialize(); + } discovery.startInitialJoin(); final TimeValue initialStateTimeout = DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.get(settings()); configureNodeAndClusterIdStateListener(clusterService); diff --git a/server/src/main/java/org/opensearch/plugins/PluginsService.java b/server/src/main/java/org/opensearch/plugins/PluginsService.java index bff880e5a41d7..c336bf156f40c 100644 --- a/server/src/main/java/org/opensearch/plugins/PluginsService.java +++ b/server/src/main/java/org/opensearch/plugins/PluginsService.java @@ -305,6 +305,7 @@ public Collection> getGuiceServiceClasses() } public void onIndexModule(IndexModule indexModule) { + logger.info("PluginService:onIndexModule index:" + indexModule.getIndex()); for (Tuple plugin : plugins) { plugin.v2().onIndexModule(indexModule); } diff --git a/server/src/main/java/org/opensearch/tasks/TaskResultsService.java b/server/src/main/java/org/opensearch/tasks/TaskResultsService.java index 66d3aeb748cf7..accc02624f71c 100644 --- a/server/src/main/java/org/opensearch/tasks/TaskResultsService.java +++ b/server/src/main/java/org/opensearch/tasks/TaskResultsService.java @@ -86,7 +86,7 @@ public class TaskResultsService { public static final String TASK_RESULT_MAPPING_VERSION_META_FIELD = "version"; - public static final int TASK_RESULT_MAPPING_VERSION = 3; // must match version in task-index-mapping.json + public static final int TASK_RESULT_MAPPING_VERSION = 4; // must match version in task-index-mapping.json /** * The backoff policy to use when saving a task result fails. 
The total wait diff --git a/server/src/main/java/org/opensearch/transport/TransportService.java b/server/src/main/java/org/opensearch/transport/TransportService.java index b9bf035a7fa77..1d94c5600818f 100644 --- a/server/src/main/java/org/opensearch/transport/TransportService.java +++ b/server/src/main/java/org/opensearch/transport/TransportService.java @@ -397,6 +397,11 @@ public void connectToNode(DiscoveryNode node) throws ConnectTransportException { connectToNode(node, (ConnectionProfile) null); } + // We are skipping node validation for extensibility as extensionNode and opensearchNode(LocalNode) will have different ephemeral id's + public void connectToExtensionNode(final DiscoveryNode node) { + PlainActionFuture.get(fut -> connectToExtensionNode(node, (ConnectionProfile) null, ActionListener.map(fut, x -> null))); + } + /** * Connect to the specified node with the given connection profile * @@ -407,6 +412,10 @@ public void connectToNode(final DiscoveryNode node, ConnectionProfile connection PlainActionFuture.get(fut -> connectToNode(node, connectionProfile, ActionListener.map(fut, x -> null))); } + public void connectToExtensionNode(final DiscoveryNode node, ConnectionProfile connectionProfile) { + PlainActionFuture.get(fut -> connectToExtensionNode(node, connectionProfile, ActionListener.map(fut, x -> null))); + } + /** * Connect to the specified node with the given connection profile. * The ActionListener will be called on the calling thread or the generic thread pool. @@ -418,6 +427,10 @@ public void connectToNode(DiscoveryNode node, ActionListener listener) thr connectToNode(node, null, listener); } + public void connectToExtensionNode(DiscoveryNode node, ActionListener listener) throws ConnectTransportException { + connectToExtensionNode(node, null, listener); + } + /** * Connect to the specified node with the given connection profile. * The ActionListener will be called on the calling thread or the generic thread pool. @@ -434,14 +447,35 @@ public void connectToNode(final DiscoveryNode node, ConnectionProfile connection connectionManager.connectToNode(node, connectionProfile, connectionValidator(node), listener); } + public void connectToExtensionNode(final DiscoveryNode node, ConnectionProfile connectionProfile, ActionListener listener) { + if (isLocalNode(node)) { + listener.onResponse(null); + return; + } + connectionManager.connectToNode(node, connectionProfile, extensionConnectionValidator(node), listener); + } + public ConnectionManager.ConnectionValidator connectionValidator(DiscoveryNode node) { return (newConnection, actualProfile, listener) -> { // We don't validate cluster names to allow for CCS connections. handshake(newConnection, actualProfile.getHandshakeTimeout().millis(), cn -> true, ActionListener.map(listener, resp -> { final DiscoveryNode remote = resp.discoveryNode; + if (node.equals(remote) == false) { throw new ConnectTransportException(node, "handshake failed. unexpected remote node " + remote); } + + return null; + })); + }; + } + + public ConnectionManager.ConnectionValidator extensionConnectionValidator(DiscoveryNode node) { + return (newConnection, actualProfile, listener) -> { + // We don't validate cluster names to allow for CCS connections. 
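+            // Unlike connectionValidator above, the handshake response's remote node is not compared
+            // to the requested node here: an extension node and the local OpenSearch node report
+            // different ephemeral ids, so the equality check would always fail.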
+ handshake(newConnection, actualProfile.getHandshakeTimeout().millis(), cn -> true, ActionListener.map(listener, resp -> { + final DiscoveryNode remote = resp.discoveryNode; + logger.info("Connection validation was skipped"); return null; })); }; @@ -731,6 +765,7 @@ public final void sendRequest( final TransportResponseHandler handler ) { try { + logger.info("Action: " + action); final TransportResponseHandler delegate; if (request.getParentTask().isSet()) { // TODO: capture the connection instead so that we can cancel child tasks on the remote connections. diff --git a/server/src/main/resources/org/opensearch/bootstrap/security.policy b/server/src/main/resources/org/opensearch/bootstrap/security.policy index 3671782b9d12f..256a0df187723 100644 --- a/server/src/main/resources/org/opensearch/bootstrap/security.policy +++ b/server/src/main/resources/org/opensearch/bootstrap/security.policy @@ -100,6 +100,9 @@ grant { permission jdk.net.NetworkPermission "getOption.TCP_KEEPCOUNT"; permission jdk.net.NetworkPermission "setOption.TCP_KEEPCOUNT"; + permission java.lang.RuntimePermission "accessDeclaredMembers"; + permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; + // Allow read access to all system properties permission java.util.PropertyPermission "*", "read"; diff --git a/server/src/main/resources/org/opensearch/tasks/task-index-mapping.json b/server/src/main/resources/org/opensearch/tasks/task-index-mapping.json index 54e9d39902f03..58b6b2d3bc873 100644 --- a/server/src/main/resources/org/opensearch/tasks/task-index-mapping.json +++ b/server/src/main/resources/org/opensearch/tasks/task-index-mapping.json @@ -1,7 +1,7 @@ { "_doc" : { "_meta": { - "version": 3 + "version": 4 }, "dynamic" : "strict", "properties" : { diff --git a/server/src/test/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java b/server/src/test/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java index 72c7b5168fe15..a7ffde04314c3 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java @@ -32,10 +32,16 @@ package org.opensearch.action.admin.indices.close; import org.apache.lucene.util.SetOnce; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.mockito.ArgumentCaptor; import org.opensearch.action.ActionListener; import org.opensearch.action.admin.indices.flush.FlushRequest; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.PlainActionFuture; +import org.opensearch.action.support.replication.FanoutReplicationProxy; import org.opensearch.action.support.replication.PendingReplicationActions; import org.opensearch.action.support.replication.ReplicationOperation; import org.opensearch.action.support.replication.ReplicationResponse; @@ -65,11 +71,6 @@ import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportResponse; import org.opensearch.transport.TransportService; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.BeforeClass; -import org.mockito.ArgumentCaptor; import java.util.Collections; import java.util.List; @@ -77,21 +78,21 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; -import static 
org.mockito.Mockito.doNothing; -import static org.opensearch.action.support.replication.ClusterStateCreationUtils.state; -import static org.opensearch.test.ClusterServiceUtils.createClusterService; -import static org.opensearch.test.ClusterServiceUtils.setState; import static org.hamcrest.Matchers.arrayWithSize; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; import static org.mockito.Mockito.any; +import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; +import static org.opensearch.action.support.replication.ClusterStateCreationUtils.state; +import static org.opensearch.test.ClusterServiceUtils.createClusterService; +import static org.opensearch.test.ClusterServiceUtils.setState; public class TransportVerifyShardBeforeCloseActionTests extends OpenSearchTestCase { @@ -290,7 +291,8 @@ public void testUnavailableShardsMarkedAsStale() throws Exception { "test", primaryTerm, TimeValue.timeValueMillis(20), - TimeValue.timeValueSeconds(60) + TimeValue.timeValueSeconds(60), + new FanoutReplicationProxy<>() ); operation.execute(); diff --git a/server/src/test/java/org/opensearch/action/admin/indices/shrink/TransportResizeActionTests.java b/server/src/test/java/org/opensearch/action/admin/indices/shrink/TransportResizeActionTests.java index e4b79ac54f8fd..5705362cc73f4 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/shrink/TransportResizeActionTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/shrink/TransportResizeActionTests.java @@ -38,8 +38,8 @@ import org.opensearch.action.support.ActiveShardCount; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; -import org.opensearch.cluster.OpenSearchAllocationTestCase; import org.opensearch.cluster.EmptyClusterInfoService; +import org.opensearch.cluster.OpenSearchAllocationTestCase; import org.opensearch.cluster.block.ClusterBlocks; import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.metadata.Metadata; @@ -52,7 +52,9 @@ import org.opensearch.cluster.routing.allocation.decider.AllocationDeciders; import org.opensearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider; import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.ByteSizeValue; import org.opensearch.index.shard.DocsStats; +import org.opensearch.index.store.StoreStats; import org.opensearch.snapshots.EmptySnapshotsInfoService; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.gateway.TestGatewayAllocator; @@ -107,6 +109,7 @@ public void testErrorCondition() { new ResizeRequest("target", "source"), state, (i) -> new DocsStats(Integer.MAX_VALUE, between(1, 1000), between(1, 100)), + new StoreStats(between(1, 10000), between(1, 10000)), "source", "target" ) @@ -121,6 +124,7 @@ public void testErrorCondition() { req, clusterState, (i) -> i == 2 || i == 3 ? 
new DocsStats(Integer.MAX_VALUE / 2, between(1, 1000), between(1, 10000)) : null, + new StoreStats(between(1, 10000), between(1, 10000)), "source", "target" ); @@ -139,6 +143,7 @@ public void testErrorCondition() { req, clusterState, (i) -> new DocsStats(between(10, 1000), between(1, 10), between(1, 10000)), + new StoreStats(between(1, 10000), between(1, 10000)), "source", "target" ); @@ -167,6 +172,7 @@ public void testErrorCondition() { new ResizeRequest("target", "source"), clusterState, (i) -> new DocsStats(between(1, 1000), between(1, 1000), between(0, 10000)), + new StoreStats(between(1, 10000), between(1, 10000)), "source", "target" ); @@ -193,13 +199,27 @@ public void testPassNumRoutingShards() { ResizeRequest resizeRequest = new ResizeRequest("target", "source"); resizeRequest.setResizeType(ResizeType.SPLIT); resizeRequest.getTargetIndexRequest().settings(Settings.builder().put("index.number_of_shards", 2).build()); - TransportResizeAction.prepareCreateIndexRequest(resizeRequest, clusterState, null, "source", "target"); + TransportResizeAction.prepareCreateIndexRequest( + resizeRequest, + clusterState, + null, + new StoreStats(between(1, 10000), between(1, 10000)), + "source", + "target" + ); resizeRequest.getTargetIndexRequest() .settings( Settings.builder().put("index.number_of_routing_shards", randomIntBetween(2, 10)).put("index.number_of_shards", 2).build() ); - TransportResizeAction.prepareCreateIndexRequest(resizeRequest, clusterState, null, "source", "target"); + TransportResizeAction.prepareCreateIndexRequest( + resizeRequest, + clusterState, + null, + new StoreStats(between(1, 10000), between(1, 10000)), + "source", + "target" + ); } public void testPassNumRoutingShardsAndFail() { @@ -224,7 +244,14 @@ public void testPassNumRoutingShardsAndFail() { ResizeRequest resizeRequest = new ResizeRequest("target", "source"); resizeRequest.setResizeType(ResizeType.SPLIT); resizeRequest.getTargetIndexRequest().settings(Settings.builder().put("index.number_of_shards", numShards * 2).build()); - TransportResizeAction.prepareCreateIndexRequest(resizeRequest, clusterState, null, "source", "target"); + TransportResizeAction.prepareCreateIndexRequest( + resizeRequest, + clusterState, + null, + new StoreStats(between(1, 10000), between(1, 10000)), + "source", + "target" + ); resizeRequest.getTargetIndexRequest() .settings( @@ -233,7 +260,14 @@ public void testPassNumRoutingShardsAndFail() { ClusterState finalState = clusterState; IllegalArgumentException iae = expectThrows( IllegalArgumentException.class, - () -> TransportResizeAction.prepareCreateIndexRequest(resizeRequest, finalState, null, "source", "target") + () -> TransportResizeAction.prepareCreateIndexRequest( + resizeRequest, + finalState, + null, + new StoreStats(between(1, 10000), between(1, 10000)), + "source", + "target" + ) ); assertEquals("cannot provide index.number_of_routing_shards on resize", iae.getMessage()); } @@ -266,6 +300,7 @@ public void testShrinkIndexSettings() { target, clusterState, (i) -> stats, + new StoreStats(between(1, 10000), between(1, 10000)), indexName, "target" ); @@ -276,6 +311,206 @@ public void testShrinkIndexSettings() { assertEquals(request.waitForActiveShards(), activeShardCount); } + public void testShrinkWithMaxShardSize() { + String indexName = randomAlphaOfLength(10); + // create one that won't fail + ClusterState clusterState = ClusterState.builder( + createClusterState(indexName, 10, 0, Settings.builder().put("index.blocks.write", true).build()) + 
).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build(); + + // Cannot set max_shard_size when split or clone + ResizeRequest resizeRequestForFailure = new ResizeRequest("target", indexName); + ResizeType resizeType = ResizeType.SPLIT; + if (randomBoolean()) { + resizeType = ResizeType.CLONE; + } + resizeRequestForFailure.setResizeType(resizeType); + resizeRequestForFailure.setMaxShardSize(new ByteSizeValue(100)); + resizeRequestForFailure.getTargetIndexRequest() + .settings(Settings.builder().put("index.number_of_shards", randomIntBetween(1, 100)).build()); + ClusterState finalState = clusterState; + IllegalArgumentException iae = expectThrows( + IllegalArgumentException.class, + () -> TransportResizeAction.prepareCreateIndexRequest( + resizeRequestForFailure, + finalState, + null, + new StoreStats(between(1, 10000), between(1, 10000)), + indexName, + "target" + ) + ); + assertEquals("Unsupported parameter [max_shard_size]", iae.getMessage()); + + // Cannot set max_shard_size and index.number_of_shards at the same time + ResizeRequest resizeRequest = new ResizeRequest("target", indexName); + resizeRequest.setResizeType(ResizeType.SHRINK); + resizeRequest.setMaxShardSize(new ByteSizeValue(100)); + resizeRequest.getTargetIndexRequest().settings(Settings.builder().put("index.number_of_shards", randomIntBetween(1, 100)).build()); + iae = expectThrows( + IllegalArgumentException.class, + () -> TransportResizeAction.prepareCreateIndexRequest( + resizeRequest, + finalState, + null, + new StoreStats(between(1, 10000), between(1, 10000)), + indexName, + "target" + ) + ); + assertEquals("Cannot set max_shard_size and index.number_of_shards at the same time!", iae.getMessage()); + + AllocationService service = new AllocationService( + new AllocationDeciders(Collections.singleton(new MaxRetryAllocationDecider())), + new TestGatewayAllocator(), + new BalancedShardsAllocator(Settings.EMPTY), + EmptyClusterInfoService.INSTANCE, + EmptySnapshotsInfoService.INSTANCE + ); + RoutingTable routingTable = service.reroute(clusterState, "reroute").routingTable(); + clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); + // now we start the shard + routingTable = OpenSearchAllocationTestCase.startInitializingShardsAndReroute(service, clusterState, indexName).routingTable(); + clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); + int numSourceShards = clusterState.metadata().index(indexName).getNumberOfShards(); + DocsStats stats = new DocsStats(between(0, (IndexWriter.MAX_DOCS) / numSourceShards), between(1, 1000), between(1, 10000)); + + // target index's shards number must be the lowest factor of the source index's shards number + int expectedShardsNum = 5; + resizeRequest.setMaxShardSize(new ByteSizeValue(25)); + // clear index settings + resizeRequest.getTargetIndexRequest().settings(Settings.builder().build()); + resizeRequest.setWaitForActiveShards(expectedShardsNum); + CreateIndexClusterStateUpdateRequest request = TransportResizeAction.prepareCreateIndexRequest( + resizeRequest, + clusterState, + (i) -> stats, + new StoreStats(100, between(1, 10000)), + indexName, + "target" + ); + assertNotNull(request.recoverFrom()); + assertEquals(indexName, request.recoverFrom().getName()); + assertEquals(String.valueOf(expectedShardsNum), request.settings().get("index.number_of_shards")); + assertEquals("shrink_index", request.cause()); + assertEquals(request.waitForActiveShards(), ActiveShardCount.from(expectedShardsNum)); + + // if 
max_shard_size is greater than the total storage of all the source primary shards, + // then the target index will only have one primary shard. + expectedShardsNum = 1; + resizeRequest.setMaxShardSize(new ByteSizeValue(1000)); + // clear index settings + resizeRequest.getTargetIndexRequest().settings(Settings.builder().build()); + resizeRequest.setWaitForActiveShards(expectedShardsNum); + request = TransportResizeAction.prepareCreateIndexRequest( + resizeRequest, + clusterState, + (i) -> stats, + new StoreStats(100, between(1, 10000)), + indexName, + "target" + ); + assertNotNull(request.recoverFrom()); + assertEquals(indexName, request.recoverFrom().getName()); + assertEquals(String.valueOf(expectedShardsNum), request.settings().get("index.number_of_shards")); + assertEquals("shrink_index", request.cause()); + assertEquals(request.waitForActiveShards(), ActiveShardCount.from(expectedShardsNum)); + + // if max_shard_size is less than the storage of a single source primary shard, + // then the target index's shard count will equal the source index's. + expectedShardsNum = numSourceShards; + resizeRequest.setMaxShardSize(new ByteSizeValue(1)); + // clear index settings + resizeRequest.getTargetIndexRequest().settings(Settings.builder().build()); + resizeRequest.setWaitForActiveShards(expectedShardsNum); + request = TransportResizeAction.prepareCreateIndexRequest( + resizeRequest, + clusterState, + (i) -> stats, + new StoreStats(100, between(1, 10000)), + indexName, + "target" + ); + assertNotNull(request.recoverFrom()); + assertEquals(indexName, request.recoverFrom().getName()); + assertEquals(String.valueOf(expectedShardsNum), request.settings().get("index.number_of_shards")); + assertEquals("shrink_index", request.cause()); + assertEquals(request.waitForActiveShards(), ActiveShardCount.from(expectedShardsNum)); + } + + public void testCalculateTargetIndexShardsNum() { + String indexName = randomAlphaOfLength(10); + ClusterState clusterState = ClusterState.builder( + createClusterState(indexName, randomIntBetween(2, 10), 0, Settings.builder().put("index.blocks.write", true).build()) + ).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build(); + IndexMetadata indexMetadata = clusterState.metadata().index(indexName); + + assertEquals(TransportResizeAction.calculateTargetIndexShardsNum(null, new StoreStats(100, between(1, 10000)), indexMetadata), 1); + assertEquals( + TransportResizeAction.calculateTargetIndexShardsNum( + new ByteSizeValue(0), + new StoreStats(100, between(1, 10000)), + indexMetadata + ), + 1 + ); + assertEquals(TransportResizeAction.calculateTargetIndexShardsNum(new ByteSizeValue(1), null, indexMetadata), 1); + assertEquals(TransportResizeAction.calculateTargetIndexShardsNum(new ByteSizeValue(1), new StoreStats(0, 0), indexMetadata), 1); + assertEquals( + TransportResizeAction.calculateTargetIndexShardsNum( + new ByteSizeValue(1000), + new StoreStats(100, between(1, 10000)), + indexMetadata + ), + 1 + ); + assertEquals( + TransportResizeAction.calculateTargetIndexShardsNum( + new ByteSizeValue(1), + new StoreStats(100, between(1, 10000)), + indexMetadata + ), + indexMetadata.getNumberOfShards() + ); + + clusterState = ClusterState.builder( + createClusterState(indexName, 10, 0, Settings.builder().put("index.blocks.write", true).build()) + ).nodes(DiscoveryNodes.builder().add(newNode("node1"))).build(); + indexMetadata = clusterState.metadata().index(indexName); + assertEquals( + TransportResizeAction.calculateTargetIndexShardsNum( + new ByteSizeValue(10), + new
StoreStats(100, between(1, 10000)), + indexMetadata + ), + 10 + ); + assertEquals( + TransportResizeAction.calculateTargetIndexShardsNum( + new ByteSizeValue(12), + new StoreStats(100, between(1, 10000)), + indexMetadata + ), + indexMetadata.getNumberOfShards() + ); + assertEquals( + TransportResizeAction.calculateTargetIndexShardsNum( + new ByteSizeValue(20), + new StoreStats(100, between(1, 10000)), + indexMetadata + ), + 5 + ); + assertEquals( + TransportResizeAction.calculateTargetIndexShardsNum( + new ByteSizeValue(50), + new StoreStats(100, between(1, 10000)), + indexMetadata + ), + 2 + ); + } + private DiscoveryNode newNode(String nodeId) { final Set roles = Collections.unmodifiableSet( new HashSet<>(Arrays.asList(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE, DiscoveryNodeRole.DATA_ROLE)) diff --git a/server/src/test/java/org/opensearch/action/resync/TransportResyncReplicationActionTests.java b/server/src/test/java/org/opensearch/action/resync/TransportResyncReplicationActionTests.java index 2ebca16519258..acf46e2a63333 100644 --- a/server/src/test/java/org/opensearch/action/resync/TransportResyncReplicationActionTests.java +++ b/server/src/test/java/org/opensearch/action/resync/TransportResyncReplicationActionTests.java @@ -31,10 +31,13 @@ package org.opensearch.action.resync; +import org.junit.AfterClass; +import org.junit.BeforeClass; import org.opensearch.Version; import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.PlainActionFuture; +import org.opensearch.action.support.replication.PendingReplicationActions; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.action.shard.ShardStateAction; import org.opensearch.cluster.block.ClusterBlocks; @@ -66,30 +69,29 @@ import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.nio.MockNioTransport; -import org.junit.AfterClass; -import org.junit.BeforeClass; import java.nio.charset.Charset; import java.util.Collections; import java.util.HashSet; +import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; -import static org.opensearch.action.support.replication.ClusterStateCreationUtils.state; -import static org.opensearch.test.ClusterServiceUtils.createClusterService; -import static org.opensearch.test.ClusterServiceUtils.setState; -import static org.opensearch.transport.TransportService.NOOP_TRANSPORT_INTERCEPTOR; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; import static org.mockito.Mockito.any; import static org.mockito.Mockito.anyString; -import static org.mockito.Mockito.eq; import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; +import static org.opensearch.action.support.replication.ClusterStateCreationUtils.state; +import static org.opensearch.test.ClusterServiceUtils.createClusterService; +import static org.opensearch.test.ClusterServiceUtils.setState; +import static org.opensearch.transport.TransportService.NOOP_TRANSPORT_INTERCEPTOR; public class TransportResyncReplicationActionTests extends OpenSearchTestCase { @@ -156,23 +158,26 @@ public void testResyncDoesNotBlockOnPrimaryAction() throws Exception { final AtomicInteger 
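Taken together, the testShrinkWithMaxShardSize and testCalculateTargetIndexShardsNum assertions above pin down the max_shard_size arithmetic: the target index gets the smallest factor of the source shard count whose resulting per-shard size stays within max_shard_size, with degenerate inputs collapsing to one shard. The sketch below is inferred from those test expectations, not copied from TransportResizeAction:

```java
// Inferred shape of the max_shard_size calculation; names and the use of raw
// byte counts (rather than ByteSizeValue/StoreStats) are simplifications.
public class MaxShardSizeSketch {

    static int calculateTargetShards(Long maxShardSizeBytes, Long totalStoreBytes, int sourceShards) {
        if (maxShardSizeBytes == null || maxShardSizeBytes <= 0 || totalStoreBytes == null || totalStoreBytes <= 0) {
            return 1; // degenerate inputs collapse to a single target shard, per the tests
        }
        // Walk the factors of the source shard count from smallest to largest; the
        // first one that keeps every target shard at or under max_shard_size wins.
        for (int n = 1; n <= sourceShards; n++) {
            if (sourceShards % n == 0 && totalStoreBytes / n <= maxShardSizeBytes) {
                return n;
            }
        }
        // max_shard_size is smaller than any achievable per-shard size: keep the source count.
        return sourceShards;
    }

    public static void main(String[] args) {
        // Mirrors the assertions for a 100-byte, 10-shard source index.
        System.out.println(calculateTargetShards(10L, 100L, 10));   // 10
        System.out.println(calculateTargetShards(12L, 100L, 10));   // 10
        System.out.println(calculateTargetShards(20L, 100L, 10));   // 5
        System.out.println(calculateTargetShards(50L, 100L, 10));   // 2
        System.out.println(calculateTargetShards(1000L, 100L, 10)); // 1
    }
}
```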
acquiredPermits = new AtomicInteger(); final IndexShard indexShard = mock(IndexShard.class); + final PendingReplicationActions replicationActions = new PendingReplicationActions(shardId, threadPool); when(indexShard.indexSettings()).thenReturn(new IndexSettings(indexMetadata, Settings.EMPTY)); when(indexShard.shardId()).thenReturn(shardId); when(indexShard.routingEntry()).thenReturn(primaryShardRouting); when(indexShard.getPendingPrimaryTerm()).thenReturn(primaryTerm); when(indexShard.getOperationPrimaryTerm()).thenReturn(primaryTerm); when(indexShard.getActiveOperationsCount()).then(i -> acquiredPermits.get()); + when(indexShard.getPendingReplicationActions()).thenReturn(replicationActions); doAnswer(invocation -> { ActionListener callback = (ActionListener) invocation.getArguments()[0]; acquiredPermits.incrementAndGet(); callback.onResponse(acquiredPermits::decrementAndGet); return null; }).when(indexShard).acquirePrimaryOperationPermit(any(ActionListener.class), anyString(), any(), eq(true)); + Set trackedAllocationIds = shardRoutingTable.getAllAllocationIds(); when(indexShard.getReplicationGroup()).thenReturn( new ReplicationGroup( shardRoutingTable, clusterService.state().metadata().index(index).inSyncAllocationIds(shardId.id()), - shardRoutingTable.getAllAllocationIds(), + trackedAllocationIds, 0 ) ); diff --git a/server/src/test/java/org/opensearch/action/support/replication/ReplicationOperationTests.java b/server/src/test/java/org/opensearch/action/support/replication/ReplicationOperationTests.java index 8a4cdfc953bf8..3a689e356bbdf 100644 --- a/server/src/test/java/org/opensearch/action/support/replication/ReplicationOperationTests.java +++ b/server/src/test/java/org/opensearch/action/support/replication/ReplicationOperationTests.java @@ -45,6 +45,7 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.routing.AllocationId; import org.opensearch.cluster.routing.IndexShardRoutingTable; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.ShardRoutingState; @@ -80,14 +81,18 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Supplier; +import java.util.stream.Collectors; +import java.util.stream.IntStream; -import static org.opensearch.action.support.replication.ClusterStateCreationUtils.state; -import static org.opensearch.action.support.replication.ClusterStateCreationUtils.stateWithActivePrimary; import static org.hamcrest.Matchers.arrayWithSize; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; +import static org.opensearch.action.support.replication.ClusterStateCreationUtils.state; +import static org.opensearch.action.support.replication.ClusterStateCreationUtils.stateWithActivePrimary; +import static org.opensearch.action.support.replication.ReplicationOperation.RetryOnPrimaryException; +import static org.opensearch.cluster.routing.TestShardRouting.newShardRouting; public class ReplicationOperationTests extends OpenSearchTestCase { @@ -157,7 +162,14 @@ public void testReplication() throws Exception { final TestReplicaProxy replicasProxy = new TestReplicaProxy(simulatedFailures); final TestPrimary primary = new TestPrimary(primaryShard, () -> replicationGroup, threadPool); - final 
TestReplicationOperation op = new TestReplicationOperation(request, primary, listener, replicasProxy, primaryTerm); + final TestReplicationOperation op = new TestReplicationOperation( + request, + primary, + listener, + replicasProxy, + primaryTerm, + new FanoutReplicationProxy<>() + ); op.execute(); assertThat("request was not processed on primary", request.processedOnPrimary.get(), equalTo(true)); assertThat(request.processedOnReplicas, equalTo(expectedReplicas)); @@ -179,6 +191,199 @@ public void testReplication() throws Exception { assertThat(primary.knownGlobalCheckpoints, equalTo(replicasProxy.generatedGlobalCheckpoints)); } + public void testReplicationWithRemoteTranslogEnabled() throws Exception { + Set initializingIds = new HashSet<>(); + IntStream.range(0, randomIntBetween(2, 5)).forEach(x -> initializingIds.add(AllocationId.newInitializing())); + Set activeIds = new HashSet<>(); + IntStream.range(0, randomIntBetween(2, 5)).forEach(x -> activeIds.add(AllocationId.newInitializing())); + + AllocationId primaryId = activeIds.iterator().next(); + + ShardId shardId = new ShardId("test", "_na_", 0); + IndexShardRoutingTable.Builder builder = new IndexShardRoutingTable.Builder(shardId); + final ShardRouting primaryShard = newShardRouting( + shardId, + nodeIdFromAllocationId(primaryId), + null, + true, + ShardRoutingState.STARTED, + primaryId + ); + initializingIds.forEach( + aId -> builder.addShard(newShardRouting(shardId, nodeIdFromAllocationId(aId), null, false, ShardRoutingState.INITIALIZING, aId)) + ); + activeIds.stream() + .filter(aId -> !aId.equals(primaryId)) + .forEach( + aId -> builder.addShard(newShardRouting(shardId, nodeIdFromAllocationId(aId), null, false, ShardRoutingState.STARTED, aId)) + ); + builder.addShard(primaryShard); + IndexShardRoutingTable routingTable = builder.build(); + + Set inSyncAllocationIds = activeIds.stream().map(AllocationId::getId).collect(Collectors.toSet()); + ReplicationGroup replicationGroup = new ReplicationGroup(routingTable, inSyncAllocationIds, inSyncAllocationIds, 0); + List replicationTargets = replicationGroup.getReplicationTargets(); + assertEquals(inSyncAllocationIds.size(), replicationTargets.size()); + assertTrue( + replicationTargets.stream().map(sh -> sh.allocationId().getId()).collect(Collectors.toSet()).containsAll(inSyncAllocationIds) + ); + + Request request = new Request(shardId); + PlainActionFuture listener = new PlainActionFuture<>(); + Map simulatedFailures = new HashMap<>(); + TestReplicaProxy replicasProxy = new TestReplicaProxy(simulatedFailures); + TestPrimary primary = new TestPrimary(primaryShard, () -> replicationGroup, threadPool); + final TestReplicationOperation op = new TestReplicationOperation( + request, + primary, + listener, + replicasProxy, + 0, + new ReplicationModeAwareProxy<>(ReplicationMode.NO_REPLICATION) + ); + op.execute(); + assertTrue("request was not processed on primary", request.processedOnPrimary.get()); + assertEquals(0, request.processedOnReplicas.size()); + assertEquals(0, replicasProxy.failedReplicas.size()); + assertEquals(0, replicasProxy.markedAsStaleCopies.size()); + assertTrue("post replication operations not run on primary", request.runPostReplicationActionsOnPrimary.get()); + assertTrue("listener is not marked as done", listener.isDone()); + + ShardInfo shardInfo = listener.actionGet().getShardInfo(); + assertEquals(1 + initializingIds.size(), shardInfo.getTotal()); + } + + public void testPrimaryToPrimaryReplicationWithRemoteTranslogEnabled() throws Exception { + Set initializingIds 
= new HashSet<>(); + IntStream.range(0, randomIntBetween(2, 5)).forEach(x -> initializingIds.add(AllocationId.newInitializing())); + Set activeIds = new HashSet<>(); + IntStream.range(0, randomIntBetween(2, 5)).forEach(x -> activeIds.add(AllocationId.newInitializing())); + + AllocationId primaryId = AllocationId.newRelocation(AllocationId.newInitializing()); + AllocationId relocationTargetId = AllocationId.newTargetRelocation(primaryId); + + ShardId shardId = new ShardId("test", "_na_", 0); + IndexShardRoutingTable.Builder builder = new IndexShardRoutingTable.Builder(shardId); + final ShardRouting primaryShard = newShardRouting( + shardId, + nodeIdFromAllocationId(primaryId), + nodeIdFromAllocationId(relocationTargetId), + true, + ShardRoutingState.RELOCATING, + primaryId + ); + initializingIds.forEach( + aId -> builder.addShard(newShardRouting(shardId, nodeIdFromAllocationId(aId), null, false, ShardRoutingState.INITIALIZING, aId)) + ); + activeIds.forEach( + aId -> builder.addShard(newShardRouting(shardId, nodeIdFromAllocationId(aId), null, false, ShardRoutingState.STARTED, aId)) + ); + builder.addShard(primaryShard); + IndexShardRoutingTable routingTable = builder.build(); + + // Add primary and its relocating target to activeIds + activeIds.add(primaryId); + activeIds.add(relocationTargetId); + + Set inSyncAllocationIds = activeIds.stream().map(AllocationId::getId).collect(Collectors.toSet()); + ReplicationGroup replicationGroup = new ReplicationGroup(routingTable, inSyncAllocationIds, inSyncAllocationIds, 0); + List replicationTargets = replicationGroup.getReplicationTargets(); + assertEquals(inSyncAllocationIds.size(), replicationTargets.size()); + assertTrue( + replicationTargets.stream().map(sh -> sh.allocationId().getId()).collect(Collectors.toSet()).containsAll(inSyncAllocationIds) + ); + + Request request = new Request(shardId); + PlainActionFuture listener = new PlainActionFuture<>(); + Map simulatedFailures = new HashMap<>(); + TestReplicaProxy replicasProxy = new TestReplicaProxy(simulatedFailures); + TestPrimary primary = new TestPrimary(primaryShard, () -> replicationGroup, threadPool); + final TestReplicationOperation op = new TestReplicationOperation( + request, + primary, + listener, + replicasProxy, + 0, + new ReplicationModeAwareProxy<>(ReplicationMode.NO_REPLICATION) + ); + op.execute(); + assertTrue("request was not processed on primary", request.processedOnPrimary.get()); + assertEquals(1, request.processedOnReplicas.size()); + assertEquals(0, replicasProxy.failedReplicas.size()); + assertEquals(0, replicasProxy.markedAsStaleCopies.size()); + assertTrue("post replication operations not run on primary", request.runPostReplicationActionsOnPrimary.get()); + assertTrue("listener is not marked as done", listener.isDone()); + + ShardInfo shardInfo = listener.actionGet().getShardInfo(); + assertEquals(2 + initializingIds.size(), shardInfo.getTotal()); + } + + public void testForceReplicationWithRemoteTranslogEnabled() throws Exception { + Set initializingIds = new HashSet<>(); + IntStream.range(0, randomIntBetween(2, 5)).forEach(x -> initializingIds.add(AllocationId.newInitializing())); + Set activeIds = new HashSet<>(); + IntStream.range(0, randomIntBetween(2, 5)).forEach(x -> activeIds.add(AllocationId.newInitializing())); + + AllocationId primaryId = activeIds.iterator().next(); + + ShardId shardId = new ShardId("test", "_na_", 0); + IndexShardRoutingTable.Builder builder = new IndexShardRoutingTable.Builder(shardId); + final ShardRouting primaryShard = newShardRouting(
+ shardId, + nodeIdFromAllocationId(primaryId), + null, + true, + ShardRoutingState.STARTED, + primaryId + ); + initializingIds.forEach( + aId -> builder.addShard(newShardRouting(shardId, nodeIdFromAllocationId(aId), null, false, ShardRoutingState.INITIALIZING, aId)) + ); + activeIds.stream() + .filter(aId -> !aId.equals(primaryId)) + .forEach( + aId -> builder.addShard(newShardRouting(shardId, nodeIdFromAllocationId(aId), null, false, ShardRoutingState.STARTED, aId)) + ); + builder.addShard(primaryShard); + IndexShardRoutingTable routingTable = builder.build(); + + Set inSyncAllocationIds = activeIds.stream().map(AllocationId::getId).collect(Collectors.toSet()); + ReplicationGroup replicationGroup = new ReplicationGroup(routingTable, inSyncAllocationIds, inSyncAllocationIds, 0); + List replicationTargets = replicationGroup.getReplicationTargets(); + assertEquals(inSyncAllocationIds.size(), replicationTargets.size()); + assertTrue( + replicationTargets.stream().map(sh -> sh.allocationId().getId()).collect(Collectors.toSet()).containsAll(inSyncAllocationIds) + ); + + Request request = new Request(shardId); + PlainActionFuture listener = new PlainActionFuture<>(); + Map simulatedFailures = new HashMap<>(); + TestReplicaProxy replicasProxy = new TestReplicaProxy(simulatedFailures); + TestPrimary primary = new TestPrimary(primaryShard, () -> replicationGroup, threadPool); + final TestReplicationOperation op = new TestReplicationOperation( + request, + primary, + listener, + replicasProxy, + 0, + new FanoutReplicationProxy<>() + ); + op.execute(); + assertTrue("request was not processed on primary", request.processedOnPrimary.get()); + assertEquals(activeIds.size() - 1, request.processedOnReplicas.size()); + assertEquals(0, replicasProxy.failedReplicas.size()); + assertEquals(0, replicasProxy.markedAsStaleCopies.size()); + assertTrue("post replication operations not run on primary", request.runPostReplicationActionsOnPrimary.get()); + assertTrue("listener is not marked as done", listener.isDone()); + + ShardInfo shardInfo = listener.actionGet().getShardInfo(); + assertEquals(activeIds.size() + initializingIds.size(), shardInfo.getTotal()); + } + + static String nodeIdFromAllocationId(final AllocationId allocationId) { + return "n-" + allocationId.getId().substring(0, 8); + } + public void testRetryTransientReplicationFailure() throws Exception { final String index = "test"; final ShardId shardId = new ShardId(index, "_na_", 0); @@ -242,7 +447,8 @@ public void testRetryTransientReplicationFailure() throws Exception { replicasProxy, primaryTerm, TimeValue.timeValueMillis(20), - TimeValue.timeValueSeconds(60) + TimeValue.timeValueSeconds(60), + new FanoutReplicationProxy<>() ); op.execute(); assertThat("request was not processed on primary", request.processedOnPrimary.get(), equalTo(true)); @@ -379,7 +585,14 @@ public void failShard(String message, Exception exception) { assertTrue(primaryFailed.compareAndSet(false, true)); } }; - final TestReplicationOperation op = new TestReplicationOperation(request, primary, listener, replicasProxy, primaryTerm); + final TestReplicationOperation op = new TestReplicationOperation( + request, + primary, + listener, + replicasProxy, + primaryTerm, + new FanoutReplicationProxy<>() + ); op.execute(); assertThat("request was not processed on primary", request.processedOnPrimary.get(), equalTo(true)); @@ -389,7 +602,7 @@ public void failShard(String message, Exception exception) { } else { assertFalse(primaryFailed.get()); } - assertListenerThrows("should throw 
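The three new tests above differ only in the proxy handed to TestReplicationOperation: FanoutReplicationProxy reaches every replication target, while ReplicationModeAwareProxy with NO_REPLICATION (the remote-translog configuration) skips ordinary replicas and, as the primary-to-primary test shows, still replicates to a relocating primary's target. A deliberately simplified sketch of that split, ignoring the relocation special case and using stand-in types rather than the real ReplicationProxy hierarchy:

```java
import java.util.List;
import java.util.function.Consumer;

public class ReplicationProxySketch {

    enum ReplicationMode { FULL_REPLICATION, NO_REPLICATION }

    interface ReplicationProxy<T> {
        void performOn(List<T> replicas, Consumer<T> replicaOp);
    }

    // Fan-out: the replicated operation reaches every target.
    static <T> ReplicationProxy<T> fanout() {
        return (replicas, op) -> replicas.forEach(op);
    }

    // Mode-aware: with NO_REPLICATION the primary completes the operation but no
    // ordinary replica sees it, matching assertEquals(0, request.processedOnReplicas.size()).
    static <T> ReplicationProxy<T> modeAware(ReplicationMode mode) {
        return (replicas, op) -> {
            if (mode != ReplicationMode.NO_REPLICATION) {
                replicas.forEach(op);
            }
        };
    }

    public static void main(String[] args) {
        List<String> replicas = List.of("r1", "r2");
        ReplicationProxySketch.<String>fanout().performOn(replicas, r -> System.out.println("fanout -> " + r));
        ReplicationProxySketch.<String>modeAware(ReplicationMode.NO_REPLICATION)
            .performOn(replicas, r -> System.out.println("never printed"));
    }
}
```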
exception to trigger retry", listener, ReplicationOperation.RetryOnPrimaryException.class); + assertListenerThrows("should throw exception to trigger retry", listener, RetryOnPrimaryException.class); } public void testAddedReplicaAfterPrimaryOperation() throws Exception { @@ -438,7 +651,14 @@ public void perform(Request request, ActionListener listener) { Request request = new Request(shardId); PlainActionFuture listener = new PlainActionFuture<>(); - final TestReplicationOperation op = new TestReplicationOperation(request, primary, listener, new TestReplicaProxy(), primaryTerm); + final TestReplicationOperation op = new TestReplicationOperation( + request, + primary, + listener, + new TestReplicaProxy(), + primaryTerm, + new FanoutReplicationProxy<>() + ); op.execute(); assertThat("request was not processed on primary", request.processedOnPrimary.get(), equalTo(true)); @@ -493,7 +713,8 @@ public void testWaitForActiveShards() throws Exception { logger, threadPool, "test", - primaryTerm + primaryTerm, + new FanoutReplicationProxy<>() ); if (passesActiveShardCheck) { @@ -554,7 +775,14 @@ public void updateLocalCheckpointForShard(String allocationId, long checkpoint) final PlainActionFuture listener = new PlainActionFuture<>(); final ReplicationOperation.Replicas replicas = new TestReplicaProxy(Collections.emptyMap()); - TestReplicationOperation operation = new TestReplicationOperation(request, primary, listener, replicas, primaryTerm); + TestReplicationOperation operation = new TestReplicationOperation( + request, + primary, + listener, + replicas, + primaryTerm, + new FanoutReplicationProxy<>() + ); operation.execute(); assertThat(primaryFailed.get(), equalTo(fatal)); @@ -841,7 +1069,8 @@ class TestReplicationOperation extends ReplicationOperation replicas, long primaryTerm, TimeValue initialRetryBackoffBound, - TimeValue retryTimeout + TimeValue retryTimeout, + ReplicationProxy replicationProxy ) { this( request, @@ -853,7 +1082,8 @@ class TestReplicationOperation extends ReplicationOperation primary, ActionListener listener, Replicas replicas, - long primaryTerm + long primaryTerm, + ReplicationProxy replicationProxy ) { - this(request, primary, listener, replicas, ReplicationOperationTests.this.logger, threadPool, "test", primaryTerm); + this( + request, + primary, + listener, + replicas, + ReplicationOperationTests.this.logger, + threadPool, + "test", + primaryTerm, + replicationProxy + ); } TestReplicationOperation( @@ -875,7 +1116,8 @@ class TestReplicationOperation extends ReplicationOperation replicationProxy ) { this( request, @@ -887,7 +1129,8 @@ class TestReplicationOperation extends ReplicationOperation replicationProxy ) { - super(request, primary, listener, replicas, logger, threadPool, opType, primaryTerm, initialRetryBackoffBound, retryTimeout); + super( + request, + primary, + listener, + replicas, + logger, + threadPool, + opType, + primaryTerm, + initialRetryBackoffBound, + retryTimeout, + replicationProxy + ); } } diff --git a/server/src/test/java/org/opensearch/action/support/replication/TransportReplicationActionTests.java b/server/src/test/java/org/opensearch/action/support/replication/TransportReplicationActionTests.java index 696958c340375..bde483e171d1e 100644 --- a/server/src/test/java/org/opensearch/action/support/replication/TransportReplicationActionTests.java +++ b/server/src/test/java/org/opensearch/action/support/replication/TransportReplicationActionTests.java @@ -33,6 +33,12 @@ package org.opensearch.action.support.replication; import 
org.apache.lucene.store.AlreadyClosedException; +import org.hamcrest.Matcher; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; import org.opensearch.OpenSearchException; import org.opensearch.Version; import org.opensearch.action.ActionListener; @@ -99,12 +105,6 @@ import org.opensearch.transport.TransportResponse; import org.opensearch.transport.TransportService; import org.opensearch.transport.nio.MockNioTransport; -import org.hamcrest.Matcher; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.Before; -import org.junit.BeforeClass; import java.io.IOException; import java.util.Collections; @@ -121,11 +121,6 @@ import java.util.stream.Collectors; import static java.util.Collections.singleton; -import static org.opensearch.action.support.replication.ClusterStateCreationUtils.state; -import static org.opensearch.action.support.replication.ClusterStateCreationUtils.stateWithActivePrimary; -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_WAIT_FOR_ACTIVE_SHARDS; -import static org.opensearch.test.ClusterServiceUtils.createClusterService; -import static org.opensearch.test.ClusterServiceUtils.setState; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.Matchers.arrayWithSize; import static org.hamcrest.Matchers.equalTo; @@ -138,12 +133,17 @@ import static org.mockito.Mockito.anyInt; import static org.mockito.Mockito.anyLong; import static org.mockito.Mockito.anyString; -import static org.mockito.Mockito.eq; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; +import static org.opensearch.action.support.replication.ClusterStateCreationUtils.state; +import static org.opensearch.action.support.replication.ClusterStateCreationUtils.stateWithActivePrimary; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_WAIT_FOR_ACTIVE_SHARDS; +import static org.opensearch.test.ClusterServiceUtils.createClusterService; +import static org.opensearch.test.ClusterServiceUtils.setState; public class TransportReplicationActionTests extends OpenSearchTestCase { @@ -950,7 +950,8 @@ public void testSeqNoIsSetOnPrimary() { Set inSyncIds = randomBoolean() ? 
singleton(routingEntry.allocationId().getId()) : clusterService.state().metadata().index(index).inSyncAllocationIds(0); - ReplicationGroup replicationGroup = new ReplicationGroup(shardRoutingTable, inSyncIds, shardRoutingTable.getAllAllocationIds(), 0); + Set trackedAllocationIds = shardRoutingTable.getAllAllocationIds(); + ReplicationGroup replicationGroup = new ReplicationGroup(shardRoutingTable, inSyncIds, trackedAllocationIds, 0); when(shard.getReplicationGroup()).thenReturn(replicationGroup); PendingReplicationActions replicationActions = new PendingReplicationActions(shardId, threadPool); replicationActions.accept(replicationGroup); diff --git a/server/src/test/java/org/opensearch/cluster/service/ClusterManagerTaskThrottlerTests.java b/server/src/test/java/org/opensearch/cluster/service/ClusterManagerTaskThrottlerTests.java index d20fed5c37361..c5e706e50c298 100644 --- a/server/src/test/java/org/opensearch/cluster/service/ClusterManagerTaskThrottlerTests.java +++ b/server/src/test/java/org/opensearch/cluster/service/ClusterManagerTaskThrottlerTests.java @@ -114,6 +114,15 @@ public void testValidateSettingsForDifferentVersion() { Settings newSettings = Settings.builder().put("cluster_manager.throttling.thresholds.put-mapping.value", newLimit).build(); assertThrows(IllegalArgumentException.class, () -> throttler.validateSetting(newSettings)); + + // validate for empty setting, it shouldn't throw exception + Settings emptySettings = Settings.builder().build(); + try { + throttler.validateSetting(emptySettings); + } catch (Exception e) { + // it shouldn't throw exception + throw new AssertionError(e); + } } public void testValidateSettingsForTaskWihtoutRetryOnDataNode() { diff --git a/server/src/test/java/org/opensearch/common/bytes/ByteBuffersBytesReferenceTests.java b/server/src/test/java/org/opensearch/common/bytes/ByteBuffersBytesReferenceTests.java new file mode 100644 index 0000000000000..4665a8e113ff2 --- /dev/null +++ b/server/src/test/java/org/opensearch/common/bytes/ByteBuffersBytesReferenceTests.java @@ -0,0 +1,79 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.common.bytes; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.hamcrest.Matchers; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.Arrays; +import java.util.Collection; +import java.util.function.Function; + +public class ByteBuffersBytesReferenceTests extends AbstractBytesReferenceTestCase { + @ParametersFactory + public static Collection allocator() { + return Arrays.asList( + new Object[] { (Function) ByteBuffer::allocateDirect }, + new Object[] { (Function) ByteBuffer::allocate } + ); + } + + private final Function allocator; + + public ByteBuffersBytesReferenceTests(Function allocator) { + this.allocator = allocator; + } + + @Override + protected BytesReference newBytesReference(int length) throws IOException { + return newBytesReference(length, randomInt(length)); + } + + @Override + protected BytesReference newBytesReferenceWithOffsetOfZero(int length) throws IOException { + return newBytesReference(length, 0); + } + + private BytesReference newBytesReference(int length, int offset) throws IOException { + // we know bytes stream output always creates a paged bytes reference, we use it to create randomized content + final ByteBuffer buffer = allocator.apply(length + offset); + for (int i = 0; i < length + offset; i++) { + buffer.put((byte) random().nextInt(1 << 8)); + } + assertEquals(length + offset, buffer.limit()); + buffer.flip().position(offset); + + BytesReference ref = BytesReference.fromByteBuffer(buffer); + assertEquals(length, ref.length()); + assertTrue(ref instanceof BytesArray); + assertThat(ref.length(), Matchers.equalTo(length)); + return ref; + } + + public void testArray() throws IOException { + int[] sizes = { 0, randomInt(PAGE_SIZE), PAGE_SIZE, randomIntBetween(2, PAGE_SIZE * randomIntBetween(2, 5)) }; + + for (int i = 0; i < sizes.length; i++) { + BytesArray pbr = (BytesArray) newBytesReference(sizes[i]); + byte[] array = pbr.array(); + assertNotNull(array); + assertEquals(sizes[i], array.length - pbr.offset()); + assertSame(array, pbr.array()); + } + } + + public void testArrayOffset() throws IOException { + int length = randomInt(PAGE_SIZE * randomIntBetween(2, 5)); + BytesArray pbr = (BytesArray) newBytesReferenceWithOffsetOfZero(length); + assertEquals(0, pbr.offset()); + } +} diff --git a/server/src/test/java/org/opensearch/common/util/FeatureFlagTests.java b/server/src/test/java/org/opensearch/common/util/FeatureFlagTests.java index a4f2b242564e2..b493771876b99 100644 --- a/server/src/test/java/org/opensearch/common/util/FeatureFlagTests.java +++ b/server/src/test/java/org/opensearch/common/util/FeatureFlagTests.java @@ -22,6 +22,7 @@ public class FeatureFlagTests extends OpenSearchTestCase { public static void enableFeature() { AccessController.doPrivileged((PrivilegedAction) () -> System.setProperty(FeatureFlags.REPLICATION_TYPE, "true")); AccessController.doPrivileged((PrivilegedAction) () -> System.setProperty(FeatureFlags.REMOTE_STORE, "true")); + AccessController.doPrivileged((PrivilegedAction) () -> System.setProperty(FeatureFlags.EXTENSIONS, "true")); } public void testReplicationTypeFeatureFlag() { diff --git a/server/src/test/java/org/opensearch/extensions/ExtensionsManagerTests.java b/server/src/test/java/org/opensearch/extensions/ExtensionsManagerTests.java new file mode 100644 index 0000000000000..cbd86378c0fac --- /dev/null +++ b/server/src/test/java/org/opensearch/extensions/ExtensionsManagerTests.java @@ -0,0 +1,418 @@ +/* + * 
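The newBytesReference helper in the ByteBuffersBytesReferenceTests above leans on a small ByteBuffer idiom: write length + offset random bytes, flip() the buffer to make them readable, then position(offset) so that only the trailing length bytes remain visible to BytesReference.fromByteBuffer. A standalone illustration with plain JDK types (the concrete sizes are arbitrary):

```java
import java.nio.ByteBuffer;
import java.util.Random;

public class ByteBufferOffsetSketch {
    public static void main(String[] args) {
        int offset = 3;
        int length = 5;
        // allocate(...) here; the test's ParametersFactory also runs allocateDirect(...).
        ByteBuffer buffer = ByteBuffer.allocate(length + offset);
        Random random = new Random();
        for (int i = 0; i < length + offset; i++) {
            buffer.put((byte) random.nextInt(1 << 8));
        }
        // flip() makes [0, limit) readable; position(offset) hides the leading bytes,
        // leaving exactly `length` bytes in the readable window.
        buffer.flip().position(offset);
        System.out.println(buffer.remaining()); // 5
    }
}
```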
SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.extensions; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.emptySet; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyBoolean; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.mock; +import static org.opensearch.test.ClusterServiceUtils.createClusterService; + +import java.io.IOException; +import java.net.InetAddress; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.security.AccessControlException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.concurrent.TimeUnit; + +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.LogManager; +import org.junit.After; +import org.junit.Before; +import org.opensearch.Version; +import org.opensearch.action.admin.cluster.state.ClusterStateResponse; +import org.opensearch.cluster.ClusterSettingsResponse; +import org.opensearch.cluster.LocalNodeResponse; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.io.PathUtils; +import org.opensearch.common.io.stream.NamedWriteableRegistry; +import org.opensearch.common.network.NetworkService; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.transport.TransportAddress; +import org.opensearch.common.util.FeatureFlagTests; +import org.opensearch.common.util.PageCacheRecycler; +import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.env.Environment; +import org.opensearch.env.TestEnvironment; +import org.opensearch.index.IndexModule; +import org.opensearch.index.IndexSettings; +import org.opensearch.index.analysis.AnalysisRegistry; +import org.opensearch.index.engine.EngineConfigFactory; +import org.opensearch.index.engine.InternalEngineFactory; +import org.opensearch.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.plugins.PluginInfo; +import org.opensearch.test.IndexSettingsModule; +import org.opensearch.test.MockLogAppender; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.transport.MockTransportService; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.ConnectTransportException; +import org.opensearch.transport.Transport; +import org.opensearch.transport.TransportService; +import org.opensearch.transport.nio.MockNioTransport; + +public class ExtensionsManagerTests extends OpenSearchTestCase { + + private TransportService transportService; + private ClusterService clusterService; + private MockNioTransport transport; + private final ThreadPool threadPool = new TestThreadPool(ExtensionsManagerTests.class.getSimpleName()); + private final Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + 
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) + .build(); + + @Before + public void setup() throws Exception { + FeatureFlagTests.enableFeature(); + Settings settings = Settings.builder().put("cluster.name", "test").build(); + transport = new MockNioTransport( + settings, + Version.CURRENT, + threadPool, + new NetworkService(Collections.emptyList()), + PageCacheRecycler.NON_RECYCLING_INSTANCE, + new NamedWriteableRegistry(Collections.emptyList()), + new NoneCircuitBreakerService() + ); + transportService = new MockTransportService( + settings, + transport, + threadPool, + TransportService.NOOP_TRANSPORT_INTERCEPTOR, + (boundAddress) -> new DiscoveryNode( + "test_node", + "test_node", + boundAddress.publishAddress(), + emptyMap(), + emptySet(), + Version.CURRENT + ), + null, + Collections.emptySet() + ); + clusterService = createClusterService(threadPool); + } + + @After + public void tearDown() throws Exception { + super.tearDown(); + transportService.close(); + ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS); + } + + public void testExtensionsDiscovery() throws Exception { + Path extensionDir = createTempDir(); + + List extensionsYmlLines = Arrays.asList( + "extensions:", + " - name: firstExtension", + " uniqueId: uniqueid1", + " hostName: 'myIndependentPluginHost1'", + " hostAddress: '127.0.0.0'", + " port: '9300'", + " version: '0.0.7'", + " description: Fake description 1", + " opensearchVersion: '3.0.0'", + " javaVersion: '14'", + " className: fakeClass1", + " customFolderName: fakeFolder1", + " hasNativeController: false", + " - name: secondExtension", + " uniqueId: 'uniqueid2'", + " hostName: 'myIndependentPluginHost2'", + " hostAddress: '127.0.0.1'", + " port: '9301'", + " version: '3.14.16'", + " description: Fake description 2", + " opensearchVersion: '2.0.0'", + " javaVersion: '17'", + " className: fakeClass2", + " customFolderName: fakeFolder2", + " hasNativeController: true" + ); + Files.write(extensionDir.resolve("extensions.yml"), extensionsYmlLines, StandardCharsets.UTF_8); + + ExtensionsManager extensionsManager = new ExtensionsManager(settings, extensionDir); + + List expectedUninitializedExtensions = new ArrayList(); + + expectedUninitializedExtensions.add( + new DiscoveryExtensionNode( + "firstExtension", + "uniqueid1", + "uniqueid1", + "myIndependentPluginHost1", + "127.0.0.0", + new TransportAddress(InetAddress.getByName("127.0.0.0"), 9300), + new HashMap(), + Version.fromString("3.0.0"), + new PluginInfo( + "firstExtension", + "Fake description 1", + "0.0.7", + Version.fromString("3.0.0"), + "14", + "fakeClass1", + new ArrayList(), + false + ) + ) + ); + + expectedUninitializedExtensions.add( + new DiscoveryExtensionNode( + "secondExtension", + "uniqueid2", + "uniqueid2", + "myIndependentPluginHost2", + "127.0.0.1", + new TransportAddress(TransportAddress.META_ADDRESS, 9301), + new HashMap(), + Version.fromString("2.0.0"), + new PluginInfo( + "secondExtension", + "Fake description 2", + "3.14.16", + Version.fromString("2.0.0"), + "17", + "fakeClass2", + new ArrayList(), + true + ) + ) + ); + assertEquals(expectedUninitializedExtensions, extensionsManager.getUninitializedExtensions()); + } + + public void testNonAccessibleDirectory() throws Exception { + AccessControlException e = expectThrows( + + AccessControlException.class, + () -> new ExtensionsManager(settings, PathUtils.get("")) + ); + assertEquals("access denied (\"java.io.FilePermission\" \"\" \"read\")", e.getMessage()); + } + + public void testNoExtensionsFile() throws 
Exception { + Path extensionDir = createTempDir(); + + Settings settings = Settings.builder().build(); + + try (MockLogAppender mockLogAppender = MockLogAppender.createForLoggers(LogManager.getLogger(ExtensionsManager.class))) { + + mockLogAppender.addExpectation( + new MockLogAppender.SeenEventExpectation( + "No Extensions File Present", + "org.opensearch.extensions.ExtensionsManager", + Level.INFO, + "Extensions.yml file is not present. No extensions will be loaded." + ) + ); + + new ExtensionsManager(settings, extensionDir); + + mockLogAppender.assertAllExpectationsMatched(); + } + } + + public void testEmptyExtensionsFile() throws Exception { + Path extensionDir = createTempDir(); + + List extensionsYmlLines = Arrays.asList(); + Files.write(extensionDir.resolve("extensions.yml"), extensionsYmlLines, StandardCharsets.UTF_8); + + Settings settings = Settings.builder().build(); + + expectThrows(IOException.class, () -> new ExtensionsManager(settings, extensionDir)); + } + + public void testInitialize() throws Exception { + Path extensionDir = createTempDir(); + + List extensionsYmlLines = Arrays.asList( + "extensions:", + " - name: firstExtension", + " uniqueId: uniqueid1", + " hostName: 'myIndependentPluginHost1'", + " hostAddress: '127.0.0.0'", + " port: '9300'", + " version: '0.0.7'", + " description: Fake description 1", + " opensearchVersion: '3.0.0'", + " javaVersion: '14'", + " className: fakeClass1", + " customFolderName: fakeFolder1", + " hasNativeController: false", + " - name: secondExtension", + " uniqueId: 'uniqueid2'", + " hostName: 'myIndependentPluginHost2'", + " hostAddress: '127.0.0.1'", + " port: '9301'", + " version: '3.14.16'", + " description: Fake description 2", + " opensearchVersion: '2.0.0'", + " javaVersion: '17'", + " className: fakeClass2", + " customFolderName: fakeFolder2", + " hasNativeController: true" + ); + Files.write(extensionDir.resolve("extensions.yml"), extensionsYmlLines, StandardCharsets.UTF_8); + + ExtensionsManager extensionsManager = new ExtensionsManager(settings, extensionDir); + + transportService.start(); + transportService.acceptIncomingRequests(); + extensionsManager.setTransportService(transportService); + + expectThrows(ConnectTransportException.class, () -> extensionsManager.initialize()); + + } + + public void testHandleExtensionRequest() throws Exception { + + Path extensionDir = createTempDir(); + + ExtensionsManager extensionsManager = new ExtensionsManager(settings, extensionDir); + + extensionsManager.setTransportService(transportService); + extensionsManager.setClusterService(clusterService); + ExtensionRequest clusterStateRequest = new ExtensionRequest(ExtensionsManager.RequestType.REQUEST_EXTENSION_CLUSTER_STATE); + assertEquals(extensionsManager.handleExtensionRequest(clusterStateRequest).getClass(), ClusterStateResponse.class); + + ExtensionRequest clusterSettingRequest = new ExtensionRequest(ExtensionsManager.RequestType.REQUEST_EXTENSION_CLUSTER_SETTINGS); + assertEquals(extensionsManager.handleExtensionRequest(clusterSettingRequest).getClass(), ClusterSettingsResponse.class); + + ExtensionRequest localNodeRequest = new ExtensionRequest(ExtensionsManager.RequestType.REQUEST_EXTENSION_LOCAL_NODE); + assertEquals(extensionsManager.handleExtensionRequest(localNodeRequest).getClass(), LocalNodeResponse.class); + + ExtensionRequest exceptionRequest = new ExtensionRequest(ExtensionsManager.RequestType.GET_SETTINGS); + Exception exception = expectThrows(IllegalStateException.class, () -> 
extensionsManager.handleExtensionRequest(exceptionRequest)); + assertEquals(exception.getMessage(), "Handler not present for the provided request: " + exceptionRequest.getRequestType()); + } + + public void testRegisterHandler() throws Exception { + Path extensionDir = createTempDir(); + + ExtensionsManager extensionsManager = new ExtensionsManager(settings, extensionDir); + + TransportService mockTransportService = spy( + new TransportService( + Settings.EMPTY, + mock(Transport.class), + null, + TransportService.NOOP_TRANSPORT_INTERCEPTOR, + x -> null, + null, + Collections.emptySet() + ) + ); + + extensionsManager.setTransportService(mockTransportService); + verify(mockTransportService, times(3)).registerRequestHandler(anyString(), anyString(), anyBoolean(), anyBoolean(), any(), any()); + + } + + public void testOnIndexModule() throws Exception { + + Path extensionDir = createTempDir(); + + List<String> extensionsYmlLines = Arrays.asList( + "extensions:", + " - name: firstExtension", + " uniqueId: uniqueid1", + " hostName: 'myIndependentPluginHost1'", + " hostAddress: '127.0.0.0'", + " port: '9300'", + " version: '0.0.7'", + " description: Fake description 1", + " opensearchVersion: '3.0.0'", + " javaVersion: '14'", + " className: fakeClass1", + " customFolderName: fakeFolder1", + " hasNativeController: false", + " - name: secondExtension", + " uniqueId: 'uniqueid2'", + " hostName: 'myIndependentPluginHost2'", + " hostAddress: '127.0.0.1'", + " port: '9301'", + " version: '3.14.16'", + " description: Fake description 2", + " opensearchVersion: '2.0.0'", + " javaVersion: '17'", + " className: fakeClass2", + " customFolderName: fakeFolder2", + " hasNativeController: true" + ); + Files.write(extensionDir.resolve("extensions.yml"), extensionsYmlLines, StandardCharsets.UTF_8); + + ExtensionsManager extensionsManager = new ExtensionsManager(settings, extensionDir); + + transportService.start(); + transportService.acceptIncomingRequests(); + extensionsManager.setTransportService(transportService); + + Environment environment = TestEnvironment.newEnvironment(settings); + AnalysisRegistry emptyAnalysisRegistry = new AnalysisRegistry( + environment, + emptyMap(), + emptyMap(), + emptyMap(), + emptyMap(), + emptyMap(), + emptyMap(), + emptyMap(), + emptyMap(), + emptyMap() + ); + + IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("test_index", settings); + IndexModule indexModule = new IndexModule( + indexSettings, + emptyAnalysisRegistry, + new InternalEngineFactory(), + new EngineConfigFactory(indexSettings), + Collections.emptyMap(), + () -> true, + new IndexNameExpressionResolver(new ThreadContext(Settings.EMPTY)), + Collections.emptyMap() + ); + + try (MockLogAppender mockLogAppender = MockLogAppender.createForLoggers(LogManager.getLogger(ExtensionsManager.class))) { + + mockLogAppender.addExpectation( + new MockLogAppender.SeenEventExpectation( + "IndicesModuleRequest Failure", + "org.opensearch.extensions.ExtensionsManager", + Level.ERROR, + "IndicesModuleRequest failed" + ) + ); + + extensionsManager.onIndexModule(indexModule); + mockLogAppender.assertAllExpectationsMatched(); + } + } + +} diff --git a/server/src/test/java/org/opensearch/index/ShardIndexingPressureConcurrentExecutionTests.java b/server/src/test/java/org/opensearch/index/ShardIndexingPressureConcurrentExecutionTests.java index faab2f405010a..8757458e3317e 100644 --- a/server/src/test/java/org/opensearch/index/ShardIndexingPressureConcurrentExecutionTests.java +++
b/server/src/test/java/org/opensearch/index/ShardIndexingPressureConcurrentExecutionTests.java @@ -269,7 +269,13 @@ public void testCoordinatingPrimaryThreadedUpdateToShardLimitsAndRejections() th nodeStats = shardIndexingPressure.stats(); IndexingPressurePerShardStats shardStoreStats = shardIndexingPressure.shardStats().getIndexingPressureShardStats(shardId1); - assertNull(shardStoreStats); + // If the rejection count equals NUM_THREADS, rejections happened up to the last request, so we'll get the shardStoreStats which + // was updated on the last request. In other cases, the shardStoreStats simply moves to the cold store and null is returned. + if (rejectionCount.get() == NUM_THREADS) { + assertEquals(10, shardStoreStats.getCurrentPrimaryAndCoordinatingLimits()); + } else { + assertNull(shardStoreStats); + } shardStats = shardIndexingPressure.coldStats(); if (randomBoolean) { assertEquals(rejectionCount.get(), nodeStats.getCoordinatingRejections()); @@ -331,7 +337,13 @@ public void testReplicaThreadedUpdateToShardLimitsAndRejections() throws Excepti assertEquals(0, nodeStats.getCurrentReplicaBytes()); IndexingPressurePerShardStats shardStoreStats = shardIndexingPressure.shardStats().getIndexingPressureShardStats(shardId1); - assertNull(shardStoreStats); + // If the rejection count equals NUM_THREADS, rejections happened up to the last request, so we'll get the shardStoreStats which + // was updated on the last request. In other cases, the shardStoreStats simply moves to the cold store and null is returned. + if (rejectionCount.get() == NUM_THREADS) { + assertEquals(15, shardStoreStats.getCurrentReplicaLimits()); + } else { + assertNull(shardStoreStats); + } shardStats = shardIndexingPressure.coldStats(); assertEquals(rejectionCount.get(), shardStats.getIndexingPressureShardStats(shardId1).getReplicaNodeLimitsBreachedRejections()); diff --git a/server/src/test/java/org/opensearch/index/query/PrefixQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/PrefixQueryBuilderTests.java index 48b309ea4eca3..8f4f70e96e2b4 100644 --- a/server/src/test/java/org/opensearch/index/query/PrefixQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/PrefixQueryBuilderTests.java @@ -130,7 +130,7 @@ public void testNumeric() throws Exception { QueryShardContext context = createShardContext(); QueryShardException e = expectThrows(QueryShardException.class, () -> query.toQuery(context)); assertEquals( - "Can only use prefix queries on keyword, text and wildcard fields - not on [mapped_int] which is of type [integer]", + "Can only use prefix queries on keyword and text fields - not on [mapped_int] which is of type [integer]", e.getMessage() ); } diff --git a/server/src/test/java/org/opensearch/index/query/QueryStringQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/QueryStringQueryBuilderTests.java index 393d4cb3f2121..611223e067cff 100644 --- a/server/src/test/java/org/opensearch/index/query/QueryStringQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/QueryStringQueryBuilderTests.java @@ -873,7 +873,7 @@ public void testPrefixNumeric() throws Exception { QueryShardContext context = createShardContext(); QueryShardException e = expectThrows(QueryShardException.class, () -> query.toQuery(context)); assertEquals( - "Can only use prefix queries on keyword, text and wildcard fields - not on [mapped_int] which is of type [integer]", + "Can only use prefix queries on keyword and text fields - not on [mapped_int] which
is of type [integer]", e.getMessage() ); query.lenient(true); diff --git a/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTestCase.java b/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTestCase.java index 34a2d1189d234..17a6bfc8fbd82 100644 --- a/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTestCase.java +++ b/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTestCase.java @@ -43,6 +43,7 @@ import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.IndexSettingsModule; +import java.util.Collections; import java.util.Set; import java.util.function.LongConsumer; import java.util.function.LongSupplier; @@ -55,12 +56,13 @@ public abstract class ReplicationTrackerTestCase extends OpenSearchTestCase { ReplicationTracker newTracker( final AllocationId allocationId, final LongConsumer updatedGlobalCheckpoint, - final LongSupplier currentTimeMillisSupplier + final LongSupplier currentTimeMillisSupplier, + final Settings settings ) { return new ReplicationTracker( new ShardId("test", "_na_", 0), allocationId.getId(), - IndexSettingsModule.newIndexSettings("test", Settings.EMPTY), + IndexSettingsModule.newIndexSettings("test", settings), randomNonNegativeLong(), UNASSIGNED_SEQ_NO, updatedGlobalCheckpoint, @@ -70,6 +72,14 @@ ReplicationTracker newTracker( ); } + ReplicationTracker newTracker( + final AllocationId allocationId, + final LongConsumer updatedGlobalCheckpoint, + final LongSupplier currentTimeMillisSupplier + ) { + return newTracker(allocationId, updatedGlobalCheckpoint, currentTimeMillisSupplier, Settings.EMPTY); + } + static final Supplier OPS_BASED_RECOVERY_ALWAYS_REASONABLE = () -> SafeCommitInfo.EMPTY; static String nodeIdFromAllocationId(final AllocationId allocationId) { @@ -77,6 +87,14 @@ static String nodeIdFromAllocationId(final AllocationId allocationId) { } static IndexShardRoutingTable routingTable(final Set initializingIds, final AllocationId primaryId) { + return routingTable(initializingIds, Collections.singleton(primaryId), primaryId); + } + + static IndexShardRoutingTable routingTable( + final Set initializingIds, + final Set activeIds, + final AllocationId primaryId + ) { final ShardId shardId = new ShardId("test", "_na_", 0); final ShardRouting primaryShard = TestShardRouting.newShardRouting( shardId, @@ -86,11 +104,17 @@ static IndexShardRoutingTable routingTable(final Set initializingI ShardRoutingState.STARTED, primaryId ); - return routingTable(initializingIds, primaryShard); + return routingTable(initializingIds, activeIds, primaryShard); } - static IndexShardRoutingTable routingTable(final Set initializingIds, final ShardRouting primaryShard) { + static IndexShardRoutingTable routingTable( + final Set initializingIds, + final Set activeIds, + final ShardRouting primaryShard + ) { + assert initializingIds != null && activeIds != null; assert !initializingIds.contains(primaryShard.allocationId()); + assert activeIds.contains(primaryShard.allocationId()); final ShardId shardId = new ShardId("test", "_na_", 0); final IndexShardRoutingTable.Builder builder = new IndexShardRoutingTable.Builder(shardId); for (final AllocationId initializingId : initializingIds) { @@ -105,6 +129,21 @@ static IndexShardRoutingTable routingTable(final Set initializingI ) ); } + for (final AllocationId activeId : activeIds) { + if (activeId.equals(primaryShard.allocationId())) { + continue; + } + builder.addShard( + TestShardRouting.newShardRouting( + shardId, + nodeIdFromAllocationId(activeId), + null, 
+ false, + ShardRoutingState.STARTED, + activeId + ) + ); + } builder.addShard(primaryShard); diff --git a/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTests.java b/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTests.java index 66c484cd40cce..8ea64e71fb9dc 100644 --- a/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTests.java +++ b/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTests.java @@ -437,6 +437,10 @@ public void testWaitForAllocationIdToBeInSync() throws Exception { private AtomicLong updatedGlobalCheckpoint = new AtomicLong(UNASSIGNED_SEQ_NO); + private ReplicationTracker newTracker(final AllocationId allocationId, Settings settings) { + return newTracker(allocationId, updatedGlobalCheckpoint::set, () -> 0L, settings); + } + private ReplicationTracker newTracker(final AllocationId allocationId) { return newTracker(allocationId, updatedGlobalCheckpoint::set, () -> 0L); } @@ -966,7 +970,11 @@ private static FakeClusterState initialState() { relocatingId ); - return new FakeClusterState(initialClusterStateVersion, activeAllocationIds, routingTable(initializingAllocationIds, primaryShard)); + return new FakeClusterState( + initialClusterStateVersion, + activeAllocationIds, + routingTable(initializingAllocationIds, Collections.singleton(primaryShard.allocationId()), primaryShard) + ); } private static void randomLocalCheckpointUpdate(ReplicationTracker gcp) { @@ -1007,6 +1015,7 @@ private static FakeClusterState randomUpdateClusterState(Set<String> allocationI remainingInSyncIds.isEmpty() ? clusterState.inSyncIds : remainingInSyncIds, routingTable( Sets.difference(Sets.union(initializingIdsExceptRelocationTargets, initializingIdsToAdd), initializingIdsToRemove), + Collections.singleton(clusterState.routingTable.primaryShard().allocationId()), clusterState.routingTable.primaryShard() ) ); @@ -1046,9 +1055,20 @@ private static void markAsTrackingAndInSyncQuietly( final ReplicationTracker tracker, final String allocationId, final long localCheckpoint + ) { + markAsTrackingAndInSyncQuietly(tracker, allocationId, localCheckpoint, true); + } + + private static void markAsTrackingAndInSyncQuietly( + final ReplicationTracker tracker, + final String allocationId, + final long localCheckpoint, + final boolean addPRRL ) { try { - addPeerRecoveryRetentionLease(tracker, allocationId); + if (addPRRL) { + addPeerRecoveryRetentionLease(tracker, allocationId); + } tracker.initiateTracking(allocationId); tracker.markAllocationIdAsInSync(allocationId, localCheckpoint); } catch (final InterruptedException e) { @@ -1252,4 +1272,695 @@ public void testPeerRecoveryRetentionLeaseCreationAndRenewal() { ); } + /** + * This test checks that the global checkpoint update mechanism is honored and relies only on the shards that have + * translog stored locally.
+ */ + public void testGlobalCheckpointUpdateWithRemoteTranslogEnabled() { + final long initialClusterStateVersion = randomNonNegativeLong(); + Map<AllocationId, Long> activeWithCheckpoints = randomAllocationsWithLocalCheckpoints(1, 5); + Set<AllocationId> active = new HashSet<>(activeWithCheckpoints.keySet()); + Map<AllocationId, Long> allocations = new HashMap<>(activeWithCheckpoints); + Map<AllocationId, Long> initializingWithCheckpoints = randomAllocationsWithLocalCheckpoints(0, 5); + Set<AllocationId> initializing = new HashSet<>(initializingWithCheckpoints.keySet()); + allocations.putAll(initializingWithCheckpoints); + assertThat(allocations.size(), equalTo(active.size() + initializing.size())); + + final AllocationId primaryId = active.iterator().next(); + Settings settings = Settings.builder().put("index.remote_store.translog.enabled", "true").build(); + final ReplicationTracker tracker = newTracker(primaryId, settings); + assertThat(tracker.getGlobalCheckpoint(), equalTo(UNASSIGNED_SEQ_NO)); + + long primaryLocalCheckpoint = activeWithCheckpoints.get(primaryId); + + logger.info("--> using allocations"); + allocations.keySet().forEach(aId -> { + final String type; + if (active.contains(aId)) { + type = "active"; + } else if (initializing.contains(aId)) { + type = "init"; + } else { + throw new IllegalStateException(aId + " not found in any map"); + } + logger.info(" - [{}], local checkpoint [{}], [{}]", aId, allocations.get(aId), type); + }); + + tracker.updateFromClusterManager(initialClusterStateVersion, ids(active), routingTable(initializing, primaryId)); + tracker.activatePrimaryMode(NO_OPS_PERFORMED); + assertThat(tracker.getReplicationGroup().getReplicationTargets().size(), equalTo(1)); + initializing.forEach(aId -> markAsTrackingAndInSyncQuietly(tracker, aId.getId(), NO_OPS_PERFORMED, false)); + assertThat(tracker.getReplicationGroup().getReplicationTargets().size(), equalTo(1 + initializing.size())); + Set<AllocationId> replicationTargets = tracker.getReplicationGroup() + .getReplicationTargets() + .stream() + .map(ShardRouting::allocationId) + .collect(Collectors.toSet()); + assertTrue(replicationTargets.containsAll(initializing)); + allocations.keySet().forEach(aId -> updateLocalCheckpoint(tracker, aId.getId(), allocations.get(aId))); + + assertEquals(tracker.getGlobalCheckpoint(), primaryLocalCheckpoint); + + // increment checkpoints + active.forEach(aId -> allocations.put(aId, allocations.get(aId) + 1 + randomInt(4))); + initializing.forEach(aId -> allocations.put(aId, allocations.get(aId) + 1 + randomInt(4))); + allocations.keySet().forEach(aId -> updateLocalCheckpoint(tracker, aId.getId(), allocations.get(aId))); + + final long minLocalCheckpointAfterUpdates = allocations.values().stream().min(Long::compareTo).orElse(UNASSIGNED_SEQ_NO); + + // now insert an unknown active/insync id, the checkpoint shouldn't change but a refresh should be requested. + final AllocationId extraId = AllocationId.newInitializing(); + + // first check that adding it without the cluster-manager blessing doesn't change anything.
+ updateLocalCheckpoint(tracker, extraId.getId(), minLocalCheckpointAfterUpdates + 1 + randomInt(4)); + assertNull(tracker.checkpoints.get(extraId.getId())); + expectThrows(IllegalStateException.class, () -> tracker.initiateTracking(extraId.getId())); + + Set<AllocationId> newInitializing = new HashSet<>(initializing); + newInitializing.add(extraId); + tracker.updateFromClusterManager(initialClusterStateVersion + 1, ids(active), routingTable(newInitializing, primaryId)); + + tracker.initiateTracking(extraId.getId()); + + // now notify for the new id + if (randomBoolean()) { + updateLocalCheckpoint(tracker, extraId.getId(), minLocalCheckpointAfterUpdates + 1 + randomInt(4)); + markAsTrackingAndInSyncQuietly(tracker, extraId.getId(), randomInt((int) minLocalCheckpointAfterUpdates), false); + } else { + markAsTrackingAndInSyncQuietly(tracker, extraId.getId(), minLocalCheckpointAfterUpdates + 1 + randomInt(4), false); + } + } + + public void testUpdateFromClusterManagerWithRemoteTranslogEnabled() { + final long initialClusterStateVersion = randomNonNegativeLong(); + Map<AllocationId, Long> activeWithCheckpoints = randomAllocationsWithLocalCheckpoints(2, 5); + Set<AllocationId> active = new HashSet<>(activeWithCheckpoints.keySet()); + Map<AllocationId, Long> allocations = new HashMap<>(activeWithCheckpoints); + Map<AllocationId, Long> initializingWithCheckpoints = randomAllocationsWithLocalCheckpoints(0, 5); + Set<AllocationId> initializing = new HashSet<>(initializingWithCheckpoints.keySet()); + allocations.putAll(initializingWithCheckpoints); + assertThat(allocations.size(), equalTo(active.size() + initializing.size())); + + final AllocationId primaryId = active.iterator().next(); + Settings settings = Settings.builder().put("index.remote_store.translog.enabled", "true").build(); + final ReplicationTracker tracker = newTracker(primaryId, settings); + assertThat(tracker.getGlobalCheckpoint(), equalTo(UNASSIGNED_SEQ_NO)); + + long primaryLocalCheckpoint = activeWithCheckpoints.get(primaryId); + + logger.info("--> using allocations"); + allocations.keySet().forEach(aId -> { + final String type; + if (active.contains(aId)) { + type = "active"; + } else if (initializing.contains(aId)) { + type = "init"; + } else { + throw new IllegalStateException(aId + " not found in any map"); + } + logger.info(" - [{}], local checkpoint [{}], [{}]", aId, allocations.get(aId), type); + }); + + tracker.updateFromClusterManager(initialClusterStateVersion, ids(active), routingTable(initializing, active, primaryId)); + tracker.activatePrimaryMode(NO_OPS_PERFORMED); + assertEquals(tracker.getReplicationGroup().getReplicationTargets().size(), active.size()); + initializing.forEach(aId -> markAsTrackingAndInSyncQuietly(tracker, aId.getId(), NO_OPS_PERFORMED, false)); + assertEquals(tracker.getReplicationGroup().getReplicationTargets().size(), active.size() + initializing.size()); + Set<AllocationId> replicationTargets = tracker.getReplicationGroup() + .getReplicationTargets() + .stream() + .map(ShardRouting::allocationId) + .collect(Collectors.toSet()); + assertTrue(replicationTargets.containsAll(initializing)); + assertTrue(replicationTargets.containsAll(active)); + allocations.keySet().forEach(aId -> updateLocalCheckpoint(tracker, aId.getId(),
allocations.get(aId))); + + final long minLocalCheckpointAfterUpdates = allocations.values().stream().min(Long::compareTo).orElse(UNASSIGNED_SEQ_NO); + + // now insert an unknown active/insync id, the checkpoint shouldn't change but a refresh should be requested. + final AllocationId extraId = AllocationId.newInitializing(); + + // first check that adding it without the cluster-manager blessing doesn't change anything. + updateLocalCheckpoint(tracker, extraId.getId(), minLocalCheckpointAfterUpdates + 1 + randomInt(4)); + assertNull(tracker.checkpoints.get(extraId.getId())); + expectThrows(IllegalStateException.class, () -> tracker.initiateTracking(extraId.getId())); + + Set<AllocationId> newInitializing = new HashSet<>(initializing); + newInitializing.add(extraId); + tracker.updateFromClusterManager(initialClusterStateVersion + 1, ids(active), routingTable(newInitializing, primaryId)); + + tracker.initiateTracking(extraId.getId()); + + // now notify for the new id + if (randomBoolean()) { + updateLocalCheckpoint(tracker, extraId.getId(), minLocalCheckpointAfterUpdates + 1 + randomInt(4)); + markAsTrackingAndInSyncQuietly(tracker, extraId.getId(), randomInt((int) minLocalCheckpointAfterUpdates), false); + } else { + markAsTrackingAndInSyncQuietly(tracker, extraId.getId(), minLocalCheckpointAfterUpdates + 1 + randomInt(4), false); + } + } + + /** + * This test checks that updateGlobalCheckpointOnReplica with remote translog does not violate any of the invariants + */ + public void testUpdateGlobalCheckpointOnReplicaWithRemoteTranslogEnabled() { + final AllocationId active = AllocationId.newInitializing(); + Settings settings = Settings.builder().put("index.remote_store.translog.enabled", "true").build(); + final ReplicationTracker tracker = newTracker(active, settings); + final long globalCheckpoint = randomLongBetween(NO_OPS_PERFORMED, Long.MAX_VALUE - 1); + tracker.updateGlobalCheckpointOnReplica(globalCheckpoint, "test"); + assertEquals(updatedGlobalCheckpoint.get(), globalCheckpoint); + final long nonUpdate = randomLongBetween(NO_OPS_PERFORMED, globalCheckpoint); + updatedGlobalCheckpoint.set(UNASSIGNED_SEQ_NO); + tracker.updateGlobalCheckpointOnReplica(nonUpdate, "test"); + assertEquals(updatedGlobalCheckpoint.get(), UNASSIGNED_SEQ_NO); + final long update = randomLongBetween(globalCheckpoint, Long.MAX_VALUE); + tracker.updateGlobalCheckpointOnReplica(update, "test"); + assertEquals(updatedGlobalCheckpoint.get(), update); + } + + public void testMarkAllocationIdAsInSyncWithRemoteTranslogEnabled() throws Exception { + final long initialClusterStateVersion = randomNonNegativeLong(); + Map<AllocationId, Long> activeWithCheckpoints = randomAllocationsWithLocalCheckpoints(1, 1); + Set<AllocationId> active = new HashSet<>(activeWithCheckpoints.keySet()); + Map<AllocationId, Long> initializingWithCheckpoints = randomAllocationsWithLocalCheckpoints(1, 1); + Set<AllocationId> initializing = new HashSet<>(initializingWithCheckpoints.keySet()); + final AllocationId primaryId = active.iterator().next(); + final AllocationId replicaId = initializing.iterator().next(); + Settings settings = Settings.builder().put("index.remote_store.translog.enabled", "true").build(); + final ReplicationTracker tracker = newTracker(primaryId, settings); + tracker.updateFromClusterManager(initialClusterStateVersion, ids(active), routingTable(initializing, primaryId)); + final long localCheckpoint = randomLongBetween(0, Long.MAX_VALUE - 1); + tracker.activatePrimaryMode(localCheckpoint); + tracker.initiateTracking(replicaId.getId()); + tracker.markAllocationIdAsInSync(replicaId.getId(),
randomLongBetween(NO_OPS_PERFORMED, localCheckpoint - 1)); + assertFalse(tracker.pendingInSync()); + final long updatedLocalCheckpoint = randomLongBetween(1 + localCheckpoint, Long.MAX_VALUE); + updatedGlobalCheckpoint.set(UNASSIGNED_SEQ_NO); + tracker.updateLocalCheckpoint(primaryId.getId(), updatedLocalCheckpoint); + assertEquals(updatedGlobalCheckpoint.get(), updatedLocalCheckpoint); + tracker.updateLocalCheckpoint(replicaId.getId(), localCheckpoint); + assertEquals(updatedGlobalCheckpoint.get(), updatedLocalCheckpoint); + tracker.markAllocationIdAsInSync(replicaId.getId(), updatedLocalCheckpoint); + assertEquals(updatedGlobalCheckpoint.get(), updatedLocalCheckpoint); + } + + public void testMissingActiveIdsDoesNotPreventAdvanceWithRemoteTranslogEnabled() { + final Map<AllocationId, Long> active = randomAllocationsWithLocalCheckpoints(2, 5); + final Map<AllocationId, Long> initializing = randomAllocationsWithLocalCheckpoints(0, 5); + final Map<AllocationId, Long> assigned = new HashMap<>(); + assigned.putAll(active); + assigned.putAll(initializing); + AllocationId primaryId = active.keySet().iterator().next(); + Settings settings = Settings.builder().put("index.remote_store.translog.enabled", "true").build(); + final ReplicationTracker tracker = newTracker(primaryId, settings); + tracker.updateFromClusterManager(randomNonNegativeLong(), ids(active.keySet()), routingTable(initializing.keySet(), primaryId)); + tracker.activatePrimaryMode(NO_OPS_PERFORMED); + List<AllocationId> initializingRandomSubset = randomSubsetOf(initializing.keySet()); + initializingRandomSubset.forEach(k -> markAsTrackingAndInSyncQuietly(tracker, k.getId(), NO_OPS_PERFORMED)); + final AllocationId missingActiveID = randomFrom(active.keySet()); + assigned.entrySet() + .stream() + .filter(e -> !e.getKey().equals(missingActiveID)) + .forEach(e -> updateLocalCheckpoint(tracker, e.getKey().getId(), e.getValue())); + long primaryLocalCheckpoint = active.get(primaryId); + + assertEquals(1 + initializingRandomSubset.size(), tracker.getReplicationGroup().getReplicationTargets().size()); + if (missingActiveID.equals(primaryId) == false) { + assertEquals(tracker.getGlobalCheckpoint(), primaryLocalCheckpoint); + assertEquals(updatedGlobalCheckpoint.get(), primaryLocalCheckpoint); + } + // now update all knowledge of all shards + assigned.forEach((aid, localCP) -> updateLocalCheckpoint(tracker, aid.getId(), 10 + localCP)); + assertEquals(tracker.getGlobalCheckpoint(), 10 + primaryLocalCheckpoint); + assertEquals(updatedGlobalCheckpoint.get(), 10 + primaryLocalCheckpoint); + } + + public void testMissingInSyncIdsDoesNotPreventAdvanceWithRemoteTranslogEnabled() { + final Map<AllocationId, Long> active = randomAllocationsWithLocalCheckpoints(1, 5); + final Map<AllocationId, Long> initializing = randomAllocationsWithLocalCheckpoints(2, 5); + logger.info("active: {}, initializing: {}", active, initializing); + + AllocationId primaryId = active.keySet().iterator().next(); + Settings settings = Settings.builder().put("index.remote_store.translog.enabled", "true").build(); + final ReplicationTracker tracker = newTracker(primaryId, settings); + tracker.updateFromClusterManager(randomNonNegativeLong(), ids(active.keySet()), routingTable(initializing.keySet(), primaryId)); + tracker.activatePrimaryMode(NO_OPS_PERFORMED); + randomSubsetOf(randomIntBetween(1, initializing.size() - 1), initializing.keySet()).forEach( + aId -> markAsTrackingAndInSyncQuietly(tracker, aId.getId(), NO_OPS_PERFORMED) + ); + long primaryLocalCheckpoint = active.get(primaryId); + + active.forEach((aid, localCP) -> updateLocalCheckpoint(tracker, aid.getId(), localCP)); + +
assertEquals(tracker.getGlobalCheckpoint(), primaryLocalCheckpoint); + assertEquals(updatedGlobalCheckpoint.get(), primaryLocalCheckpoint); + + // update again + initializing.forEach((aid, localCP) -> updateLocalCheckpoint(tracker, aid.getId(), localCP)); + assertEquals(tracker.getGlobalCheckpoint(), primaryLocalCheckpoint); + assertEquals(updatedGlobalCheckpoint.get(), primaryLocalCheckpoint); + } + + public void testInSyncIdsAreIgnoredIfNotValidatedByClusterManagerWithRemoteTranslogEnabled() { + final Map<AllocationId, Long> active = randomAllocationsWithLocalCheckpoints(1, 5); + final Map<AllocationId, Long> initializing = randomAllocationsWithLocalCheckpoints(1, 5); + final Map<AllocationId, Long> nonApproved = randomAllocationsWithLocalCheckpoints(1, 5); + final AllocationId primaryId = active.keySet().iterator().next(); + Settings settings = Settings.builder().put("index.remote_store.translog.enabled", "true").build(); + final ReplicationTracker tracker = newTracker(primaryId, settings); + tracker.updateFromClusterManager(randomNonNegativeLong(), ids(active.keySet()), routingTable(initializing.keySet(), primaryId)); + tracker.activatePrimaryMode(NO_OPS_PERFORMED); + initializing.keySet().forEach(k -> markAsTrackingAndInSyncQuietly(tracker, k.getId(), NO_OPS_PERFORMED)); + nonApproved.keySet() + .forEach( + k -> expectThrows(IllegalStateException.class, () -> markAsTrackingAndInSyncQuietly(tracker, k.getId(), NO_OPS_PERFORMED)) + ); + + List<Map<AllocationId, Long>> allocations = Arrays.asList(active, initializing, nonApproved); + Collections.shuffle(allocations, random()); + allocations.forEach(a -> a.forEach((aid, localCP) -> updateLocalCheckpoint(tracker, aid.getId(), localCP))); + + assertNotEquals(UNASSIGNED_SEQ_NO, tracker.getGlobalCheckpoint()); + } + + public void testInSyncIdsAreRemovedIfNotValidatedByClusterManagerWithRemoteTranslogEnabled() { + final long initialClusterStateVersion = randomNonNegativeLong(); + final Map<AllocationId, Long> activeToStay = randomAllocationsWithLocalCheckpoints(1, 5); + final Map<AllocationId, Long> initializingToStay = randomAllocationsWithLocalCheckpoints(1, 5); + final Map<AllocationId, Long> activeToBeRemoved = randomAllocationsWithLocalCheckpoints(1, 5); + final Map<AllocationId, Long> initializingToBeRemoved = randomAllocationsWithLocalCheckpoints(1, 5); + final Set<AllocationId> active = Sets.union(activeToStay.keySet(), activeToBeRemoved.keySet()); + final Set<AllocationId> initializing = Sets.union(initializingToStay.keySet(), initializingToBeRemoved.keySet()); + final Map<AllocationId, Long> allocations = new HashMap<>(); + final AllocationId primaryId = active.iterator().next(); + if (activeToBeRemoved.containsKey(primaryId)) { + activeToStay.put(primaryId, activeToBeRemoved.remove(primaryId)); + } + allocations.putAll(activeToStay); + if (randomBoolean()) { + allocations.putAll(activeToBeRemoved); + } + allocations.putAll(initializingToStay); + if (randomBoolean()) { + allocations.putAll(initializingToBeRemoved); + } + Settings settings = Settings.builder().put("index.remote_store.translog.enabled", "true").build(); + final ReplicationTracker tracker = newTracker(primaryId, settings); + tracker.updateFromClusterManager(initialClusterStateVersion, ids(active), routingTable(initializing, primaryId)); + tracker.activatePrimaryMode(NO_OPS_PERFORMED); + if (randomBoolean()) { + initializingToStay.keySet().forEach(k -> markAsTrackingAndInSyncQuietly(tracker, k.getId(), NO_OPS_PERFORMED)); + } else { + initializing.forEach(k -> markAsTrackingAndInSyncQuietly(tracker, k.getId(), NO_OPS_PERFORMED)); + } + if (randomBoolean()) { + allocations.forEach((aid, localCP) -> updateLocalCheckpoint(tracker, aid.getId(), localCP)); + } + + // now remove
shards + if (randomBoolean()) { + tracker.updateFromClusterManager( + initialClusterStateVersion + 1, + ids(activeToStay.keySet()), + routingTable(initializingToStay.keySet(), primaryId) + ); + allocations.forEach((aid, ckp) -> updateLocalCheckpoint(tracker, aid.getId(), ckp + 10L)); + } else { + allocations.forEach((aid, ckp) -> updateLocalCheckpoint(tracker, aid.getId(), ckp + 10L)); + tracker.updateFromClusterManager( + initialClusterStateVersion + 2, + ids(activeToStay.keySet()), + routingTable(initializingToStay.keySet(), primaryId) + ); + } + + final long checkpoint = activeToStay.get(primaryId) + 10; + assertEquals(tracker.getGlobalCheckpoint(), checkpoint); + } + + public void testUpdateAllocationIdsFromClusterManagerWithRemoteTranslogEnabled() throws Exception { + final long initialClusterStateVersion = randomNonNegativeLong(); + final int numberOfActiveAllocationsIds = randomIntBetween(2, 16); + final int numberOfInitializingIds = randomIntBetween(2, 16); + final Tuple<Set<AllocationId>, Set<AllocationId>> activeAndInitializingAllocationIds = randomActiveAndInitializingAllocationIds( + numberOfActiveAllocationsIds, + numberOfInitializingIds + ); + final Set<AllocationId> activeAllocationIds = activeAndInitializingAllocationIds.v1(); + final Set<AllocationId> initializingIds = activeAndInitializingAllocationIds.v2(); + AllocationId primaryId = activeAllocationIds.iterator().next(); + IndexShardRoutingTable routingTable = routingTable(initializingIds, primaryId); + Settings settings = Settings.builder().put("index.remote_store.translog.enabled", "true").build(); + final ReplicationTracker tracker = newTracker(primaryId, settings); + tracker.updateFromClusterManager(initialClusterStateVersion, ids(activeAllocationIds), routingTable); + tracker.activatePrimaryMode(NO_OPS_PERFORMED); + assertThat(tracker.getReplicationGroup().getInSyncAllocationIds(), equalTo(ids(activeAllocationIds))); + assertThat(tracker.getReplicationGroup().getRoutingTable(), equalTo(routingTable)); + + // first we assert that the in-sync and tracking sets are set up correctly + assertTrue(activeAllocationIds.stream().allMatch(a -> tracker.getTrackedLocalCheckpointForShard(a.getId()).inSync)); + assertTrue( + activeAllocationIds.stream() + .filter(a -> a.equals(primaryId) == false) + .allMatch( + a -> tracker.getTrackedLocalCheckpointForShard(a.getId()).getLocalCheckpoint() == SequenceNumbers.UNASSIGNED_SEQ_NO + ) + ); + assertTrue(initializingIds.stream().noneMatch(a -> tracker.getTrackedLocalCheckpointForShard(a.getId()).inSync)); + assertTrue( + initializingIds.stream() + .filter(a -> a.equals(primaryId) == false) + .allMatch( + a -> tracker.getTrackedLocalCheckpointForShard(a.getId()).getLocalCheckpoint() == SequenceNumbers.UNASSIGNED_SEQ_NO + ) + ); + + // now we will remove some allocation IDs from these and ensure that they propagate through + final Set<AllocationId> removingActiveAllocationIds = new HashSet<>(randomSubsetOf(activeAllocationIds)); + removingActiveAllocationIds.remove(primaryId); + final Set<AllocationId> newActiveAllocationIds = activeAllocationIds.stream() + .filter(a -> !removingActiveAllocationIds.contains(a)) + .collect(Collectors.toSet()); + final List<AllocationId> removingInitializingAllocationIds = randomSubsetOf(initializingIds); + final Set<AllocationId> newInitializingAllocationIds = initializingIds.stream() + .filter(a -> !removingInitializingAllocationIds.contains(a)) + .collect(Collectors.toSet()); + routingTable = routingTable(newInitializingAllocationIds, primaryId); + tracker.updateFromClusterManager(initialClusterStateVersion + 1, ids(newActiveAllocationIds), routingTable); +
assertTrue(newActiveAllocationIds.stream().allMatch(a -> tracker.getTrackedLocalCheckpointForShard(a.getId()).inSync)); + assertTrue(removingActiveAllocationIds.stream().allMatch(a -> tracker.getTrackedLocalCheckpointForShard(a.getId()) == null)); + assertTrue(newInitializingAllocationIds.stream().noneMatch(a -> tracker.getTrackedLocalCheckpointForShard(a.getId()).inSync)); + assertTrue(removingInitializingAllocationIds.stream().allMatch(a -> tracker.getTrackedLocalCheckpointForShard(a.getId()) == null)); + assertThat( + tracker.getReplicationGroup().getInSyncAllocationIds(), + equalTo(ids(Sets.difference(Sets.union(activeAllocationIds, newActiveAllocationIds), removingActiveAllocationIds))) + ); + assertThat(tracker.getReplicationGroup().getRoutingTable(), equalTo(routingTable)); + + /* + * Now we will add an allocation ID to each of active and initializing and ensure they propagate through. Using different lengths + * than we have been using above ensures that we can not collide with a previous allocation ID + */ + newInitializingAllocationIds.add(AllocationId.newInitializing()); + tracker.updateFromClusterManager( + initialClusterStateVersion + 2, + ids(newActiveAllocationIds), + routingTable(newInitializingAllocationIds, primaryId) + ); + assertTrue(newActiveAllocationIds.stream().allMatch(a -> tracker.getTrackedLocalCheckpointForShard(a.getId()).inSync)); + assertTrue( + newActiveAllocationIds.stream() + .filter(a -> a.equals(primaryId) == false) + .allMatch( + a -> tracker.getTrackedLocalCheckpointForShard(a.getId()).getLocalCheckpoint() == SequenceNumbers.UNASSIGNED_SEQ_NO + ) + ); + assertTrue(newInitializingAllocationIds.stream().noneMatch(a -> tracker.getTrackedLocalCheckpointForShard(a.getId()).inSync)); + assertTrue( + newInitializingAllocationIds.stream() + .allMatch( + a -> tracker.getTrackedLocalCheckpointForShard(a.getId()).getLocalCheckpoint() == SequenceNumbers.UNASSIGNED_SEQ_NO + ) + ); + + // the tracking allocation IDs should play no role in determining the global checkpoint + final Map<AllocationId, Integer> activeLocalCheckpoints = newActiveAllocationIds.stream() + .collect(Collectors.toMap(Function.identity(), a -> randomIntBetween(1, 1024))); + activeLocalCheckpoints.forEach((a, l) -> updateLocalCheckpoint(tracker, a.getId(), l)); + final Map<AllocationId, Integer> initializingLocalCheckpoints = newInitializingAllocationIds.stream() + .collect(Collectors.toMap(Function.identity(), a -> randomIntBetween(1, 1024))); + initializingLocalCheckpoints.forEach((a, l) -> updateLocalCheckpoint(tracker, a.getId(), l)); + assertTrue( + activeLocalCheckpoints.entrySet() + .stream() + .allMatch(e -> tracker.getTrackedLocalCheckpointForShard(e.getKey().getId()).getLocalCheckpoint() == e.getValue()) + ); + assertTrue( + initializingLocalCheckpoints.entrySet() + .stream() + .allMatch(e -> tracker.getTrackedLocalCheckpointForShard(e.getKey().getId()).getLocalCheckpoint() == e.getValue()) + ); + final long primaryLocalCheckpoint = activeLocalCheckpoints.get(primaryId); + assertThat(tracker.getGlobalCheckpoint(), equalTo(primaryLocalCheckpoint)); + assertThat(updatedGlobalCheckpoint.get(), equalTo(primaryLocalCheckpoint)); + final long minimumInitializingLocalCheckpoint = (long) initializingLocalCheckpoints.values().stream().min(Integer::compareTo).get(); + + // now we are going to add a new allocation ID and bring it in sync which should move it to the in-sync allocation IDs + final long localCheckpoint = randomIntBetween( + 0, + Math.toIntExact(Math.min(primaryLocalCheckpoint, minimumInitializingLocalCheckpoint) - 1) + ); + +
// using a different length than we have been using above ensures that we can not collide with a previous allocation ID + final AllocationId newSyncingAllocationId = AllocationId.newInitializing(); + newInitializingAllocationIds.add(newSyncingAllocationId); + tracker.updateFromClusterManager( + initialClusterStateVersion + 3, + ids(newActiveAllocationIds), + routingTable(newInitializingAllocationIds, primaryId) + ); + addPeerRecoveryRetentionLease(tracker, newSyncingAllocationId); + final CyclicBarrier barrier = new CyclicBarrier(2); + final Thread thread = new Thread(() -> { + try { + barrier.await(); + tracker.initiateTracking(newSyncingAllocationId.getId()); + tracker.markAllocationIdAsInSync(newSyncingAllocationId.getId(), localCheckpoint); + barrier.await(); + } catch (final BrokenBarrierException | InterruptedException e) { + throw new RuntimeException(e); + } + }); + + thread.start(); + + barrier.await(); + + assertBusy(() -> { + assertFalse(tracker.pendingInSync.contains(newSyncingAllocationId.getId())); + assertTrue(tracker.getTrackedLocalCheckpointForShard(newSyncingAllocationId.getId()).inSync); + }); + + tracker.updateLocalCheckpoint(newSyncingAllocationId.getId(), randomIntBetween(Math.toIntExact(primaryLocalCheckpoint), 1024)); + + barrier.await(); + + assertFalse(tracker.pendingInSync.contains(newSyncingAllocationId.getId())); + assertTrue(tracker.getTrackedLocalCheckpointForShard(newSyncingAllocationId.getId()).inSync); + + /* + * The new in-sync allocation ID is in the in-sync set now, yet the cluster-manager does not know this; the allocation ID should still be in + * the in-sync set even if we receive a cluster state update that does not reflect this. + * + */ + tracker.updateFromClusterManager( + initialClusterStateVersion + 4, + ids(newActiveAllocationIds), + routingTable(newInitializingAllocationIds, primaryId) + ); + assertTrue(tracker.getTrackedLocalCheckpointForShard(newSyncingAllocationId.getId()).inSync); + assertFalse(tracker.pendingInSync.contains(newSyncingAllocationId.getId())); + } + + public void testPrimaryContextHandoffWithRemoteTranslogEnabled() throws IOException { + Settings settings = Settings.builder().put("index.remote_store.translog.enabled", "true").build(); + final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("test", settings); + final ShardId shardId = new ShardId("test", "_na_", 0); + + FakeClusterState clusterState = initialState(); + final AllocationId aId = clusterState.routingTable.primaryShard().allocationId(); + final LongConsumer onUpdate = updatedGlobalCheckpoint -> {}; + final long primaryTerm = randomNonNegativeLong(); + final long globalCheckpoint = UNASSIGNED_SEQ_NO; + final BiConsumer<RetentionLeases, ActionListener<ReplicationResponse>> onNewRetentionLease = (leases, listener) -> {}; + ReplicationTracker oldPrimary = new ReplicationTracker( + shardId, + aId.getId(), + indexSettings, + primaryTerm, + globalCheckpoint, + onUpdate, + () -> 0L, + onNewRetentionLease, + OPS_BASED_RECOVERY_ALWAYS_REASONABLE + ); + ReplicationTracker newPrimary = new ReplicationTracker( + shardId, + aId.getRelocationId(), + indexSettings, + primaryTerm, + globalCheckpoint, + onUpdate, + () -> 0L, + onNewRetentionLease, + OPS_BASED_RECOVERY_ALWAYS_REASONABLE + ); + + Set<String> allocationIds = new HashSet<>(Arrays.asList(oldPrimary.shardAllocationId, newPrimary.shardAllocationId)); + + clusterState.apply(oldPrimary); + clusterState.apply(newPrimary); + + oldPrimary.activatePrimaryMode(randomIntBetween(Math.toIntExact(NO_OPS_PERFORMED), 10)); + addPeerRecoveryRetentionLease(oldPrimary,
newPrimary.shardAllocationId); + newPrimary.updateRetentionLeasesOnReplica(oldPrimary.getRetentionLeases()); + + final int numUpdates = randomInt(10); + for (int i = 0; i < numUpdates; i++) { + if (rarely()) { + clusterState = randomUpdateClusterState(allocationIds, clusterState); + clusterState.apply(oldPrimary); + clusterState.apply(newPrimary); + } + if (randomBoolean()) { + randomLocalCheckpointUpdate(oldPrimary); + } + if (randomBoolean()) { + randomMarkInSync(oldPrimary, newPrimary); + } + } + + // simulate transferring the global checkpoint to the new primary after finalizing recovery before the handoff + markAsTrackingAndInSyncQuietly( + oldPrimary, + newPrimary.shardAllocationId, + Math.max(SequenceNumbers.NO_OPS_PERFORMED, oldPrimary.getGlobalCheckpoint() + randomInt(5)) + ); + oldPrimary.updateGlobalCheckpointForShard(newPrimary.shardAllocationId, oldPrimary.getGlobalCheckpoint()); + ReplicationTracker.PrimaryContext primaryContext = oldPrimary.startRelocationHandoff(newPrimary.shardAllocationId); + + if (randomBoolean()) { + // cluster state update after primary context handoff + if (randomBoolean()) { + clusterState = randomUpdateClusterState(allocationIds, clusterState); + clusterState.apply(oldPrimary); + clusterState.apply(newPrimary); + } + + // abort handoff, check that we can continue updates and retry handoff + oldPrimary.abortRelocationHandoff(); + + if (rarely()) { + clusterState = randomUpdateClusterState(allocationIds, clusterState); + clusterState.apply(oldPrimary); + clusterState.apply(newPrimary); + } + if (randomBoolean()) { + randomLocalCheckpointUpdate(oldPrimary); + } + if (randomBoolean()) { + randomMarkInSync(oldPrimary, newPrimary); + } + + // do another handoff + primaryContext = oldPrimary.startRelocationHandoff(newPrimary.shardAllocationId); + } + + // send primary context through the wire + BytesStreamOutput output = new BytesStreamOutput(); + primaryContext.writeTo(output); + StreamInput streamInput = output.bytes().streamInput(); + primaryContext = new ReplicationTracker.PrimaryContext(streamInput); + switch (randomInt(3)) { + case 0: { + // apply cluster state update on old primary while primary context is being transferred + clusterState = randomUpdateClusterState(allocationIds, clusterState); + clusterState.apply(oldPrimary); + // activate new primary + newPrimary.activateWithPrimaryContext(primaryContext); + // apply cluster state update on new primary so that the states on old and new primary are comparable + clusterState.apply(newPrimary); + break; + } + case 1: { + // apply cluster state update on new primary while primary context is being transferred + clusterState = randomUpdateClusterState(allocationIds, clusterState); + clusterState.apply(newPrimary); + // activate new primary + newPrimary.activateWithPrimaryContext(primaryContext); + // apply cluster state update on old primary so that the states on old and new primary are comparable + clusterState.apply(oldPrimary); + break; + } + case 2: { + // apply cluster state update on both copies while primary context is being transferred + clusterState = randomUpdateClusterState(allocationIds, clusterState); + clusterState.apply(oldPrimary); + clusterState.apply(newPrimary); + newPrimary.activateWithPrimaryContext(primaryContext); + break; + } + case 3: { + // no cluster state update + newPrimary.activateWithPrimaryContext(primaryContext); + break; + } + } + + assertTrue(oldPrimary.primaryMode); + assertTrue(newPrimary.primaryMode); + assertThat(newPrimary.appliedClusterStateVersion, 
equalTo(oldPrimary.appliedClusterStateVersion)); + /* + * We can not assert on shared knowledge of the global checkpoint between the old primary and the new primary as the new primary + * will update its global checkpoint state without the old primary learning of it, and the old primary could have updated its + * global checkpoint state after the primary context was transferred. + */ + Map<String, ReplicationTracker.CheckpointState> oldPrimaryCheckpointsCopy = new HashMap<>(oldPrimary.checkpoints); + oldPrimaryCheckpointsCopy.remove(oldPrimary.shardAllocationId); + oldPrimaryCheckpointsCopy.remove(newPrimary.shardAllocationId); + Map<String, ReplicationTracker.CheckpointState> newPrimaryCheckpointsCopy = new HashMap<>(newPrimary.checkpoints); + newPrimaryCheckpointsCopy.remove(oldPrimary.shardAllocationId); + newPrimaryCheckpointsCopy.remove(newPrimary.shardAllocationId); + assertThat(newPrimaryCheckpointsCopy, equalTo(oldPrimaryCheckpointsCopy)); + // we can however assert that shared knowledge of the local checkpoint and in-sync status is equal + assertThat( + oldPrimary.checkpoints.get(oldPrimary.shardAllocationId).localCheckpoint, + equalTo(newPrimary.checkpoints.get(oldPrimary.shardAllocationId).localCheckpoint) + ); + assertThat( + oldPrimary.checkpoints.get(newPrimary.shardAllocationId).localCheckpoint, + equalTo(newPrimary.checkpoints.get(newPrimary.shardAllocationId).localCheckpoint) + ); + assertThat( + oldPrimary.checkpoints.get(oldPrimary.shardAllocationId).inSync, + equalTo(newPrimary.checkpoints.get(oldPrimary.shardAllocationId).inSync) + ); + assertThat( + oldPrimary.checkpoints.get(newPrimary.shardAllocationId).inSync, + equalTo(newPrimary.checkpoints.get(newPrimary.shardAllocationId).inSync) + ); + assertThat(newPrimary.getGlobalCheckpoint(), equalTo(oldPrimary.getGlobalCheckpoint())); + assertThat(newPrimary.routingTable, equalTo(oldPrimary.routingTable)); + assertThat(newPrimary.replicationGroup, equalTo(oldPrimary.replicationGroup)); + + assertFalse(oldPrimary.relocated); + oldPrimary.completeRelocationHandoff(); + assertFalse(oldPrimary.primaryMode); + assertTrue(oldPrimary.relocated); + } + + public void testIllegalStateExceptionIfUnknownAllocationIdWithRemoteTranslogEnabled() { + final AllocationId active = AllocationId.newInitializing(); + final AllocationId initializing = AllocationId.newInitializing(); + Settings settings = Settings.builder().put("index.remote_store.translog.enabled", "true").build(); + final ReplicationTracker tracker = newTracker(active, settings); + tracker.updateFromClusterManager( + randomNonNegativeLong(), + Collections.singleton(active.getId()), + routingTable(Collections.singleton(initializing), active) + ); + tracker.activatePrimaryMode(NO_OPS_PERFORMED); + + expectThrows(IllegalStateException.class, () -> tracker.initiateTracking(randomAlphaOfLength(10))); + expectThrows(IllegalStateException.class, () -> tracker.markAllocationIdAsInSync(randomAlphaOfLength(10), randomNonNegativeLong())); + } + } diff --git a/server/src/test/java/org/opensearch/index/store/remote/file/OnDemandBlockSnapshotIndexInputTests.java b/server/src/test/java/org/opensearch/index/store/remote/file/OnDemandBlockSnapshotIndexInputTests.java index 6f3387a935c03..9104ab1a6882b 100644 --- a/server/src/test/java/org/opensearch/index/store/remote/file/OnDemandBlockSnapshotIndexInputTests.java +++ b/server/src/test/java/org/opensearch/index/store/remote/file/OnDemandBlockSnapshotIndexInputTests.java @@ -53,7 +53,6 @@ public class OnDemandBlockSnapshotIndexInputTests extends OpenSearchTestCase { @Before public void init() { - assumeFalse("Awaiting Windows fix
https://github.com/opensearch-project/OpenSearch/issues/5396", Constants.WINDOWS); transferManager = mock(TransferManager.class); lockFactory = SimpleFSLockFactory.INSTANCE; path = LuceneTestCase.createTempDir("OnDemandBlockSnapshotIndexInputTests"); @@ -69,6 +68,7 @@ public void clean() { } public void testVariousBlockSize() throws Exception { + assumeFalse("Awaiting Windows fix https://github.com/opensearch-project/OpenSearch/issues/5396", Constants.WINDOWS); int fileSize = 29360128; int blockSizeShift; diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index ff4005d9bcedf..663c325db12c2 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -156,6 +156,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.transport.TransportAddress; import org.opensearch.common.util.BigArrays; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.PageCacheRecycler; import org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.common.util.concurrent.PrioritizedOpenSearchThreadPoolExecutor; @@ -191,6 +192,7 @@ import org.opensearch.ingest.IngestService; import org.opensearch.monitor.StatusInfo; import org.opensearch.node.ResponseCollectorService; +import org.opensearch.extensions.ExtensionsManager; import org.opensearch.plugins.PluginsService; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.Repository; @@ -1795,40 +1797,79 @@ public void onFailure(final Exception e) { ); final BigArrays bigArrays = new BigArrays(new PageCacheRecycler(settings), null, "test"); final MapperRegistry mapperRegistry = new IndicesModule(Collections.emptyList()).getMapperRegistry(); - indicesService = new IndicesService( - settings, - mock(PluginsService.class), - nodeEnv, - namedXContentRegistry, - new AnalysisRegistry( - environment, - emptyMap(), - emptyMap(), - emptyMap(), - emptyMap(), + if (FeatureFlags.isEnabled(FeatureFlags.EXTENSIONS)) { + indicesService = new IndicesService( + settings, + mock(PluginsService.class), + mock(ExtensionsManager.class), + nodeEnv, + namedXContentRegistry, + new AnalysisRegistry( + environment, + emptyMap(), + emptyMap(), + emptyMap(), + emptyMap(), + emptyMap(), + emptyMap(), + emptyMap(), + emptyMap(), + emptyMap() + ), + indexNameExpressionResolver, + mapperRegistry, + namedWriteableRegistry, + threadPool, + indexScopedSettings, + new NoneCircuitBreakerService(), + bigArrays, + scriptService, + clusterService, + client, + new MetaStateService(nodeEnv, namedXContentRegistry), + Collections.emptyList(), emptyMap(), + null, emptyMap(), + new RemoteSegmentStoreDirectoryFactory(() -> repositoriesService) + ); + } else { + indicesService = new IndicesService( + settings, + mock(PluginsService.class), + nodeEnv, + namedXContentRegistry, + new AnalysisRegistry( + environment, + emptyMap(), + emptyMap(), + emptyMap(), + emptyMap(), + emptyMap(), + emptyMap(), + emptyMap(), + emptyMap(), + emptyMap() + ), + indexNameExpressionResolver, + mapperRegistry, + namedWriteableRegistry, + threadPool, + indexScopedSettings, + new NoneCircuitBreakerService(), + bigArrays, + scriptService, + clusterService, + client, + new MetaStateService(nodeEnv, namedXContentRegistry), + Collections.emptyList(), emptyMap(), + null, emptyMap(), - emptyMap() - ), - 
indexNameExpressionResolver, - mapperRegistry, - namedWriteableRegistry, - threadPool, - indexScopedSettings, - new NoneCircuitBreakerService(), - bigArrays, - scriptService, - clusterService, - client, - new MetaStateService(nodeEnv, namedXContentRegistry), - Collections.emptyList(), - emptyMap(), - null, - emptyMap(), - new RemoteSegmentStoreDirectoryFactory(() -> repositoriesService) - ); + new RemoteSegmentStoreDirectoryFactory(() -> repositoriesService) + ); + } + final RecoverySettings recoverySettings = new RecoverySettings(settings, clusterSettings); snapshotShardsService = new SnapshotShardsService( settings, diff --git a/server/src/test/java/org/opensearch/transport/TransportServiceHandshakeTests.java b/server/src/test/java/org/opensearch/transport/TransportServiceHandshakeTests.java index 8463d9268e760..c0af5d6e76c59 100644 --- a/server/src/test/java/org/opensearch/transport/TransportServiceHandshakeTests.java +++ b/server/src/test/java/org/opensearch/transport/TransportServiceHandshakeTests.java @@ -41,12 +41,15 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.util.PageCacheRecycler; import org.opensearch.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.test.MockLogAppender; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.VersionUtils; import org.opensearch.test.transport.MockTransportService; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.nio.MockNioTransport; +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.LogManager; import org.junit.After; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -223,6 +226,36 @@ public void testNodeConnectWithDifferentNodeId() { assertFalse(handleA.transportService.nodeConnected(discoveryNode)); } + public void testNodeConnectWithDifferentNodeIDSkipValidation() throws IllegalAccessException { + Settings settings = Settings.builder().put("cluster.name", "test").build(); + NetworkHandle handleA = startServices("TS_A", settings, Version.CURRENT); + NetworkHandle handleB = startServices("TS_B", settings, Version.CURRENT); + DiscoveryNode discoveryNode = new DiscoveryNode( + randomAlphaOfLength(10), + handleB.discoveryNode.getAddress(), + emptyMap(), + emptySet(), + handleB.discoveryNode.getVersion() + ); + try (MockLogAppender mockLogAppender = MockLogAppender.createForLoggers(LogManager.getLogger(TransportService.class))) { + + mockLogAppender.addExpectation( + new MockLogAppender.SeenEventExpectation( + "Validation Skipped", + "org.opensearch.transport.TransportService", + Level.INFO, + "Connection validation was skipped" + ) + ); + + handleA.transportService.connectToExtensionNode(discoveryNode, TestProfiles.LIGHT_PROFILE); + + mockLogAppender.assertAllExpectationsMatched(); + + assertTrue(handleA.transportService.nodeConnected(discoveryNode)); + } + } + private static class NetworkHandle { private TransportService transportService; private DiscoveryNode discoveryNode; diff --git a/server/src/test/resources/config/extensions.yml b/server/src/test/resources/config/extensions.yml new file mode 100644 index 0000000000000..6264e9630ad60 --- /dev/null +++ b/server/src/test/resources/config/extensions.yml @@ -0,0 +1,13 @@ +extensions: + - name: firstExtension + uniqueId: uniqueid1 + hostName: 'myIndependentPluginHost1' + hostAddress: '127.0.0.0' + port: '9300' + version: '3.0.0' + - name: "secondExtension" + uniqueId: 'uniqueid2' + hostName: 
'myIndependentPluginHost2' + hostAddress: '127.0.0.1' + port: '9301' + version: '2.0.0' diff --git a/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java b/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java index b3f062aef4fbe..92c80ac1799ef 100644 --- a/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java @@ -52,6 +52,7 @@ import org.opensearch.action.support.ActionTestUtils; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.action.support.WriteRequest; +import org.opensearch.action.support.replication.FanoutReplicationProxy; import org.opensearch.action.support.replication.PendingReplicationActions; import org.opensearch.action.support.replication.ReplicatedWriteRequest; import org.opensearch.action.support.replication.ReplicationOperation; @@ -727,7 +728,8 @@ public void execute() { opType, primaryTerm, TimeValue.timeValueMillis(20), - TimeValue.timeValueSeconds(60) + TimeValue.timeValueSeconds(60), + new FanoutReplicationProxy<>() ).execute(); } catch (Exception e) { listener.onFailure(e);