From 8dd63a51a9c3c4781c0316cc256e0a2dc2c6817f Mon Sep 17 00:00:00 2001
From: Craig Perkins
Date: Thu, 15 Dec 2022 11:30:02 -0500
Subject: [PATCH] [Feature/Identity] Merge remote-tracking branch 'origin/main' into feature/identity (#5581)

* Fix flaky ShardIndexingPressureConcurrentExecutionTests (#5439)

Add conditional check on assertNull to fix flaky tests.

Signed-off-by: Rishikesh1159

* Fix bwc for cluster manager throttling settings (#5305)

Signed-off-by: Dhwanil Patel

* Update ingest-attachment plugin dependencies: Apache Tika 2.6.0, Apache Mime4j 0.8.8, Apache Poi 5.2.3, Apache PdfBox 2.0.27 (#5448)

Signed-off-by: Andriy Redko
Signed-off-by: Andriy Redko

* Enhance CheckpointState to support no-op replication (#5282)

* CheckpointState enhanced to support no-op replication

Signed-off-by: Ashish Singh
Co-authored-by: Bukhtawar Khan

* [BUG] org.opensearch.repositories.s3.RepositoryS3ClientYamlTestSuiteIT/test {yaml=repository_s3/20_repository_permanent_credentials/Snapshot and Restore with repository-s3 using permanent credentials} flaky: randomizing basePath (#5482)

Signed-off-by: Andriy Redko
Signed-off-by: Andriy Redko

* [Bug] fix case sensitivity for wildcard queries (#5462)

Fixes the wildcard query to not normalize the pattern when case_insensitive is set by the user. This is achieved by creating a new normalizedWildcardQuery method so that query_string queries (which do not support case sensitivity) can still normalize the pattern when the default analyzer is used, maintaining existing behavior.

Signed-off-by: Nicholas Walter Knize

* Support OpenSSL Provider with default Netty allocator (#5460)

Signed-off-by: Andriy Redko
Signed-off-by: Andriy Redko

* Revert "build no-jdk distributions as part of release build (#4902)" (#5465)

This reverts commit 8c9ca4e858e6333265080972cf57809dbc086208. It seems that this wasn't entirely the correct way and is currently blocking us from removing the `build.sh` from the `opensearch-build` repository (i.e. this `build.sh` here is not yet being used). See the discussion in opensearch-project/opensearch-build#2835 for further details.

Signed-off-by: Ralph Ursprung
Signed-off-by: Ralph Ursprung

* Add max_shard_size parameter for Shrink API (fix supported version after backport) (#5503)

Signed-off-by: Andriy Redko
Signed-off-by: Andriy Redko

* Sync CODEOWNERS with MAINTAINERS. (#5501)

Signed-off-by: Daniel (dB.) Doubrovkine
Signed-off-by: Daniel (dB.) Doubrovkine

* Added jackson dependency to server (#5366)

* Added jackson dependency to server

Signed-off-by: Ryan Bogan

* Updated CHANGELOG

Signed-off-by: Ryan Bogan

* Update build.gradle files

Signed-off-by: Ryan Bogan

* Add RuntimePermission to fix errors

Signed-off-by: Ryan Bogan

Signed-off-by: Ryan Bogan

* Fix flaky test BulkIntegrationIT.testDeleteIndexWhileIndexing (#5491)

Signed-off-by: Poojita Raj
Signed-off-by: Poojita Raj

* Add release notes for 2.4.1 (#5488)

Signed-off-by: Xue Zhou
Signed-off-by: Xue Zhou

* Properly skip OnDemandBlockSnapshotIndexInputTests.testVariousBlockSize on Windows. (#5511)

PR https://github.com/opensearch-project/OpenSearch/pull/5397 skipped this test in the @Before block but it still frequently throws a TestCouldNotBeSkippedException. This is caused by the @After block still executing and throwing an exception while cleaning the directory created at the path in @Before. Moving the assumption to the individual test prevents this exception by ensuring the path exists.

Signed-off-by: Marc Handalian
Signed-off-by: Marc Handalian
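That last entry describes a subtle JUnit lifecycle interaction: an assumption in @Before skips the test body, but @After still runs and can fail against state that setup never created. Below is a minimal sketch of the corrected pattern; the class and directory names are hypothetical stand-ins, not the actual OpenSearch test code.

```java
import static org.junit.Assume.assumeFalse;

import java.nio.file.Files;
import java.nio.file.Path;

import org.apache.lucene.util.Constants;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

public class BlockSizeSkipSketch {
    private Path workDir;

    @Before
    public void setUp() throws Exception {
        // No assumption here: setup always creates the directory,
        // so tearDown always has something consistent to clean up.
        workDir = Files.createTempDirectory("block-snapshot-test");
    }

    @After
    public void tearDown() throws Exception {
        // Runs even when the test body is skipped, and cannot fail on a
        // missing path because setUp unconditionally created it.
        Files.deleteIfExists(workDir);
    }

    @Test
    public void testVariousBlockSize() {
        // Assumption moved into the test itself: on Windows the test is
        // reported as skipped while setup/teardown still ran as a pair.
        assumeFalse("Test does not run on Windows", Constants.WINDOWS);
        // ... exercise the block-based snapshot IndexInput here ...
    }
}
```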
* Merge first batch of feature/extensions into main (#5347)

* Merge first batch of feature/extensions into main

Signed-off-by: Ryan Bogan

* Fixed CHANGELOG

Signed-off-by: Ryan Bogan

* Fixed newline errors

Signed-off-by: Ryan Bogan

* Renaming and CHANGELOG fixes

Signed-off-by: Ryan Bogan

* Refactor extension loading into private method

Signed-off-by: Ryan Bogan

* Removed skipValidation and added connectToExtensionNode method

Signed-off-by: Ryan Bogan

* Remove unnecessary feature flag calls

Signed-off-by: Ryan Bogan

* Renaming and exception handling

Signed-off-by: Ryan Bogan

* Change latches to CompletableFuture

Signed-off-by: Ryan Bogan

* Removed unnecessary validateSettingKey call

Signed-off-by: Ryan Bogan

* Fix azure-core dependency

Signed-off-by: Ryan Bogan

* Update SHAs

Signed-off-by: Ryan Bogan

* Remove unintended dependency changes

Signed-off-by: Ryan Bogan

* Removed dynamic settings registration, removed info() method, and added NoopExtensionsManager

Signed-off-by: Ryan Bogan

* Add javadoc

Signed-off-by: Ryan Bogan

* Fixed spotless failure

Signed-off-by: Ryan Bogan

* Removed NoopExtensionsManager

Signed-off-by: Ryan Bogan

* Added functioning NoopExtensionsManager

Signed-off-by: Ryan Bogan

* Added missing javadoc

Signed-off-by: Ryan Bogan

* Remove forbiddenAPI

Signed-off-by: Ryan Bogan

* Fix spotless

Signed-off-by: Ryan Bogan

* Change logger.info to logger.error in handleException

Signed-off-by: Ryan Bogan

* Fix ExtensionsManagerTests

Signed-off-by: Ryan Bogan

* Removing unrelated change

Signed-off-by: Ryan Bogan

* Update SHAs

Signed-off-by: Ryan Bogan

Signed-off-by: Ryan Bogan

* Bump commons-compress from 1.21 to 1.22 (#5520)

Bumps commons-compress from 1.21 to 1.22.

---
updated-dependencies:
- dependency-name: org.apache.commons:commons-compress
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot]

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>

* [Segment Replication] Trigger a round of replication for replica shards during peer recovery when segment replication is enabled (#5332)

* Fix newly added replica shards falling behind primary.

Signed-off-by: Rishikesh1159

* Trigger a round of replication during peer recovery when segment replication is enabled.

Signed-off-by: Rishikesh1159

* Remove unnecessary start replication overloaded method.

Signed-off-by: Rishikesh1159

* Add test for failure case and refactor some code.

Signed-off-by: Rishikesh1159

* Apply spotless check.

Signed-off-by: Rishikesh1159

* Addressing comments on the PR.

Signed-off-by: Rishikesh1159

* Remove unnecessary condition check.

Signed-off-by: Rishikesh1159

* Apply spotless check.

Signed-off-by: Rishikesh1159

* Add step listeners to resolve forcing round of segment replication.

Signed-off-by: Rishikesh1159

Signed-off-by: Rishikesh1159
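Among the extension commits above, "Change latches to CompletableFuture" captures a general pattern worth a sketch. The following is a hedged illustration only; the class and callback names are hypothetical stand-ins, not the actual ExtensionsManager API. The point is that a future carries either the response or the failure cause in a single object, which a bare CountDownLatch cannot.

```java
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;

final class ExtensionResponseSketch {
    // One future per in-flight request replaces a CountDownLatch
    // plus shared mutable fields for the result and the error.
    private final CompletableFuture<String> response = new CompletableFuture<>();

    // Success callback from the transport layer.
    void handleResponse(String extensionName) {
        response.complete(extensionName);
    }

    // Failure callback: the cause travels with the future itself,
    // which a countDown() on a latch cannot express.
    void handleException(Exception e) {
        response.completeExceptionally(e);
    }

    // Caller side: one bounded wait instead of latch.await(timeout)
    // followed by a check of whichever field the handler happened to set.
    String awaitResponse() {
        return response.orTimeout(10, TimeUnit.SECONDS).join();
    }
}
```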
* Adding support to register settings dynamically (#5495)

* Adding support to register settings dynamically

Signed-off-by: Ryan Bogan

* Update CHANGELOG

Signed-off-by: Ryan Bogan

* Removed unnecessary registerSetting methods

Signed-off-by: Ryan Bogan

* Change setting registration order

Signed-off-by: Ryan Bogan

* Add unregisterSettings method

Signed-off-by: Ryan Bogan

* Remove unnecessary feature flag

Signed-off-by: Ryan Bogan

Signed-off-by: Ryan Bogan

* Updated 1.3.7 release notes date (#5536)

Signed-off-by: owaiskazi19
Signed-off-by: owaiskazi19

* Pre conditions check before updating weighted routing metadata (#4955)

* Pre conditions check to allow weight updates for non-decommissioned attribute

Signed-off-by: Rishab Nahata

* Atomically update cluster state with decommission status and corresponding action (#5093)

* Atomically update the cluster state with decommission status and its corresponding action in the same execute call

Signed-off-by: Rishab Nahata

* Update Netty to 4.1.86.Final (#5529)

Signed-off-by: Andriy Redko
Signed-off-by: Andriy Redko

* Update release date in 2.4.1 release notes (#5549)

Signed-off-by: Suraj Singh
Signed-off-by: Suraj Singh

* Update 2.4.1 release notes (#5552)

Signed-off-by: Andriy Redko
Signed-off-by: Andriy Redko

* Refactor fuzziness interface on query builders (#5433)

* Refactor Object to Fuzziness type for all query builders

Signed-off-by: noCharger

* Revise on bwc

Signed-off-by: noCharger

* Update change log

Signed-off-by: noCharger

Signed-off-by: noCharger
Co-authored-by: Daniel (dB.) Doubrovkine

* Upgrade lucene version (#5570)

* Added bwc version 2.4.2

Signed-off-by: Daniel (dB.) Doubrovkine

* Added 2.4.2.

Signed-off-by: Daniel (dB.) Doubrovkine

* Update Lucene snapshot to 9.5.0-snapshot-d5cef1c

Signed-off-by: Suraj Singh

* Update changelog entry

Signed-off-by: Suraj Singh

* Add 2.4.2 bwc version

Signed-off-by: Suraj Singh

* Internal changes post lucene upgrade

Signed-off-by: Suraj Singh

Signed-off-by: Daniel (dB.) Doubrovkine
Signed-off-by: Suraj Singh
Co-authored-by: opensearch-ci-bot
Co-authored-by: Daniel (dB.) Doubrovkine

* Add CI bundle pattern to distribution download (#5348)

* Add CI bundle pattern for ivy repo

Signed-off-by: Zelin Hao

* Gradle update

Signed-off-by: Zelin Hao

* Extract path

Signed-off-by: Zelin Hao

* Change with customDistributionDownloadType

Signed-off-by: Zelin Hao

* Add default for exception handle

Signed-off-by: Zelin Hao

* Add documentations

Signed-off-by: Zelin Hao

Signed-off-by: Zelin Hao

* Bump protobuf-java from 3.21.9 to 3.21.11 in /plugins/repository-hdfs (#5519)

* Bump protobuf-java from 3.21.9 to 3.21.11 in /plugins/repository-hdfs

Bumps [protobuf-java](https://github.com/protocolbuffers/protobuf) from 3.21.9 to 3.21.11.
- [Release notes](https://github.com/protocolbuffers/protobuf/releases)
- [Changelog](https://github.com/protocolbuffers/protobuf/blob/main/generate_changelog.py)
- [Commits](https://github.com/protocolbuffers/protobuf/compare/v3.21.9...v3.21.11)

---
updated-dependencies:
- dependency-name: com.google.protobuf:protobuf-java
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot]

* Updating SHAs

Signed-off-by: dependabot[bot]

* Updated changelog

Signed-off-by: Owais Kazi

Signed-off-by: dependabot[bot]
Signed-off-by: Owais Kazi
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: dependabot[bot]
Co-authored-by: Owais Kazi
Co-authored-by: Suraj Singh
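Of the changes above, the dynamic setting registration added in #5495 is the most API-like, so a rough sketch of the idea follows. The method names registerSetting/unregisterSettings come from the commit messages, but the signatures, the stand-in Setting type, and the duplicate-key check are assumptions, not the real SettingsModule code.

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

final class DynamicSettingsSketch {

    // Stand-in for org.opensearch.common.settings.Setting<?>.
    static final class Setting {
        final String key;
        Setting(String key) { this.key = key; }
    }

    private final Map<String, Setting> registered = new ConcurrentHashMap<>();

    // Called while wiring up an extension after the node has started,
    // rather than from a plugin's fixed getSettings() list.
    void registerSetting(Setting setting) {
        if (registered.putIfAbsent(setting.key, setting) != null) {
            throw new IllegalArgumentException("setting already registered: " + setting.key);
        }
    }

    // Counterpart added in the same change, so a failed registration
    // pass can be rolled back.
    void unregisterSettings(Iterable<Setting> settings) {
        for (Setting s : settings) {
            registered.remove(s.key);
        }
    }
}
```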
Signed-off-by: Rishikesh1159
Signed-off-by: Dhwanil Patel
Signed-off-by: Andriy Redko
Signed-off-by: Ashish Singh
Signed-off-by: Nicholas Walter Knize
Signed-off-by: Ralph Ursprung
Signed-off-by: Daniel (dB.) Doubrovkine
Signed-off-by: Ryan Bogan
Signed-off-by: Poojita Raj
Signed-off-by: Xue Zhou
Signed-off-by: Marc Handalian
Signed-off-by: dependabot[bot]
Signed-off-by: owaiskazi19
Signed-off-by: Rishab Nahata
Signed-off-by: Suraj Singh
Signed-off-by: noCharger
Signed-off-by: Zelin Hao
Signed-off-by: Owais Kazi
Co-authored-by: Rishikesh Pasham <62345295+Rishikesh1159@users.noreply.github.com>
Co-authored-by: Dhwanil Patel
Co-authored-by: Andriy Redko
Co-authored-by: Ashish
Co-authored-by: Nick Knize
Co-authored-by: Ralph Ursprung <39383228+rursprung@users.noreply.github.com>
Co-authored-by: Daniel (dB.) Doubrovkine
Co-authored-by: Ryan Bogan <10944539+ryanbogan@users.noreply.github.com>
Co-authored-by: Poojita Raj
Co-authored-by: Xue Zhou <85715413+xuezhou25@users.noreply.github.com>
Co-authored-by: Marc Handalian
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Owais Kazi
Co-authored-by: Rishab Nahata
Co-authored-by: Suraj Singh
Co-authored-by: Louis Chu
Co-authored-by: opensearch-ci-bot
Co-authored-by: Zelin Hao <87548827+zelinh@users.noreply.github.com>
Co-authored-by: dependabot[bot]
---
 .ci/bwcVersions | 1 + .github/CODEOWNERS | 4 +- MAINTAINERS.md | 6 +- TESTING.md | 4 + buildSrc/build.gradle | 2 +- .../gradle/DistributionDownloadPlugin.java | 43 +- buildSrc/version.properties | 4 +- modules/ingest-geoip/build.gradle | 2 - ...xpressions-9.5.0-snapshot-a4ef70f.jar.sha1 | 1 - ...xpressions-9.5.0-snapshot-d5cef1c.jar.sha1 | 1 + modules/transport-netty4/build.gradle | 3 +- .../netty-buffer-4.1.84.Final.jar.sha1 | 1 - .../netty-buffer-4.1.86.Final.jar.sha1 | 1 + .../netty-codec-4.1.84.Final.jar.sha1 | 1 - .../netty-codec-4.1.86.Final.jar.sha1 | 1 + .../netty-codec-http-4.1.84.Final.jar.sha1 | 1 - .../netty-codec-http-4.1.86.Final.jar.sha1 | 1 + .../netty-codec-http2-4.1.84.Final.jar.sha1 | 1 - .../netty-codec-http2-4.1.86.Final.jar.sha1 | 1 + .../netty-common-4.1.84.Final.jar.sha1 | 1 - .../netty-common-4.1.86.Final.jar.sha1 | 1 + .../netty-handler-4.1.84.Final.jar.sha1 | 1 - .../netty-handler-4.1.86.Final.jar.sha1 | 1 + .../netty-resolver-4.1.84.Final.jar.sha1 | 1 - .../netty-resolver-4.1.86.Final.jar.sha1 | 1 + .../netty-transport-4.1.84.Final.jar.sha1 | 1 - .../netty-transport-4.1.86.Final.jar.sha1 | 1 + ...t-native-unix-common-4.1.84.Final.jar.sha1 | 1 - ...t-native-unix-common-4.1.86.Final.jar.sha1 | 1 + .../Netty4NioServerSocketChannel.java | 62 ++ .../opensearch/transport/NettyAllocator.java | 3 +- ...alysis-icu-9.5.0-snapshot-a4ef70f.jar.sha1 | 1 - ...alysis-icu-9.5.0-snapshot-d5cef1c.jar.sha1 | 1 + ...s-kuromoji-9.5.0-snapshot-a4ef70f.jar.sha1 | 1 - ...s-kuromoji-9.5.0-snapshot-d5cef1c.jar.sha1 | 1 + ...lysis-nori-9.5.0-snapshot-a4ef70f.jar.sha1 | 1 - ...lysis-nori-9.5.0-snapshot-d5cef1c.jar.sha1 | 1 + ...s-phonetic-9.5.0-snapshot-a4ef70f.jar.sha1 | 1 - ...s-phonetic-9.5.0-snapshot-d5cef1c.jar.sha1 | 1 + ...is-smartcn-9.5.0-snapshot-a4ef70f.jar.sha1 | 1 -
...is-smartcn-9.5.0-snapshot-d5cef1c.jar.sha1 | 1 + ...is-stempel-9.5.0-snapshot-a4ef70f.jar.sha1 | 1 - ...is-stempel-9.5.0-snapshot-d5cef1c.jar.sha1 | 1 + ...morfologik-9.5.0-snapshot-a4ef70f.jar.sha1 | 1 - ...morfologik-9.5.0-snapshot-d5cef1c.jar.sha1 | 1 + plugins/discovery-ec2/build.gradle | 2 - plugins/ingest-attachment/build.gradle | 8 +- .../apache-mime4j-core-0.8.3.jar.sha1 | 1 - .../apache-mime4j-core-0.8.8.jar.sha1 | 1 + .../licenses/apache-mime4j-dom-0.8.3.jar.sha1 | 1 - .../licenses/apache-mime4j-dom-0.8.8.jar.sha1 | 1 + .../licenses/fontbox-2.0.25.jar.sha1 | 1 - .../licenses/fontbox-2.0.27.jar.sha1 | 1 + .../licenses/pdfbox-2.0.25.jar.sha1 | 1 - .../licenses/pdfbox-2.0.27.jar.sha1 | 1 + .../licenses/poi-5.2.2.jar.sha1 | 1 - .../licenses/poi-5.2.3.jar.sha1 | 1 + .../licenses/poi-ooxml-5.2.2.jar.sha1 | 1 - .../licenses/poi-ooxml-5.2.3.jar.sha1 | 1 + .../licenses/poi-ooxml-lite-5.2.2.jar.sha1 | 1 - .../licenses/poi-ooxml-lite-5.2.3.jar.sha1 | 1 + .../licenses/poi-scratchpad-5.2.2.jar.sha1 | 1 - .../licenses/poi-scratchpad-5.2.3.jar.sha1 | 1 + .../licenses/tika-core-2.5.0.jar.sha1 | 1 - .../licenses/tika-core-2.6.0.jar.sha1 | 1 + .../tika-langdetect-optimaize-2.5.0.jar.sha1 | 1 - .../tika-langdetect-optimaize-2.6.0.jar.sha1 | 1 + ...ka-parsers-standard-package-2.5.0.jar.sha1 | 1 - ...ka-parsers-standard-package-2.6.0.jar.sha1 | 1 + plugins/repository-azure/build.gradle | 2 - .../{jackson-LICENSE => jackson-LICENSE.txt} | 0 .../{jackson-NOTICE => jackson-NOTICE.txt} | 0 .../netty-codec-dns-4.1.84.Final.jar.sha1 | 1 - .../netty-codec-dns-4.1.86.Final.jar.sha1 | 1 + .../netty-codec-http2-4.1.84.Final.jar.sha1 | 1 - .../netty-codec-http2-4.1.86.Final.jar.sha1 | 1 + .../netty-codec-socks-4.1.84.Final.jar.sha1 | 1 - .../netty-codec-socks-4.1.86.Final.jar.sha1 | 1 + .../netty-handler-proxy-4.1.84.Final.jar.sha1 | 1 - .../netty-handler-proxy-4.1.86.Final.jar.sha1 | 1 + .../netty-resolver-dns-4.1.84.Final.jar.sha1 | 1 - .../netty-resolver-dns-4.1.86.Final.jar.sha1 | 1 + ...t-native-unix-common-4.1.84.Final.jar.sha1 | 1 - ...t-native-unix-common-4.1.86.Final.jar.sha1 | 1 + plugins/repository-hdfs/build.gradle | 3 +- .../licenses/netty-all-4.1.84.Final.jar.sha1 | 1 - .../licenses/netty-all-4.1.86.Final.jar.sha1 | 1 + .../licenses/protobuf-java-3.21.11.jar.sha1 | 1 + .../licenses/protobuf-java-3.21.9.jar.sha1 | 1 - plugins/repository-s3/build.gradle | 7 +- plugins/transport-nio/build.gradle | 3 +- .../netty-buffer-4.1.84.Final.jar.sha1 | 1 - .../netty-buffer-4.1.86.Final.jar.sha1 | 1 + .../netty-codec-4.1.84.Final.jar.sha1 | 1 - .../netty-codec-4.1.86.Final.jar.sha1 | 1 + .../netty-codec-http-4.1.84.Final.jar.sha1 | 1 - .../netty-codec-http-4.1.86.Final.jar.sha1 | 1 + .../netty-common-4.1.84.Final.jar.sha1 | 1 - .../netty-common-4.1.86.Final.jar.sha1 | 1 + .../netty-handler-4.1.84.Final.jar.sha1 | 1 - .../netty-handler-4.1.86.Final.jar.sha1 | 1 + .../netty-resolver-4.1.84.Final.jar.sha1 | 1 - .../netty-resolver-4.1.86.Final.jar.sha1 | 1 + .../netty-transport-4.1.84.Final.jar.sha1 | 1 - .../netty-transport-4.1.86.Final.jar.sha1 | 1 + .../opensearch.release-notes-1.3.7.md | 2 +- .../opensearch.release-notes-2.4.1.md | 23 + scripts/build.sh | 2 +- server/build.gradle | 7 +- .../jackson-annotations-2.14.1.jar.sha1 | 1 + .../licenses/jackson-annotations-LICENSE.txt | 8 + .../licenses/jackson-annotations-NOTICE.txt | 20 + .../licenses/jackson-databind-2.14.1.jar.sha1 | 1 + server/licenses/jackson-databind-LICENSE.txt | 8 + server/licenses/jackson-databind-NOTICE.txt | 20 + 
...sis-common-9.5.0-snapshot-a4ef70f.jar.sha1 | 1 - ...sis-common-9.5.0-snapshot-d5cef1c.jar.sha1 | 1 + ...ard-codecs-9.5.0-snapshot-a4ef70f.jar.sha1 | 1 - ...ard-codecs-9.5.0-snapshot-d5cef1c.jar.sha1 | 1 + ...ucene-core-9.5.0-snapshot-a4ef70f.jar.sha1 | 1 - ...ucene-core-9.5.0-snapshot-d5cef1c.jar.sha1 | 1 + ...e-grouping-9.5.0-snapshot-a4ef70f.jar.sha1 | 1 - ...e-grouping-9.5.0-snapshot-d5cef1c.jar.sha1 | 1 + ...ighlighter-9.5.0-snapshot-a4ef70f.jar.sha1 | 1 - ...ighlighter-9.5.0-snapshot-d5cef1c.jar.sha1 | 1 + ...ucene-join-9.5.0-snapshot-a4ef70f.jar.sha1 | 1 - ...ucene-join-9.5.0-snapshot-d5cef1c.jar.sha1 | 1 + ...ene-memory-9.5.0-snapshot-a4ef70f.jar.sha1 | 1 - ...ene-memory-9.5.0-snapshot-d5cef1c.jar.sha1 | 1 + ...ucene-misc-9.5.0-snapshot-a4ef70f.jar.sha1 | 1 - ...ucene-misc-9.5.0-snapshot-d5cef1c.jar.sha1 | 1 + ...ne-queries-9.5.0-snapshot-a4ef70f.jar.sha1 | 1 - ...ne-queries-9.5.0-snapshot-d5cef1c.jar.sha1 | 1 + ...ueryparser-9.5.0-snapshot-a4ef70f.jar.sha1 | 1 - ...ueryparser-9.5.0-snapshot-d5cef1c.jar.sha1 | 1 + ...ne-sandbox-9.5.0-snapshot-a4ef70f.jar.sha1 | 1 - ...ne-sandbox-9.5.0-snapshot-d5cef1c.jar.sha1 | 1 + ...ial-extras-9.5.0-snapshot-a4ef70f.jar.sha1 | 1 - ...ial-extras-9.5.0-snapshot-d5cef1c.jar.sha1 | 1 + ...-spatial3d-9.5.0-snapshot-a4ef70f.jar.sha1 | 1 - ...-spatial3d-9.5.0-snapshot-d5cef1c.jar.sha1 | 1 + ...ne-suggest-9.5.0-snapshot-a4ef70f.jar.sha1 | 1 - ...ne-suggest-9.5.0-snapshot-d5cef1c.jar.sha1 | 1 + .../action/bulk/BulkIntegrationIT.java | 39 +- .../AwarenessAttributeDecommissionIT.java | 23 +- .../replication/SegmentReplicationIT.java | 86 ++- .../search/query/MultiMatchQueryIT.java | 3 +- .../search/query/QueryStringIT.java | 33 + .../search/query/SearchQueryIT.java | 50 +- .../org/opensearch/OpenSearchException.java | 8 + .../src/main/java/org/opensearch/Version.java | 11 +- ...nsportAddVotingConfigExclusionsAction.java | 25 +- ...portClearVotingConfigExclusionsAction.java | 12 +- .../VotingConfigExclusionsHelper.java | 81 ++ .../put/ClusterPutWeightedRoutingRequest.java | 12 +- .../cluster/state/ClusterStateResponse.java | 5 + .../admin/indices/shrink/ResizeRequest.java | 4 +- .../action/bulk/TransportShardBulkAction.java | 10 + .../replication/FanoutReplicationProxy.java | 25 + .../support/replication/ReplicationMode.java | 32 + .../ReplicationModeAwareProxy.java | 44 ++ .../replication/ReplicationOperation.java | 36 +- .../support/replication/ReplicationProxy.java | 51 ++ .../replication/ReplicationProxyFactory.java | 29 + .../replication/ReplicationProxyRequest.java | 116 +++ .../TransportReplicationAction.java | 19 +- .../org/opensearch/bootstrap/Security.java | 4 + .../cluster/ClusterSettingsResponse.java | 60 ++ .../opensearch/cluster/LocalNodeResponse.java | 60 ++ .../cluster/coordination/Coordinator.java | 2 +- .../coordination/JoinTaskExecutor.java | 2 +- .../decommission/DecommissionController.java | 94 +-- .../decommission/DecommissionHelper.java | 124 +++ .../decommission/DecommissionService.java | 410 ++++------ ...upportedWeightedRoutingStateException.java | 35 + .../routing/WeightedRoutingService.java | 93 ++- .../service/ClusterManagerTaskThrottler.java | 8 +- .../common/bytes/BytesReference.java | 9 +- .../org/opensearch/common/lucene/Lucene.java | 2 +- .../settings/AbstractScopedSettings.java | 21 +- .../common/settings/SettingsModule.java | 39 + .../org/opensearch/common/unit/Fuzziness.java | 10 + .../opensearch/discovery/PluginRequest.java | 76 ++ .../opensearch/discovery/PluginResponse.java | 88 +++ 
.../java/org/opensearch/env/Environment.java | 7 + .../extensions/DiscoveryExtensionNode.java | 70 ++ .../extensions/ExtensionRequest.java | 66 ++ .../extensions/ExtensionsManager.java | 440 +++++++++++ .../extensions/ExtensionsSettings.java | 202 +++++ .../extensions/NoopExtensionsManager.java | 21 + .../opensearch/extensions/package-info.java | 10 + .../index/AcknowledgedResponse.java | 42 + .../index/IndicesModuleRequest.java | 68 ++ .../index/IndicesModuleResponse.java | 89 +++ .../opensearch/index/codec/CodecService.java | 8 +- .../PerFieldMappingPostingFormatCodec.java | 4 +- .../RecoverySourcePruneMergePolicy.java | 11 +- .../index/engine/TranslogLeafReader.java | 17 + .../index/mapper/KeywordFieldMapper.java | 15 + .../index/mapper/MappedFieldType.java | 17 +- .../index/mapper/StringFieldType.java | 28 +- .../query/MatchBoolPrefixQueryBuilder.java | 30 +- .../index/query/MatchQueryBuilder.java | 11 +- .../index/query/MultiMatchQueryBuilder.java | 9 + .../index/query/QueryStringQueryBuilder.java | 3 +- .../opensearch/index/search/MatchQuery.java | 8 +- .../index/search/MultiMatchQuery.java | 4 +- .../index/search/QueryStringQueryParser.java | 3 +- .../index/seqno/ReplicationTracker.java | 134 +++- .../opensearch/indices/IndicesService.java | 120 +++ .../cluster/IndicesClusterStateService.java | 93 ++- .../checkpoint/PublishCheckpointAction.java | 10 + .../main/java/org/opensearch/node/Node.java | 92 ++- .../opensearch/plugins/PluginsService.java | 1 + .../opensearch/search/fetch/FetchPhase.java | 2 +- .../search/lookup/SourceLookup.java | 2 +- .../transport/TransportService.java | 35 + .../org/opensearch/bootstrap/security.policy | 3 + .../ExceptionSerializationTests.java | 2 + .../VotingConfigExclusionsHelperTests.java | 123 +++ ...ClusterPutWeightedRoutingRequestTests.java | 9 - ...portVerifyShardBeforeCloseActionTests.java | 22 +- ...TransportResyncReplicationActionTests.java | 21 +- .../ReplicationOperationTests.java | 290 ++++++- .../TransportReplicationActionTests.java | 27 +- .../DecommissionControllerTests.java | 42 - .../decommission/DecommissionHelperTests.java | 142 ++++ .../DecommissionServiceTests.java | 83 +- .../routing/WeightedRoutingServiceTests.java | 71 +- .../ClusterManagerTaskThrottlerTests.java | 9 + .../bytes/ByteBuffersBytesReferenceTests.java | 79 ++ .../common/settings/SettingsModuleTests.java | 44 ++ .../common/unit/FuzzinessTests.java | 12 + .../common/util/FeatureFlagTests.java | 2 + .../extensions/ExtensionsManagerTests.java | 418 ++++++++++ ...exingPressureConcurrentExecutionTests.java | 16 +- .../opensearch/index/codec/CodecTests.java | 12 +- .../engine/CompletionStatsCacheTests.java | 4 +- .../MatchBoolPrefixQueryBuilderTests.java | 5 + .../index/query/MatchQueryBuilderTests.java | 5 + .../query/MultiMatchQueryBuilderTests.java | 5 + .../index/query/PrefixQueryBuilderTests.java | 2 +- .../query/QueryStringQueryBuilderTests.java | 12 +- .../seqno/ReplicationTrackerTestCase.java | 47 +- .../index/seqno/ReplicationTrackerTests.java | 715 +++++++++++++++++- .../OnDemandBlockSnapshotIndexInputTests.java | 2 +- .../snapshots/SnapshotResiliencyTests.java | 101 ++- .../TransportServiceHandshakeTests.java | 33 + .../src/test/resources/config/extensions.yml | 13 + ...enSearchIndexLevelReplicationTestCase.java | 4 +- 250 files changed, 5413 insertions(+), 890 deletions(-) delete mode 100644 modules/lang-expression/licenses/lucene-expressions-9.5.0-snapshot-a4ef70f.jar.sha1 create mode 100644 
modules/lang-expression/licenses/lucene-expressions-9.5.0-snapshot-d5cef1c.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-buffer-4.1.84.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-buffer-4.1.86.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-codec-4.1.84.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-codec-4.1.86.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-codec-http-4.1.84.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-codec-http-4.1.86.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-codec-http2-4.1.84.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-codec-http2-4.1.86.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-common-4.1.84.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-common-4.1.86.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-handler-4.1.84.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-handler-4.1.86.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-resolver-4.1.84.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-resolver-4.1.86.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-transport-4.1.84.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-transport-4.1.86.Final.jar.sha1 delete mode 100644 modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.84.Final.jar.sha1 create mode 100644 modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.86.Final.jar.sha1 create mode 100644 modules/transport-netty4/src/main/java/org/opensearch/transport/Netty4NioServerSocketChannel.java delete mode 100644 plugins/analysis-icu/licenses/lucene-analysis-icu-9.5.0-snapshot-a4ef70f.jar.sha1 create mode 100644 plugins/analysis-icu/licenses/lucene-analysis-icu-9.5.0-snapshot-d5cef1c.jar.sha1 delete mode 100644 plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.5.0-snapshot-a4ef70f.jar.sha1 create mode 100644 plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.5.0-snapshot-d5cef1c.jar.sha1 delete mode 100644 plugins/analysis-nori/licenses/lucene-analysis-nori-9.5.0-snapshot-a4ef70f.jar.sha1 create mode 100644 plugins/analysis-nori/licenses/lucene-analysis-nori-9.5.0-snapshot-d5cef1c.jar.sha1 delete mode 100644 plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.5.0-snapshot-a4ef70f.jar.sha1 create mode 100644 plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.5.0-snapshot-d5cef1c.jar.sha1 delete mode 100644 plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.5.0-snapshot-a4ef70f.jar.sha1 create mode 100644 plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.5.0-snapshot-d5cef1c.jar.sha1 delete mode 100644 plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.5.0-snapshot-a4ef70f.jar.sha1 create mode 100644 plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.5.0-snapshot-d5cef1c.jar.sha1 delete mode 100644 plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.5.0-snapshot-a4ef70f.jar.sha1 create mode 100644 plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.5.0-snapshot-d5cef1c.jar.sha1 delete mode 100644 plugins/ingest-attachment/licenses/apache-mime4j-core-0.8.3.jar.sha1 create mode 100644 plugins/ingest-attachment/licenses/apache-mime4j-core-0.8.8.jar.sha1 
delete mode 100644 plugins/ingest-attachment/licenses/apache-mime4j-dom-0.8.3.jar.sha1 create mode 100644 plugins/ingest-attachment/licenses/apache-mime4j-dom-0.8.8.jar.sha1 delete mode 100644 plugins/ingest-attachment/licenses/fontbox-2.0.25.jar.sha1 create mode 100644 plugins/ingest-attachment/licenses/fontbox-2.0.27.jar.sha1 delete mode 100644 plugins/ingest-attachment/licenses/pdfbox-2.0.25.jar.sha1 create mode 100644 plugins/ingest-attachment/licenses/pdfbox-2.0.27.jar.sha1 delete mode 100644 plugins/ingest-attachment/licenses/poi-5.2.2.jar.sha1 create mode 100644 plugins/ingest-attachment/licenses/poi-5.2.3.jar.sha1 delete mode 100644 plugins/ingest-attachment/licenses/poi-ooxml-5.2.2.jar.sha1 create mode 100644 plugins/ingest-attachment/licenses/poi-ooxml-5.2.3.jar.sha1 delete mode 100644 plugins/ingest-attachment/licenses/poi-ooxml-lite-5.2.2.jar.sha1 create mode 100644 plugins/ingest-attachment/licenses/poi-ooxml-lite-5.2.3.jar.sha1 delete mode 100644 plugins/ingest-attachment/licenses/poi-scratchpad-5.2.2.jar.sha1 create mode 100644 plugins/ingest-attachment/licenses/poi-scratchpad-5.2.3.jar.sha1 delete mode 100644 plugins/ingest-attachment/licenses/tika-core-2.5.0.jar.sha1 create mode 100644 plugins/ingest-attachment/licenses/tika-core-2.6.0.jar.sha1 delete mode 100644 plugins/ingest-attachment/licenses/tika-langdetect-optimaize-2.5.0.jar.sha1 create mode 100644 plugins/ingest-attachment/licenses/tika-langdetect-optimaize-2.6.0.jar.sha1 delete mode 100644 plugins/ingest-attachment/licenses/tika-parsers-standard-package-2.5.0.jar.sha1 create mode 100644 plugins/ingest-attachment/licenses/tika-parsers-standard-package-2.6.0.jar.sha1 rename plugins/repository-azure/licenses/{jackson-LICENSE => jackson-LICENSE.txt} (100%) rename plugins/repository-azure/licenses/{jackson-NOTICE => jackson-NOTICE.txt} (100%) delete mode 100644 plugins/repository-azure/licenses/netty-codec-dns-4.1.84.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-codec-dns-4.1.86.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-codec-http2-4.1.84.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-codec-http2-4.1.86.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-codec-socks-4.1.84.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-codec-socks-4.1.86.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-handler-proxy-4.1.84.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-handler-proxy-4.1.86.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-resolver-dns-4.1.84.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-resolver-dns-4.1.86.Final.jar.sha1 delete mode 100644 plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.84.Final.jar.sha1 create mode 100644 plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.86.Final.jar.sha1 delete mode 100644 plugins/repository-hdfs/licenses/netty-all-4.1.84.Final.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/netty-all-4.1.86.Final.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/protobuf-java-3.21.11.jar.sha1 delete mode 100644 plugins/repository-hdfs/licenses/protobuf-java-3.21.9.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-buffer-4.1.84.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-buffer-4.1.86.Final.jar.sha1 delete mode 100644 
plugins/transport-nio/licenses/netty-codec-4.1.84.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-codec-4.1.86.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-codec-http-4.1.84.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-codec-http-4.1.86.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-common-4.1.84.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-common-4.1.86.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-handler-4.1.84.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-handler-4.1.86.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-resolver-4.1.84.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-resolver-4.1.86.Final.jar.sha1 delete mode 100644 plugins/transport-nio/licenses/netty-transport-4.1.84.Final.jar.sha1 create mode 100644 plugins/transport-nio/licenses/netty-transport-4.1.86.Final.jar.sha1 create mode 100644 release-notes/opensearch.release-notes-2.4.1.md create mode 100644 server/licenses/jackson-annotations-2.14.1.jar.sha1 create mode 100644 server/licenses/jackson-annotations-LICENSE.txt create mode 100644 server/licenses/jackson-annotations-NOTICE.txt create mode 100644 server/licenses/jackson-databind-2.14.1.jar.sha1 create mode 100644 server/licenses/jackson-databind-LICENSE.txt create mode 100644 server/licenses/jackson-databind-NOTICE.txt delete mode 100644 server/licenses/lucene-analysis-common-9.5.0-snapshot-a4ef70f.jar.sha1 create mode 100644 server/licenses/lucene-analysis-common-9.5.0-snapshot-d5cef1c.jar.sha1 delete mode 100644 server/licenses/lucene-backward-codecs-9.5.0-snapshot-a4ef70f.jar.sha1 create mode 100644 server/licenses/lucene-backward-codecs-9.5.0-snapshot-d5cef1c.jar.sha1 delete mode 100644 server/licenses/lucene-core-9.5.0-snapshot-a4ef70f.jar.sha1 create mode 100644 server/licenses/lucene-core-9.5.0-snapshot-d5cef1c.jar.sha1 delete mode 100644 server/licenses/lucene-grouping-9.5.0-snapshot-a4ef70f.jar.sha1 create mode 100644 server/licenses/lucene-grouping-9.5.0-snapshot-d5cef1c.jar.sha1 delete mode 100644 server/licenses/lucene-highlighter-9.5.0-snapshot-a4ef70f.jar.sha1 create mode 100644 server/licenses/lucene-highlighter-9.5.0-snapshot-d5cef1c.jar.sha1 delete mode 100644 server/licenses/lucene-join-9.5.0-snapshot-a4ef70f.jar.sha1 create mode 100644 server/licenses/lucene-join-9.5.0-snapshot-d5cef1c.jar.sha1 delete mode 100644 server/licenses/lucene-memory-9.5.0-snapshot-a4ef70f.jar.sha1 create mode 100644 server/licenses/lucene-memory-9.5.0-snapshot-d5cef1c.jar.sha1 delete mode 100644 server/licenses/lucene-misc-9.5.0-snapshot-a4ef70f.jar.sha1 create mode 100644 server/licenses/lucene-misc-9.5.0-snapshot-d5cef1c.jar.sha1 delete mode 100644 server/licenses/lucene-queries-9.5.0-snapshot-a4ef70f.jar.sha1 create mode 100644 server/licenses/lucene-queries-9.5.0-snapshot-d5cef1c.jar.sha1 delete mode 100644 server/licenses/lucene-queryparser-9.5.0-snapshot-a4ef70f.jar.sha1 create mode 100644 server/licenses/lucene-queryparser-9.5.0-snapshot-d5cef1c.jar.sha1 delete mode 100644 server/licenses/lucene-sandbox-9.5.0-snapshot-a4ef70f.jar.sha1 create mode 100644 server/licenses/lucene-sandbox-9.5.0-snapshot-d5cef1c.jar.sha1 delete mode 100644 server/licenses/lucene-spatial-extras-9.5.0-snapshot-a4ef70f.jar.sha1 create mode 100644 server/licenses/lucene-spatial-extras-9.5.0-snapshot-d5cef1c.jar.sha1 delete mode 100644 
server/licenses/lucene-spatial3d-9.5.0-snapshot-a4ef70f.jar.sha1 create mode 100644 server/licenses/lucene-spatial3d-9.5.0-snapshot-d5cef1c.jar.sha1 delete mode 100644 server/licenses/lucene-suggest-9.5.0-snapshot-a4ef70f.jar.sha1 create mode 100644 server/licenses/lucene-suggest-9.5.0-snapshot-d5cef1c.jar.sha1 create mode 100644 server/src/main/java/org/opensearch/action/admin/cluster/configuration/VotingConfigExclusionsHelper.java create mode 100644 server/src/main/java/org/opensearch/action/support/replication/FanoutReplicationProxy.java create mode 100644 server/src/main/java/org/opensearch/action/support/replication/ReplicationMode.java create mode 100644 server/src/main/java/org/opensearch/action/support/replication/ReplicationModeAwareProxy.java create mode 100644 server/src/main/java/org/opensearch/action/support/replication/ReplicationProxy.java create mode 100644 server/src/main/java/org/opensearch/action/support/replication/ReplicationProxyFactory.java create mode 100644 server/src/main/java/org/opensearch/action/support/replication/ReplicationProxyRequest.java create mode 100644 server/src/main/java/org/opensearch/cluster/ClusterSettingsResponse.java create mode 100644 server/src/main/java/org/opensearch/cluster/LocalNodeResponse.java create mode 100644 server/src/main/java/org/opensearch/cluster/decommission/DecommissionHelper.java create mode 100644 server/src/main/java/org/opensearch/cluster/routing/UnsupportedWeightedRoutingStateException.java create mode 100644 server/src/main/java/org/opensearch/discovery/PluginRequest.java create mode 100644 server/src/main/java/org/opensearch/discovery/PluginResponse.java create mode 100644 server/src/main/java/org/opensearch/extensions/DiscoveryExtensionNode.java create mode 100644 server/src/main/java/org/opensearch/extensions/ExtensionRequest.java create mode 100644 server/src/main/java/org/opensearch/extensions/ExtensionsManager.java create mode 100644 server/src/main/java/org/opensearch/extensions/ExtensionsSettings.java create mode 100644 server/src/main/java/org/opensearch/extensions/NoopExtensionsManager.java create mode 100644 server/src/main/java/org/opensearch/extensions/package-info.java create mode 100644 server/src/main/java/org/opensearch/index/AcknowledgedResponse.java create mode 100644 server/src/main/java/org/opensearch/index/IndicesModuleRequest.java create mode 100644 server/src/main/java/org/opensearch/index/IndicesModuleResponse.java create mode 100644 server/src/test/java/org/opensearch/action/admin/cluster/configuration/VotingConfigExclusionsHelperTests.java create mode 100644 server/src/test/java/org/opensearch/cluster/decommission/DecommissionHelperTests.java create mode 100644 server/src/test/java/org/opensearch/common/bytes/ByteBuffersBytesReferenceTests.java create mode 100644 server/src/test/java/org/opensearch/extensions/ExtensionsManagerTests.java create mode 100644 server/src/test/resources/config/extensions.yml diff --git a/.ci/bwcVersions b/.ci/bwcVersions index 65fd9e7281ad1..ebc6db6939bb0 100644 --- a/.ci/bwcVersions +++ b/.ci/bwcVersions @@ -11,4 +11,5 @@ BWC_VERSION: - "2.3.1" - "2.4.0" - "2.4.1" + - "2.4.2" - "2.5.0" diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 8b63b291a8a54..3affbbd820774 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,3 +1 @@ -# This should match the owning team set up in https://github.com/orgs/opensearch-project/teams -* @opensearch-project/opensearch-core @reta - +* @reta @anasalkouz @andrross @reta @Bukhtawar @CEHENKLE @dblock @setiah @kartg 
@kotwanikunal @mch2 @nknize @owaiskazi19 @adnapibar @Rishikesh1159 @ryanbogan @saratvemulapalli @shwetathareja @dreamer-89 @tlfeng @VachaShah @xuezhou25 diff --git a/MAINTAINERS.md b/MAINTAINERS.md index 2f54656b2ab59..789e250e10d19 100644 --- a/MAINTAINERS.md +++ b/MAINTAINERS.md @@ -5,7 +5,6 @@ | Maintainer | GitHub ID | Affiliation | | --------------- | --------- | ----------- | -| Abbas Hussain | [abbashus](https://github.com/abbashus) | Amazon | | Anas Alkouz | [anasalkouz](https://github.com/anasalkouz) | Amazon | | Andrew Ross | [andrross](https://github.com/andrross)| Amazon | | Andriy Redko | [reta](https://github.com/reta) | Aiven | @@ -22,8 +21,8 @@ | Rishikesh Pasham | [Rishikesh1159](https://github.com/Rishikesh1159) | Amazon| | Ryan Bogan | [ryanbogan](https://github.com/ryanbogan) | Amazon | | Sarat Vemulapalli | [saratvemulapalli](https://github.com/saratvemulapalli) | Amazon | -| Shweta Thareja |[shwetathareja](https://github.com/shwetathareja) | Amazon | -| Suraj Singh |[dreamer-89](https://github.com/dreamer-89) | Amazon | +| Shweta Thareja | [shwetathareja](https://github.com/shwetathareja) | Amazon | +| Suraj Singh | [dreamer-89](https://github.com/dreamer-89) | Amazon | | Tianli Feng | [tlfeng](https://github.com/tlfeng) | Amazon | | Vacha Shah | [VachaShah](https://github.com/VachaShah) | Amazon | | Xue Zhou | [xuezhou25](https://github.com/xuezhou25) | Amazon | @@ -32,6 +31,7 @@ | Maintainer | GitHub ID | Affiliation | | --------------- | --------- | ----------- | +| Abbas Hussain | [abbashus](https://github.com/abbashus) | Amazon | | Megha Sai Kavikondala | [meghasaik](https://github.com/meghasaik) | Amazon | [This document](https://github.com/opensearch-project/.github/blob/main/MAINTAINERS.md) explains what maintainers do in this repo, and how they should be doing it. If you're interested in contributing, see [CONTRIBUTING](CONTRIBUTING.md). diff --git a/TESTING.md b/TESTING.md index de88a2bfec6c7..93b4615da6f0b 100644 --- a/TESTING.md +++ b/TESTING.md @@ -383,6 +383,10 @@ Use -Dtest.class and -Dtests.method to run a specific bwcTest test. For example -Dtests.class=org.opensearch.upgrades.RecoveryIT \ -Dtests.method=testHistoryUUIDIsGenerated +Use `-PcustomDistributionDownloadType=bundle` to run the bwcTest against the test cluster with latest CI distribution bundle set up for the specified version; this property is default to min and exclusive choices between `bundle` and `min`: + + ./gradlew bwcTest -PcustomDistributionDownloadType=bundle + When running `./gradlew check`, minimal bwc checks are also run against compatible versions that are not yet released. 
## BWC Testing against a specific remote/branch diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index 6d055ee9805fc..5242630d0f20a 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -103,7 +103,7 @@ dependencies { api localGroovy() api 'commons-codec:commons-codec:1.15' - api 'org.apache.commons:commons-compress:1.21' + api 'org.apache.commons:commons-compress:1.22' api 'org.apache.ant:ant:1.10.12' api 'com.netflix.nebula:gradle-extra-configurations-plugin:8.0.0' api 'com.netflix.nebula:nebula-publishing-plugin:4.6.0' diff --git a/buildSrc/src/main/java/org/opensearch/gradle/DistributionDownloadPlugin.java b/buildSrc/src/main/java/org/opensearch/gradle/DistributionDownloadPlugin.java index 87a565e6f4431..0cb6e6f044559 100644 --- a/buildSrc/src/main/java/org/opensearch/gradle/DistributionDownloadPlugin.java +++ b/buildSrc/src/main/java/org/opensearch/gradle/DistributionDownloadPlugin.java @@ -73,6 +73,8 @@ public class DistributionDownloadPlugin implements Plugin<Project> { private static final String RELEASE_PATTERN_LAYOUT = "/core/opensearch/[revision]/[module]-min-[revision](-[classifier]).[ext]"; private static final String SNAPSHOT_PATTERN_LAYOUT = "/snapshots/core/opensearch/[revision]/[module]-min-[revision](-[classifier])-latest.[ext]"; + private static final String BUNDLE_PATTERN_LAYOUT = + "/ci/dbc/distribution-build-opensearch/[revision]/latest/linux/x64/tar/dist/opensearch/[module]-[revision](-[classifier]).[ext]"; private NamedDomainObjectContainer<OpenSearchDistribution> distributionsContainer; private NamedDomainObjectContainer<DistributionResolution> distributionsResolutionStrategiesContainer; @@ -174,20 +176,39 @@ private static void setupDownloadServiceRepo(Project project) { return; } Object customDistributionUrl = project.findProperty("customDistributionUrl"); - // checks if custom Distribution Url has been passed by user from plugins + Object customDistributionDownloadType = project.findProperty("customDistributionDownloadType"); + // distributionDownloadType is default min if is not specified; download the distribution from CI if is bundle + String distributionDownloadType = customDistributionDownloadType != null + && customDistributionDownloadType.toString().equals("bundle") ? 
"bundle" : "min"; if (customDistributionUrl != null) { addIvyRepo(project, DOWNLOAD_REPO_NAME, customDistributionUrl.toString(), FAKE_IVY_GROUP, ""); addIvyRepo(project, SNAPSHOT_REPO_NAME, customDistributionUrl.toString(), FAKE_SNAPSHOT_IVY_GROUP, ""); - } else { - addIvyRepo( - project, - DOWNLOAD_REPO_NAME, - "https://artifacts.opensearch.org", - FAKE_IVY_GROUP, - "/releases" + RELEASE_PATTERN_LAYOUT, - "/release-candidates" + RELEASE_PATTERN_LAYOUT - ); - addIvyRepo(project, SNAPSHOT_REPO_NAME, "https://artifacts.opensearch.org", FAKE_SNAPSHOT_IVY_GROUP, SNAPSHOT_PATTERN_LAYOUT); + return; + } + switch (distributionDownloadType) { + case "bundle": + addIvyRepo(project, DOWNLOAD_REPO_NAME, "https://ci.opensearch.org", FAKE_IVY_GROUP, BUNDLE_PATTERN_LAYOUT); + addIvyRepo(project, SNAPSHOT_REPO_NAME, "https://ci.opensearch.org", FAKE_SNAPSHOT_IVY_GROUP, BUNDLE_PATTERN_LAYOUT); + break; + case "min": + addIvyRepo( + project, + DOWNLOAD_REPO_NAME, + "https://artifacts.opensearch.org", + FAKE_IVY_GROUP, + "/releases" + RELEASE_PATTERN_LAYOUT, + "/release-candidates" + RELEASE_PATTERN_LAYOUT + ); + addIvyRepo( + project, + SNAPSHOT_REPO_NAME, + "https://artifacts.opensearch.org", + FAKE_SNAPSHOT_IVY_GROUP, + SNAPSHOT_PATTERN_LAYOUT + ); + break; + default: + throw new IllegalArgumentException("Unsupported property argument: " + distributionDownloadType); } } diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 51a191efca2fb..284c868e5e5d3 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,5 +1,5 @@ opensearch = 3.0.0 -lucene = 9.5.0-snapshot-a4ef70f +lucene = 9.5.0-snapshot-d5cef1c bundled_jdk_vendor = adoptium bundled_jdk = 19.0.1+10 @@ -24,7 +24,7 @@ kotlin = 1.7.10 # when updating the JNA version, also update the version in buildSrc/build.gradle jna = 5.12.1 -netty = 4.1.84.Final +netty = 4.1.86.Final joda = 2.10.13 # client dependencies diff --git a/modules/ingest-geoip/build.gradle b/modules/ingest-geoip/build.gradle index fb056192dcbec..4e4186c4888f6 100644 --- a/modules/ingest-geoip/build.gradle +++ b/modules/ingest-geoip/build.gradle @@ -41,8 +41,6 @@ opensearchplugin { dependencies { api('com.maxmind.geoip2:geoip2:3.0.2') // geoip2 dependencies: - api("com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}") - api("com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}") api('com.maxmind.db:maxmind-db:2.1.0') testImplementation 'org.elasticsearch:geolite2-databases:20191119' diff --git a/modules/lang-expression/licenses/lucene-expressions-9.5.0-snapshot-a4ef70f.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.5.0-snapshot-a4ef70f.jar.sha1 deleted file mode 100644 index 0e1f3e37f508a..0000000000000 --- a/modules/lang-expression/licenses/lucene-expressions-9.5.0-snapshot-a4ef70f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c92a0928724b04224157ce2d3e105953f57f94db \ No newline at end of file diff --git a/modules/lang-expression/licenses/lucene-expressions-9.5.0-snapshot-d5cef1c.jar.sha1 b/modules/lang-expression/licenses/lucene-expressions-9.5.0-snapshot-d5cef1c.jar.sha1 new file mode 100644 index 0000000000000..16409bfd2404c --- /dev/null +++ b/modules/lang-expression/licenses/lucene-expressions-9.5.0-snapshot-d5cef1c.jar.sha1 @@ -0,0 +1 @@ +afe877ebf5ec4be7d17636b695015c449a523a3b \ No newline at end of file diff --git a/modules/transport-netty4/build.gradle b/modules/transport-netty4/build.gradle index 0aa92f0da0276..15e3eb8f2a292 100644 --- a/modules/transport-netty4/build.gradle +++ 
b/modules/transport-netty4/build.gradle @@ -125,7 +125,8 @@ thirdPartyAudit { 'com.aayushatharva.brotli4j.Brotli4jLoader', 'com.aayushatharva.brotli4j.decoder.DecoderJNI$Status', 'com.aayushatharva.brotli4j.decoder.DecoderJNI$Wrapper', - 'com.aayushatharva.brotli4j.encoder.Encoders', + 'com.aayushatharva.brotli4j.encoder.BrotliEncoderChannel', + 'com.aayushatharva.brotli4j.encoder.Encoder', 'com.aayushatharva.brotli4j.encoder.Encoder$Mode', 'com.aayushatharva.brotli4j.encoder.Encoder$Parameters', // classes are missing diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.84.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.84.Final.jar.sha1 deleted file mode 100644 index 25a6f9ecf50b6..0000000000000 --- a/modules/transport-netty4/licenses/netty-buffer-4.1.84.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a6b8cf57cfffc28d8e33f8175788a99401f576d9 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-buffer-4.1.86.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-buffer-4.1.86.Final.jar.sha1 new file mode 100644 index 0000000000000..c477a0d3b0ee9 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-buffer-4.1.86.Final.jar.sha1 @@ -0,0 +1 @@ +a66fa0ed2687eb33a2e53a17a6df61bfe3b3f2bd \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.84.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.84.Final.jar.sha1 deleted file mode 100644 index 032a8f1ed954e..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-4.1.84.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4f60f56c4cd17db114f01dc64aa465a2905240f5 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-4.1.86.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-4.1.86.Final.jar.sha1 new file mode 100644 index 0000000000000..b2bd305825d88 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-4.1.86.Final.jar.sha1 @@ -0,0 +1 @@ +ee126da926ea202da3b21eb737788ef83b1db772 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.84.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.84.Final.jar.sha1 deleted file mode 100644 index 1e985edfce65e..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-http-4.1.84.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -78628e78087d3da6c3d2b22aa67798d3f3adcd68 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http-4.1.86.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http-4.1.86.Final.jar.sha1 new file mode 100644 index 0000000000000..60affc4a1faed --- /dev/null +++ b/modules/transport-netty4/licenses/netty-codec-http-4.1.86.Final.jar.sha1 @@ -0,0 +1 @@ +23674593f004959ae002ec348626eecf677191ae \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http2-4.1.84.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http2-4.1.84.Final.jar.sha1 deleted file mode 100644 index 5fe8c5420cd74..0000000000000 --- a/modules/transport-netty4/licenses/netty-codec-http2-4.1.84.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5a0178b9689493fd612cd40481034469f4bd14cc \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-codec-http2-4.1.86.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-codec-http2-4.1.86.Final.jar.sha1 new file mode 100644 index 0000000000000..e0fb5c637d571 --- /dev/null +++ 
b/modules/transport-netty4/licenses/netty-codec-http2-4.1.86.Final.jar.sha1 @@ -0,0 +1 @@ +ac0ca067e4118533ad1038776fcd9d5f3058b7d4 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.84.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.84.Final.jar.sha1 deleted file mode 100644 index beaa2cce654c3..0000000000000 --- a/modules/transport-netty4/licenses/netty-common-4.1.84.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -90c84ec7f1108ae164810cf46694a5ec7ce738fc \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-common-4.1.86.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-common-4.1.86.Final.jar.sha1 new file mode 100644 index 0000000000000..48c07b3c9f5df --- /dev/null +++ b/modules/transport-netty4/licenses/netty-common-4.1.86.Final.jar.sha1 @@ -0,0 +1 @@ +1dceab4662a9cc93faf87b237bb41103b1bc7f0e \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.84.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.84.Final.jar.sha1 deleted file mode 100644 index afd28b451ba12..0000000000000 --- a/modules/transport-netty4/licenses/netty-handler-4.1.84.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -69cd93e2c321f04fc0a18eb5af519565373d0873 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-handler-4.1.86.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-handler-4.1.86.Final.jar.sha1 new file mode 100644 index 0000000000000..7c036b195f091 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-handler-4.1.86.Final.jar.sha1 @@ -0,0 +1 @@ +0bcb65230218286e6456b5d085cb42e67776eb70 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.84.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.84.Final.jar.sha1 deleted file mode 100644 index 07aa37fc76524..0000000000000 --- a/modules/transport-netty4/licenses/netty-resolver-4.1.84.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b6f808e331cf843d2a7ff62042cf9b5343e2ff25 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-resolver-4.1.86.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-resolver-4.1.86.Final.jar.sha1 new file mode 100644 index 0000000000000..f5258c46ebd6a --- /dev/null +++ b/modules/transport-netty4/licenses/netty-resolver-4.1.86.Final.jar.sha1 @@ -0,0 +1 @@ +bad83d479f7bd8ea84eefd77c316435be4c97270 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.84.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.84.Final.jar.sha1 deleted file mode 100644 index 5e12ada3f5c10..0000000000000 --- a/modules/transport-netty4/licenses/netty-transport-4.1.84.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -acd9947d0a951b1f7021c7adc393df936b1ecbf0 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-4.1.86.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-4.1.86.Final.jar.sha1 new file mode 100644 index 0000000000000..1fa4ab0281ca1 --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-4.1.86.Final.jar.sha1 @@ -0,0 +1 @@ +7c7739c41fd110c3576e9faace332ee957f27203 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.84.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.84.Final.jar.sha1 deleted file mode 100644 index 6273c55f3acbd..0000000000000 --- 
a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.84.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e51601ddb88ee646a97ff04db38d45c22c29aee8 \ No newline at end of file diff --git a/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.86.Final.jar.sha1 b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.86.Final.jar.sha1 new file mode 100644 index 0000000000000..3701a94dc9aec --- /dev/null +++ b/modules/transport-netty4/licenses/netty-transport-native-unix-common-4.1.86.Final.jar.sha1 @@ -0,0 +1 @@ +5e0e7fc1c337485cabcf7971faefe692b76f93a2 \ No newline at end of file diff --git a/modules/transport-netty4/src/main/java/org/opensearch/transport/Netty4NioServerSocketChannel.java b/modules/transport-netty4/src/main/java/org/opensearch/transport/Netty4NioServerSocketChannel.java new file mode 100644 index 0000000000000..8a8b1da6ef5dd --- /dev/null +++ b/modules/transport-netty4/src/main/java/org/opensearch/transport/Netty4NioServerSocketChannel.java @@ -0,0 +1,62 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.transport; + +import io.netty.channel.socket.InternetProtocolFamily; +import io.netty.channel.socket.nio.NioServerSocketChannel; +import io.netty.util.internal.SocketUtils; +import io.netty.util.internal.logging.InternalLogger; +import io.netty.util.internal.logging.InternalLoggerFactory; + +import java.nio.channels.ServerSocketChannel; +import java.nio.channels.SocketChannel; +import java.nio.channels.spi.SelectorProvider; +import java.util.List; + +public class Netty4NioServerSocketChannel extends NioServerSocketChannel { + private static final InternalLogger logger = InternalLoggerFactory.getInstance(Netty4NioServerSocketChannel.class); + + public Netty4NioServerSocketChannel() { + super(); + } + + public Netty4NioServerSocketChannel(SelectorProvider provider) { + super(provider); + } + + public Netty4NioServerSocketChannel(SelectorProvider provider, InternetProtocolFamily family) { + super(provider, family); + } + + public Netty4NioServerSocketChannel(ServerSocketChannel channel) { + super(channel); + } + + @Override + protected int doReadMessages(List<Object> buf) throws Exception { + SocketChannel ch = SocketUtils.accept(javaChannel()); + + try { + if (ch != null) { + buf.add(new Netty4NioSocketChannel(this, ch)); + return 1; + } + } catch (Throwable t) { + logger.warn("Failed to create a new channel from an accepted socket.", t); + + try { + ch.close(); + } catch (Throwable t2) { + logger.warn("Failed to close a socket.", t2); + } + } + + return 0; + } +} diff --git a/modules/transport-netty4/src/main/java/org/opensearch/transport/NettyAllocator.java b/modules/transport-netty4/src/main/java/org/opensearch/transport/NettyAllocator.java index e25853d864813..f2f6538d305d9 100644 --- a/modules/transport-netty4/src/main/java/org/opensearch/transport/NettyAllocator.java +++ b/modules/transport-netty4/src/main/java/org/opensearch/transport/NettyAllocator.java @@ -39,7 +39,6 @@ import io.netty.buffer.UnpooledByteBufAllocator; import io.netty.channel.Channel; import io.netty.channel.ServerChannel; -import io.netty.channel.socket.nio.NioServerSocketChannel; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.opensearch.common.Booleans; @@ -181,7 +180,7 @@ public static Class<? extends ServerChannel> getServerChannelType() { if
(ALLOCATOR instanceof NoDirectBuffers) { return CopyBytesServerSocketChannel.class; } else { - return NioServerSocketChannel.class; + return Netty4NioServerSocketChannel.class; } } diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.5.0-snapshot-a4ef70f.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.5.0-snapshot-a4ef70f.jar.sha1 deleted file mode 100644 index a49a0749a9e4a..0000000000000 --- a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.5.0-snapshot-a4ef70f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a7c38619d8f2cc48f792e007aa25b430f4f25698 \ No newline at end of file diff --git a/plugins/analysis-icu/licenses/lucene-analysis-icu-9.5.0-snapshot-d5cef1c.jar.sha1 b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.5.0-snapshot-d5cef1c.jar.sha1 new file mode 100644 index 0000000000000..0e7abb03dc38d --- /dev/null +++ b/plugins/analysis-icu/licenses/lucene-analysis-icu-9.5.0-snapshot-d5cef1c.jar.sha1 @@ -0,0 +1 @@ +670d8f48ea9cba542e263d3ec6c3e2a33accc561 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.5.0-snapshot-a4ef70f.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.5.0-snapshot-a4ef70f.jar.sha1 deleted file mode 100644 index 709bcf84faf06..0000000000000 --- a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.5.0-snapshot-a4ef70f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6243383e5fbcf87551ded4c1b48b69a4276bb748 \ No newline at end of file diff --git a/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.5.0-snapshot-d5cef1c.jar.sha1 b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.5.0-snapshot-d5cef1c.jar.sha1 new file mode 100644 index 0000000000000..f9bf4c20a1119 --- /dev/null +++ b/plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.5.0-snapshot-d5cef1c.jar.sha1 @@ -0,0 +1 @@ +b1f42bad26470c8ef88096e0c8564a74223c52c9 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.5.0-snapshot-a4ef70f.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.5.0-snapshot-a4ef70f.jar.sha1 deleted file mode 100644 index 0c4d7b7a2755c..0000000000000 --- a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.5.0-snapshot-a4ef70f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -91d1560bc927f1a431bb92e47fda9395d3b3e551 \ No newline at end of file diff --git a/plugins/analysis-nori/licenses/lucene-analysis-nori-9.5.0-snapshot-d5cef1c.jar.sha1 b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.5.0-snapshot-d5cef1c.jar.sha1 new file mode 100644 index 0000000000000..3d0efa0aa878b --- /dev/null +++ b/plugins/analysis-nori/licenses/lucene-analysis-nori-9.5.0-snapshot-d5cef1c.jar.sha1 @@ -0,0 +1 @@ +f3cf74fa91da5133667f8916f93071fed231f2ee \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.5.0-snapshot-a4ef70f.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.5.0-snapshot-a4ef70f.jar.sha1 deleted file mode 100644 index 82524cbdb4ada..0000000000000 --- a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.5.0-snapshot-a4ef70f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -26bbfd1a796d62006dff9c7e32d31a0397a8025e \ No newline at end of file diff --git a/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.5.0-snapshot-d5cef1c.jar.sha1 b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.5.0-snapshot-d5cef1c.jar.sha1 new file mode 100644 index 0000000000000..10e6d36daebd1 --- /dev/null +++ 
b/plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.5.0-snapshot-d5cef1c.jar.sha1 @@ -0,0 +1 @@ +e633c2d0cd677e4f1cef5aadc6bdc65e8e898d98 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.5.0-snapshot-a4ef70f.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.5.0-snapshot-a4ef70f.jar.sha1 deleted file mode 100644 index af6b600d22090..0000000000000 --- a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.5.0-snapshot-a4ef70f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a1a26c04e24d9a8573e6bd9a0bacad184821dd33 \ No newline at end of file diff --git a/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.5.0-snapshot-d5cef1c.jar.sha1 b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.5.0-snapshot-d5cef1c.jar.sha1 new file mode 100644 index 0000000000000..ea35d7f71329c --- /dev/null +++ b/plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.5.0-snapshot-d5cef1c.jar.sha1 @@ -0,0 +1 @@ +c12a2943e6f4977f15d489ac3e9802c5dfb3c4cc \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.5.0-snapshot-a4ef70f.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.5.0-snapshot-a4ef70f.jar.sha1 deleted file mode 100644 index ea5680869c187..0000000000000 --- a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.5.0-snapshot-a4ef70f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -19aa9eff0e0671fd91eb435a2e2fa29dec52cf5c \ No newline at end of file diff --git a/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.5.0-snapshot-d5cef1c.jar.sha1 b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.5.0-snapshot-d5cef1c.jar.sha1 new file mode 100644 index 0000000000000..652c406f21009 --- /dev/null +++ b/plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.5.0-snapshot-d5cef1c.jar.sha1 @@ -0,0 +1 @@ +5d257928a34e586a7de9fc7d4a013868f7a1db74 \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.5.0-snapshot-a4ef70f.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.5.0-snapshot-a4ef70f.jar.sha1 deleted file mode 100644 index 4f81941a1746e..0000000000000 --- a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.5.0-snapshot-a4ef70f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -05ff979dfe3ded901ccd72d5a5d66349286c44bf \ No newline at end of file diff --git a/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.5.0-snapshot-d5cef1c.jar.sha1 b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.5.0-snapshot-d5cef1c.jar.sha1 new file mode 100644 index 0000000000000..2fd10c33dcd80 --- /dev/null +++ b/plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.5.0-snapshot-d5cef1c.jar.sha1 @@ -0,0 +1 @@ +3d88f80ad07421b9470cb44a6f5b67dd47047b13 \ No newline at end of file diff --git a/plugins/discovery-ec2/build.gradle b/plugins/discovery-ec2/build.gradle index 1766aa14ea9e9..8a7e48fc671ff 100644 --- a/plugins/discovery-ec2/build.gradle +++ b/plugins/discovery-ec2/build.gradle @@ -46,8 +46,6 @@ dependencies { api "commons-logging:commons-logging:${versions.commonslogging}" api "org.apache.logging.log4j:log4j-1.2-api:${versions.log4j}" api "commons-codec:commons-codec:${versions.commonscodec}" - api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" - api "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" } restResources { diff --git a/plugins/ingest-attachment/build.gradle 
b/plugins/ingest-attachment/build.gradle index f42b44b56ccb8..0380b5f229838 100644 --- a/plugins/ingest-attachment/build.gradle +++ b/plugins/ingest-attachment/build.gradle @@ -38,10 +38,10 @@ opensearchplugin { } versions << [ - 'tika' : '2.5.0', - 'pdfbox': '2.0.25', - 'poi' : '5.2.2', - 'mime4j': '0.8.3' + 'tika' : '2.6.0', + 'pdfbox': '2.0.27', + 'poi' : '5.2.3', + 'mime4j': '0.8.8' ] dependencies { diff --git a/plugins/ingest-attachment/licenses/apache-mime4j-core-0.8.3.jar.sha1 b/plugins/ingest-attachment/licenses/apache-mime4j-core-0.8.3.jar.sha1 deleted file mode 100644 index 464a34dd97643..0000000000000 --- a/plugins/ingest-attachment/licenses/apache-mime4j-core-0.8.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -1179b56c9919c1a8e20d3a528ee4c6cee19bcbe0 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/apache-mime4j-core-0.8.8.jar.sha1 b/plugins/ingest-attachment/licenses/apache-mime4j-core-0.8.8.jar.sha1 new file mode 100644 index 0000000000000..77c36691d36b5 --- /dev/null +++ b/plugins/ingest-attachment/licenses/apache-mime4j-core-0.8.8.jar.sha1 @@ -0,0 +1 @@ +7330de23c52f71617cbec7f1d2760dae32e687cd \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/apache-mime4j-dom-0.8.3.jar.sha1 b/plugins/ingest-attachment/licenses/apache-mime4j-dom-0.8.3.jar.sha1 deleted file mode 100644 index 4f98753aa0af4..0000000000000 --- a/plugins/ingest-attachment/licenses/apache-mime4j-dom-0.8.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e80733714eb6a70895bfc74a9528c658504c2c83 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/apache-mime4j-dom-0.8.8.jar.sha1 b/plugins/ingest-attachment/licenses/apache-mime4j-dom-0.8.8.jar.sha1 new file mode 100644 index 0000000000000..fb9c5fed27162 --- /dev/null +++ b/plugins/ingest-attachment/licenses/apache-mime4j-dom-0.8.8.jar.sha1 @@ -0,0 +1 @@ +e76715563a6bd150f84ccb0adb920aec8faf4779 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/fontbox-2.0.25.jar.sha1 b/plugins/ingest-attachment/licenses/fontbox-2.0.25.jar.sha1 deleted file mode 100644 index 3191976e949f8..0000000000000 --- a/plugins/ingest-attachment/licenses/fontbox-2.0.25.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f6644a1eb1d165eded719a88bf7bdcff91740b98 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/fontbox-2.0.27.jar.sha1 b/plugins/ingest-attachment/licenses/fontbox-2.0.27.jar.sha1 new file mode 100644 index 0000000000000..d578dffbfa3f6 --- /dev/null +++ b/plugins/ingest-attachment/licenses/fontbox-2.0.27.jar.sha1 @@ -0,0 +1 @@ +d08c064d18b2b149da937d15c0d1708cba03f29d \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/pdfbox-2.0.25.jar.sha1 b/plugins/ingest-attachment/licenses/pdfbox-2.0.25.jar.sha1 deleted file mode 100644 index 165b3649e80bf..0000000000000 --- a/plugins/ingest-attachment/licenses/pdfbox-2.0.25.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c18cd03ff3a2dfc3c4a30d3a35173bd2690bcb92 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/pdfbox-2.0.27.jar.sha1 b/plugins/ingest-attachment/licenses/pdfbox-2.0.27.jar.sha1 new file mode 100644 index 0000000000000..4f670b7f95e8c --- /dev/null +++ b/plugins/ingest-attachment/licenses/pdfbox-2.0.27.jar.sha1 @@ -0,0 +1 @@ +416a9dfce3714116bfdf793b15368df04266845f \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/poi-5.2.2.jar.sha1 b/plugins/ingest-attachment/licenses/poi-5.2.2.jar.sha1 deleted file mode 100644 index d9f58e72c9200..0000000000000 --- 
a/plugins/ingest-attachment/licenses/poi-5.2.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5513d31545085c33809c4b6553c2009fd19a6016 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/poi-5.2.3.jar.sha1 b/plugins/ingest-attachment/licenses/poi-5.2.3.jar.sha1 new file mode 100644 index 0000000000000..3d8b3daf606ad --- /dev/null +++ b/plugins/ingest-attachment/licenses/poi-5.2.3.jar.sha1 @@ -0,0 +1 @@ +2fb22ae74ad5aea6af1a9c64b9542f2ccf348604 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/poi-ooxml-5.2.2.jar.sha1 b/plugins/ingest-attachment/licenses/poi-ooxml-5.2.2.jar.sha1 deleted file mode 100644 index 7b3abffc1abd5..0000000000000 --- a/plugins/ingest-attachment/licenses/poi-ooxml-5.2.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a201b5bdc92c0fae4bed4b8e5546388c4c2f9eb0 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/poi-ooxml-5.2.3.jar.sha1 b/plugins/ingest-attachment/licenses/poi-ooxml-5.2.3.jar.sha1 new file mode 100644 index 0000000000000..8371593cf0841 --- /dev/null +++ b/plugins/ingest-attachment/licenses/poi-ooxml-5.2.3.jar.sha1 @@ -0,0 +1 @@ +02efd11c940adb18c03eb9ce7ad88fc40ee6a196 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/poi-ooxml-lite-5.2.2.jar.sha1 b/plugins/ingest-attachment/licenses/poi-ooxml-lite-5.2.2.jar.sha1 deleted file mode 100644 index f5137b1e5223e..0000000000000 --- a/plugins/ingest-attachment/licenses/poi-ooxml-lite-5.2.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5df31b69375131fc2163a5557093cb112be90ce1 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/poi-ooxml-lite-5.2.3.jar.sha1 b/plugins/ingest-attachment/licenses/poi-ooxml-lite-5.2.3.jar.sha1 new file mode 100644 index 0000000000000..5c6365876b7be --- /dev/null +++ b/plugins/ingest-attachment/licenses/poi-ooxml-lite-5.2.3.jar.sha1 @@ -0,0 +1 @@ +db113c8e9051b0ff967f4911fa20336c8325a7c5 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/poi-scratchpad-5.2.2.jar.sha1 b/plugins/ingest-attachment/licenses/poi-scratchpad-5.2.2.jar.sha1 deleted file mode 100644 index 568dde5125c3f..0000000000000 --- a/plugins/ingest-attachment/licenses/poi-scratchpad-5.2.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8c5cd5f1b3e7b3656ab983b73bbbf8bf5f14f793 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/poi-scratchpad-5.2.3.jar.sha1 b/plugins/ingest-attachment/licenses/poi-scratchpad-5.2.3.jar.sha1 new file mode 100644 index 0000000000000..3c8f92498f1a4 --- /dev/null +++ b/plugins/ingest-attachment/licenses/poi-scratchpad-5.2.3.jar.sha1 @@ -0,0 +1 @@ +2a7fce47e22b7fedb1b277347ff4fe36d6eda50d \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-core-2.5.0.jar.sha1 b/plugins/ingest-attachment/licenses/tika-core-2.5.0.jar.sha1 deleted file mode 100644 index 419f01c631375..0000000000000 --- a/plugins/ingest-attachment/licenses/tika-core-2.5.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -7f9f35e4827726b062ac2b0ad0fd361837a50ac9 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-core-2.6.0.jar.sha1 b/plugins/ingest-attachment/licenses/tika-core-2.6.0.jar.sha1 new file mode 100644 index 0000000000000..c66c2f3f39401 --- /dev/null +++ b/plugins/ingest-attachment/licenses/tika-core-2.6.0.jar.sha1 @@ -0,0 +1 @@ +f6ed6356dd4a9bd269d873f65494376685e6192e \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-langdetect-optimaize-2.5.0.jar.sha1 
b/plugins/ingest-attachment/licenses/tika-langdetect-optimaize-2.5.0.jar.sha1 deleted file mode 100644 index a9e47ff8a8a86..0000000000000 --- a/plugins/ingest-attachment/licenses/tika-langdetect-optimaize-2.5.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -649574dca8f19d991ac25894c40284446dc5cf50 \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-langdetect-optimaize-2.6.0.jar.sha1 b/plugins/ingest-attachment/licenses/tika-langdetect-optimaize-2.6.0.jar.sha1 new file mode 100644 index 0000000000000..e7bc59bb5ae49 --- /dev/null +++ b/plugins/ingest-attachment/licenses/tika-langdetect-optimaize-2.6.0.jar.sha1 @@ -0,0 +1 @@ +72b784a7bdab0ffde005fa64d15e3f077331d6fc \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-parsers-standard-package-2.5.0.jar.sha1 b/plugins/ingest-attachment/licenses/tika-parsers-standard-package-2.5.0.jar.sha1 deleted file mode 100644 index d648183868034..0000000000000 --- a/plugins/ingest-attachment/licenses/tika-parsers-standard-package-2.5.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2b9268511c34d8a1098f0565438cb8077fcf845d \ No newline at end of file diff --git a/plugins/ingest-attachment/licenses/tika-parsers-standard-package-2.6.0.jar.sha1 b/plugins/ingest-attachment/licenses/tika-parsers-standard-package-2.6.0.jar.sha1 new file mode 100644 index 0000000000000..83c0777fcbe8a --- /dev/null +++ b/plugins/ingest-attachment/licenses/tika-parsers-standard-package-2.6.0.jar.sha1 @@ -0,0 +1 @@ +00980e70b1df13c1236b750f0ca1462edd5d7417 \ No newline at end of file diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle index 9dbfd5d3fb822..d1f83806607bd 100644 --- a/plugins/repository-azure/build.gradle +++ b/plugins/repository-azure/build.gradle @@ -61,8 +61,6 @@ dependencies { api 'io.projectreactor.netty:reactor-netty-core:1.0.24' api 'io.projectreactor.netty:reactor-netty-http:1.0.24' api "org.slf4j:slf4j-api:${versions.slf4j}" - api "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" - api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" api "com.fasterxml.jackson.datatype:jackson-datatype-jsr310:${versions.jackson}" api "com.fasterxml.jackson.dataformat:jackson-dataformat-xml:${versions.jackson}" api "com.fasterxml.jackson.module:jackson-module-jaxb-annotations:${versions.jackson}" diff --git a/plugins/repository-azure/licenses/jackson-LICENSE b/plugins/repository-azure/licenses/jackson-LICENSE.txt similarity index 100% rename from plugins/repository-azure/licenses/jackson-LICENSE rename to plugins/repository-azure/licenses/jackson-LICENSE.txt diff --git a/plugins/repository-azure/licenses/jackson-NOTICE b/plugins/repository-azure/licenses/jackson-NOTICE.txt similarity index 100% rename from plugins/repository-azure/licenses/jackson-NOTICE rename to plugins/repository-azure/licenses/jackson-NOTICE.txt diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.84.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.84.Final.jar.sha1 deleted file mode 100644 index f27ecd081f65d..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-dns-4.1.84.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -48ce1da1bc12b830f6ffcdc5f0329639eb11e2fb \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-dns-4.1.86.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-dns-4.1.86.Final.jar.sha1 new file mode 100644 index 0000000000000..9a8ebe2fb1be3 --- /dev/null +++ 
b/plugins/repository-azure/licenses/netty-codec-dns-4.1.86.Final.jar.sha1 @@ -0,0 +1 @@ +c8de479f36a8457541fcbb0016c851bde3e67693 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.84.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.84.Final.jar.sha1 deleted file mode 100644 index 5fe8c5420cd74..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-http2-4.1.84.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5a0178b9689493fd612cd40481034469f4bd14cc \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-http2-4.1.86.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-http2-4.1.86.Final.jar.sha1 new file mode 100644 index 0000000000000..e0fb5c637d571 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-http2-4.1.86.Final.jar.sha1 @@ -0,0 +1 @@ +ac0ca067e4118533ad1038776fcd9d5f3058b7d4 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.84.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.84.Final.jar.sha1 deleted file mode 100644 index 1eef1b7841930..0000000000000 --- a/plugins/repository-azure/licenses/netty-codec-socks-4.1.84.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8cef741b42de5a1b21a8313fffcf2b518138c00b \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-codec-socks-4.1.86.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-codec-socks-4.1.86.Final.jar.sha1 new file mode 100644 index 0000000000000..6544ba9942c96 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-codec-socks-4.1.86.Final.jar.sha1 @@ -0,0 +1 @@ +854264e7ad75887bc25b82eb38e4ee65c8b44dc3 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.84.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.84.Final.jar.sha1 deleted file mode 100644 index 0c3ed9425f8b7..0000000000000 --- a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.84.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8d9f2282f4da2486eed7797bc8622437eda7ce65 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-handler-proxy-4.1.86.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.86.Final.jar.sha1 new file mode 100644 index 0000000000000..5f8a3056159f5 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-handler-proxy-4.1.86.Final.jar.sha1 @@ -0,0 +1 @@ +2515d76be9671cc248bab77352edddd16bfa9436 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.84.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.84.Final.jar.sha1 deleted file mode 100644 index 2835332c51158..0000000000000 --- a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.84.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3755d26967afca20b925c07d41e6ed3ec38c6822 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-resolver-dns-4.1.86.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.86.Final.jar.sha1 new file mode 100644 index 0000000000000..b73e612b2a8c6 --- /dev/null +++ b/plugins/repository-azure/licenses/netty-resolver-dns-4.1.86.Final.jar.sha1 @@ -0,0 +1 @@ +a1e2ef79e4944b5d38092328c36c68e677a4b5f3 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.84.Final.jar.sha1 
b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.84.Final.jar.sha1 deleted file mode 100644 index 6273c55f3acbd..0000000000000 --- a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.84.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e51601ddb88ee646a97ff04db38d45c22c29aee8 \ No newline at end of file diff --git a/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.86.Final.jar.sha1 b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.86.Final.jar.sha1 new file mode 100644 index 0000000000000..3701a94dc9aec --- /dev/null +++ b/plugins/repository-azure/licenses/netty-transport-native-unix-common-4.1.86.Final.jar.sha1 @@ -0,0 +1 @@ +5e0e7fc1c337485cabcf7971faefe692b76f93a2 \ No newline at end of file diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index e5d65c9451c1f..73d59a16bb07a 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -66,10 +66,9 @@ dependencies { api 'org.apache.htrace:htrace-core4:4.2.0-incubating' api "org.apache.logging.log4j:log4j-core:${versions.log4j}" api 'org.apache.avro:avro:1.11.1' - api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" api 'com.google.code.gson:gson:2.10' runtimeOnly 'com.google.guava:guava:31.1-jre' - api 'com.google.protobuf:protobuf-java:3.21.9' + api 'com.google.protobuf:protobuf-java:3.21.11' api "commons-logging:commons-logging:${versions.commonslogging}" api 'commons-cli:commons-cli:1.5.0' api "commons-codec:commons-codec:${versions.commonscodec}" diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.84.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.84.Final.jar.sha1 deleted file mode 100644 index 14003104a623f..0000000000000 --- a/plugins/repository-hdfs/licenses/netty-all-4.1.84.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f1a994d19e9971ba6f1b8abf4ebf912a21cec983 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/netty-all-4.1.86.Final.jar.sha1 b/plugins/repository-hdfs/licenses/netty-all-4.1.86.Final.jar.sha1 new file mode 100644 index 0000000000000..75cb32ca4b323 --- /dev/null +++ b/plugins/repository-hdfs/licenses/netty-all-4.1.86.Final.jar.sha1 @@ -0,0 +1 @@ +a6395c3d2f8699e8dc4fd1e38171f82045f4af7b \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/protobuf-java-3.21.11.jar.sha1 b/plugins/repository-hdfs/licenses/protobuf-java-3.21.11.jar.sha1 new file mode 100644 index 0000000000000..d0e50b4b7838c --- /dev/null +++ b/plugins/repository-hdfs/licenses/protobuf-java-3.21.11.jar.sha1 @@ -0,0 +1 @@ +c94f1937debcacbbeff48208bc2f7279088cbcdc \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/protobuf-java-3.21.9.jar.sha1 b/plugins/repository-hdfs/licenses/protobuf-java-3.21.9.jar.sha1 deleted file mode 100644 index 2e03dbe5dafd0..0000000000000 --- a/plugins/repository-hdfs/licenses/protobuf-java-3.21.9.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -ed1240d9231044ce6ccf1978512f6e44416bb7e7 \ No newline at end of file diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle index e207b472ee665..591eb2502b1d8 100644 --- a/plugins/repository-s3/build.gradle +++ b/plugins/repository-s3/build.gradle @@ -54,9 +54,6 @@ dependencies { api "commons-logging:commons-logging:${versions.commonslogging}" api "org.apache.logging.log4j:log4j-1.2-api:${versions.log4j}" api "commons-codec:commons-codec:${versions.commonscodec}" - api 
"com.fasterxml.jackson.core:jackson-core:${versions.jackson}" - api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" - api "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" api "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor:${versions.jackson}" api "joda-time:joda-time:${versions.joda}" @@ -173,9 +170,9 @@ if (!s3EC2Bucket && !s3EC2BasePath && !s3ECSBucket && !s3ECSBasePath && !s3EKSBu processYamlRestTestResources { Map expansions = [ 'permanent_bucket': s3PermanentBucket, - 'permanent_base_path': s3PermanentBasePath + "_integration_tests", + 'permanent_base_path': s3PermanentBasePath + "_integration_tests_" + BuildParams.testSeed, 'temporary_bucket': s3TemporaryBucket, - 'temporary_base_path': s3TemporaryBasePath + "_integration_tests", + 'temporary_base_path': s3TemporaryBasePath + "_integration_tests_" + BuildParams.testSeed, 'ec2_bucket': s3EC2Bucket, 'ec2_base_path': s3EC2BasePath, 'ecs_bucket': s3ECSBucket, diff --git a/plugins/transport-nio/build.gradle b/plugins/transport-nio/build.gradle index 3ed1ecff68b03..0655b5465f5c8 100644 --- a/plugins/transport-nio/build.gradle +++ b/plugins/transport-nio/build.gradle @@ -61,7 +61,8 @@ thirdPartyAudit { 'com.aayushatharva.brotli4j.Brotli4jLoader', 'com.aayushatharva.brotli4j.decoder.DecoderJNI$Status', 'com.aayushatharva.brotli4j.decoder.DecoderJNI$Wrapper', - 'com.aayushatharva.brotli4j.encoder.Encoders', + 'com.aayushatharva.brotli4j.encoder.BrotliEncoderChannel', + 'com.aayushatharva.brotli4j.encoder.Encoder', 'com.aayushatharva.brotli4j.encoder.Encoder$Mode', 'com.aayushatharva.brotli4j.encoder.Encoder$Parameters', diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.84.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.84.Final.jar.sha1 deleted file mode 100644 index 25a6f9ecf50b6..0000000000000 --- a/plugins/transport-nio/licenses/netty-buffer-4.1.84.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a6b8cf57cfffc28d8e33f8175788a99401f576d9 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-buffer-4.1.86.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-buffer-4.1.86.Final.jar.sha1 new file mode 100644 index 0000000000000..c477a0d3b0ee9 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-buffer-4.1.86.Final.jar.sha1 @@ -0,0 +1 @@ +a66fa0ed2687eb33a2e53a17a6df61bfe3b3f2bd \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.84.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.84.Final.jar.sha1 deleted file mode 100644 index 032a8f1ed954e..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-4.1.84.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4f60f56c4cd17db114f01dc64aa465a2905240f5 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-4.1.86.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-4.1.86.Final.jar.sha1 new file mode 100644 index 0000000000000..b2bd305825d88 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-4.1.86.Final.jar.sha1 @@ -0,0 +1 @@ +ee126da926ea202da3b21eb737788ef83b1db772 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-codec-http-4.1.84.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.84.Final.jar.sha1 deleted file mode 100644 index 1e985edfce65e..0000000000000 --- a/plugins/transport-nio/licenses/netty-codec-http-4.1.84.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -78628e78087d3da6c3d2b22aa67798d3f3adcd68 \ No newline at end of file diff --git 
a/plugins/transport-nio/licenses/netty-codec-http-4.1.86.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-codec-http-4.1.86.Final.jar.sha1 new file mode 100644 index 0000000000000..60affc4a1faed --- /dev/null +++ b/plugins/transport-nio/licenses/netty-codec-http-4.1.86.Final.jar.sha1 @@ -0,0 +1 @@ +23674593f004959ae002ec348626eecf677191ae \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.84.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.84.Final.jar.sha1 deleted file mode 100644 index beaa2cce654c3..0000000000000 --- a/plugins/transport-nio/licenses/netty-common-4.1.84.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -90c84ec7f1108ae164810cf46694a5ec7ce738fc \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-common-4.1.86.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-common-4.1.86.Final.jar.sha1 new file mode 100644 index 0000000000000..48c07b3c9f5df --- /dev/null +++ b/plugins/transport-nio/licenses/netty-common-4.1.86.Final.jar.sha1 @@ -0,0 +1 @@ +1dceab4662a9cc93faf87b237bb41103b1bc7f0e \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.84.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.84.Final.jar.sha1 deleted file mode 100644 index afd28b451ba12..0000000000000 --- a/plugins/transport-nio/licenses/netty-handler-4.1.84.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -69cd93e2c321f04fc0a18eb5af519565373d0873 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-handler-4.1.86.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-handler-4.1.86.Final.jar.sha1 new file mode 100644 index 0000000000000..7c036b195f091 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-handler-4.1.86.Final.jar.sha1 @@ -0,0 +1 @@ +0bcb65230218286e6456b5d085cb42e67776eb70 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.84.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.84.Final.jar.sha1 deleted file mode 100644 index 07aa37fc76524..0000000000000 --- a/plugins/transport-nio/licenses/netty-resolver-4.1.84.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b6f808e331cf843d2a7ff62042cf9b5343e2ff25 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-resolver-4.1.86.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-resolver-4.1.86.Final.jar.sha1 new file mode 100644 index 0000000000000..f5258c46ebd6a --- /dev/null +++ b/plugins/transport-nio/licenses/netty-resolver-4.1.86.Final.jar.sha1 @@ -0,0 +1 @@ +bad83d479f7bd8ea84eefd77c316435be4c97270 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.84.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.84.Final.jar.sha1 deleted file mode 100644 index 5e12ada3f5c10..0000000000000 --- a/plugins/transport-nio/licenses/netty-transport-4.1.84.Final.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -acd9947d0a951b1f7021c7adc393df936b1ecbf0 \ No newline at end of file diff --git a/plugins/transport-nio/licenses/netty-transport-4.1.86.Final.jar.sha1 b/plugins/transport-nio/licenses/netty-transport-4.1.86.Final.jar.sha1 new file mode 100644 index 0000000000000..1fa4ab0281ca1 --- /dev/null +++ b/plugins/transport-nio/licenses/netty-transport-4.1.86.Final.jar.sha1 @@ -0,0 +1 @@ +7c7739c41fd110c3576e9faace332ee957f27203 \ No newline at end of file diff --git a/release-notes/opensearch.release-notes-1.3.7.md b/release-notes/opensearch.release-notes-1.3.7.md index 
b8330b5bfcd7d..bcd1732595dfc 100644 --- a/release-notes/opensearch.release-notes-1.3.7.md +++ b/release-notes/opensearch.release-notes-1.3.7.md @@ -1,4 +1,4 @@ -## 2022-11-30 Version 1.3.7 Release Notes +## 2022-12-13 Version 1.3.7 Release Notes ### Upgrades * Upgrade netty to 4.1.84.Final ([#4919](https://github.com/opensearch-project/OpenSearch/pull/4919)) diff --git a/release-notes/opensearch.release-notes-2.4.1.md b/release-notes/opensearch.release-notes-2.4.1.md new file mode 100644 index 0000000000000..a2e885f1f1282 --- /dev/null +++ b/release-notes/opensearch.release-notes-2.4.1.md @@ -0,0 +1,23 @@ +## 2022-12-13 Version 2.4.1 Release Notes + +### Bug Fixes +* Fix 1.x compatibility bug with stored Tasks ([#5412](https://github.com/opensearch-project/opensearch/pull/5412)) ([#5440](https://github.com/opensearch-project/opensearch/pull/5440)) +* Use BuildParams.isCi() instead of checking env var ([#5368](https://github.com/opensearch-project/opensearch/pull/5368)) ([#5373](https://github.com/opensearch-project/opensearch/pull/5373)) +* [BUG] org.opensearch.repositories.s3.RepositoryS3ClientYamlTestSuiteIT/test {yaml=repository_s3/20_repository_permanent_credentials/Snapshot and Restore with repository-s3 using permanent credentials} flaky ([#5325](https://github.com/opensearch-project/opensearch/pull/5325)) ([#5336](https://github.com/opensearch-project/opensearch/pull/5336)) +* [BUG] Gradle Check Failed on Windows due to JDK19 pulling by gradle ([#5188](https://github.com/opensearch-project/opensearch/pull/5188)) ([#5192](https://github.com/opensearch-project/opensearch/pull/5192)) +* Fix test to use a file from another temp directory ([#5158](https://github.com/opensearch-project/opensearch/pull/5158)) ([#5163](https://github.com/opensearch-project/opensearch/pull/5163)) +* Fix boundary condition in indexing pressure test ([#5168](https://github.com/opensearch-project/opensearch/pull/5168)) ([#5182](https://github.com/opensearch-project/opensearch/pull/5182)) +* [Backport 2.x] Fix: org.opensearch.clustermanager.ClusterManagerTaskThrottlingIT is flaky. ([#5153](https://github.com/opensearch-project/opensearch/pull/5153)) ([#5171](https://github.com/opensearch-project/opensearch/pull/5171)) +* [Backport 2.4] Raise error on malformed CSV ([#5141](https://github.com/opensearch-project/opensearch/pull/5141)) + +### Features/Enhancements +* Change the output error message back to use OpenSearchException in the cause chain ([#5081](https://github.com/opensearch-project/opensearch/pull/5081)) ([#5085](https://github.com/opensearch-project/opensearch/pull/5085)) +* Revert changes in AbstractPointGeometryFieldMapper ([#5250](https://github.com/opensearch-project/opensearch/pull/5250)) +* Add support for skipping changelog ([#5088](https://github.com/opensearch-project/opensearch/pull/5088)) ([#5160](https://github.com/opensearch-project/opensearch/pull/5160)) +* [Backport 2.4]Revert "Cluster manager task throttling feature [Final PR] ([#5071](https://github.com/opensearch-project/opensearch/pull/5071)) ([#5203](https://github.com/opensearch-project/opensearch/pull/5203)) + +### Maintenance +* Update Apache Lucene to 9.4.2 ([#5354](https://github.com/opensearch-project/opensearch/pull/5354)) ([#5361](https://github.com/opensearch-project/opensearch/pull/5361)) +* Update Jackson to 2.14.1 ([#5346](https://github.com/opensearch-project/opensearch/pull/5346)) ([#5358](https://github.com/opensearch-project/opensearch/pull/5358)) +* Bump nebula-publishing-plugin from v4.4.0 to v4.6.0. 
([#5127](https://github.com/opensearch-project/opensearch/pull/5127)) ([#5131](https://github.com/opensearch-project/opensearch/pull/5131)) +* Bump commons-compress from 1.21 to 1.22. ([#5520](https://github.com/opensearch-project/OpenSearch/pull/5520)) ([#5522](https://github.com/opensearch-project/opensearch/pull/5522)) diff --git a/scripts/build.sh b/scripts/build.sh index 16906bf39fbc7..a0917776507be 100755 --- a/scripts/build.sh +++ b/scripts/build.sh @@ -139,7 +139,7 @@ esac echo "Building OpenSearch for $PLATFORM-$DISTRIBUTION-$ARCHITECTURE" -./gradlew :distribution:$TYPE:$TARGET:assemble :distribution:$TYPE:no-jdk-$TARGET:assemble -Dbuild.snapshot=$SNAPSHOT -Dbuild.version_qualifier=$QUALIFIER +./gradlew :distribution:$TYPE:$TARGET:assemble -Dbuild.snapshot=$SNAPSHOT -Dbuild.version_qualifier=$QUALIFIER # Copy artifact to dist folder in bundle build output [[ "$SNAPSHOT" == "true" ]] && IDENTIFIER="-SNAPSHOT" diff --git a/server/build.gradle b/server/build.gradle index 62ef695a642f2..27404a46c7d1c 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -144,6 +144,10 @@ dependencies { // jna api "net.java.dev.jna:jna:${versions.jna}" + // jackson + api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" + api "com.fasterxml.jackson.core:jackson-annotations:${versions.jackson}" + testImplementation(project(":test:framework")) { // tests use the locally compiled version of server exclude group: 'org.opensearch', module: 'server' @@ -217,11 +221,12 @@ tasks.named("processResources").configure { tasks.named("thirdPartyAudit").configure { ignoreMissingClasses( + // from log4j + 'com.conversantmedia.util.concurrent.SpinPolicy', 'com.fasterxml.jackson.dataformat.xml.JacksonXmlModule', 'com.fasterxml.jackson.dataformat.xml.XmlMapper', 'com.fasterxml.jackson.dataformat.xml.util.DefaultXmlPrettyPrinter', - 'com.conversantmedia.util.concurrent.SpinPolicy', 'org.fusesource.jansi.Ansi', 'org.fusesource.jansi.AnsiRenderer$Code', 'com.lmax.disruptor.EventFactory', diff --git a/server/licenses/jackson-annotations-2.14.1.jar.sha1 b/server/licenses/jackson-annotations-2.14.1.jar.sha1 new file mode 100644 index 0000000000000..e43faef9e23ff --- /dev/null +++ b/server/licenses/jackson-annotations-2.14.1.jar.sha1 @@ -0,0 +1 @@ +2a6ad504d591a7903ffdec76b5b7252819a2d162 \ No newline at end of file diff --git a/server/licenses/jackson-annotations-LICENSE.txt b/server/licenses/jackson-annotations-LICENSE.txt new file mode 100644 index 0000000000000..f5f45d26a49d6 --- /dev/null +++ b/server/licenses/jackson-annotations-LICENSE.txt @@ -0,0 +1,8 @@ +This copy of Jackson JSON processor streaming parser/generator is licensed under the +Apache (Software) License, version 2.0 ("the License"). +See the License for details about distribution rights, and the +specific rights regarding derivate works. + +You may obtain a copy of the License at: + +http://www.apache.org/licenses/LICENSE-2.0 diff --git a/server/licenses/jackson-annotations-NOTICE.txt b/server/licenses/jackson-annotations-NOTICE.txt new file mode 100644 index 0000000000000..4c976b7b4cc58 --- /dev/null +++ b/server/licenses/jackson-annotations-NOTICE.txt @@ -0,0 +1,20 @@ +# Jackson JSON processor + +Jackson is a high-performance, Free/Open Source JSON processing library. +It was originally written by Tatu Saloranta (tatu.saloranta@iki.fi), and has +been in development since 2007. +It is currently developed by a community of developers, as well as supported +commercially by FasterXML.com. 
+ +## Licensing + +Jackson core and extension components may licensed under different licenses. +To find the details that apply to this artifact see the accompanying LICENSE file. +For more information, including possible other licensing options, contact +FasterXML.com (http://fasterxml.com). + +## Credits + +A list of contributors may be found from CREDITS file, which is included +in some artifacts (usually source distributions); but is always available +from the source code management (SCM) system project uses. diff --git a/server/licenses/jackson-databind-2.14.1.jar.sha1 b/server/licenses/jackson-databind-2.14.1.jar.sha1 new file mode 100644 index 0000000000000..0e6726927ebac --- /dev/null +++ b/server/licenses/jackson-databind-2.14.1.jar.sha1 @@ -0,0 +1 @@ +268524b9056cae1211b9f1f52560ef19347f4d17 \ No newline at end of file diff --git a/server/licenses/jackson-databind-LICENSE.txt b/server/licenses/jackson-databind-LICENSE.txt new file mode 100644 index 0000000000000..f5f45d26a49d6 --- /dev/null +++ b/server/licenses/jackson-databind-LICENSE.txt @@ -0,0 +1,8 @@ +This copy of Jackson JSON processor streaming parser/generator is licensed under the +Apache (Software) License, version 2.0 ("the License"). +See the License for details about distribution rights, and the +specific rights regarding derivate works. + +You may obtain a copy of the License at: + +http://www.apache.org/licenses/LICENSE-2.0 diff --git a/server/licenses/jackson-databind-NOTICE.txt b/server/licenses/jackson-databind-NOTICE.txt new file mode 100644 index 0000000000000..4c976b7b4cc58 --- /dev/null +++ b/server/licenses/jackson-databind-NOTICE.txt @@ -0,0 +1,20 @@ +# Jackson JSON processor + +Jackson is a high-performance, Free/Open Source JSON processing library. +It was originally written by Tatu Saloranta (tatu.saloranta@iki.fi), and has +been in development since 2007. +It is currently developed by a community of developers, as well as supported +commercially by FasterXML.com. + +## Licensing + +Jackson core and extension components may licensed under different licenses. +To find the details that apply to this artifact see the accompanying LICENSE file. +For more information, including possible other licensing options, contact +FasterXML.com (http://fasterxml.com). + +## Credits + +A list of contributors may be found from CREDITS file, which is included +in some artifacts (usually source distributions); but is always available +from the source code management (SCM) system project uses. 
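For readers following the dependency change above: with jackson-databind and jackson-annotations now declared directly in server/build.gradle, server code can use Jackson's standard data-binding API. Below is a minimal round-trip sketch, not part of the patch; the JacksonRoundTripExample and NodeInfo names are hypothetical and exist only for illustration.

import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.databind.ObjectMapper;

public class JacksonRoundTripExample {
    // Hypothetical payload type; @JsonProperty comes from jackson-annotations,
    // ObjectMapper from jackson-databind (both now direct server dependencies).
    public static class NodeInfo {
        @JsonProperty("name")
        public String name;
        @JsonProperty("version")
        public String version;
    }

    public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        NodeInfo info = new NodeInfo();
        info.name = "node-1";
        info.version = "2.4.1";
        String json = mapper.writeValueAsString(info);            // serialize to JSON
        NodeInfo parsed = mapper.readValue(json, NodeInfo.class); // parse it back
        System.out.println(json + " -> " + parsed.name);
    }
}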
diff --git a/server/licenses/lucene-analysis-common-9.5.0-snapshot-a4ef70f.jar.sha1 b/server/licenses/lucene-analysis-common-9.5.0-snapshot-a4ef70f.jar.sha1 deleted file mode 100644 index e12c20e2a64b8..0000000000000 --- a/server/licenses/lucene-analysis-common-9.5.0-snapshot-a4ef70f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -84d717ed509f8ce484c57fea720d8de2a6afdaa6 \ No newline at end of file diff --git a/server/licenses/lucene-analysis-common-9.5.0-snapshot-d5cef1c.jar.sha1 b/server/licenses/lucene-analysis-common-9.5.0-snapshot-d5cef1c.jar.sha1 new file mode 100644 index 0000000000000..bf375b397e5eb --- /dev/null +++ b/server/licenses/lucene-analysis-common-9.5.0-snapshot-d5cef1c.jar.sha1 @@ -0,0 +1 @@ +f2440fe126dad78e95f901c0f7a6eeb66da09938 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.5.0-snapshot-a4ef70f.jar.sha1 b/server/licenses/lucene-backward-codecs-9.5.0-snapshot-a4ef70f.jar.sha1 deleted file mode 100644 index e78e165acddb3..0000000000000 --- a/server/licenses/lucene-backward-codecs-9.5.0-snapshot-a4ef70f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -087bcc11526f8dcc56742dd8188bd05ad0329161 \ No newline at end of file diff --git a/server/licenses/lucene-backward-codecs-9.5.0-snapshot-d5cef1c.jar.sha1 b/server/licenses/lucene-backward-codecs-9.5.0-snapshot-d5cef1c.jar.sha1 new file mode 100644 index 0000000000000..112438e4c262d --- /dev/null +++ b/server/licenses/lucene-backward-codecs-9.5.0-snapshot-d5cef1c.jar.sha1 @@ -0,0 +1 @@ +51677b84f823e352ab366f6a6bf87de8816650c4 \ No newline at end of file diff --git a/server/licenses/lucene-core-9.5.0-snapshot-a4ef70f.jar.sha1 b/server/licenses/lucene-core-9.5.0-snapshot-a4ef70f.jar.sha1 deleted file mode 100644 index bd5fc52fb86c3..0000000000000 --- a/server/licenses/lucene-core-9.5.0-snapshot-a4ef70f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e949897fa24e14d2701a3c41fe27a4f094681b81 \ No newline at end of file diff --git a/server/licenses/lucene-core-9.5.0-snapshot-d5cef1c.jar.sha1 b/server/licenses/lucene-core-9.5.0-snapshot-d5cef1c.jar.sha1 new file mode 100644 index 0000000000000..7523ee5a94ca9 --- /dev/null +++ b/server/licenses/lucene-core-9.5.0-snapshot-d5cef1c.jar.sha1 @@ -0,0 +1 @@ +b11b5c54ab26152c0db003c7a514f4e6c6825fdd \ No newline at end of file diff --git a/server/licenses/lucene-grouping-9.5.0-snapshot-a4ef70f.jar.sha1 b/server/licenses/lucene-grouping-9.5.0-snapshot-a4ef70f.jar.sha1 deleted file mode 100644 index 17aa27ceac3bf..0000000000000 --- a/server/licenses/lucene-grouping-9.5.0-snapshot-a4ef70f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -6cb53ca55f7e313ed19852ae37fca4ad2e4caa0c \ No newline at end of file diff --git a/server/licenses/lucene-grouping-9.5.0-snapshot-d5cef1c.jar.sha1 b/server/licenses/lucene-grouping-9.5.0-snapshot-d5cef1c.jar.sha1 new file mode 100644 index 0000000000000..d0c7504f554fd --- /dev/null +++ b/server/licenses/lucene-grouping-9.5.0-snapshot-d5cef1c.jar.sha1 @@ -0,0 +1 @@ +58bd60d4c3ec753eef0d904601ab6d726633a8db \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-9.5.0-snapshot-a4ef70f.jar.sha1 b/server/licenses/lucene-highlighter-9.5.0-snapshot-a4ef70f.jar.sha1 deleted file mode 100644 index 7f248580a6a49..0000000000000 --- a/server/licenses/lucene-highlighter-9.5.0-snapshot-a4ef70f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c7f650e33ac11e01bb5c2e35e4eb080a9ce245b8 \ No newline at end of file diff --git a/server/licenses/lucene-highlighter-9.5.0-snapshot-d5cef1c.jar.sha1 b/server/licenses/lucene-highlighter-9.5.0-snapshot-d5cef1c.jar.sha1 new file mode 
100644 index 0000000000000..9a26b338bdab6 --- /dev/null +++ b/server/licenses/lucene-highlighter-9.5.0-snapshot-d5cef1c.jar.sha1 @@ -0,0 +1 @@ +97f11221b89e37c575ed9538d88b1529872abd80 \ No newline at end of file diff --git a/server/licenses/lucene-join-9.5.0-snapshot-a4ef70f.jar.sha1 b/server/licenses/lucene-join-9.5.0-snapshot-a4ef70f.jar.sha1 deleted file mode 100644 index 88fef91bee929..0000000000000 --- a/server/licenses/lucene-join-9.5.0-snapshot-a4ef70f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -914ea03f71043a9291623628396a97a4c1901f8c \ No newline at end of file diff --git a/server/licenses/lucene-join-9.5.0-snapshot-d5cef1c.jar.sha1 b/server/licenses/lucene-join-9.5.0-snapshot-d5cef1c.jar.sha1 new file mode 100644 index 0000000000000..09459e5aafe3f --- /dev/null +++ b/server/licenses/lucene-join-9.5.0-snapshot-d5cef1c.jar.sha1 @@ -0,0 +1 @@ +84980688c8eb9fbdb597a4291713ee630653392d \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.5.0-snapshot-a4ef70f.jar.sha1 b/server/licenses/lucene-memory-9.5.0-snapshot-a4ef70f.jar.sha1 deleted file mode 100644 index f6422c2e72fda..0000000000000 --- a/server/licenses/lucene-memory-9.5.0-snapshot-a4ef70f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e83ecf8c4f5991f8e4ea319fc9194c933e02f66d \ No newline at end of file diff --git a/server/licenses/lucene-memory-9.5.0-snapshot-d5cef1c.jar.sha1 b/server/licenses/lucene-memory-9.5.0-snapshot-d5cef1c.jar.sha1 new file mode 100644 index 0000000000000..e5b9ceae7e187 --- /dev/null +++ b/server/licenses/lucene-memory-9.5.0-snapshot-d5cef1c.jar.sha1 @@ -0,0 +1 @@ +23d3d1eefec90950b34ddef988be21b8fdfeb415 \ No newline at end of file diff --git a/server/licenses/lucene-misc-9.5.0-snapshot-a4ef70f.jar.sha1 b/server/licenses/lucene-misc-9.5.0-snapshot-a4ef70f.jar.sha1 deleted file mode 100644 index 262190789814d..0000000000000 --- a/server/licenses/lucene-misc-9.5.0-snapshot-a4ef70f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -5adc5753c741847cd84cb11ebfcd613bedc11beb \ No newline at end of file diff --git a/server/licenses/lucene-misc-9.5.0-snapshot-d5cef1c.jar.sha1 b/server/licenses/lucene-misc-9.5.0-snapshot-d5cef1c.jar.sha1 new file mode 100644 index 0000000000000..bf7c5edf32b62 --- /dev/null +++ b/server/licenses/lucene-misc-9.5.0-snapshot-d5cef1c.jar.sha1 @@ -0,0 +1 @@ +e5f164991c11efeebf0697c76ffbe986af5341d5 \ No newline at end of file diff --git a/server/licenses/lucene-queries-9.5.0-snapshot-a4ef70f.jar.sha1 b/server/licenses/lucene-queries-9.5.0-snapshot-a4ef70f.jar.sha1 deleted file mode 100644 index f8bba3d90a0f1..0000000000000 --- a/server/licenses/lucene-queries-9.5.0-snapshot-a4ef70f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -824272a064aa2fff1f952b5ae383e80aef4e45f8 \ No newline at end of file diff --git a/server/licenses/lucene-queries-9.5.0-snapshot-d5cef1c.jar.sha1 b/server/licenses/lucene-queries-9.5.0-snapshot-d5cef1c.jar.sha1 new file mode 100644 index 0000000000000..d8d8cb121bcd4 --- /dev/null +++ b/server/licenses/lucene-queries-9.5.0-snapshot-d5cef1c.jar.sha1 @@ -0,0 +1 @@ +8c382224253727794557200e97717b927ad8fa74 \ No newline at end of file diff --git a/server/licenses/lucene-queryparser-9.5.0-snapshot-a4ef70f.jar.sha1 b/server/licenses/lucene-queryparser-9.5.0-snapshot-a4ef70f.jar.sha1 deleted file mode 100644 index 652ccd298c9d9..0000000000000 --- a/server/licenses/lucene-queryparser-9.5.0-snapshot-a4ef70f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -d5bf983dfb6183b390bdc9d3b41b88b6ee6f780e \ No newline at end of file diff --git 
a/server/licenses/lucene-queryparser-9.5.0-snapshot-d5cef1c.jar.sha1 b/server/licenses/lucene-queryparser-9.5.0-snapshot-d5cef1c.jar.sha1 new file mode 100644 index 0000000000000..649651e62c018 --- /dev/null +++ b/server/licenses/lucene-queryparser-9.5.0-snapshot-d5cef1c.jar.sha1 @@ -0,0 +1 @@ +18cb84b504b8a57075efca72f4701aa8a720a057 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.5.0-snapshot-a4ef70f.jar.sha1 b/server/licenses/lucene-sandbox-9.5.0-snapshot-a4ef70f.jar.sha1 deleted file mode 100644 index b51328d19065a..0000000000000 --- a/server/licenses/lucene-sandbox-9.5.0-snapshot-a4ef70f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -fff58cc6b79887348b45c9d06bff39d055540738 \ No newline at end of file diff --git a/server/licenses/lucene-sandbox-9.5.0-snapshot-d5cef1c.jar.sha1 b/server/licenses/lucene-sandbox-9.5.0-snapshot-d5cef1c.jar.sha1 new file mode 100644 index 0000000000000..a1ef08b0a4069 --- /dev/null +++ b/server/licenses/lucene-sandbox-9.5.0-snapshot-d5cef1c.jar.sha1 @@ -0,0 +1 @@ +76cc7f77a30864e853a6662a5b7f4937023bd5e6 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-9.5.0-snapshot-a4ef70f.jar.sha1 b/server/licenses/lucene-spatial-extras-9.5.0-snapshot-a4ef70f.jar.sha1 deleted file mode 100644 index 37a22d637a051..0000000000000 --- a/server/licenses/lucene-spatial-extras-9.5.0-snapshot-a4ef70f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -c391f1df56d63dff3c6543da15c87105f2106c86 \ No newline at end of file diff --git a/server/licenses/lucene-spatial-extras-9.5.0-snapshot-d5cef1c.jar.sha1 b/server/licenses/lucene-spatial-extras-9.5.0-snapshot-d5cef1c.jar.sha1 new file mode 100644 index 0000000000000..3810d15fdf5c9 --- /dev/null +++ b/server/licenses/lucene-spatial-extras-9.5.0-snapshot-d5cef1c.jar.sha1 @@ -0,0 +1 @@ +70df807e8504f2fb1fe28ceaf33373e3de51aec8 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-9.5.0-snapshot-a4ef70f.jar.sha1 b/server/licenses/lucene-spatial3d-9.5.0-snapshot-a4ef70f.jar.sha1 deleted file mode 100644 index b0c9924752852..0000000000000 --- a/server/licenses/lucene-spatial3d-9.5.0-snapshot-a4ef70f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -794109c75534b1c3a19a29bcb66692f0e0708744 \ No newline at end of file diff --git a/server/licenses/lucene-spatial3d-9.5.0-snapshot-d5cef1c.jar.sha1 b/server/licenses/lucene-spatial3d-9.5.0-snapshot-d5cef1c.jar.sha1 new file mode 100644 index 0000000000000..6b0ad3693733d --- /dev/null +++ b/server/licenses/lucene-spatial3d-9.5.0-snapshot-d5cef1c.jar.sha1 @@ -0,0 +1 @@ +75aaac030d36ddf7cdb09632fe1293c6ecd756ce \ No newline at end of file diff --git a/server/licenses/lucene-suggest-9.5.0-snapshot-a4ef70f.jar.sha1 b/server/licenses/lucene-suggest-9.5.0-snapshot-a4ef70f.jar.sha1 deleted file mode 100644 index 63f5d8123c2cf..0000000000000 --- a/server/licenses/lucene-suggest-9.5.0-snapshot-a4ef70f.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -dc5fdd92541f4e78256152d3efc11bdb67ffdc91 \ No newline at end of file diff --git a/server/licenses/lucene-suggest-9.5.0-snapshot-d5cef1c.jar.sha1 b/server/licenses/lucene-suggest-9.5.0-snapshot-d5cef1c.jar.sha1 new file mode 100644 index 0000000000000..d3b116f990627 --- /dev/null +++ b/server/licenses/lucene-suggest-9.5.0-snapshot-d5cef1c.jar.sha1 @@ -0,0 +1 @@ +384e589a3d90773ff47ffbaa2797afe95609f183 \ No newline at end of file diff --git a/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkIntegrationIT.java b/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkIntegrationIT.java index 
e2a1363f163da..8236e6e90afc5 100644 --- a/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkIntegrationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/action/bulk/BulkIntegrationIT.java @@ -193,34 +193,29 @@ public void testDeleteIndexWhileIndexing() throws Exception { String index = "deleted_while_indexing"; createIndex(index); AtomicBoolean stopped = new AtomicBoolean(); - Thread[] threads = new Thread[between(1, 4)]; AtomicInteger docID = new AtomicInteger(); - for (int i = 0; i < threads.length; i++) { - threads[i] = new Thread(() -> { - while (stopped.get() == false && docID.get() < 5000) { - String id = Integer.toString(docID.incrementAndGet()); - try { - IndexResponse response = client().prepareIndex(index) - .setId(id) - .setSource(Collections.singletonMap("f" + randomIntBetween(1, 10), randomNonNegativeLong()), XContentType.JSON) - .get(); - assertThat(response.getResult(), is(oneOf(CREATED, UPDATED))); - logger.info("--> index id={} seq_no={}", response.getId(), response.getSeqNo()); - } catch (OpenSearchException ignore) { - logger.info("--> fail to index id={}", id); - } + Thread thread = new Thread(() -> { + while (stopped.get() == false && docID.get() < 5000) { + String id = Integer.toString(docID.incrementAndGet()); + try { + IndexResponse response = client().prepareIndex(index) + .setId(id) + .setSource(Collections.singletonMap("f" + randomIntBetween(1, 10), randomNonNegativeLong()), XContentType.JSON) + .get(); + assertThat(response.getResult(), is(oneOf(CREATED, UPDATED))); + logger.info("--> index id={} seq_no={}", response.getId(), response.getSeqNo()); + } catch (OpenSearchException ignore) { + logger.info("--> fail to index id={}", id); } - }); - threads[i].start(); - } + } + }); + thread.start(); ensureGreen(index); assertBusy(() -> assertThat(docID.get(), greaterThanOrEqualTo(1))); assertAcked(client().admin().indices().prepareDelete(index)); stopped.set(true); - for (Thread thread : threads) { - thread.join(ReplicationRequest.DEFAULT_TIMEOUT.millis() / 2); - assertFalse(thread.isAlive()); - } + thread.join(ReplicationRequest.DEFAULT_TIMEOUT.millis() / 2); + assertFalse(thread.isAlive()); } } diff --git a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/AwarenessAttributeDecommissionIT.java b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/AwarenessAttributeDecommissionIT.java index aa0f90bc4a6d9..54765650cd202 100644 --- a/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/AwarenessAttributeDecommissionIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/cluster/coordination/AwarenessAttributeDecommissionIT.java @@ -29,7 +29,6 @@ import org.opensearch.cluster.ClusterStateObserver; import org.opensearch.cluster.decommission.DecommissionAttribute; import org.opensearch.cluster.decommission.DecommissionAttributeMetadata; -import org.opensearch.cluster.decommission.DecommissionService; import org.opensearch.cluster.decommission.DecommissionStatus; import org.opensearch.cluster.decommission.DecommissioningFailedException; import org.opensearch.cluster.decommission.NodeDecommissionedException; @@ -824,24 +823,11 @@ public void testDecommissionFailedWithOnlyOneAttributeValue() throws Exception { // and hence due to which the leader won't get abdicated and decommission request should eventually fail. 
// And in this case, to ensure decommission request doesn't leave mutating change in the cluster, we ensure // that no exclusion is set to the cluster and state for decommission is marked as FAILED - Logger clusterLogger = LogManager.getLogger(DecommissionService.class); - MockLogAppender mockLogAppender = MockLogAppender.createForLoggers(clusterLogger); - mockLogAppender.addExpectation( - new MockLogAppender.SeenEventExpectation( - "test", - DecommissionService.class.getCanonicalName(), - Level.ERROR, - "failure in removing to-be-decommissioned cluster manager eligible nodes" - ) + OpenSearchTimeoutException ex = expectThrows( + OpenSearchTimeoutException.class, + () -> client().execute(DecommissionAction.INSTANCE, decommissionRequest).actionGet() ); - - assertBusy(() -> { - OpenSearchTimeoutException ex = expectThrows( - OpenSearchTimeoutException.class, - () -> client().execute(DecommissionAction.INSTANCE, decommissionRequest).actionGet() - ); - assertTrue(ex.getMessage().contains("timed out waiting for voting config exclusions")); - }); + assertTrue(ex.getMessage().contains("while removing to-be-decommissioned cluster manager eligible nodes")); ClusterService leaderClusterService = internalCluster().getInstance( ClusterService.class, @@ -877,7 +863,6 @@ public void testDecommissionFailedWithOnlyOneAttributeValue() throws Exception { // if the below condition is passed, then we are sure current decommission status is marked FAILED assertTrue(expectedStateLatch.await(30, TimeUnit.SECONDS)); - mockLogAppender.assertAllExpectationsMatched(); // ensure all nodes are part of cluster ensureStableCluster(6, TimeValue.timeValueMinutes(2)); diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java index 2ceb4e0908df3..5ab1fc79fa68a 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/replication/SegmentReplicationIT.java @@ -10,6 +10,8 @@ import com.carrotsearch.randomizedtesting.RandomizedTest; import org.junit.BeforeClass; +import org.opensearch.OpenSearchCorruptionException; +import org.opensearch.action.admin.cluster.health.ClusterHealthResponse; import org.opensearch.action.admin.indices.segments.IndexShardSegments; import org.opensearch.action.admin.indices.segments.IndicesSegmentResponse; import org.opensearch.action.admin.indices.segments.IndicesSegmentsRequest; @@ -24,7 +26,9 @@ import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.allocation.command.CancelAllocationCommand; import org.opensearch.common.Nullable; +import org.opensearch.common.Priority; import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; import org.opensearch.common.util.FeatureFlags; import org.opensearch.index.Index; import org.opensearch.index.IndexModule; @@ -53,6 +57,7 @@ import java.util.function.Function; import java.util.stream.Collectors; +import static org.hamcrest.Matchers.equalTo; import static org.opensearch.index.query.QueryBuilders.matchQuery; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertHitCount; import static org.opensearch.test.hamcrest.OpenSearchAssertions.assertAcked; @@ -194,6 +199,75 @@ public void testCancelPrimaryAllocation() throws Exception { assertSegmentStats(REPLICA_COUNT); } + /** + * This test verifies that a replica shard is not added to the cluster when a round of segment replication fails during peer recovery. + */ + public void testAddNewReplicaFailure() throws Exception { + logger.info("--> starting [Primary Node] ..."); + final String primaryNode = internalCluster().startNode(); + + logger.info("--> creating test index ..."); + prepareCreate( + INDEX_NAME, + Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + ).get(); + + logger.info("--> index 10 docs"); + for (int i = 0; i < 10; i++) { + client().prepareIndex(INDEX_NAME).setId(Integer.toString(i)).setSource("field", "value" + i).execute().actionGet(); + } + logger.info("--> flush so we have some segment files on disk"); + flush(INDEX_NAME); + logger.info("--> index more docs so we have something in the translog"); + for (int i = 10; i < 20; i++) { + client().prepareIndex(INDEX_NAME).setId(Integer.toString(i)).setSource("field", "value" + i).execute().actionGet(); + } + refresh(INDEX_NAME); + logger.info("--> verifying count"); + assertThat(client().prepareSearch(INDEX_NAME).setSize(0).execute().actionGet().getHits().getTotalHits().value, equalTo(20L)); + + logger.info("--> start empty node to add replica shard"); + final String replicaNode = internalCluster().startNode(); + + // Mock the transport service to throw a corruption exception during the segment replication process. + MockTransportService mockTransportService = ((MockTransportService) internalCluster().getInstance( + TransportService.class, + primaryNode + )); + mockTransportService.addSendBehavior( + internalCluster().getInstance(TransportService.class, replicaNode), + (connection, requestId, action, request, options) -> { + if (action.equals(SegmentReplicationTargetService.Actions.FILE_CHUNK)) { + throw new OpenSearchCorruptionException("expected"); + } + connection.sendRequest(requestId, action, request, options); + } + ); + ensureGreen(INDEX_NAME); + // Add a replica shard to the new empty replica node + assertAcked( + client().admin() + .indices() + .prepareUpdateSettings(INDEX_NAME) + .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)) + ); + + // Verify that the cluster state is not green and that the replica shard that failed during segment replication is not added to the cluster + ClusterHealthResponse clusterHealthResponse = client().admin() + .cluster() + .prepareHealth() + .setWaitForEvents(Priority.LANGUID) + .setWaitForNodes("2") + .setWaitForGreenStatus() + .setTimeout(TimeValue.timeValueSeconds(2)) + .execute() + .actionGet(); + assertTrue(clusterHealthResponse.isTimedOut()); + ensureYellow(INDEX_NAME); + IndicesService indicesService = internalCluster().getInstance(IndicesService.class, replicaNode); + assertFalse(indicesService.hasIndex(resolveIndex(INDEX_NAME))); + } + public void testReplicationAfterPrimaryRefreshAndFlush() throws Exception { final String nodeA = internalCluster().startNode(); final String nodeB = internalCluster().startNode(); @@ -452,18 +526,14 @@ public void testStartReplicaAfterPrimaryIndexesDocs() throws Exception { final String replicaNode = internalCluster().startNode(); ensureGreen(INDEX_NAME); - client().prepareIndex(INDEX_NAME).setId("3").setSource("foo", "bar").get(); + assertHitCount(client(primaryNode).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), 2); + assertHitCount(client(replicaNode).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), 2); +
client().prepareIndex(INDEX_NAME).setId("3").setSource("foo", "bar").get(); + refresh(INDEX_NAME); waitForReplicaUpdate(); assertHitCount(client(primaryNode).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), 3); assertHitCount(client(replicaNode).prepareSearch(INDEX_NAME).setSize(0).setPreference("_only_local").get(), 3); - - IndexShard primaryShard = getIndexShard(primaryNode); - IndexShard replicaShard = getIndexShard(replicaNode); - assertEquals( - primaryShard.translogStats().estimatedNumberOfOperations(), - replicaShard.translogStats().estimatedNumberOfOperations() - ); assertSegmentStats(REPLICA_COUNT); } diff --git a/server/src/internalClusterTest/java/org/opensearch/search/query/MultiMatchQueryIT.java b/server/src/internalClusterTest/java/org/opensearch/search/query/MultiMatchQueryIT.java index d87bbfb1fb69c..79527039a50f5 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/query/MultiMatchQueryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/query/MultiMatchQueryIT.java @@ -37,6 +37,7 @@ import org.opensearch.action.index.IndexRequestBuilder; import org.opensearch.action.search.SearchResponse; import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.Fuzziness; import org.opensearch.common.util.set.Sets; import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; @@ -1024,7 +1025,7 @@ public void testFuzzyFieldLevelBoosting() throws InterruptedException, Execution SearchResponse searchResponse = client().prepareSearch(idx) .setExplain(true) - .setQuery(multiMatchQuery("foo").field("title", 100).field("body").fuzziness(0)) + .setQuery(multiMatchQuery("foo").field("title", 100).field("body").fuzziness(Fuzziness.ZERO)) .get(); SearchHit[] hits = searchResponse.getHits().getHits(); assertNotEquals("both documents should be on different shards", hits[0].getShard().getShardId(), hits[1].getShard().getShardId()); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/query/QueryStringIT.java b/server/src/internalClusterTest/java/org/opensearch/search/query/QueryStringIT.java index 5c7e53fda3f23..9837c86cd8608 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/query/QueryStringIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/query/QueryStringIT.java @@ -216,6 +216,39 @@ public void testKeywordWithWhitespace() throws Exception { assertHitCount(resp, 3L); } + public void testRegexCaseInsensitivity() throws Exception { + createIndex("messages"); + List indexRequests = new ArrayList<>(); + indexRequests.add(client().prepareIndex("messages").setId("1").setSource("message", "message: this is a TLS handshake")); + indexRequests.add(client().prepareIndex("messages").setId("2").setSource("message", "message: this is a tcp handshake")); + indexRandom(true, false, indexRequests); + + SearchResponse response = client().prepareSearch("messages").setQuery(queryStringQuery("/TLS/").defaultField("message")).get(); + assertNoFailures(response); + assertHitCount(response, 1); + assertHits(response.getHits(), "1"); + + response = client().prepareSearch("messages").setQuery(queryStringQuery("/tls/").defaultField("message")).get(); + assertNoFailures(response); + assertHitCount(response, 1); + assertHits(response.getHits(), "1"); + + response = client().prepareSearch("messages").setQuery(queryStringQuery("/TCP/").defaultField("message")).get(); + assertNoFailures(response); + assertHitCount(response, 1); + 
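The Fuzziness change in the MultiMatchQueryIT hunk above (continued in the SearchQueryIT hunk below) is part of a wider migration from string-typed fuzziness arguments to the typed Fuzziness constants. A hedged sketch of the equivalences, assuming the query-builder API behaves as exercised by these tests; the field and term are illustrative:

// Hedged sketch of the typed Fuzziness equivalents (OpenSearch query-builder API).
QueryBuilders.matchQuery("text", "uniy").fuzziness(Fuzziness.ZERO);              // was fuzziness("0")
QueryBuilders.matchQuery("text", "uniy").fuzziness(Fuzziness.ONE);               // was fuzziness("1")
QueryBuilders.matchQuery("text", "uniy").fuzziness(Fuzziness.AUTO);              // was fuzziness("AUTO")
// customAuto(low, high): terms shorter than `low` must match exactly, terms of length
// low..high-1 allow one edit, longer terms allow two (mirrors the old "AUTO:5,7" form).
QueryBuilders.matchQuery("text", "uniy").fuzziness(Fuzziness.customAuto(5, 7));  // was fuzziness("AUTO:5,7")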
assertHits(response.getHits(), "2"); + + response = client().prepareSearch("messages").setQuery(queryStringQuery("/tcp/").defaultField("message")).get(); + assertNoFailures(response); + assertHitCount(response, 1); + assertHits(response.getHits(), "2"); + + response = client().prepareSearch("messages").setQuery(queryStringQuery("/HANDSHAKE/").defaultField("message")).get(); + assertNoFailures(response); + assertHitCount(response, 2); + assertHits(response.getHits(), "1", "2"); + } + public void testAllFields() throws Exception { String indexBody = copyToStringFromClasspath("/org/opensearch/search/query/all-query-index.json"); diff --git a/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java b/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java index c51043f02174d..d32487df10b38 100644 --- a/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/search/query/SearchQueryIT.java @@ -49,6 +49,7 @@ import org.opensearch.common.regex.Regex; import org.opensearch.common.settings.Settings; import org.opensearch.common.time.DateFormatter; +import org.opensearch.common.unit.Fuzziness; import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.common.xcontent.XContentFactory; import org.opensearch.common.xcontent.XContentType; @@ -89,12 +90,15 @@ import java.time.format.DateTimeFormatter; import java.util.Arrays; import java.util.Collection; +import java.util.HashSet; import java.util.Map; import java.util.Random; +import java.util.Set; import java.util.concurrent.ExecutionException; import java.util.regex.Pattern; import static java.util.Collections.singletonMap; +import static org.hamcrest.Matchers.containsInAnyOrder; import static org.opensearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; import static org.opensearch.common.xcontent.XContentFactory.jsonBuilder; @@ -759,21 +763,21 @@ public void testMatchQueryFuzzy() throws Exception { client().prepareIndex("test").setId("2").setSource("text", "Unity") ); - SearchResponse searchResponse = client().prepareSearch().setQuery(matchQuery("text", "uniy").fuzziness("0")).get(); + SearchResponse searchResponse = client().prepareSearch().setQuery(matchQuery("text", "uniy").fuzziness(Fuzziness.ZERO)).get(); assertHitCount(searchResponse, 0L); - searchResponse = client().prepareSearch().setQuery(matchQuery("text", "uniy").fuzziness("1")).get(); + searchResponse = client().prepareSearch().setQuery(matchQuery("text", "uniy").fuzziness(Fuzziness.ONE)).get(); assertHitCount(searchResponse, 2L); assertSearchHits(searchResponse, "1", "2"); - searchResponse = client().prepareSearch().setQuery(matchQuery("text", "uniy").fuzziness("AUTO")).get(); + searchResponse = client().prepareSearch().setQuery(matchQuery("text", "uniy").fuzziness(Fuzziness.AUTO)).get(); assertHitCount(searchResponse, 2L); assertSearchHits(searchResponse, "1", "2"); - searchResponse = client().prepareSearch().setQuery(matchQuery("text", "uniy").fuzziness("AUTO:5,7")).get(); + searchResponse = client().prepareSearch().setQuery(matchQuery("text", "uniy").fuzziness(Fuzziness.customAuto(5, 7))).get(); assertHitCount(searchResponse, 0L); - searchResponse = client().prepareSearch().setQuery(matchQuery("text", "unify").fuzziness("AUTO:5,7")).get(); + searchResponse = client().prepareSearch().setQuery(matchQuery("text", 
"unify").fuzziness(Fuzziness.customAuto(5, 7))).get(); assertHitCount(searchResponse, 1L); assertSearchHits(searchResponse, "2"); } @@ -2089,8 +2093,14 @@ public void testWildcardQueryNormalizationOnTextField() { refresh(); { + // test default case insensitivity: false WildcardQueryBuilder wildCardQuery = wildcardQuery("field1", "Bb*"); SearchResponse searchResponse = client().prepareSearch().setQuery(wildCardQuery).get(); + assertHitCount(searchResponse, 0L); + + // test case insensitivity set to true + wildCardQuery = wildcardQuery("field1", "Bb*").caseInsensitive(true); + searchResponse = client().prepareSearch().setQuery(wildCardQuery).get(); assertHitCount(searchResponse, 1L); wildCardQuery = wildcardQuery("field1", "bb*"); @@ -2099,6 +2109,24 @@ public void testWildcardQueryNormalizationOnTextField() { } } + /** tests wildcard case sensitivity */ + public void testWildcardCaseSensitivity() { + assertAcked(prepareCreate("test").setMapping("field", "type=text")); + client().prepareIndex("test").setId("1").setSource("field", "lowercase text").get(); + refresh(); + + // test case sensitive + SearchResponse response = client().prepareSearch("test").setQuery(wildcardQuery("field", "Text").caseInsensitive(false)).get(); + assertNoFailures(response); + assertHitCount(response, 0); + + // test case insensitive + response = client().prepareSearch("test").setQuery(wildcardQuery("field", "Text").caseInsensitive(true)).get(); + assertNoFailures(response); + assertHitCount(response, 1); + assertHits(response.getHits(), "1"); + } + /** * Reserved characters should be excluded when the normalization is applied for keyword fields. * See https://github.com/elastic/elasticsearch/issues/46300 for details. @@ -2175,4 +2203,16 @@ public void testIssueFuzzyInsideSpanMulti() { SearchResponse response = client().prepareSearch("test").setQuery(query).get(); assertHitCount(response, 1); } + + /** + * asserts the search response hits include the expected ids + */ + private void assertHits(SearchHits hits, String... 
ids) { + assertThat(hits.getTotalHits().value, equalTo((long) ids.length)); + Set hitIds = new HashSet<>(); + for (SearchHit hit : hits.getHits()) { + hitIds.add(hit.getId()); + } + assertThat(hitIds, containsInAnyOrder(ids)); + } } diff --git a/server/src/main/java/org/opensearch/OpenSearchException.java b/server/src/main/java/org/opensearch/OpenSearchException.java index aef098403ec2b..78f6b50b3a039 100644 --- a/server/src/main/java/org/opensearch/OpenSearchException.java +++ b/server/src/main/java/org/opensearch/OpenSearchException.java @@ -34,6 +34,7 @@ import org.opensearch.action.support.replication.ReplicationOperation; import org.opensearch.cluster.action.shard.ShardStateAction; +import org.opensearch.cluster.routing.UnsupportedWeightedRoutingStateException; import org.opensearch.cluster.service.ClusterManagerThrottlingException; import org.opensearch.common.CheckedFunction; import org.opensearch.common.Nullable; @@ -70,6 +71,7 @@ import static java.util.Collections.unmodifiableMap; import static org.opensearch.Version.V_2_1_0; import static org.opensearch.Version.V_2_4_0; +import static org.opensearch.Version.V_2_5_0; import static org.opensearch.Version.V_3_0_0; import static org.opensearch.cluster.metadata.IndexMetadata.INDEX_UUID_NA_VALUE; import static org.opensearch.common.xcontent.XContentParserUtils.ensureExpectedToken; @@ -1618,6 +1620,12 @@ private enum OpenSearchExceptionHandle { SnapshotInUseDeletionException::new, 166, UNKNOWN_VERSION_ADDED + ), + UNSUPPORTED_WEIGHTED_ROUTING_STATE_EXCEPTION( + UnsupportedWeightedRoutingStateException.class, + UnsupportedWeightedRoutingStateException::new, + 167, + V_2_5_0 ); final Class exceptionClass; diff --git a/server/src/main/java/org/opensearch/Version.java b/server/src/main/java/org/opensearch/Version.java index cef8ab1320342..0854cb978b4d0 100644 --- a/server/src/main/java/org/opensearch/Version.java +++ b/server/src/main/java/org/opensearch/Version.java @@ -83,16 +83,11 @@ public class Version implements Comparable, ToXContentFragment { public static final Version V_2_2_1 = new Version(2020199, org.apache.lucene.util.Version.LUCENE_9_3_0); public static final Version V_2_3_0 = new Version(2030099, org.apache.lucene.util.Version.LUCENE_9_3_0); public static final Version V_2_4_0 = new Version(2040099, org.apache.lucene.util.Version.LUCENE_9_4_1); + public static final Version V_2_4_1 = new Version(2040199, org.apache.lucene.util.Version.LUCENE_9_4_2); // UNRELEASED - public static final Version V_2_4_1 = new Version( - 2040199, - org.apache.lucene.util.Version.fromBits(9, 4, 2) /** needs updated 9.5.0 snapshots */ - ); - public static final Version V_2_5_0 = new Version( - 2050099, - org.apache.lucene.util.Version.fromBits(9, 4, 2) /** needs updated 9.5.0 snapshots */ - ); + public static final Version V_2_4_2 = new Version(2040299, org.apache.lucene.util.Version.LUCENE_9_4_2); + public static final Version V_2_5_0 = new Version(2050099, org.apache.lucene.util.Version.LUCENE_9_4_2); public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_5_0); public static final Version CURRENT = V_3_0_0; diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsAction.java index d0f5e8f198809..ffdb2735ae69f 100644 --- 
a/server/src/main/java/org/opensearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/configuration/TransportAddVotingConfigExclusionsAction.java @@ -44,10 +44,8 @@ import org.opensearch.cluster.ClusterStateUpdateTask; import org.opensearch.cluster.block.ClusterBlockException; import org.opensearch.cluster.block.ClusterBlockLevel; -import org.opensearch.cluster.coordination.CoordinationMetadata; import org.opensearch.cluster.coordination.CoordinationMetadata.VotingConfigExclusion; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; -import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Priority; import org.opensearch.common.inject.Inject; @@ -66,6 +64,9 @@ import java.util.function.Predicate; import java.util.stream.Collectors; +import static org.opensearch.action.admin.cluster.configuration.VotingConfigExclusionsHelper.resolveVotingConfigExclusionsAndCheckMaximum; +import static org.opensearch.action.admin.cluster.configuration.VotingConfigExclusionsHelper.addExclusionAndGetState; + /** * Transport endpoint action for adding exclusions to voting config * @@ -144,13 +145,7 @@ public ClusterState execute(ClusterState currentState) { assert resolvedExclusions == null : resolvedExclusions; final int finalMaxVotingConfigExclusions = TransportAddVotingConfigExclusionsAction.this.maxVotingConfigExclusions; resolvedExclusions = resolveVotingConfigExclusionsAndCheckMaximum(request, currentState, finalMaxVotingConfigExclusions); - - final CoordinationMetadata.Builder builder = CoordinationMetadata.builder(currentState.coordinationMetadata()); - resolvedExclusions.forEach(builder::addVotingConfigExclusion); - final Metadata newMetadata = Metadata.builder(currentState.metadata()).coordinationMetadata(builder.build()).build(); - final ClusterState newState = ClusterState.builder(currentState).metadata(newMetadata).build(); - assert newState.getVotingConfigExclusions().size() <= finalMaxVotingConfigExclusions; - return newState; + return addExclusionAndGetState(currentState, resolvedExclusions, finalMaxVotingConfigExclusions); } @Override @@ -213,18 +208,6 @@ public void onTimeout(TimeValue timeout) { }); } - private static Set resolveVotingConfigExclusionsAndCheckMaximum( - AddVotingConfigExclusionsRequest request, - ClusterState state, - int maxVotingConfigExclusions - ) { - return request.resolveVotingConfigExclusionsAndCheckMaximum( - state, - maxVotingConfigExclusions, - MAXIMUM_VOTING_CONFIG_EXCLUSIONS_SETTING.getKey() - ); - } - @Override protected ClusterBlockException checkBlock(AddVotingConfigExclusionsRequest request, ClusterState state) { return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_WRITE); diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/configuration/TransportClearVotingConfigExclusionsAction.java b/server/src/main/java/org/opensearch/action/admin/cluster/configuration/TransportClearVotingConfigExclusionsAction.java index 1fc02db4309b1..b65688dcc30f6 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/configuration/TransportClearVotingConfigExclusionsAction.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/configuration/TransportClearVotingConfigExclusionsAction.java @@ -44,10 +44,8 @@ import org.opensearch.cluster.ClusterStateUpdateTask; import org.opensearch.cluster.block.ClusterBlockException; import 
org.opensearch.cluster.block.ClusterBlockLevel; -import org.opensearch.cluster.coordination.CoordinationMetadata; import org.opensearch.cluster.coordination.CoordinationMetadata.VotingConfigExclusion; import org.opensearch.cluster.metadata.IndexNameExpressionResolver; -import org.opensearch.cluster.metadata.Metadata; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Priority; import org.opensearch.common.inject.Inject; @@ -60,6 +58,8 @@ import java.io.IOException; import java.util.function.Predicate; +import static org.opensearch.action.admin.cluster.configuration.VotingConfigExclusionsHelper.clearExclusionsAndGetState; + /** * Transport endpoint action for clearing exclusions to voting config * @@ -166,13 +166,7 @@ private void submitClearVotingConfigExclusionsTask( clusterService.submitStateUpdateTask("clear-voting-config-exclusions", new ClusterStateUpdateTask(Priority.URGENT) { @Override public ClusterState execute(ClusterState currentState) { - final CoordinationMetadata newCoordinationMetadata = CoordinationMetadata.builder(currentState.coordinationMetadata()) - .clearVotingConfigExclusions() - .build(); - final Metadata newMetadata = Metadata.builder(currentState.metadata()) - .coordinationMetadata(newCoordinationMetadata) - .build(); - return ClusterState.builder(currentState).metadata(newMetadata).build(); + return clearExclusionsAndGetState(currentState); } @Override diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/configuration/VotingConfigExclusionsHelper.java b/server/src/main/java/org/opensearch/action/admin/cluster/configuration/VotingConfigExclusionsHelper.java new file mode 100644 index 0000000000000..5cc4bd2f831d7 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/admin/cluster/configuration/VotingConfigExclusionsHelper.java @@ -0,0 +1,81 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.admin.cluster.configuration; + +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.coordination.CoordinationMetadata; +import org.opensearch.cluster.coordination.CoordinationMetadata.VotingConfigExclusion; +import org.opensearch.cluster.metadata.Metadata; + +import java.util.Set; + +import static org.opensearch.action.admin.cluster.configuration.TransportAddVotingConfigExclusionsAction.MAXIMUM_VOTING_CONFIG_EXCLUSIONS_SETTING; + +/** + * Static helper utilities for voting config exclusions cluster state updates + * + * @opensearch.internal + */ +public class VotingConfigExclusionsHelper { + + /** + * Static helper to update current state with given resolved exclusions + * + * @param currentState current cluster state + * @param resolvedExclusions resolved exclusions from the request + * @param finalMaxVotingConfigExclusions max exclusions that can be added + * @return newly formed cluster state + */ + public static ClusterState addExclusionAndGetState( + ClusterState currentState, + Set resolvedExclusions, + int finalMaxVotingConfigExclusions + ) { + final CoordinationMetadata.Builder builder = CoordinationMetadata.builder(currentState.coordinationMetadata()); + resolvedExclusions.forEach(builder::addVotingConfigExclusion); + final Metadata newMetadata = Metadata.builder(currentState.metadata()).coordinationMetadata(builder.build()).build(); + final ClusterState newState = ClusterState.builder(currentState).metadata(newMetadata).build(); + assert newState.getVotingConfigExclusions().size() <= finalMaxVotingConfigExclusions; + return newState; + } + + /** + * Resolves the exclusions from the request and throws an IAE if no nodes matched or the maximum is exceeded + * + * @param request AddVotingConfigExclusionsRequest request + * @param state current cluster state + * @param maxVotingConfigExclusions max number of exclusions acceptable + * @return set of VotingConfigExclusion + */ + public static Set resolveVotingConfigExclusionsAndCheckMaximum( + AddVotingConfigExclusionsRequest request, + ClusterState state, + int maxVotingConfigExclusions + ) { + return request.resolveVotingConfigExclusionsAndCheckMaximum( + state, + maxVotingConfigExclusions, + MAXIMUM_VOTING_CONFIG_EXCLUSIONS_SETTING.getKey() + ); + } + + /** + * Clears voting config exclusions from the given cluster state + * + * @param currentState current cluster state + * @return newly formed cluster state after clearing voting config exclusions + */ + public static ClusterState clearExclusionsAndGetState(ClusterState currentState) { + final CoordinationMetadata newCoordinationMetadata = CoordinationMetadata.builder(currentState.coordinationMetadata()) + .clearVotingConfigExclusions() + .build(); + final Metadata newMetadata = Metadata.builder(currentState.metadata()).coordinationMetadata(newCoordinationMetadata).build(); + return ClusterState.builder(currentState).metadata(newMetadata).build(); + } +} diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/put/ClusterPutWeightedRoutingRequest.java b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/put/ClusterPutWeightedRoutingRequest.java index af229fb12b4f0..5474f4effa829 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/put/ClusterPutWeightedRoutingRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/shards/routing/weighted/put/ClusterPutWeightedRoutingRequest.java @@ -28,7 +28,6 @@ import
java.io.IOException; import java.util.HashMap; -import java.util.Locale; import java.util.Map; import static org.opensearch.action.ValidateActions.addValidationError; @@ -127,26 +126,17 @@ public ActionRequestValidationException validate() { if (weightedRouting.weights() == null || weightedRouting.weights().isEmpty()) { validationException = addValidationError("Weights are missing", validationException); } - int countValueWithZeroWeights = 0; - double weight; try { for (Object value : weightedRouting.weights().values()) { if (value == null) { validationException = addValidationError(("Weight is null"), validationException); } else { - weight = Double.parseDouble(value.toString()); - countValueWithZeroWeights = (weight == 0) ? countValueWithZeroWeights + 1 : countValueWithZeroWeights; + Double.parseDouble(value.toString()); } } } catch (NumberFormatException e) { validationException = addValidationError(("Weight is not a number"), validationException); } - if (countValueWithZeroWeights > 1) { - validationException = addValidationError( - (String.format(Locale.ROOT, "More than one [%d] value has weight set as 0", countValueWithZeroWeights)), - validationException - ); - } return validationException; } diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateResponse.java index d2d7d843e19db..f65d15c5c64aa 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/state/ClusterStateResponse.java @@ -96,6 +96,11 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(waitForTimedOut); } + @Override + public String toString() { + return "ClusterStateResponse{" + "clusterState=" + clusterState + '}'; + } + @Override public boolean equals(Object o) { if (this == o) return true; diff --git a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequest.java b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequest.java index f83431994a649..9f9c85933f394 100644 --- a/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequest.java +++ b/server/src/main/java/org/opensearch/action/admin/indices/shrink/ResizeRequest.java @@ -95,7 +95,7 @@ public ResizeRequest(StreamInput in) throws IOException { sourceIndex = in.readString(); type = in.readEnum(ResizeType.class); copySettings = in.readOptionalBoolean(); - if (in.getVersion().onOrAfter(Version.V_3_0_0)) { + if (in.getVersion().onOrAfter(Version.V_2_5_0)) { maxShardSize = in.readOptionalWriteable(ByteSizeValue::new); } } @@ -140,7 +140,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(sourceIndex); out.writeEnum(type); out.writeOptionalBoolean(copySettings); - if (out.getVersion().onOrAfter(Version.V_3_0_0)) { + if (out.getVersion().onOrAfter(Version.V_2_5_0)) { out.writeOptionalWriteable(maxShardSize); } } diff --git a/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java b/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java index 56fb688290002..59f9042ec4a85 100644 --- a/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java +++ b/server/src/main/java/org/opensearch/action/bulk/TransportShardBulkAction.java @@ -94,6 +94,8 @@ import java.util.function.Function; import java.util.function.LongSupplier; +import 
org.opensearch.action.support.replication.ReplicationMode; + + /** * Performs shard-level bulk (index, delete or update) operations * @@ -193,6 +195,14 @@ protected long primaryOperationSize(BulkShardRequest request) { return request.ramBytesUsed(); } + @Override + protected ReplicationMode getReplicationMode(IndexShard indexShard) { + if (indexShard.isRemoteTranslogEnabled()) { + return ReplicationMode.PRIMARY_TERM_VALIDATION; + } + return super.getReplicationMode(indexShard); + } + public static void performOnPrimary( BulkShardRequest request, IndexShard primary, diff --git a/server/src/main/java/org/opensearch/action/support/replication/FanoutReplicationProxy.java b/server/src/main/java/org/opensearch/action/support/replication/FanoutReplicationProxy.java new file mode 100644 index 0000000000000..2980df4c1c0af --- /dev/null +++ b/server/src/main/java/org/opensearch/action/support/replication/FanoutReplicationProxy.java @@ -0,0 +1,25 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.support.replication; + +import org.opensearch.cluster.routing.ShardRouting; + +/** + * This implementation of {@link ReplicationProxy} fans out the replication request to every shard routing that is not + * the primary, using {@link ReplicationMode#FULL_REPLICATION}. + * + * @opensearch.internal + */ +public class FanoutReplicationProxy extends ReplicationProxy { + + @Override + ReplicationMode determineReplicationMode(ShardRouting shardRouting, ShardRouting primaryRouting) { + return shardRouting.isSameAllocation(primaryRouting) == false ? ReplicationMode.FULL_REPLICATION : ReplicationMode.NO_REPLICATION; + } +} diff --git a/server/src/main/java/org/opensearch/action/support/replication/ReplicationMode.java b/server/src/main/java/org/opensearch/action/support/replication/ReplicationMode.java new file mode 100644 index 0000000000000..f9b85cc4bd7aa --- /dev/null +++ b/server/src/main/java/org/opensearch/action/support/replication/ReplicationMode.java @@ -0,0 +1,32 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.support.replication; + +/** + * The type of replication used for inter-node replication. + * + * @opensearch.internal + */ +public enum ReplicationMode { + /** + * In this mode, a {@code TransportReplicationAction} is fanned out to the concerned underlying shard and is replicated logically. + * In short, this mode replicates the {@link ReplicationRequest} to + * the replica shard along with primary term validation. + */ + FULL_REPLICATION, + /** + * In this mode, a {@code TransportReplicationAction} is fanned out to the concerned underlying shard for + * primary term validation only. The request is not replicated logically. + */ + PRIMARY_TERM_VALIDATION, + /** + * In this mode, a {@code TransportReplicationAction} does not fan out to the concerned underlying shard at all. + */ + NO_REPLICATION; +}
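Taken together, the hunks above let each replication action choose a per-shard mode. A minimal sketch of how an action opts into primary-term-validation-only replication, mirroring the TransportShardBulkAction override in this patch; the class name is illustrative and the ReplicationMode/IndexShard imports from this patch are assumed:

// Hedged sketch: an action overrides getReplicationMode to skip logical replication when
// the shard syncs its replicas through a remote translog store.
abstract class ExampleReplicationAction {
    protected ReplicationMode getReplicationMode(IndexShard indexShard) {
        if (indexShard.isRemoteTranslogEnabled()) {
            // Replicas catch up from the remote store; fan out only to validate the primary term.
            return ReplicationMode.PRIMARY_TERM_VALIDATION;
        }
        // Default: logically replicate the request to every replication target.
        return ReplicationMode.FULL_REPLICATION;
    }
}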
diff --git a/server/src/main/java/org/opensearch/action/support/replication/ReplicationModeAwareProxy.java b/server/src/main/java/org/opensearch/action/support/replication/ReplicationModeAwareProxy.java new file mode 100644 index 0000000000000..fa28e99d5696f --- /dev/null +++ b/server/src/main/java/org/opensearch/action/support/replication/ReplicationModeAwareProxy.java @@ -0,0 +1,44 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.support.replication; + +import org.opensearch.cluster.routing.ShardRouting; + +import java.util.Objects; + +/** + * This implementation of {@link ReplicationProxy} fans out the replication request to the current shard routing based on + * the shard routing's replication mode and the replication override policy. + * + * @opensearch.internal + */ +public class ReplicationModeAwareProxy extends ReplicationProxy { + + private final ReplicationMode replicationModeOverride; + + public ReplicationModeAwareProxy(ReplicationMode replicationModeOverride) { + assert Objects.nonNull(replicationModeOverride); + this.replicationModeOverride = replicationModeOverride; + } + + @Override + ReplicationMode determineReplicationMode(ShardRouting shardRouting, ShardRouting primaryRouting) { + + // If the current routing is the primary, then it does not need to be replicated + if (shardRouting.isSameAllocation(primaryRouting)) { + return ReplicationMode.NO_REPLICATION; + } + + if (primaryRouting.relocating() && shardRouting.isSameAllocation(primaryRouting.getTargetRelocatingShard())) { + return ReplicationMode.FULL_REPLICATION; + } + + return replicationModeOverride; + } +}
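The three-way decision in ReplicationModeAwareProxy above can be hard to see through the routing plumbing. The following self-contained toy model restates it over plain allocation ids; it is illustrative only and not part of the patch (the real code operates on ShardRouting allocation ids):

// Toy model of ReplicationModeAwareProxy.determineReplicationMode using plain allocation ids.
class ReplicationDecisionSketch {
    enum Mode { FULL_REPLICATION, PRIMARY_TERM_VALIDATION, NO_REPLICATION }

    static Mode decide(String target, String primary, String relocationTarget, Mode override) {
        if (target.equals(primary)) {
            return Mode.NO_REPLICATION;        // never "replicate" to the primary itself
        }
        if (target.equals(relocationTarget)) {
            return Mode.FULL_REPLICATION;      // a relocating primary's target must stay in sync
        }
        return override;                       // every other copy gets the action's override
    }

    public static void main(String[] args) {
        System.out.println(decide("a1", "a1", "a2", Mode.PRIMARY_TERM_VALIDATION)); // NO_REPLICATION
        System.out.println(decide("a2", "a1", "a2", Mode.PRIMARY_TERM_VALIDATION)); // FULL_REPLICATION
        System.out.println(decide("a3", "a1", "a2", Mode.PRIMARY_TERM_VALIDATION)); // PRIMARY_TERM_VALIDATION
    }
}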
diff --git a/server/src/main/java/org/opensearch/action/support/replication/ReplicationOperation.java b/server/src/main/java/org/opensearch/action/support/replication/ReplicationOperation.java index da37eee88a4e0..1a6a5a9245eb2 100644 --- a/server/src/main/java/org/opensearch/action/support/replication/ReplicationOperation.java +++ b/server/src/main/java/org/opensearch/action/support/replication/ReplicationOperation.java @@ -35,13 +35,14 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.store.AlreadyClosedException; import org.opensearch.Assertions; -import org.opensearch.OpenSearchException; import org.opensearch.ExceptionsHelper; +import org.opensearch.OpenSearchException; import org.opensearch.action.ActionListener; import org.opensearch.action.UnavailableShardsException; import org.opensearch.action.support.ActiveShardCount; import org.opensearch.action.support.RetryableAction; import org.opensearch.action.support.TransportActions; +import org.opensearch.action.support.replication.ReplicationProxyRequest.Builder; import org.opensearch.cluster.action.shard.ShardStateAction; import org.opensearch.cluster.routing.IndexShardRoutingTable; import org.opensearch.cluster.routing.ShardRouting; @@ -99,6 +100,7 @@ public class ReplicationOperation< private final TimeValue initialRetryBackoffBound; private final TimeValue retryTimeout; private final long primaryTerm; + private final ReplicationProxy replicationProxy; // exposed for tests private final ActionListener resultListener; @@ -117,7 +119,8 @@ public ReplicationOperation( String opType, long primaryTerm, TimeValue initialRetryBackoffBound, - TimeValue retryTimeout + TimeValue retryTimeout, + ReplicationProxy replicationProxy ) { this.replicasProxy = replicas; this.primary = primary; @@ -129,6 +132,7 @@ public ReplicationOperation( this.primaryTerm = primaryTerm; this.initialRetryBackoffBound = initialRetryBackoffBound; this.retryTimeout = retryTimeout; + this.replicationProxy = replicationProxy; } public void execute() throws Exception { @@ -226,20 +230,26 @@ private void performOnReplicas( final ShardRouting primaryRouting = primary.routingEntry(); - for (final ShardRouting shard : replicationGroup.getReplicationTargets()) { - if (shard.isSameAllocation(primaryRouting) == false) { - performOnReplica(shard, replicaRequest, globalCheckpoint, maxSeqNoOfUpdatesOrDeletes, pendingReplicationActions); - } + for (final ShardRouting shardRouting : replicationGroup.getReplicationTargets()) { + ReplicationProxyRequest proxyRequest = new Builder( + shardRouting, + primaryRouting, + globalCheckpoint, + maxSeqNoOfUpdatesOrDeletes, + pendingReplicationActions, + replicaRequest + ).build(); + replicationProxy.performOnReplicaProxy(proxyRequest, this::performOnReplica); } } - private void performOnReplica( - final ShardRouting shard, - final ReplicaRequest replicaRequest, - final long globalCheckpoint, - final long maxSeqNoOfUpdatesOrDeletes, - final PendingReplicationActions pendingReplicationActions - ) { + private void performOnReplica(final ReplicationProxyRequest replicationProxyRequest) { + final ShardRouting shard = replicationProxyRequest.getShardRouting(); + final ReplicaRequest replicaRequest = replicationProxyRequest.getReplicaRequest(); + final long globalCheckpoint = replicationProxyRequest.getGlobalCheckpoint(); + final long maxSeqNoOfUpdatesOrDeletes = replicationProxyRequest.getMaxSeqNoOfUpdatesOrDeletes(); + final PendingReplicationActions pendingReplicationActions = replicationProxyRequest.getPendingReplicationActions(); + if (logger.isTraceEnabled()) { logger.trace("[{}] sending op [{}] to replica {} for request [{}]", shard.shardId(), opType, shard, replicaRequest); } } diff --git a/server/src/main/java/org/opensearch/action/support/replication/ReplicationProxy.java b/server/src/main/java/org/opensearch/action/support/replication/ReplicationProxy.java new file mode 100644 index 0000000000000..e098ea1aed960 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/support/replication/ReplicationProxy.java @@ -0,0 +1,51 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.support.replication; + +import org.opensearch.cluster.routing.ShardRouting; + +import java.util.function.Consumer; + +/** + * Used for performing any replication operation on replicas. Depending on the implementation, the replication call + * can fan out or stop here. + * + * @opensearch.internal + */ +public abstract class ReplicationProxy { + + /** + * Depending on the concrete implementation and the passed {@link ReplicationMode}, the replication + * mode is determined, which decides whether the replication request is performed on the replica or not.
+ * + * @param proxyRequest replication proxy request + * @param originalPerformOnReplicaConsumer original performOnReplica method passed as consumer + */ + public void performOnReplicaProxy( + ReplicationProxyRequest<ReplicaRequest> proxyRequest, + Consumer<ReplicationProxyRequest<ReplicaRequest>> originalPerformOnReplicaConsumer + ) { + ReplicationMode replicationMode = determineReplicationMode(proxyRequest.getShardRouting(), proxyRequest.getPrimaryRouting()); + // If the replication mode is 1. logical replication or 2. primary term validation, we let the call get performed on the + // replica shard. + if (replicationMode == ReplicationMode.FULL_REPLICATION || replicationMode == ReplicationMode.PRIMARY_TERM_VALIDATION) { + originalPerformOnReplicaConsumer.accept(proxyRequest); + } + } + + /** + * Determines the replication mode based on the constructor arguments of the implementation and the current + * replication mode aware shard routing. + * + * @param shardRouting replication mode aware ShardRouting + * @param primaryRouting primary ShardRouting + * @return the determined replication mode. + */ + abstract ReplicationMode determineReplicationMode(final ShardRouting shardRouting, final ShardRouting primaryRouting); +} diff --git a/server/src/main/java/org/opensearch/action/support/replication/ReplicationProxyFactory.java b/server/src/main/java/org/opensearch/action/support/replication/ReplicationProxyFactory.java new file mode 100644 index 0000000000000..a2bbf58fb9100 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/support/replication/ReplicationProxyFactory.java @@ -0,0 +1,29 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.support.replication; + +import org.opensearch.index.shard.IndexShard; + +/** + * Factory that returns the {@link ReplicationProxy} instance based on the {@link ReplicationMode}. + * + * @opensearch.internal + */ +public class ReplicationProxyFactory { + + public static ReplicationProxy create( + final IndexShard indexShard, + final ReplicationMode replicationModeOverride + ) { + if (indexShard.isRemoteTranslogEnabled()) { + return new ReplicationModeAwareProxy<>(replicationModeOverride); + } + return new FanoutReplicationProxy<>(); + } +} diff --git a/server/src/main/java/org/opensearch/action/support/replication/ReplicationProxyRequest.java b/server/src/main/java/org/opensearch/action/support/replication/ReplicationProxyRequest.java new file mode 100644 index 0000000000000..180efd6f423c3 --- /dev/null +++ b/server/src/main/java/org/opensearch/action/support/replication/ReplicationProxyRequest.java @@ -0,0 +1,116 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.action.support.replication; + +import org.opensearch.cluster.routing.ShardRouting; + +import java.util.Objects; + +/** + * This is a proxy wrapper over the replication request; instances are created using the Builder nested inside.
+ * + * @opensearch.internal + */ +public class ReplicationProxyRequest { + + private final ShardRouting shardRouting; + + private final ShardRouting primaryRouting; + + private final long globalCheckpoint; + + private final long maxSeqNoOfUpdatesOrDeletes; + + private final PendingReplicationActions pendingReplicationActions; + + private final ReplicaRequest replicaRequest; + + private ReplicationProxyRequest( + ShardRouting shardRouting, + ShardRouting primaryRouting, + long globalCheckpoint, + long maxSeqNoOfUpdatesOrDeletes, + PendingReplicationActions pendingReplicationActions, + ReplicaRequest replicaRequest + ) { + this.shardRouting = Objects.requireNonNull(shardRouting); + this.primaryRouting = Objects.requireNonNull(primaryRouting); + this.globalCheckpoint = globalCheckpoint; + this.maxSeqNoOfUpdatesOrDeletes = maxSeqNoOfUpdatesOrDeletes; + this.pendingReplicationActions = Objects.requireNonNull(pendingReplicationActions); + this.replicaRequest = Objects.requireNonNull(replicaRequest); + } + + public ShardRouting getShardRouting() { + return shardRouting; + } + + public ShardRouting getPrimaryRouting() { + return primaryRouting; + } + + public long getGlobalCheckpoint() { + return globalCheckpoint; + } + + public long getMaxSeqNoOfUpdatesOrDeletes() { + return maxSeqNoOfUpdatesOrDeletes; + } + + public PendingReplicationActions getPendingReplicationActions() { + return pendingReplicationActions; + } + + public ReplicaRequest getReplicaRequest() { + return replicaRequest; + } + + /** + * Builder of ReplicationProxyRequest. + * + * @opensearch.internal + */ + public static class Builder { + + private final ShardRouting shardRouting; + private final ShardRouting primaryRouting; + private final long globalCheckpoint; + private final long maxSeqNoOfUpdatesOrDeletes; + private final PendingReplicationActions pendingReplicationActions; + private final ReplicaRequest replicaRequest; + + public Builder( + ShardRouting shardRouting, + ShardRouting primaryRouting, + long globalCheckpoint, + long maxSeqNoOfUpdatesOrDeletes, + PendingReplicationActions pendingReplicationActions, + ReplicaRequest replicaRequest + ) { + this.shardRouting = shardRouting; + this.primaryRouting = primaryRouting; + this.globalCheckpoint = globalCheckpoint; + this.maxSeqNoOfUpdatesOrDeletes = maxSeqNoOfUpdatesOrDeletes; + this.pendingReplicationActions = pendingReplicationActions; + this.replicaRequest = replicaRequest; + } + + public ReplicationProxyRequest build() { + return new ReplicationProxyRequest<>( + shardRouting, + primaryRouting, + globalCheckpoint, + maxSeqNoOfUpdatesOrDeletes, + pendingReplicationActions, + replicaRequest + ); + } + + } +} diff --git a/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java b/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java index 9d3ee8e49e8c2..0a0904a1b3aaa 100644 --- a/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java +++ b/server/src/main/java/org/opensearch/action/support/replication/TransportReplicationAction.java @@ -258,6 +258,19 @@ protected ReplicationOperation.Replicas newReplicasProxy() { return new ReplicasProxy(); } + /** + * This method is used for defining the {@link ReplicationMode} override per {@link TransportReplicationAction}. + * + * @param indexShard index shard used to determine the policy. + * @return the overridden replication mode.
+ */ + protected ReplicationMode getReplicationMode(IndexShard indexShard) { + if (indexShard.isRemoteTranslogEnabled()) { + return ReplicationMode.NO_REPLICATION; + } + return ReplicationMode.FULL_REPLICATION; + } + protected abstract Response newResponseInstance(StreamInput in) throws IOException; /** @@ -533,7 +546,11 @@ public void handleException(TransportException exp) { actionName, primaryRequest.getPrimaryTerm(), initialRetryBackoffBound, - retryTimeout + retryTimeout, + ReplicationProxyFactory.create( + primaryShardReference.indexShard, + getReplicationMode(primaryShardReference.indexShard) + ) ).execute(); } } catch (Exception e) { diff --git a/server/src/main/java/org/opensearch/bootstrap/Security.java b/server/src/main/java/org/opensearch/bootstrap/Security.java index 749c146de4f16..0eab6f9cbcbf1 100644 --- a/server/src/main/java/org/opensearch/bootstrap/Security.java +++ b/server/src/main/java/org/opensearch/bootstrap/Security.java @@ -36,6 +36,7 @@ import org.opensearch.common.SuppressForbidden; import org.opensearch.common.io.PathUtils; import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.env.Environment; import org.opensearch.http.HttpTransportSettings; import org.opensearch.plugins.PluginInfo; @@ -316,6 +317,9 @@ static void addFilePermissions(Permissions policy, Environment environment) thro addDirectoryPath(policy, Environment.PATH_HOME_SETTING.getKey(), environment.libDir(), "read,readlink", false); addDirectoryPath(policy, Environment.PATH_HOME_SETTING.getKey(), environment.modulesDir(), "read,readlink", false); addDirectoryPath(policy, Environment.PATH_HOME_SETTING.getKey(), environment.pluginsDir(), "read,readlink", false); + if (FeatureFlags.isEnabled(FeatureFlags.EXTENSIONS)) { + addDirectoryPath(policy, Environment.PATH_HOME_SETTING.getKey(), environment.extensionDir(), "read,readlink", false); + } addDirectoryPath(policy, "path.conf'", environment.configDir(), "read,readlink", false); // read-write dirs addDirectoryPath(policy, "java.io.tmpdir", environment.tmpDir(), "read,readlink,write,delete", false); diff --git a/server/src/main/java/org/opensearch/cluster/ClusterSettingsResponse.java b/server/src/main/java/org/opensearch/cluster/ClusterSettingsResponse.java new file mode 100644 index 0000000000000..e84c2c902abdd --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/ClusterSettingsResponse.java @@ -0,0 +1,60 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.cluster; + +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.settings.Settings; +import org.opensearch.transport.TransportResponse; + +import java.io.IOException; +import java.util.Objects; + +/** + * ClusterSettings Response for Extensibility + * + * @opensearch.internal + */ +public class ClusterSettingsResponse extends TransportResponse { + private final Settings clusterSettings; + + public ClusterSettingsResponse(ClusterService clusterService) { + this.clusterSettings = clusterService.getSettings(); + } + + public ClusterSettingsResponse(StreamInput in) throws IOException { + super(in); + this.clusterSettings = Settings.readSettingsFromStream(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + Settings.writeSettingsToStream(clusterSettings, out); + } + + @Override + public String toString() { + return "ClusterSettingsResponse{" + "clusterSettings=" + clusterSettings + '}'; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ClusterSettingsResponse that = (ClusterSettingsResponse) o; + return Objects.equals(clusterSettings, that.clusterSettings); + } + + @Override + public int hashCode() { + return Objects.hash(clusterSettings); + } + +} diff --git a/server/src/main/java/org/opensearch/cluster/LocalNodeResponse.java b/server/src/main/java/org/opensearch/cluster/LocalNodeResponse.java new file mode 100644 index 0000000000000..ef1ef4a49ad62 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/LocalNodeResponse.java @@ -0,0 +1,60 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license.
+ */ + +package org.opensearch.cluster; + +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.transport.TransportResponse; + +import java.io.IOException; +import java.util.Objects; + +/** + * LocalNode Response for Extensibility + * + * @opensearch.internal + */ +public class LocalNodeResponse extends TransportResponse { + private final DiscoveryNode localNode; + + public LocalNodeResponse(ClusterService clusterService) { + this.localNode = clusterService.localNode(); + } + + public LocalNodeResponse(StreamInput in) throws IOException { + super(in); + this.localNode = new DiscoveryNode(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + this.localNode.writeTo(out); + } + + @Override + public String toString() { + return "LocalNodeResponse{" + "localNode=" + localNode + '}'; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + LocalNodeResponse that = (LocalNodeResponse) o; + return Objects.equals(localNode, that.localNode); + } + + @Override + public int hashCode() { + return Objects.hash(localNode); + } + +} diff --git a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java index fbb345ea3a441..fd52f48c7b5f8 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/Coordinator.java @@ -105,7 +105,7 @@ import java.util.stream.StreamSupport; import static org.opensearch.cluster.coordination.NoClusterManagerBlockService.NO_CLUSTER_MANAGER_BLOCK_ID; -import static org.opensearch.cluster.decommission.DecommissionService.nodeCommissioned; +import static org.opensearch.cluster.decommission.DecommissionHelper.nodeCommissioned; import static org.opensearch.gateway.ClusterStateUpdaters.hideStateIfNotRecovered; import static org.opensearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK; import static org.opensearch.monitor.StatusInfo.Status.UNHEALTHY; diff --git a/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java b/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java index 02f3828e0e4c5..626e47108cc63 100644 --- a/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java +++ b/server/src/main/java/org/opensearch/cluster/coordination/JoinTaskExecutor.java @@ -59,7 +59,7 @@ import java.util.function.BiConsumer; import java.util.stream.Collectors; -import static org.opensearch.cluster.decommission.DecommissionService.nodeCommissioned; +import static org.opensearch.cluster.decommission.DecommissionHelper.nodeCommissioned; import static org.opensearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK; /** diff --git a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionController.java b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionController.java index ffb20a05b3ef7..1ff2fb52175c7 100644 --- a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionController.java +++ b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionController.java @@ -12,12 +12,6 @@ import org.apache.logging.log4j.Logger; import org.opensearch.OpenSearchTimeoutException; import 
org.opensearch.action.ActionListener; -import org.opensearch.action.admin.cluster.configuration.AddVotingConfigExclusionsAction; -import org.opensearch.action.admin.cluster.configuration.AddVotingConfigExclusionsRequest; -import org.opensearch.action.admin.cluster.configuration.AddVotingConfigExclusionsResponse; -import org.opensearch.action.admin.cluster.configuration.ClearVotingConfigExclusionsAction; -import org.opensearch.action.admin.cluster.configuration.ClearVotingConfigExclusionsRequest; -import org.opensearch.action.admin.cluster.configuration.ClearVotingConfigExclusionsResponse; import org.opensearch.action.admin.cluster.node.stats.NodeStats; import org.opensearch.action.admin.cluster.node.stats.NodesStatsAction; import org.opensearch.action.admin.cluster.node.stats.NodesStatsRequest; @@ -33,7 +27,6 @@ import org.opensearch.cluster.routing.allocation.AllocationService; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Priority; -import org.opensearch.common.Strings; import org.opensearch.common.io.stream.StreamInput; import org.opensearch.common.unit.TimeValue; import org.opensearch.http.HttpStats; @@ -52,6 +45,8 @@ import java.util.function.Predicate; import java.util.stream.Collectors; +import static org.opensearch.action.admin.cluster.configuration.VotingConfigExclusionsHelper.clearExclusionsAndGetState; + /** * Helper controller class to remove list of nodes from the cluster and update status * @@ -79,83 +74,6 @@ public class DecommissionController { this.threadPool = threadPool; } - /** - * Transport call to add nodes to voting config exclusion - * - * @param nodes set of nodes Ids to be added to voting config exclusion list - * @param listener callback for response or failure - */ - public void excludeDecommissionedNodesFromVotingConfig(Set nodes, ActionListener listener) { - transportService.sendRequest( - transportService.getLocalNode(), - AddVotingConfigExclusionsAction.NAME, - new AddVotingConfigExclusionsRequest( - Strings.EMPTY_ARRAY, - nodes.toArray(String[]::new), - Strings.EMPTY_ARRAY, - TimeValue.timeValueSeconds(120) // giving a larger timeout of 120 sec as cluster might already be in stress when - // decommission is triggered - ), - new TransportResponseHandler() { - @Override - public void handleResponse(AddVotingConfigExclusionsResponse response) { - listener.onResponse(null); - } - - @Override - public void handleException(TransportException exp) { - listener.onFailure(exp); - } - - @Override - public String executor() { - return ThreadPool.Names.SAME; - } - - @Override - public AddVotingConfigExclusionsResponse read(StreamInput in) throws IOException { - return new AddVotingConfigExclusionsResponse(in); - } - } - ); - } - - /** - * Transport call to clear voting config exclusion - * - * @param listener callback for response or failure - */ - public void clearVotingConfigExclusion(ActionListener listener, boolean waitForRemoval) { - final ClearVotingConfigExclusionsRequest clearVotingConfigExclusionsRequest = new ClearVotingConfigExclusionsRequest(); - clearVotingConfigExclusionsRequest.setWaitForRemoval(waitForRemoval); - transportService.sendRequest( - transportService.getLocalNode(), - ClearVotingConfigExclusionsAction.NAME, - clearVotingConfigExclusionsRequest, - new TransportResponseHandler() { - @Override - public void handleResponse(ClearVotingConfigExclusionsResponse response) { - listener.onResponse(null); - } - - @Override - public void handleException(TransportException exp) { - listener.onFailure(exp); - } - - 
@Override - public String executor() { - return ThreadPool.Names.SAME; - } - - @Override - public ClearVotingConfigExclusionsResponse read(StreamInput in) throws IOException { - return new ClearVotingConfigExclusionsResponse(in); - } - } - ); - } - /** * This method triggers a batch of tasks for nodes to be decommissioned using the executor {@link NodeRemovalClusterStateTaskExecutor}. Once the tasks are submitted, it waits for an expected cluster state to guarantee @@ -259,9 +177,15 @@ public ClusterState execute(ClusterState currentState) { decommissionAttributeMetadata.decommissionAttribute(), decommissionStatus ); - return ClusterState.builder(currentState) + ClusterState newState = ClusterState.builder(currentState) .metadata(Metadata.builder(currentState.metadata()).decommissionAttributeMetadata(decommissionAttributeMetadata)) .build(); + + // For a terminal status we clear any exclusions that were added as part of the decommission action + if (decommissionStatus.equals(DecommissionStatus.SUCCESSFUL) || decommissionStatus.equals(DecommissionStatus.FAILED)) { + newState = clearExclusionsAndGetState(newState); + } + return newState; } @Override diff --git a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionHelper.java b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionHelper.java new file mode 100644 index 0000000000000..8305bda545998 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionHelper.java @@ -0,0 +1,124 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.decommission; + +import org.opensearch.action.admin.cluster.configuration.AddVotingConfigExclusionsRequest; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.coordination.CoordinationMetadata.VotingConfigExclusion; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.Strings; +import org.opensearch.common.unit.TimeValue; + +import java.util.HashSet; +import java.util.Iterator; +import java.util.Set; + +import static org.opensearch.action.admin.cluster.configuration.VotingConfigExclusionsHelper.resolveVotingConfigExclusionsAndCheckMaximum; +import static org.opensearch.action.admin.cluster.configuration.VotingConfigExclusionsHelper.addExclusionAndGetState; + +/** + * Static helper utilities to execute decommission + * + * @opensearch.internal + */ +public class DecommissionHelper { + + static ClusterState registerDecommissionAttributeInClusterState( + ClusterState currentState, + DecommissionAttribute decommissionAttribute + ) { + DecommissionAttributeMetadata decommissionAttributeMetadata = new DecommissionAttributeMetadata(decommissionAttribute); + return ClusterState.builder(currentState) + .metadata(Metadata.builder(currentState.metadata()).decommissionAttributeMetadata(decommissionAttributeMetadata)) + .build(); + } + + static ClusterState deleteDecommissionAttributeInClusterState(ClusterState currentState) { + Metadata metadata = currentState.metadata(); + Metadata.Builder mdBuilder = Metadata.builder(metadata); + mdBuilder.removeCustom(DecommissionAttributeMetadata.TYPE); + return ClusterState.builder(currentState).metadata(mdBuilder).build(); + } + + static ClusterState addVotingConfigExclusionsForNodesToBeDecommissioned( + ClusterState
currentState, + Set<String> nodeIdsToBeExcluded, + TimeValue decommissionActionTimeout, + final int maxVotingConfigExclusions + ) { + AddVotingConfigExclusionsRequest request = new AddVotingConfigExclusionsRequest( + Strings.EMPTY_ARRAY, + nodeIdsToBeExcluded.toArray(String[]::new), + Strings.EMPTY_ARRAY, + decommissionActionTimeout + ); + Set<VotingConfigExclusion> resolvedExclusion = resolveVotingConfigExclusionsAndCheckMaximum( + request, + currentState, + maxVotingConfigExclusions + ); + return addExclusionAndGetState(currentState, resolvedExclusion, maxVotingConfigExclusions); + } + + static Set<DiscoveryNode> filterNodesWithDecommissionAttribute( + ClusterState clusterState, + DecommissionAttribute decommissionAttribute, + boolean onlyClusterManagerNodes + ) { + Set<DiscoveryNode> nodesWithDecommissionAttribute = new HashSet<>(); + Iterator<DiscoveryNode> nodesIter = onlyClusterManagerNodes + ? clusterState.nodes().getClusterManagerNodes().valuesIt() + : clusterState.nodes().getNodes().valuesIt(); + + while (nodesIter.hasNext()) { + final DiscoveryNode node = nodesIter.next(); + if (nodeHasDecommissionedAttribute(node, decommissionAttribute)) { + nodesWithDecommissionAttribute.add(node); + } + } + return nodesWithDecommissionAttribute; + } + + /** + * Utility method to check if the node has decommissioned attribute + * + * @param discoveryNode node to check on + * @param decommissionAttribute attribute to be checked with + * @return true if the node has the decommissioned attribute, false otherwise + */ + public static boolean nodeHasDecommissionedAttribute(DiscoveryNode discoveryNode, DecommissionAttribute decommissionAttribute) { + String nodeAttributeValue = discoveryNode.getAttributes().get(decommissionAttribute.attributeName()); + return nodeAttributeValue != null && nodeAttributeValue.equals(decommissionAttribute.attributeValue()); + } + + /** + * Utility method to check if the node is commissioned or not + * + * @param discoveryNode node to check on + * @param metadata current cluster metadata which will be used to check the commissioning status of the node + * @return true if the node is commissioned, false otherwise + */ + public static boolean nodeCommissioned(DiscoveryNode discoveryNode, Metadata metadata) { + DecommissionAttributeMetadata decommissionAttributeMetadata = metadata.decommissionAttributeMetadata(); + if (decommissionAttributeMetadata != null) { + DecommissionAttribute decommissionAttribute = decommissionAttributeMetadata.decommissionAttribute(); + DecommissionStatus status = decommissionAttributeMetadata.status(); + if (decommissionAttribute != null && status != null) { + if (nodeHasDecommissionedAttribute(discoveryNode, decommissionAttribute) + && (status.equals(DecommissionStatus.IN_PROGRESS) + || status.equals(DecommissionStatus.SUCCESSFUL) + || status.equals(DecommissionStatus.DRAINING))) { + return false; + } + } + } + return true; + } +} diff --git a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionService.java b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionService.java index 85030a1e902db..f36d7b3e06da9 100644 --- a/server/src/main/java/org/opensearch/cluster/decommission/DecommissionService.java +++ b/server/src/main/java/org/opensearch/cluster/decommission/DecommissionService.java @@ -18,9 +18,10 @@ import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionRequest; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateObserver; +import org.opensearch.cluster.ClusterStateObserver.Listener; import org.opensearch.cluster.ClusterStateUpdateTask; 
import org.opensearch.cluster.NotClusterManagerException; -import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.coordination.CoordinationMetadata; import org.opensearch.cluster.metadata.WeightedRoutingMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.routing.WeightedRouting; @@ -35,14 +36,19 @@ import org.opensearch.transport.TransportService; import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; import java.util.function.Predicate; import java.util.stream.Collectors; +import static org.opensearch.action.admin.cluster.configuration.TransportAddVotingConfigExclusionsAction.MAXIMUM_VOTING_CONFIG_EXCLUSIONS_SETTING; +import static org.opensearch.action.admin.cluster.configuration.VotingConfigExclusionsHelper.clearExclusionsAndGetState; +import static org.opensearch.cluster.decommission.DecommissionHelper.addVotingConfigExclusionsForNodesToBeDecommissioned; +import static org.opensearch.cluster.decommission.DecommissionHelper.deleteDecommissionAttributeInClusterState; +import static org.opensearch.cluster.decommission.DecommissionHelper.filterNodesWithDecommissionAttribute; +import static org.opensearch.cluster.decommission.DecommissionHelper.nodeHasDecommissionedAttribute; +import static org.opensearch.cluster.decommission.DecommissionHelper.registerDecommissionAttributeInClusterState; import static org.opensearch.cluster.routing.allocation.decider.AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING; import static org.opensearch.cluster.routing.allocation.decider.AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING; @@ -54,8 +60,7 @@ *
 * <ul>
 * <li>Initiates nodes decommissioning by adding custom metadata with the attribute and state as {@link DecommissionStatus#INIT}</li>
 * <li>Remove to-be-decommissioned cluster-manager eligible nodes from voting config and wait for its abdication if it is active leader</li>
- * <li>Triggers weigh away for nodes having given awareness attribute to drain.</li>
- * <li>Once weighed away, the service triggers nodes decommission. This marks the decommission status as {@link DecommissionStatus#IN_PROGRESS}</li>
+ * <li>After the draining timeout, the service triggers nodes decommission. This marks the decommission status as {@link DecommissionStatus#IN_PROGRESS}</li>
 * <li>Once the decommission is successful, the service clears the voting config and marks the status as {@link DecommissionStatus#SUCCESSFUL}</li>
 * <li>If service fails at any step, it makes best attempt to mark the status as {@link DecommissionStatus#FAILED} and to clear voting config exclusion</li>
 * </ul>
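The list above implies a forward-only lifecycle: INIT, then DRAINING, then IN_PROGRESS, then SUCCESSFUL, with FAILED reachable as a best-effort terminal state from any step. A minimal sketch of that progression, assuming only the statuses named in this javadoc (the transition table is illustrative, not the actual DecommissionStatus validation logic):

// Illustrative sketch only: forward-only progression of a decommission request.
// Status names mirror DecommissionStatus; the transition rules are an assumption
// drawn from the javadoc above, not the real metadata update validation.
class DecommissionLifecycleSketch {
    enum Status { INIT, DRAINING, IN_PROGRESS, SUCCESSFUL, FAILED }

    static boolean validTransition(Status from, Status to) {
        if (to == Status.FAILED) {
            return true; // a best-effort FAILED update is attempted from any step
        }
        switch (from) {
            case INIT:
                return to == Status.DRAINING;
            case DRAINING:
                return to == Status.IN_PROGRESS;
            case IN_PROGRESS:
                return to == Status.SUCCESSFUL;
            default:
                return false; // SUCCESSFUL and FAILED are terminal
        }
    }
}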
@@ -72,6 +77,7 @@ public class DecommissionService { private final DecommissionController decommissionController; private volatile List awarenessAttributes; private volatile Map> forcedAwarenessAttributes; + private volatile int maxVotingConfigExclusions; @Inject public DecommissionService( @@ -94,6 +100,8 @@ public DecommissionService( CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING, this::setForcedAwarenessAttributes ); + maxVotingConfigExclusions = MAXIMUM_VOTING_CONFIG_EXCLUSIONS_SETTING.get(settings); + clusterSettings.addSettingsUpdateConsumer(MAXIMUM_VOTING_CONFIG_EXCLUSIONS_SETTING, this::setMaxVotingConfigExclusions); } private void setAwarenessAttributes(List awarenessAttributes) { @@ -112,6 +120,10 @@ private void setForcedAwarenessAttributes(Settings forceSettings) { this.forcedAwarenessAttributes = forcedAwarenessAttributes; } + private void setMaxVotingConfigExclusions(int maxVotingConfigExclusions) { + this.maxVotingConfigExclusions = maxVotingConfigExclusions; + } + /** * Starts the new decommission request and registers the metadata with status as {@link DecommissionStatus#INIT} * Once the status is updated, it tries to exclude to-be-decommissioned cluster manager eligible nodes from Voting Configuration @@ -126,20 +138,37 @@ public void startDecommissionAction( final DecommissionAttribute decommissionAttribute = decommissionRequest.getDecommissionAttribute(); // register the metadata with status as INIT as first step clusterService.submitStateUpdateTask("decommission [" + decommissionAttribute + "]", new ClusterStateUpdateTask(Priority.URGENT) { + private Set nodeIdsToBeExcluded; + @Override public ClusterState execute(ClusterState currentState) { // validates if correct awareness attributes and forced awareness attribute set to the cluster before starting action validateAwarenessAttribute(decommissionAttribute, awarenessAttributes, forcedAwarenessAttributes); DecommissionAttributeMetadata decommissionAttributeMetadata = currentState.metadata().decommissionAttributeMetadata(); - // check that request is eligible to proceed + // check that request is eligible to proceed and attribute is weighed away ensureEligibleRequest(decommissionAttributeMetadata, decommissionAttribute); - // ensure attribute is weighed away ensureToBeDecommissionedAttributeWeighedAway(currentState, decommissionAttribute); - decommissionAttributeMetadata = new DecommissionAttributeMetadata(decommissionAttribute); - logger.info("registering decommission metadata [{}] to execute action", decommissionAttributeMetadata.toString()); - return ClusterState.builder(currentState) - .metadata(Metadata.builder(currentState.metadata()).decommissionAttributeMetadata(decommissionAttributeMetadata)) - .build(); + + ClusterState newState = registerDecommissionAttributeInClusterState(currentState, decommissionAttribute); + // add all 'to-be-decommissioned' cluster manager eligible nodes to voting config exclusion + nodeIdsToBeExcluded = filterNodesWithDecommissionAttribute(currentState, decommissionAttribute, true).stream() + .map(DiscoveryNode::getId) + .collect(Collectors.toSet()); + logger.info( + "resolved cluster manager eligible nodes [{}] that should be added to voting config exclusion", + nodeIdsToBeExcluded.toString() + ); + newState = addVotingConfigExclusionsForNodesToBeDecommissioned( + newState, + nodeIdsToBeExcluded, + TimeValue.timeValueSeconds(120), // TODO - update it with request timeout + maxVotingConfigExclusions + ); + logger.debug( + "registering decommission metadata [{}] to execute 
action", + newState.metadata().decommissionAttributeMetadata().toString() + ); + return newState; } @Override @@ -158,160 +187,111 @@ public void onFailure(String source, Exception e) { public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { DecommissionAttributeMetadata decommissionAttributeMetadata = newState.metadata().decommissionAttributeMetadata(); assert decommissionAttribute.equals(decommissionAttributeMetadata.decommissionAttribute()); - logger.info( + assert decommissionAttributeMetadata.status().equals(DecommissionStatus.INIT); + assert newState.getVotingConfigExclusions() + .stream() + .map(CoordinationMetadata.VotingConfigExclusion::getNodeId) + .collect(Collectors.toSet()) + .containsAll(nodeIdsToBeExcluded); + logger.debug( "registered decommission metadata for attribute [{}] with status [{}]", decommissionAttributeMetadata.decommissionAttribute(), decommissionAttributeMetadata.status() ); - decommissionClusterManagerNodes(decommissionRequest, listener); - } - }); - } - - private synchronized void decommissionClusterManagerNodes( - final DecommissionRequest decommissionRequest, - ActionListener listener - ) { - final DecommissionAttribute decommissionAttribute = decommissionRequest.getDecommissionAttribute(); - ClusterState state = clusterService.getClusterApplierService().state(); - // since here metadata is already registered with INIT, we can guarantee that no new node with decommission attribute can further - // join the cluster - // and hence in further request lifecycle we are sure that no new to-be-decommission leader will join the cluster - Set clusterManagerNodesToBeDecommissioned = filterNodesWithDecommissionAttribute(state, decommissionAttribute, true); - logger.info( - "resolved cluster manager eligible nodes [{}] that should be removed from Voting Configuration", - clusterManagerNodesToBeDecommissioned.toString() - ); - - // remove all 'to-be-decommissioned' cluster manager eligible nodes from voting config - Set nodeIdsToBeExcluded = clusterManagerNodesToBeDecommissioned.stream() - .map(DiscoveryNode::getId) - .collect(Collectors.toSet()); - - final Predicate allNodesRemovedAndAbdicated = clusterState -> { - final Set votingConfigNodeIds = clusterState.getLastCommittedConfiguration().getNodeIds(); - return nodeIdsToBeExcluded.stream().noneMatch(votingConfigNodeIds::contains) - && nodeIdsToBeExcluded.contains(clusterState.nodes().getClusterManagerNodeId()) == false - && clusterState.nodes().getClusterManagerNodeId() != null; - }; - - ActionListener exclusionListener = new ActionListener() { - @Override - public void onResponse(Void unused) { - if (clusterService.getClusterApplierService().state().nodes().isLocalNodeElectedClusterManager()) { - if (nodeHasDecommissionedAttribute(clusterService.localNode(), decommissionAttribute)) { - // this is an unexpected state, as after exclusion of nodes having decommission attribute, - // this local node shouldn't have had the decommission attribute. 
Will send the failure response to the user - String errorMsg = - "unexpected state encountered [local node is to-be-decommissioned leader] while executing decommission request"; - logger.error(errorMsg); - // will go ahead and clear the voting config and mark the status as false - clearVotingConfigExclusionAndUpdateStatus(false, false); - // we can send the failure response to the user here - listener.onFailure(new IllegalStateException(errorMsg)); - } else { - logger.info("will attempt to fail decommissioned nodes as local node is eligible to process the request"); - // we are good here to send the response now as the request is processed by an eligible active leader - // and to-be-decommissioned cluster manager is no more part of Voting Configuration and no more to-be-decommission - // nodes can be part of Voting Config - listener.onResponse(new DecommissionResponse(true)); - drainNodesWithDecommissionedAttribute(decommissionRequest); - } - } else { - // explicitly calling listener.onFailure with NotClusterManagerException as the local node is not the cluster manager - // this will ensures that request is retried until cluster manager times out - logger.info( - "local node is not eligible to process the request, " - + "throwing NotClusterManagerException to attempt a retry on an eligible node" - ); - listener.onFailure( - new NotClusterManagerException( - "node [" - + transportService.getLocalNode().toString() - + "] not eligible to execute decommission request. Will retry until timeout." - ) - ); - } - } - @Override - public void onFailure(Exception e) { - listener.onFailure(e); - // attempting to mark the status as FAILED - clearVotingConfigExclusionAndUpdateStatus(false, false); - } - }; - - if (allNodesRemovedAndAbdicated.test(state)) { - exclusionListener.onResponse(null); - } else { - logger.debug("sending transport request to remove nodes [{}] from voting config", nodeIdsToBeExcluded.toString()); - // send a transport request to exclude to-be-decommissioned cluster manager eligible nodes from voting config - decommissionController.excludeDecommissionedNodesFromVotingConfig(nodeIdsToBeExcluded, new ActionListener() { - @Override - public void onResponse(Void unused) { - logger.info( - "successfully removed decommissioned cluster manager eligible nodes [{}] from voting config ", - clusterManagerNodesToBeDecommissioned.toString() - ); - final ClusterStateObserver abdicationObserver = new ClusterStateObserver( - clusterService, - TimeValue.timeValueSeconds(60L), - logger, - threadPool.getThreadContext() - ); - final ClusterStateObserver.Listener abdicationListener = new ClusterStateObserver.Listener() { - @Override - public void onNewClusterState(ClusterState state) { - logger.debug("to-be-decommissioned node is no more the active leader"); - exclusionListener.onResponse(null); - } - - @Override - public void onClusterServiceClose() { - String errorMsg = "cluster service closed while waiting for abdication of to-be-decommissioned leader"; - logger.warn(errorMsg); - listener.onFailure(new DecommissioningFailedException(decommissionAttribute, errorMsg)); - } + final ClusterStateObserver observer = new ClusterStateObserver( + clusterService, + TimeValue.timeValueSeconds(120), // TODO - update it with request timeout + logger, + threadPool.getThreadContext() + ); - @Override - public void onTimeout(TimeValue timeout) { - logger.info("timed out while waiting for abdication of to-be-decommissioned leader"); - clearVotingConfigExclusionAndUpdateStatus(false, false); + final Predicate 
allNodesRemovedAndAbdicated = clusterState -> { + final Set<String> votingConfigNodeIds = clusterState.getLastCommittedConfiguration().getNodeIds(); + return nodeIdsToBeExcluded.stream().noneMatch(votingConfigNodeIds::contains) + && clusterState.nodes().getClusterManagerNodeId() != null + && nodeIdsToBeExcluded.contains(clusterState.nodes().getClusterManagerNodeId()) == false; + }; + + final Listener clusterStateListener = new Listener() { + @Override + public void onNewClusterState(ClusterState state) { + logger.info( + "successfully removed decommissioned cluster manager eligible nodes [{}] from voting config ", + nodeIdsToBeExcluded.toString() + ); + if (state.nodes().isLocalNodeElectedClusterManager()) { + if (nodeHasDecommissionedAttribute(clusterService.localNode(), decommissionAttribute)) { + // this is an unexpected state, as after exclusion of nodes having decommission attribute, + // this local node shouldn't have had the decommission attribute. Will send the failure response to the user + String errorMsg = + "unexpected state encountered [local node is to-be-decommissioned leader] while executing decommission request"; + logger.error(errorMsg); + // will go ahead and clear the voting config and mark the status as failed + decommissionController.updateMetadataWithDecommissionStatus( + DecommissionStatus.FAILED, + statusUpdateListener() + ); + listener.onFailure(new IllegalStateException(errorMsg)); + } else { + logger.info("will proceed to drain decommissioned nodes as local node is eligible to process the request"); + // we are good here to send the response now as the request is processed by an eligible active leader + // and the to-be-decommissioned cluster manager is no longer part of the Voting Configuration + listener.onResponse(new DecommissionResponse(true)); + drainNodesWithDecommissionedAttribute(decommissionRequest); + } + } else { + // explicitly calling listener.onFailure with NotClusterManagerException as the local node is not the leader + // this ensures that the request is retried until the cluster manager times out + logger.info( + "local node is not eligible to process the request, " + + "throwing NotClusterManagerException to attempt a retry on an eligible node" + ); + listener.onFailure( + new NotClusterManagerException( + "node [" + + transportService.getLocalNode().toString() + + "] not eligible to execute decommission request. Will retry until timeout." 
) ); } - }; - // In case the cluster state is already processed even before this code is executed - // therefore testing first before attaching the listener - ClusterState currentState = clusterService.getClusterApplierService().state(); - if (allNodesRemovedAndAbdicated.test(currentState)) { - abdicationListener.onNewClusterState(currentState); - } else { - logger.debug("waiting to abdicate to-be-decommissioned leader"); - abdicationObserver.waitForNextChange(abdicationListener, allNodesRemovedAndAbdicated); } - } - @Override - public void onFailure(Exception e) { - logger.error( - new ParameterizedMessage( - "failure in removing to-be-decommissioned cluster manager eligible nodes [{}] from voting config", - nodeIdsToBeExcluded.toString() - ), - e - ); - exclusionListener.onFailure(e); + @Override + public void onClusterServiceClose() { + String errorMsg = "cluster service closed while waiting for abdication of to-be-decommissioned leader"; + logger.error(errorMsg); + listener.onFailure(new DecommissioningFailedException(decommissionAttribute, errorMsg)); + } + + @Override + public void onTimeout(TimeValue timeout) { + String errorMsg = "timed out [" + + timeout.toString() + + "] while removing to-be-decommissioned cluster manager eligible nodes [" + + nodeIdsToBeExcluded.toString() + + "] from voting config"; + logger.error(errorMsg); + listener.onFailure(new OpenSearchTimeoutException(errorMsg)); + // will go ahead and clear the voting config and mark the status as failed + decommissionController.updateMetadataWithDecommissionStatus(DecommissionStatus.FAILED, statusUpdateListener()); + } + }; + + // In case the cluster state is already processed even before this code is executed + // therefore testing first before attaching the listener + if (allNodesRemovedAndAbdicated.test(newState)) { + clusterStateListener.onNewClusterState(newState); + } else { + logger.debug("waiting to abdicate to-be-decommissioned leader"); + observer.waitForNextChange(clusterStateListener, allNodesRemovedAndAbdicated); // TODO add request timeout here } - }); - } + } + }); } + // TODO - after registering the new status check if any node which is not excluded still present in decommissioned zone. If yes, start + // the action again (retry) void drainNodesWithDecommissionedAttribute(DecommissionRequest decommissionRequest) { ClusterState state = clusterService.getClusterApplierService().state(); Set decommissionedNodes = filterNodesWithDecommissionAttribute( @@ -342,8 +322,10 @@ public void onFailure(Exception e) { ), e ); - // since we are not able to update the status, we will clear the voting config exclusion we have set earlier - clearVotingConfigExclusionAndUpdateStatus(false, false); + // This decommission state update call will most likely fail as the state update call to 'DRAINING' + // failed. 
But attempting it anyways as FAILED update might still pass as it doesn't have dependency on + // the current state + decommissionController.updateMetadataWithDecommissionStatus(DecommissionStatus.FAILED, statusUpdateListener()); } }); } @@ -385,12 +367,17 @@ public void onResponse(DecommissionStatus status) { new ActionListener() { @Override public void onResponse(Void unused) { - clearVotingConfigExclusionAndUpdateStatus(true, true); + // will clear the voting config exclusion and mark the status as successful + decommissionController.updateMetadataWithDecommissionStatus( + DecommissionStatus.SUCCESSFUL, + statusUpdateListener() + ); } @Override public void onFailure(Exception e) { - clearVotingConfigExclusionAndUpdateStatus(false, false); + // will go ahead and clear the voting config and mark the status as failed + decommissionController.updateMetadataWithDecommissionStatus(DecommissionStatus.FAILED, statusUpdateListener()); } } ); @@ -406,51 +393,12 @@ public void onFailure(Exception e) { ), e ); - // since we are not able to update the status, we will clear the voting config exclusion we have set earlier - clearVotingConfigExclusionAndUpdateStatus(false, false); - } - }); - } - - private void clearVotingConfigExclusionAndUpdateStatus(boolean decommissionSuccessful, boolean waitForRemoval) { - decommissionController.clearVotingConfigExclusion(new ActionListener() { - @Override - public void onResponse(Void unused) { - logger.info( - "successfully cleared voting config exclusion after completing decommission action, proceeding to update metadata" - ); - DecommissionStatus updateStatusWith = decommissionSuccessful ? DecommissionStatus.SUCCESSFUL : DecommissionStatus.FAILED; - decommissionController.updateMetadataWithDecommissionStatus(updateStatusWith, statusUpdateListener()); - } - - @Override - public void onFailure(Exception e) { - logger.debug( - new ParameterizedMessage("failure in clearing voting config exclusion after processing decommission request"), - e - ); + // This decommission state update call will most likely fail as the state update call to 'DRAINING' + // failed. But attempting it anyways as FAILED update might still pass as it doesn't have dependency on + // the current state decommissionController.updateMetadataWithDecommissionStatus(DecommissionStatus.FAILED, statusUpdateListener()); } - }, waitForRemoval); - } - - private Set filterNodesWithDecommissionAttribute( - ClusterState clusterState, - DecommissionAttribute decommissionAttribute, - boolean onlyClusterManagerNodes - ) { - Set nodesWithDecommissionAttribute = new HashSet<>(); - Iterator nodesIter = onlyClusterManagerNodes - ? 
clusterState.nodes().getClusterManagerNodes().valuesIt() - : clusterState.nodes().getNodes().valuesIt(); - - while (nodesIter.hasNext()) { - final DiscoveryNode node = nodesIter.next(); - if (nodeHasDecommissionedAttribute(node, decommissionAttribute)) { - nodesWithDecommissionAttribute.add(node); - } - } - return nodesWithDecommissionAttribute; + }); } private static void validateAwarenessAttribute( @@ -577,80 +525,28 @@ public void startRecommissionAction(final ActionListener() { - @Override - public void onResponse(Void unused) { - logger.info("successfully cleared voting config exclusion for deleting the decommission."); - deleteDecommissionState(listener); - } - - @Override - public void onFailure(Exception e) { - logger.error("Failure in clearing voting config during delete_decommission request.", e); - listener.onFailure(e); - } - }, false); - } - - void deleteDecommissionState(ActionListener listener) { - clusterService.submitStateUpdateTask("delete_decommission_state", new ClusterStateUpdateTask(Priority.URGENT) { + clusterService.submitStateUpdateTask("delete-decommission-state", new ClusterStateUpdateTask(Priority.URGENT) { @Override public ClusterState execute(ClusterState currentState) { + ClusterState newState = clearExclusionsAndGetState(currentState); logger.info("Deleting the decommission attribute from the cluster state"); - Metadata metadata = currentState.metadata(); - Metadata.Builder mdBuilder = Metadata.builder(metadata); - mdBuilder.removeCustom(DecommissionAttributeMetadata.TYPE); - return ClusterState.builder(currentState).metadata(mdBuilder).build(); + newState = deleteDecommissionAttributeInClusterState(newState); + return newState; } @Override public void onFailure(String source, Exception e) { - logger.error(() -> new ParameterizedMessage("Failed to clear decommission attribute. [{}]", source), e); + logger.error(() -> new ParameterizedMessage("failure during recommission action [{}]", source), e); listener.onFailure(e); } @Override public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) { - // Cluster state processed for deleting the decommission attribute. 
+ logger.info("successfully cleared voting config exclusion and decommissioned attribute"); assert newState.metadata().decommissionAttributeMetadata() == null; + assert newState.coordinationMetadata().getVotingConfigExclusions().isEmpty(); listener.onResponse(new DeleteDecommissionStateResponse(true)); } }); } - - /** - * Utility method to check if the node has decommissioned attribute - * - * @param discoveryNode node to check on - * @param decommissionAttribute attribute to be checked with - * @return true or false based on whether node has decommissioned attribute - */ - public static boolean nodeHasDecommissionedAttribute(DiscoveryNode discoveryNode, DecommissionAttribute decommissionAttribute) { - String nodeAttributeValue = discoveryNode.getAttributes().get(decommissionAttribute.attributeName()); - return nodeAttributeValue != null && nodeAttributeValue.equals(decommissionAttribute.attributeValue()); - } - - /** - * Utility method to check if the node is commissioned or not - * - * @param discoveryNode node to check on - * @param metadata metadata present current which will be used to check the commissioning status of the node - * @return if the node is commissioned or not - */ - public static boolean nodeCommissioned(DiscoveryNode discoveryNode, Metadata metadata) { - DecommissionAttributeMetadata decommissionAttributeMetadata = metadata.decommissionAttributeMetadata(); - if (decommissionAttributeMetadata != null) { - DecommissionAttribute decommissionAttribute = decommissionAttributeMetadata.decommissionAttribute(); - DecommissionStatus status = decommissionAttributeMetadata.status(); - if (decommissionAttribute != null && status != null) { - if (nodeHasDecommissionedAttribute(discoveryNode, decommissionAttribute) - && (status.equals(DecommissionStatus.IN_PROGRESS) - || status.equals(DecommissionStatus.SUCCESSFUL) - || status.equals(DecommissionStatus.DRAINING))) { - return false; - } - } - } - return true; - } } diff --git a/server/src/main/java/org/opensearch/cluster/routing/UnsupportedWeightedRoutingStateException.java b/server/src/main/java/org/opensearch/cluster/routing/UnsupportedWeightedRoutingStateException.java new file mode 100644 index 0000000000000..fd4fd4163ede6 --- /dev/null +++ b/server/src/main/java/org/opensearch/cluster/routing/UnsupportedWeightedRoutingStateException.java @@ -0,0 +1,35 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.routing; + +import org.opensearch.OpenSearchException; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.rest.RestStatus; + +import java.io.IOException; + +/** + * Thrown when failing to update the routing weight due to an unsupported state. See {@link WeightedRoutingService} for more details. + * + * @opensearch.internal + */ +public class UnsupportedWeightedRoutingStateException extends OpenSearchException { + public UnsupportedWeightedRoutingStateException(StreamInput in) throws IOException { + super(in); + } + + public UnsupportedWeightedRoutingStateException(String msg, Object... 
args) { + super(msg, args); + } + + @Override + public RestStatus status() { + return RestStatus.CONFLICT; + } +} diff --git a/server/src/main/java/org/opensearch/cluster/routing/WeightedRoutingService.java b/server/src/main/java/org/opensearch/cluster/routing/WeightedRoutingService.java index 6acb4a1e832cb..2b5961c7340c1 100644 --- a/server/src/main/java/org/opensearch/cluster/routing/WeightedRoutingService.java +++ b/server/src/main/java/org/opensearch/cluster/routing/WeightedRoutingService.java @@ -19,6 +19,7 @@ import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateUpdateTask; import org.opensearch.cluster.ack.ClusterStateUpdateResponse; +import org.opensearch.cluster.decommission.DecommissionAttribute; import org.opensearch.cluster.decommission.DecommissionAttributeMetadata; import org.opensearch.cluster.decommission.DecommissionStatus; import org.opensearch.cluster.metadata.Metadata; @@ -32,10 +33,16 @@ import org.opensearch.common.settings.Settings; import org.opensearch.threadpool.ThreadPool; +import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Locale; +import java.util.Map; +import java.util.Objects; +import java.util.Set; import static org.opensearch.action.ValidateActions.addValidationError; +import static org.opensearch.cluster.routing.allocation.decider.AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING; /** * * Service responsible for updating cluster state metadata with weighted routing weights @@ -45,6 +52,8 @@ public class WeightedRoutingService { private final ClusterService clusterService; private final ThreadPool threadPool; private volatile List awarenessAttributes; + private volatile Map> forcedAwarenessAttributes; + private static final Double DECOMMISSIONED_AWARENESS_VALUE_WEIGHT = 0.0; @Inject public WeightedRoutingService( @@ -60,6 +69,11 @@ public WeightedRoutingService( AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING, this::setAwarenessAttributes ); + setForcedAwarenessAttributes(CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING.get(settings)); + clusterSettings.addSettingsUpdateConsumer( + CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING, + this::setForcedAwarenessAttributes + ); } public void registerWeightedRoutingMetadata( @@ -70,8 +84,10 @@ public void registerWeightedRoutingMetadata( clusterService.submitStateUpdateTask("update_weighted_routing", new ClusterStateUpdateTask(Priority.URGENT) { @Override public ClusterState execute(ClusterState currentState) { - // verify currently no decommission action is ongoing - ensureNoOngoingDecommissionAction(currentState); + // verify that request object has weights for all discovered and forced awareness values + ensureWeightsSetForAllDiscoveredAndForcedAwarenessValues(currentState, request); + // verify weights will not be updated for a decommissioned attribute + ensureDecommissionedAttributeHasZeroWeight(currentState, request); Metadata metadata = currentState.metadata(); Metadata.Builder mdBuilder = Metadata.builder(currentState.metadata()); WeightedRoutingMetadata weightedRoutingMetadata = metadata.custom(WeightedRoutingMetadata.TYPE); @@ -148,6 +164,18 @@ private void setAwarenessAttributes(List awarenessAttributes) { this.awarenessAttributes = awarenessAttributes; } + private void setForcedAwarenessAttributes(Settings forceSettings) { + Map> forcedAwarenessAttributes = new HashMap<>(); + Map forceGroups = forceSettings.getAsGroups(); + for (Map.Entry entry : 
forceGroups.entrySet()) { + List aValues = entry.getValue().getAsList("values"); + if (aValues.size() > 0) { + forcedAwarenessAttributes.put(entry.getKey(), aValues); + } + } + this.forcedAwarenessAttributes = forcedAwarenessAttributes; + } + public void verifyAwarenessAttribute(String attributeName) { if (getAwarenessAttributes().contains(attributeName) == false) { ActionRequestValidationException validationException = null; @@ -159,13 +187,62 @@ public void verifyAwarenessAttribute(String attributeName) { } } - public void ensureNoOngoingDecommissionAction(ClusterState state) { + private void ensureWeightsSetForAllDiscoveredAndForcedAwarenessValues(ClusterState state, ClusterPutWeightedRoutingRequest request) { + String attributeName = request.getWeightedRouting().attributeName(); + Set discoveredAwarenessValues = new HashSet<>(); + state.nodes().forEach(node -> { + if (node.getAttributes().containsKey(attributeName)) { + discoveredAwarenessValues.add(node.getAttributes().get(attributeName)); + } + }); + Set allAwarenessValues; + if (forcedAwarenessAttributes.get(attributeName) == null) { + allAwarenessValues = new HashSet<>(); + } else { + allAwarenessValues = new HashSet<>(forcedAwarenessAttributes.get(attributeName)); + } + allAwarenessValues.addAll(discoveredAwarenessValues); + allAwarenessValues.forEach(awarenessValue -> { + if (request.getWeightedRouting().weights().containsKey(awarenessValue) == false) { + throw new UnsupportedWeightedRoutingStateException( + "weight for [" + awarenessValue + "] is not set and it is part of forced awareness value or a node has this attribute." + ); + } + }); + } + + private void ensureDecommissionedAttributeHasZeroWeight(ClusterState state, ClusterPutWeightedRoutingRequest request) { DecommissionAttributeMetadata decommissionAttributeMetadata = state.metadata().decommissionAttributeMetadata(); - if (decommissionAttributeMetadata != null && decommissionAttributeMetadata.status().equals(DecommissionStatus.FAILED) == false) { - throw new IllegalStateException( - "a decommission action is ongoing with status [" - + decommissionAttributeMetadata.status().status() - + "], cannot update weight during this state" + if (decommissionAttributeMetadata == null || decommissionAttributeMetadata.status().equals(DecommissionStatus.FAILED)) { + // here either there's no decommission action is ongoing or it is in failed state. In this case, we will allow weight update + return; + } + DecommissionAttribute decommissionAttribute = decommissionAttributeMetadata.decommissionAttribute(); + WeightedRouting weightedRouting = request.getWeightedRouting(); + if (weightedRouting.attributeName().equals(decommissionAttribute.attributeName()) == false) { + // this is unexpected when a different attribute is requested for decommission and weight update is on another attribute + throw new UnsupportedWeightedRoutingStateException( + "decommission action ongoing for attribute [{}], cannot update weight for [{}]", + decommissionAttribute.attributeName(), + weightedRouting.attributeName() + ); + } + if (weightedRouting.weights().containsKey(decommissionAttribute.attributeValue()) == false) { + // weight of an attribute undergoing decommission must be specified + throw new UnsupportedWeightedRoutingStateException( + "weight for [{}] is not specified. 
Please specify its weight to [{}] as it is under decommission action", + decommissionAttribute.attributeValue(), + DECOMMISSIONED_AWARENESS_VALUE_WEIGHT + ); + } + if (Objects.equals( + weightedRouting.weights().get(decommissionAttribute.attributeValue()), + DECOMMISSIONED_AWARENESS_VALUE_WEIGHT + ) == false) { + throw new UnsupportedWeightedRoutingStateException( + "weight for [{}] must be set to [{}] as it is under decommission action", + decommissionAttribute.attributeValue(), + DECOMMISSIONED_AWARENESS_VALUE_WEIGHT ); } } diff --git a/server/src/main/java/org/opensearch/cluster/service/ClusterManagerTaskThrottler.java b/server/src/main/java/org/opensearch/cluster/service/ClusterManagerTaskThrottler.java index 0503db713258d..249b4ff5316d9 100644 --- a/server/src/main/java/org/opensearch/cluster/service/ClusterManagerTaskThrottler.java +++ b/server/src/main/java/org/opensearch/cluster/service/ClusterManagerTaskThrottler.java @@ -108,10 +108,12 @@ public boolean isThrottlingEnabled() { } void validateSetting(final Settings settings) { - if (minNodeVersionSupplier.get().compareTo(Version.V_2_4_0) < 0) { - throw new IllegalArgumentException("All the nodes in cluster should be on version later than or equal to 2.4.0"); - } Map groups = settings.getAsGroups(); + if (groups.size() > 0) { + if (minNodeVersionSupplier.get().compareTo(Version.V_2_4_0) < 0) { + throw new IllegalArgumentException("All the nodes in cluster should be on version later than or equal to 2.4.0"); + } + } for (String key : groups.keySet()) { if (!THROTTLING_TASK_KEYS.containsKey(key)) { throw new IllegalArgumentException("Cluster manager task throttling is not configured for given task type: " + key); diff --git a/server/src/main/java/org/opensearch/common/bytes/BytesReference.java b/server/src/main/java/org/opensearch/common/bytes/BytesReference.java index 85dcf949d479e..97100f905315b 100644 --- a/server/src/main/java/org/opensearch/common/bytes/BytesReference.java +++ b/server/src/main/java/org/opensearch/common/bytes/BytesReference.java @@ -122,8 +122,13 @@ static BytesReference fromByteBuffers(ByteBuffer[] buffers) { * Returns BytesReference composed of the provided ByteBuffer. 
*/ static BytesReference fromByteBuffer(ByteBuffer buffer) { - assert buffer.hasArray(); - return new BytesArray(buffer.array(), buffer.arrayOffset() + buffer.position(), buffer.remaining()); + if (buffer.hasArray()) { + return new BytesArray(buffer.array(), buffer.arrayOffset() + buffer.position(), buffer.remaining()); + } else { + final byte[] array = new byte[buffer.remaining()]; + buffer.asReadOnlyBuffer().get(array, 0, buffer.remaining()); + return new BytesArray(array); + } } /** diff --git a/server/src/main/java/org/opensearch/common/lucene/Lucene.java b/server/src/main/java/org/opensearch/common/lucene/Lucene.java index 66a18ee0bddfb..1f9fe917158b9 100644 --- a/server/src/main/java/org/opensearch/common/lucene/Lucene.java +++ b/server/src/main/java/org/opensearch/common/lucene/Lucene.java @@ -109,7 +109,7 @@ * @opensearch.internal */ public class Lucene { - public static final String LATEST_CODEC = "Lucene94"; + public static final String LATEST_CODEC = "Lucene95"; public static final String SOFT_DELETES_FIELD = "__soft_deletes"; diff --git a/server/src/main/java/org/opensearch/common/settings/AbstractScopedSettings.java b/server/src/main/java/org/opensearch/common/settings/AbstractScopedSettings.java index a75d4f035b790..8a19d309975df 100644 --- a/server/src/main/java/org/opensearch/common/settings/AbstractScopedSettings.java +++ b/server/src/main/java/org/opensearch/common/settings/AbstractScopedSettings.java @@ -121,8 +121,8 @@ protected AbstractScopedSettings( keySettings.putIfAbsent(setting.getKey(), setting); } } - this.complexMatchers = Collections.unmodifiableMap(complexMatchers); - this.keySettings = Collections.unmodifiableMap(keySettings); + this.complexMatchers = complexMatchers; + this.keySettings = keySettings; } protected void validateSettingKey(Setting setting) { @@ -144,6 +144,23 @@ protected AbstractScopedSettings(Settings nodeSettings, Settings scopeSettings, settingUpdaters.addAll(other.settingUpdaters); } + public boolean registerSetting(Setting setting) { + validateSettingKey(setting); + if (setting.hasComplexMatcher()) { + return setting != complexMatchers.putIfAbsent(setting.getKey(), setting); + } else { + return setting != keySettings.putIfAbsent(setting.getKey(), setting); + } + } + + public boolean unregisterSetting(Setting setting) { + if (setting.hasComplexMatcher()) { + return setting != complexMatchers.remove(setting.getKey()); + } else { + return setting != keySettings.remove(setting.getKey()); + } + } + /** * Returns true iff the given key is a valid settings key otherwise false */ diff --git a/server/src/main/java/org/opensearch/common/settings/SettingsModule.java b/server/src/main/java/org/opensearch/common/settings/SettingsModule.java index 7b4dfb7d64bb6..df16c5a499ebe 100644 --- a/server/src/main/java/org/opensearch/common/settings/SettingsModule.java +++ b/server/src/main/java/org/opensearch/common/settings/SettingsModule.java @@ -179,6 +179,45 @@ public void configure(Binder binder) { binder.bind(IndexScopedSettings.class).toInstance(indexScopedSettings); } + /** + * Dynamically registers a new Setting at Runtime. This method is mostly used by plugins/extensions + * to register new settings at runtime. Settings can be of Node Scope or Index Scope. + * @param setting which is being registered in the cluster. 
+ * @return boolean value is set to true when successfully registered, else returns false + */ + public boolean registerDynamicSetting(Setting setting) { + boolean onNodeSetting = false; + boolean onIndexSetting = false; + try { + if (setting.hasNodeScope()) { + onNodeSetting = clusterSettings.registerSetting(setting); + } + if (setting.hasIndexScope()) { + onIndexSetting = indexScopedSettings.registerSetting(setting); + } + try { + registerSetting(setting); + if (onNodeSetting || onIndexSetting) { + logger.info("Registered new Setting: " + setting.getKey() + " successfully "); + return true; + } + } catch (IllegalArgumentException ex) { + if (onNodeSetting) { + clusterSettings.unregisterSetting(setting); + } + + if (onIndexSetting) { + indexScopedSettings.unregisterSetting(setting); + } + throw ex; + } + } catch (Exception e) { + logger.error("Could not register setting " + setting.getKey()); + throw new SettingsException("Could not register setting:" + setting.getKey()); + } + return false; + } + /** * Registers a new setting. This method should be used by plugins in order to expose any custom settings the plugin defines. * Unless a setting is registered the setting is unusable. If a setting is never the less specified the node will reject diff --git a/server/src/main/java/org/opensearch/common/unit/Fuzziness.java b/server/src/main/java/org/opensearch/common/unit/Fuzziness.java index c3b6ea6b8c23d..28947b3936843 100644 --- a/server/src/main/java/org/opensearch/common/unit/Fuzziness.java +++ b/server/src/main/java/org/opensearch/common/unit/Fuzziness.java @@ -139,6 +139,16 @@ public static Fuzziness build(Object fuzziness) { return new Fuzziness(string); } + /*** + * Creates a {@link Fuzziness} instance from lowDistance and highDistance. + * where the edit distance is 0 for strings shorter than lowDistance, + * 1 for strings where its length between lowDistance and highDistance (inclusive), + * and 2 for strings longer than highDistance. + */ + public static Fuzziness customAuto(int lowDistance, int highDistance) { + return new Fuzziness("AUTO", lowDistance, highDistance); + } + private static Fuzziness parseCustomAuto(final String string) { assert string.toUpperCase(Locale.ROOT).startsWith(AUTO.asString() + ":"); String[] fuzzinessLimit = string.substring(AUTO.asString().length() + 1).split(","); diff --git a/server/src/main/java/org/opensearch/discovery/PluginRequest.java b/server/src/main/java/org/opensearch/discovery/PluginRequest.java new file mode 100644 index 0000000000000..7992de4342d86 --- /dev/null +++ b/server/src/main/java/org/opensearch/discovery/PluginRequest.java @@ -0,0 +1,76 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.discovery; + +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.extensions.DiscoveryExtensionNode; +import org.opensearch.transport.TransportRequest; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; + +/** + * PluginRequest to initialize plugin + * + * @opensearch.internal + */ +public class PluginRequest extends TransportRequest { + private final DiscoveryNode sourceNode; + /* + * TODO change DiscoveryNode to Extension information + */ + private final List<DiscoveryExtensionNode> extensions; + + public PluginRequest(DiscoveryNode sourceNode, List<DiscoveryExtensionNode> extensions) { + this.sourceNode = sourceNode; + this.extensions = extensions; + } + + public PluginRequest(StreamInput in) throws IOException { + super(in); + sourceNode = new DiscoveryNode(in); + extensions = in.readList(DiscoveryExtensionNode::new); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + sourceNode.writeTo(out); + out.writeList(extensions); + } + + public List<DiscoveryExtensionNode> getExtensions() { + return extensions; + } + + public DiscoveryNode getSourceNode() { + return sourceNode; + } + + @Override + public String toString() { + return "PluginRequest{" + "sourceNode=" + sourceNode + ", extensions=" + extensions + '}'; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + PluginRequest that = (PluginRequest) o; + return Objects.equals(sourceNode, that.sourceNode) && Objects.equals(extensions, that.extensions); + } + + @Override + public int hashCode() { + return Objects.hash(sourceNode, extensions); + } +} diff --git a/server/src/main/java/org/opensearch/discovery/PluginResponse.java b/server/src/main/java/org/opensearch/discovery/PluginResponse.java new file mode 100644 index 0000000000000..f8f20214e5846 --- /dev/null +++ b/server/src/main/java/org/opensearch/discovery/PluginResponse.java @@ -0,0 +1,88 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +package org.opensearch.discovery; + +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.transport.TransportResponse; + +import java.io.IOException; +import java.util.Objects; + +/** + * PluginResponse to initialize plugin + * + * @opensearch.internal + */ +public class PluginResponse extends TransportResponse { + private String name; + + public PluginResponse(String name) { + this.name = name; + } + + public PluginResponse(StreamInput in) throws IOException { + name = in.readString(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(name); + } + + /** + * @return the name of the extension that sent this response. + */ + + public String getName() { + return this.name; + } + + @Override + public String toString() { + return "PluginResponse{" + "name=" + name + "}"; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + PluginResponse that = (PluginResponse) o; + return Objects.equals(name, that.name); + } + + @Override + public int hashCode() { + return Objects.hash(name); + } +} diff --git a/server/src/main/java/org/opensearch/env/Environment.java b/server/src/main/java/org/opensearch/env/Environment.java index c9e75bcbb616f..938bca58c7081 100644 --- a/server/src/main/java/org/opensearch/env/Environment.java +++ b/server/src/main/java/org/opensearch/env/Environment.java @@ -93,6 +93,8 @@ public class Environment { private final Path pluginsDir; + private final Path extensionsDir; + private final Path modulesDir; private final Path sharedDataDir; @@ -137,6 +139,7 @@ public Environment(final Settings settings, final Path configPath, final boolean tmpDir = Objects.requireNonNull(tmpPath); pluginsDir = homeFile.resolve("plugins"); + extensionsDir = homeFile.resolve("extensions"); List<Path> dataPaths = PATH_DATA_SETTING.get(settings); if (nodeLocalStorage) { @@ -308,6 +311,10 @@ public Path pluginsDir() { return pluginsDir; } + public Path extensionDir() { + return extensionsDir; + } + public Path binDir() { return binDir; } diff --git a/server/src/main/java/org/opensearch/extensions/DiscoveryExtensionNode.java b/server/src/main/java/org/opensearch/extensions/DiscoveryExtensionNode.java new file mode 100644 index 0000000000000..e4fa0d74f78f0 --- /dev/null +++ b/server/src/main/java/org/opensearch/extensions/DiscoveryExtensionNode.java @@ -0,0 +1,70 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.extensions; + +import org.opensearch.Version; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodeRole; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.io.stream.Writeable; +import org.opensearch.common.transport.TransportAddress; +import org.opensearch.common.xcontent.ToXContentFragment; +import org.opensearch.common.xcontent.XContentBuilder; +import org.opensearch.plugins.PluginInfo; + +import java.io.IOException; +import java.util.Map; + +/** + * Discover extensions running independently or in a separate process + * + * @opensearch.internal + */ +public class DiscoveryExtensionNode extends DiscoveryNode implements Writeable, ToXContentFragment { + + private final PluginInfo pluginInfo; + + public DiscoveryExtensionNode( + String name, + String id, + String ephemeralId, + String hostName, + String hostAddress, + TransportAddress address, + Map attributes, + Version version, + PluginInfo pluginInfo + ) { + super(name, id, ephemeralId, hostName, hostAddress, address, attributes, DiscoveryNodeRole.BUILT_IN_ROLES, version); + this.pluginInfo = pluginInfo; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + pluginInfo.writeTo(out); + } + + /** + * Construct DiscoveryExtensionNode from a stream. + * + * @param in the stream + * @throws IOException if an I/O exception occurred reading the plugin info from the stream + */ + public DiscoveryExtensionNode(final StreamInput in) throws IOException { + super(in); + this.pluginInfo = new PluginInfo(in); + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + return null; + } +} diff --git a/server/src/main/java/org/opensearch/extensions/ExtensionRequest.java b/server/src/main/java/org/opensearch/extensions/ExtensionRequest.java new file mode 100644 index 0000000000000..924fce49a5dc2 --- /dev/null +++ b/server/src/main/java/org/opensearch/extensions/ExtensionRequest.java @@ -0,0 +1,66 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.extensions; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.transport.TransportRequest; + +import java.io.IOException; +import java.util.Objects; + +/** + * ClusterService Request for Extensibility + * + * @opensearch.internal + */ +public class ExtensionRequest extends TransportRequest { + private static final Logger logger = LogManager.getLogger(ExtensionRequest.class); + private ExtensionsManager.RequestType requestType; + + public ExtensionRequest(ExtensionsManager.RequestType requestType) { + this.requestType = requestType; + } + + public ExtensionRequest(StreamInput in) throws IOException { + super(in); + this.requestType = in.readEnum(ExtensionsManager.RequestType.class); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeEnum(requestType); + } + + public ExtensionsManager.RequestType getRequestType() { + return this.requestType; + } + + @Override + public String toString() { + return "ExtensionRequest{" + "requestType=" + requestType + '}'; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ExtensionRequest that = (ExtensionRequest) o; + return Objects.equals(requestType, that.requestType); + } + + @Override + public int hashCode() { + return Objects.hash(requestType); + } + +} diff --git a/server/src/main/java/org/opensearch/extensions/ExtensionsManager.java b/server/src/main/java/org/opensearch/extensions/ExtensionsManager.java new file mode 100644 index 0000000000000..b809f2e35a483 --- /dev/null +++ b/server/src/main/java/org/opensearch/extensions/ExtensionsManager.java @@ -0,0 +1,440 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.extensions; + +import java.io.IOException; +import java.io.InputStream; +import java.net.InetAddress; +import java.net.UnknownHostException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeUnit; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.Version; +import org.opensearch.action.admin.cluster.state.ClusterStateResponse; +import org.opensearch.cluster.ClusterSettingsResponse; +import org.opensearch.cluster.LocalNodeResponse; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.io.FileSystemUtils; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.transport.TransportAddress; + +import org.opensearch.discovery.PluginRequest; +import org.opensearch.discovery.PluginResponse; +import org.opensearch.extensions.ExtensionsSettings.Extension; +import org.opensearch.index.IndexModule; +import org.opensearch.index.IndexService; +import org.opensearch.index.AcknowledgedResponse; +import org.opensearch.index.IndicesModuleRequest; +import org.opensearch.index.IndicesModuleResponse; +import org.opensearch.index.shard.IndexEventListener; +import org.opensearch.indices.cluster.IndicesClusterStateService; +import org.opensearch.plugins.PluginInfo; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportException; +import org.opensearch.transport.TransportResponse; +import org.opensearch.transport.TransportResponseHandler; +import org.opensearch.transport.TransportService; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.dataformat.yaml.YAMLFactory; + +/** + * The main class for Plugin Extensibility + * + * @opensearch.internal + */ +public class ExtensionsManager { + public static final String REQUEST_EXTENSION_ACTION_NAME = "internal:discovery/extensions"; + public static final String INDICES_EXTENSION_POINT_ACTION_NAME = "indices:internal/extensions"; + public static final String INDICES_EXTENSION_NAME_ACTION_NAME = "indices:internal/name"; + public static final String REQUEST_EXTENSION_CLUSTER_STATE = "internal:discovery/clusterstate"; + public static final String REQUEST_EXTENSION_LOCAL_NODE = "internal:discovery/localnode"; + public static final String REQUEST_EXTENSION_CLUSTER_SETTINGS = "internal:discovery/clustersettings"; + + private static final Logger logger = LogManager.getLogger(ExtensionsManager.class); + + /** + * Enum for Extension Requests + * + * @opensearch.internal + */ + public static enum RequestType { + REQUEST_EXTENSION_CLUSTER_STATE, + REQUEST_EXTENSION_LOCAL_NODE, + REQUEST_EXTENSION_CLUSTER_SETTINGS, + CREATE_COMPONENT, + ON_INDEX_MODULE, + GET_SETTINGS + }; + + private final Path extensionsPath; + private final List uninitializedExtensions; + private List extensions; + private TransportService transportService; + private ClusterService clusterService; + + public ExtensionsManager() { + this.extensionsPath = Path.of(""); + this.uninitializedExtensions = new ArrayList(); + } + + public ExtensionsManager(Settings settings, Path extensionsPath) throws IOException { + logger.info("ExtensionsManager initialized"); + this.extensionsPath 
+    public ExtensionsManager(Settings settings, Path extensionsPath) throws IOException {
+        logger.info("ExtensionsManager initialized");
+        this.extensionsPath = extensionsPath;
+        this.transportService = null;
+        this.uninitializedExtensions = new ArrayList<>();
+        this.extensions = new ArrayList<>();
+        this.clusterService = null;
+
+        // Discover the extensions declared in the extensions directory.
+        discover();
+    }
+
+    public void setTransportService(TransportService transportService) {
+        this.transportService = transportService;
+        registerRequestHandler();
+    }
+
+    public void setClusterService(ClusterService clusterService) {
+        this.clusterService = clusterService;
+    }
+
+    private void registerRequestHandler() {
+        transportService.registerRequestHandler(
+            REQUEST_EXTENSION_CLUSTER_STATE,
+            ThreadPool.Names.GENERIC,
+            false,
+            false,
+            ExtensionRequest::new,
+            ((request, channel, task) -> channel.sendResponse(handleExtensionRequest(request)))
+        );
+        transportService.registerRequestHandler(
+            REQUEST_EXTENSION_LOCAL_NODE,
+            ThreadPool.Names.GENERIC,
+            false,
+            false,
+            ExtensionRequest::new,
+            ((request, channel, task) -> channel.sendResponse(handleExtensionRequest(request)))
+        );
+        transportService.registerRequestHandler(
+            REQUEST_EXTENSION_CLUSTER_SETTINGS,
+            ThreadPool.Names.GENERIC,
+            false,
+            false,
+            ExtensionRequest::new,
+            ((request, channel, task) -> channel.sendResponse(handleExtensionRequest(request)))
+        );
+    }
+
+    /*
+     * Load and populate all extensions
+     */
+    private void discover() throws IOException {
+        logger.info("Extensions Config Directory: " + extensionsPath.toString());
+        if (!FileSystemUtils.isAccessibleDirectory(extensionsPath, logger)) {
+            return;
+        }
+
+        List<Extension> extensions = new ArrayList<>();
+        if (Files.exists(extensionsPath.resolve("extensions.yml"))) {
+            try {
+                extensions = readFromExtensionsYml(extensionsPath.resolve("extensions.yml")).getExtensions();
+            } catch (IOException e) {
+                throw new IOException("Could not read from extensions.yml", e);
+            }
+            for (Extension extension : extensions) {
+                loadExtension(extension);
+            }
+            if (!uninitializedExtensions.isEmpty()) {
+                logger.info("Loaded all extensions");
+            }
+        } else {
+            logger.info("Extensions.yml file is not present. No extensions will be loaded.");
+        }
+    }
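+    /*
+     * Illustrative extensions.yml entry (a sketch only; the values are hypothetical and the field
+     * names mirror the properties of ExtensionsSettings.Extension below):
+     *
+     * extensions:
+     *   - name: sample-extension
+     *     uniqueId: sample-extension-1
+     *     hostName: 'sample-host'
+     *     hostAddress: '127.0.0.1'
+     *     port: '4532'
+     *     version: '1.0'
+     *     description: A sample extension
+     *     opensearchVersion: '3.0.0'
+     *     javaVersion: '17'
+     *     className: org.example.SampleExtension
+     *     hasNativeController: 'false'
+     */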
+    /**
+     * Loads a single extension
+     * @param extension The extension to be loaded
+     */
+    private void loadExtension(Extension extension) throws IOException {
+        uninitializedExtensions.add(
+            new DiscoveryExtensionNode(
+                extension.getName(),
+                extension.getUniqueId(),
+                // placeholder for ephemeral id, will change with POC discovery
+                extension.getUniqueId(),
+                extension.getHostName(),
+                extension.getHostAddress(),
+                new TransportAddress(InetAddress.getByName(extension.getHostAddress()), Integer.parseInt(extension.getPort())),
+                new HashMap<String, String>(),
+                Version.fromString(extension.getOpensearchVersion()),
+                new PluginInfo(
+                    extension.getName(),
+                    extension.getDescription(),
+                    extension.getVersion(),
+                    Version.fromString(extension.getOpensearchVersion()),
+                    extension.getJavaVersion(),
+                    extension.getClassName(),
+                    new ArrayList<String>(),
+                    Boolean.parseBoolean(extension.hasNativeController())
+                )
+            )
+        );
+        logger.info("Loaded extension: " + extension);
+    }
+
+    public void initialize() {
+        for (DiscoveryNode extensionNode : uninitializedExtensions) {
+            initializeExtension(extensionNode);
+        }
+    }
+
+    private void initializeExtension(DiscoveryNode extensionNode) {
+
+        final TransportResponseHandler<PluginResponse> pluginResponseHandler = new TransportResponseHandler<PluginResponse>() {
+
+            @Override
+            public PluginResponse read(StreamInput in) throws IOException {
+                return new PluginResponse(in);
+            }
+
+            @Override
+            public void handleResponse(PluginResponse response) {
+                for (DiscoveryExtensionNode extension : uninitializedExtensions) {
+                    if (extension.getName().equals(response.getName())) {
+                        extensions.add(extension);
+                        break;
+                    }
+                }
+            }
+
+            @Override
+            public void handleException(TransportException exp) {
+                logger.error(new ParameterizedMessage("Plugin request failed"), exp);
+            }
+
+            @Override
+            public String executor() {
+                return ThreadPool.Names.GENERIC;
+            }
+        };
+        transportService.connectToExtensionNode(extensionNode);
+        transportService.sendRequest(
+            extensionNode,
+            REQUEST_EXTENSION_ACTION_NAME,
+            new PluginRequest(transportService.getLocalNode(), new ArrayList<>(uninitializedExtensions)),
+            pluginResponseHandler
+        );
+    }
+
+    TransportResponse handleExtensionRequest(ExtensionRequest extensionRequest) throws Exception {
+        // Dispatch on the request type enum.
+        if (extensionRequest.getRequestType() == RequestType.REQUEST_EXTENSION_CLUSTER_STATE) {
+            return new ClusterStateResponse(clusterService.getClusterName(), clusterService.state(), false);
+        } else if (extensionRequest.getRequestType() == RequestType.REQUEST_EXTENSION_LOCAL_NODE) {
+            return new LocalNodeResponse(clusterService);
+        } else if (extensionRequest.getRequestType() == RequestType.REQUEST_EXTENSION_CLUSTER_SETTINGS) {
+            return new ClusterSettingsResponse(clusterService);
+        }
+        throw new IllegalStateException("Handler not present for the provided request: " + extensionRequest.getRequestType());
+    }
+
+    public void onIndexModule(IndexModule indexModule) throws UnknownHostException {
+        for (DiscoveryNode extensionNode : uninitializedExtensions) {
+            onIndexModule(indexModule, extensionNode);
+        }
+    }
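+    /*
+     * Sends an IndicesModuleRequest for the onIndexModule extension point to the given extension node
+     * and blocks (up to a timeout) on the response, so that the extension's listeners are registered
+     * before index creation proceeds.
+     */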
logger.info("onIndexModule index:" + indexModule.getIndex()); + final CompletableFuture inProgressFuture = new CompletableFuture<>(); + final CompletableFuture inProgressIndexNameFuture = new CompletableFuture<>(); + final TransportResponseHandler acknowledgedResponseHandler = new TransportResponseHandler< + AcknowledgedResponse>() { + @Override + public void handleResponse(AcknowledgedResponse response) { + logger.info("ACK Response" + response); + inProgressIndexNameFuture.complete(response); + } + + @Override + public void handleException(TransportException exp) { + + } + + @Override + public String executor() { + return ThreadPool.Names.GENERIC; + } + + @Override + public AcknowledgedResponse read(StreamInput in) throws IOException { + return new AcknowledgedResponse(in); + } + + }; + + final TransportResponseHandler indicesModuleResponseHandler = new TransportResponseHandler< + IndicesModuleResponse>() { + + @Override + public IndicesModuleResponse read(StreamInput in) throws IOException { + return new IndicesModuleResponse(in); + } + + @Override + public void handleResponse(IndicesModuleResponse response) { + logger.info("received {}", response); + if (response.getIndexEventListener() == true) { + indexModule.addIndexEventListener(new IndexEventListener() { + @Override + public void beforeIndexRemoved( + IndexService indexService, + IndicesClusterStateService.AllocatedIndices.IndexRemovalReason reason + ) { + logger.info("Index Event Listener is called"); + String indexName = indexService.index().getName(); + logger.info("Index Name" + indexName.toString()); + try { + logger.info("Sending request of index name to extension"); + transportService.sendRequest( + extensionNode, + INDICES_EXTENSION_NAME_ACTION_NAME, + new IndicesModuleRequest(indexModule), + acknowledgedResponseHandler + ); + /* + * Making async synchronous for now. + */ + inProgressIndexNameFuture.get(100, TimeUnit.SECONDS); + logger.info("Received ack response from Extension"); + } catch (Exception e) { + logger.error(e.toString()); + } + } + }); + } + inProgressFuture.complete(response); + } + + @Override + public void handleException(TransportException exp) { + logger.error(new ParameterizedMessage("IndicesModuleRequest failed"), exp); + inProgressFuture.completeExceptionally(exp); + } + + @Override + public String executor() { + return ThreadPool.Names.GENERIC; + } + }; + + try { + logger.info("Sending request to extension"); + transportService.sendRequest( + extensionNode, + INDICES_EXTENSION_POINT_ACTION_NAME, + new IndicesModuleRequest(indexModule), + indicesModuleResponseHandler + ); + /* + * Making async synchronous for now. 
+    private ExtensionsSettings readFromExtensionsYml(Path filePath) throws IOException {
+        ObjectMapper objectMapper = new ObjectMapper(new YAMLFactory());
+        // try-with-resources ensures the stream is closed once parsing completes
+        try (InputStream input = Files.newInputStream(filePath)) {
+            return objectMapper.readValue(input, ExtensionsSettings.class);
+        }
+    }
+
+    public static String getRequestExtensionActionName() {
+        return REQUEST_EXTENSION_ACTION_NAME;
+    }
+
+    public static String getIndicesExtensionPointActionName() {
+        return INDICES_EXTENSION_POINT_ACTION_NAME;
+    }
+
+    public static String getIndicesExtensionNameActionName() {
+        return INDICES_EXTENSION_NAME_ACTION_NAME;
+    }
+
+    public static String getRequestExtensionClusterState() {
+        return REQUEST_EXTENSION_CLUSTER_STATE;
+    }
+
+    public static String getRequestExtensionLocalNode() {
+        return REQUEST_EXTENSION_LOCAL_NODE;
+    }
+
+    public static String getRequestExtensionClusterSettings() {
+        return REQUEST_EXTENSION_CLUSTER_SETTINGS;
+    }
+
+    public static Logger getLogger() {
+        return logger;
+    }
+
+    public Path getExtensionsPath() {
+        return extensionsPath;
+    }
+
+    public List<DiscoveryExtensionNode> getUninitializedExtensions() {
+        return uninitializedExtensions;
+    }
+
+    public List<DiscoveryExtensionNode> getExtensions() {
+        return extensions;
+    }
+
+    public TransportService getTransportService() {
+        return transportService;
+    }
+
+    public ClusterService getClusterService() {
+        return clusterService;
+    }
+
+}
diff --git a/server/src/main/java/org/opensearch/extensions/ExtensionsSettings.java b/server/src/main/java/org/opensearch/extensions/ExtensionsSettings.java
new file mode 100644
index 0000000000000..8b6226e578ea3
--- /dev/null
+++ b/server/src/main/java/org/opensearch/extensions/ExtensionsSettings.java
@@ -0,0 +1,202 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */ + +package org.opensearch.extensions; + +import java.util.ArrayList; +import java.util.List; + +/** + * List of extension configurations from extension.yml + * + * @opensearch.internal + */ +public class ExtensionsSettings { + + private List extensions; + + public ExtensionsSettings() { + extensions = new ArrayList(); + } + + /** + * Extension configuration used for extension discovery + * + * @opensearch.internal + */ + public static class Extension { + + private String name; + private String uniqueId; + private String hostName; + private String hostAddress; + private String port; + private String version; + private String description; + private String opensearchVersion; + private String jvmVersion; + private String className; + private String customFolderName; + private String hasNativeController; + + public Extension() { + name = ""; + uniqueId = ""; + hostName = ""; + hostAddress = ""; + port = ""; + version = ""; + description = ""; + opensearchVersion = ""; + jvmVersion = ""; + className = ""; + customFolderName = ""; + hasNativeController = "false"; + } + + public String getName() { + return name; + } + + public void setName(String name) { + this.name = name; + } + + public String getUniqueId() { + return uniqueId; + } + + public void setUniqueId(String uniqueId) { + this.uniqueId = uniqueId; + } + + public String getHostName() { + return hostName; + } + + public void setHostName(String hostName) { + this.hostName = hostName; + } + + public String getHostAddress() { + return hostAddress; + } + + public void setHostAddress(String hostAddress) { + this.hostAddress = hostAddress; + } + + public String getPort() { + return port; + } + + public void setPort(String port) { + this.port = port; + } + + public String getVersion() { + return version; + } + + public void setVersion(String version) { + this.version = version; + } + + @Override + public String toString() { + return "Extension [className=" + + className + + ", customFolderName=" + + customFolderName + + ", description=" + + description + + ", hasNativeController=" + + hasNativeController + + ", hostAddress=" + + hostAddress + + ", hostName=" + + hostName + + ", jvmVersion=" + + jvmVersion + + ", name=" + + name + + ", opensearchVersion=" + + opensearchVersion + + ", port=" + + port + + ", uniqueId=" + + uniqueId + + ", version=" + + version + + "]"; + } + + public String getDescription() { + return description; + } + + public void setDescription(String description) { + this.description = description; + } + + public String getOpensearchVersion() { + return opensearchVersion; + } + + public void setOpensearchVersion(String opensearchVersion) { + this.opensearchVersion = opensearchVersion; + } + + public String getJavaVersion() { + return jvmVersion; + } + + public void setJavaVersion(String jvmVersion) { + this.jvmVersion = jvmVersion; + } + + public String getClassName() { + return className; + } + + public void setClassName(String className) { + this.className = className; + } + + public String getCustomFolderName() { + return customFolderName; + } + + public void setCustomFolderName(String customFolderName) { + this.customFolderName = customFolderName; + } + + public String hasNativeController() { + return hasNativeController; + } + + public void setHasNativeController(String hasNativeController) { + this.hasNativeController = hasNativeController; + } + + } + + public List getExtensions() { + return extensions; + } + + public void setExtensions(List extensions) { + this.extensions = extensions; + } + + @Override + public 
String toString() {
+        return "ExtensionsSettings [extensions=" + extensions + "]";
+    }
+
+}
diff --git a/server/src/main/java/org/opensearch/extensions/NoopExtensionsManager.java b/server/src/main/java/org/opensearch/extensions/NoopExtensionsManager.java
new file mode 100644
index 0000000000000..24f71476dcb1e
--- /dev/null
+++ b/server/src/main/java/org/opensearch/extensions/NoopExtensionsManager.java
@@ -0,0 +1,21 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.extensions;
+
+/**
+ * Noop class for ExtensionsManager
+ *
+ * @opensearch.internal
+ */
+public class NoopExtensionsManager extends ExtensionsManager {
+
+    public NoopExtensionsManager() {
+        super();
+    }
+}
diff --git a/server/src/main/java/org/opensearch/extensions/package-info.java b/server/src/main/java/org/opensearch/extensions/package-info.java
new file mode 100644
index 0000000000000..c6efd42499240
--- /dev/null
+++ b/server/src/main/java/org/opensearch/extensions/package-info.java
@@ -0,0 +1,10 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/** Main OpenSearch extensions package. OpenSearch extensions provide extensibility to OpenSearch. */
+package org.opensearch.extensions;
diff --git a/server/src/main/java/org/opensearch/index/AcknowledgedResponse.java b/server/src/main/java/org/opensearch/index/AcknowledgedResponse.java
new file mode 100644
index 0000000000000..5993a81158d30
--- /dev/null
+++ b/server/src/main/java/org/opensearch/index/AcknowledgedResponse.java
@@ -0,0 +1,38 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.index;
+
+import org.opensearch.common.io.stream.StreamInput;
+import org.opensearch.common.io.stream.StreamOutput;
+import org.opensearch.transport.TransportResponse;
+
+import java.io.IOException;
+
+/**
+ * Response for index name of onIndexModule extension point
+ *
+ * @opensearch.internal
+ */
+public class AcknowledgedResponse extends TransportResponse {
+    private boolean requestAck;
+
+    public AcknowledgedResponse(StreamInput in) throws IOException {
+        this.requestAck = in.readBoolean();
+    }
+
+    public AcknowledgedResponse(boolean requestAck) {
+        this.requestAck = requestAck;
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        out.writeBoolean(requestAck);
+    }
+
+}
diff --git a/server/src/main/java/org/opensearch/index/IndicesModuleRequest.java b/server/src/main/java/org/opensearch/index/IndicesModuleRequest.java
new file mode 100644
index 0000000000000..0e0fe87df76cd
--- /dev/null
+++ b/server/src/main/java/org/opensearch/index/IndicesModuleRequest.java
@@ -0,0 +1,68 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */ + +package org.opensearch.index; + +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.settings.Settings; +import org.opensearch.transport.TransportRequest; + +import java.io.IOException; +import java.util.Objects; + +/** + * Request for onIndexModule extension point + * + * @opensearch.internal + */ +public class IndicesModuleRequest extends TransportRequest { + private final Index index; + private final Settings indexSettings; + + public IndicesModuleRequest(IndexModule indexModule) { + this.index = indexModule.getIndex(); + this.indexSettings = indexModule.getSettings(); + } + + public IndicesModuleRequest(StreamInput in) throws IOException { + super(in); + this.index = new Index(in); + this.indexSettings = Settings.readSettingsFromStream(in); + } + + public IndicesModuleRequest(Index index, Settings settings) { + this.index = index; + this.indexSettings = settings; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + index.writeTo(out); + Settings.writeSettingsToStream(indexSettings, out); + } + + @Override + public String toString() { + return "IndicesModuleRequest{" + "index=" + index + ", indexSettings=" + indexSettings + '}'; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + IndicesModuleRequest that = (IndicesModuleRequest) o; + return Objects.equals(index, that.index) && Objects.equals(indexSettings, that.indexSettings); + } + + @Override + public int hashCode() { + return Objects.hash(index, indexSettings); + } +} diff --git a/server/src/main/java/org/opensearch/index/IndicesModuleResponse.java b/server/src/main/java/org/opensearch/index/IndicesModuleResponse.java new file mode 100644 index 0000000000000..7b41f629e48ed --- /dev/null +++ b/server/src/main/java/org/opensearch/index/IndicesModuleResponse.java @@ -0,0 +1,89 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index; + +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.transport.TransportResponse; + +import java.io.IOException; +import java.util.Objects; + +/** + * Response for onIndexModule extension point + * + * @opensearch.internal + */ +public class IndicesModuleResponse extends TransportResponse { + private boolean supportsIndexEventListener; + private boolean addIndexOperationListener; + private boolean addSearchOperationListener; + + public IndicesModuleResponse( + boolean supportsIndexEventListener, + boolean addIndexOperationListener, + boolean addSearchOperationListener + ) { + this.supportsIndexEventListener = supportsIndexEventListener; + this.addIndexOperationListener = addIndexOperationListener; + this.addSearchOperationListener = addSearchOperationListener; + } + + public IndicesModuleResponse(StreamInput in) throws IOException { + this.supportsIndexEventListener = in.readBoolean(); + this.addIndexOperationListener = in.readBoolean(); + this.addSearchOperationListener = in.readBoolean(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeBoolean(supportsIndexEventListener); + out.writeBoolean(addIndexOperationListener); + out.writeBoolean(addSearchOperationListener); + } + + public boolean getIndexEventListener() { + return this.supportsIndexEventListener; + } + + public boolean getIndexOperationListener() { + return this.addIndexOperationListener; + } + + public boolean getSearchOperationListener() { + return this.addSearchOperationListener; + } + + @Override + public String toString() { + return "IndicesModuleResponse{" + + "supportsIndexEventListener" + + supportsIndexEventListener + + " addIndexOperationListener" + + addIndexOperationListener + + " addSearchOperationListener" + + addSearchOperationListener + + "}"; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + IndicesModuleResponse that = (IndicesModuleResponse) o; + return Objects.equals(supportsIndexEventListener, that.supportsIndexEventListener) + && Objects.equals(addIndexOperationListener, that.addIndexOperationListener) + && Objects.equals(addSearchOperationListener, that.addSearchOperationListener); + } + + @Override + public int hashCode() { + return Objects.hash(supportsIndexEventListener, addIndexOperationListener, addSearchOperationListener); + } +} diff --git a/server/src/main/java/org/opensearch/index/codec/CodecService.java b/server/src/main/java/org/opensearch/index/codec/CodecService.java index b1e73b3855759..e4899c02d37e8 100644 --- a/server/src/main/java/org/opensearch/index/codec/CodecService.java +++ b/server/src/main/java/org/opensearch/index/codec/CodecService.java @@ -34,8 +34,8 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.codecs.Codec; -import org.apache.lucene.codecs.lucene94.Lucene94Codec; -import org.apache.lucene.codecs.lucene94.Lucene94Codec.Mode; +import org.apache.lucene.codecs.lucene95.Lucene95Codec; +import org.apache.lucene.codecs.lucene95.Lucene95Codec.Mode; import org.opensearch.common.Nullable; import org.opensearch.common.collect.MapBuilder; import org.opensearch.index.mapper.MapperService; @@ -62,8 +62,8 @@ public class CodecService { public CodecService(@Nullable MapperService mapperService, Logger logger) { final MapBuilder codecs = MapBuilder.newMapBuilder(); if (mapperService == null) { - codecs.put(DEFAULT_CODEC, 
new Lucene94Codec()); - codecs.put(BEST_COMPRESSION_CODEC, new Lucene94Codec(Mode.BEST_COMPRESSION)); + codecs.put(DEFAULT_CODEC, new Lucene95Codec()); + codecs.put(BEST_COMPRESSION_CODEC, new Lucene95Codec(Mode.BEST_COMPRESSION)); } else { codecs.put(DEFAULT_CODEC, new PerFieldMappingPostingFormatCodec(Mode.BEST_SPEED, mapperService, logger)); codecs.put(BEST_COMPRESSION_CODEC, new PerFieldMappingPostingFormatCodec(Mode.BEST_COMPRESSION, mapperService, logger)); diff --git a/server/src/main/java/org/opensearch/index/codec/PerFieldMappingPostingFormatCodec.java b/server/src/main/java/org/opensearch/index/codec/PerFieldMappingPostingFormatCodec.java index c101321e47350..f1b515534bdeb 100644 --- a/server/src/main/java/org/opensearch/index/codec/PerFieldMappingPostingFormatCodec.java +++ b/server/src/main/java/org/opensearch/index/codec/PerFieldMappingPostingFormatCodec.java @@ -36,7 +36,7 @@ import org.apache.lucene.codecs.Codec; import org.apache.lucene.codecs.DocValuesFormat; import org.apache.lucene.codecs.PostingsFormat; -import org.apache.lucene.codecs.lucene94.Lucene94Codec; +import org.apache.lucene.codecs.lucene95.Lucene95Codec; import org.apache.lucene.codecs.lucene90.Lucene90DocValuesFormat; import org.opensearch.common.lucene.Lucene; import org.opensearch.index.mapper.CompletionFieldMapper; @@ -53,7 +53,7 @@ * * @opensearch.internal */ -public class PerFieldMappingPostingFormatCodec extends Lucene94Codec { +public class PerFieldMappingPostingFormatCodec extends Lucene95Codec { private final Logger logger; private final MapperService mapperService; private final DocValuesFormat dvFormat = new Lucene90DocValuesFormat(); diff --git a/server/src/main/java/org/opensearch/index/engine/RecoverySourcePruneMergePolicy.java b/server/src/main/java/org/opensearch/index/engine/RecoverySourcePruneMergePolicy.java index 0c064a8f188ad..493ccbb69a244 100644 --- a/server/src/main/java/org/opensearch/index/engine/RecoverySourcePruneMergePolicy.java +++ b/server/src/main/java/org/opensearch/index/engine/RecoverySourcePruneMergePolicy.java @@ -238,8 +238,8 @@ public void close() throws IOException { } @Override - public void visitDocument(int docID, StoredFieldVisitor visitor) throws IOException { - in.visitDocument(docID, visitor); + public void document(int docID, StoredFieldVisitor visitor) throws IOException { + in.document(docID, visitor); } @Override @@ -268,11 +268,11 @@ private static class RecoverySourcePruningStoredFieldsReader extends FilterStore } @Override - public void visitDocument(int docID, StoredFieldVisitor visitor) throws IOException { + public void document(int docID, StoredFieldVisitor visitor) throws IOException { if (recoverySourceToKeep != null && recoverySourceToKeep.get(docID)) { - super.visitDocument(docID, visitor); + super.document(docID, visitor); } else { - super.visitDocument(docID, new FilterStoredFieldVisitor(visitor) { + super.document(docID, new FilterStoredFieldVisitor(visitor) { @Override public Status needsField(FieldInfo fieldInfo) throws IOException { if (recoverySourceField.equals(fieldInfo.name)) { @@ -293,7 +293,6 @@ public StoredFieldsReader getMergeInstance() { public StoredFieldsReader clone() { return new RecoverySourcePruningStoredFieldsReader(in.clone(), recoverySourceToKeep, recoverySourceField); } - } /** diff --git a/server/src/main/java/org/opensearch/index/engine/TranslogLeafReader.java b/server/src/main/java/org/opensearch/index/engine/TranslogLeafReader.java index 3a198743c3d8a..258674a096e52 100644 --- 
a/server/src/main/java/org/opensearch/index/engine/TranslogLeafReader.java +++ b/server/src/main/java/org/opensearch/index/engine/TranslogLeafReader.java @@ -45,6 +45,8 @@ import org.apache.lucene.index.SortedNumericDocValues; import org.apache.lucene.index.SortedSetDocValues; import org.apache.lucene.index.StoredFieldVisitor; +import org.apache.lucene.index.StoredFields; +import org.apache.lucene.index.TermVectors; import org.apache.lucene.index.Terms; import org.apache.lucene.index.VectorEncoding; import org.apache.lucene.index.VectorSimilarityFunction; @@ -201,6 +203,11 @@ public Fields getTermVectors(int docID) { throw new UnsupportedOperationException(); } + @Override + public TermVectors termVectors() throws IOException { + throw new UnsupportedOperationException(); + } + @Override public int numDocs() { return 1; @@ -232,6 +239,11 @@ public void document(int docID, StoredFieldVisitor visitor) throws IOException { } } + @Override + public StoredFields storedFields() throws IOException { + throw new UnsupportedOperationException(); + } + @Override protected void doClose() { @@ -251,4 +263,9 @@ public VectorValues getVectorValues(String field) throws IOException { public TopDocs searchNearestVectors(String field, float[] target, int k, Bits acceptDocs, int visitedLimit) throws IOException { throw new UnsupportedOperationException(); } + + @Override + public TopDocs searchNearestVectors(String field, BytesRef target, int k, Bits acceptDocs, int visitedLimit) throws IOException { + throw new UnsupportedOperationException(); + } } diff --git a/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java b/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java index 0b85ba0d2ccd8..42069ac165b25 100644 --- a/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java +++ b/server/src/main/java/org/opensearch/index/mapper/KeywordFieldMapper.java @@ -38,7 +38,10 @@ import org.apache.lucene.document.FieldType; import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.search.MultiTermQuery; +import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; +import org.opensearch.common.Nullable; import org.opensearch.common.lucene.Lucene; import org.opensearch.common.xcontent.XContentParser; import org.opensearch.index.analysis.IndexAnalyzers; @@ -368,6 +371,18 @@ protected BytesRef indexedValueForSearch(Object value) { } return getTextSearchInfo().getSearchAnalyzer().normalize(name(), value.toString()); } + + @Override + public Query wildcardQuery( + String value, + @Nullable MultiTermQuery.RewriteMethod method, + boolean caseInsensitve, + QueryShardContext context + ) { + // keyword field types are always normalized, so ignore case sensitivity and force normalize the wildcard + // query text + return super.wildcardQuery(value, method, caseInsensitve, true, context); + } } private final boolean indexed; diff --git a/server/src/main/java/org/opensearch/index/mapper/MappedFieldType.java b/server/src/main/java/org/opensearch/index/mapper/MappedFieldType.java index ead901a25e6fd..0804ad1a524a9 100644 --- a/server/src/main/java/org/opensearch/index/mapper/MappedFieldType.java +++ b/server/src/main/java/org/opensearch/index/mapper/MappedFieldType.java @@ -281,7 +281,7 @@ public Query prefixQuery( ) { throw new QueryShardException( context, - "Can only use prefix queries on keyword, text and wildcard fields - not on [" + name + "] which is of type [" + typeName() + "]" + 
"Can only use prefix queries on keyword and text fields - not on [" + name + "] which is of type [" + typeName() + "]" ); } @@ -290,6 +290,7 @@ public final Query wildcardQuery(String value, @Nullable MultiTermQuery.RewriteM return wildcardQuery(value, method, false, context); } + /** optionally normalize the wildcard pattern based on the value of {@code caseInsensitive} */ public Query wildcardQuery( String value, @Nullable MultiTermQuery.RewriteMethod method, @@ -298,11 +299,15 @@ public Query wildcardQuery( ) { throw new QueryShardException( context, - "Can only use wildcard queries on keyword, text and wildcard fields - not on [" - + name - + "] which is of type [" - + typeName() - + "]" + "Can only use wildcard queries on keyword and text fields - not on [" + name + "] which is of type [" + typeName() + "]" + ); + } + + /** always normalizes the wildcard pattern to lowercase */ + public Query normalizedWildcardQuery(String value, @Nullable MultiTermQuery.RewriteMethod method, QueryShardContext context) { + throw new QueryShardException( + context, + "Can only use wildcard queries on keyword and text fields - not on [" + name + "] which is of type [" + typeName() + "]" ); } diff --git a/server/src/main/java/org/opensearch/index/mapper/StringFieldType.java b/server/src/main/java/org/opensearch/index/mapper/StringFieldType.java index fa9c02c3cf14e..fbfca44c3062a 100644 --- a/server/src/main/java/org/opensearch/index/mapper/StringFieldType.java +++ b/server/src/main/java/org/opensearch/index/mapper/StringFieldType.java @@ -152,8 +152,34 @@ public static final String normalizeWildcardPattern(String fieldname, String val return sb.toBytesRef().utf8ToString(); } + /** optionally normalize the wildcard pattern based on the value of {@code caseInsensitive} */ @Override public Query wildcardQuery(String value, MultiTermQuery.RewriteMethod method, boolean caseInsensitive, QueryShardContext context) { + return wildcardQuery(value, method, caseInsensitive, false, context); + } + + /** always normalizes the wildcard pattern to lowercase */ + @Override + public Query normalizedWildcardQuery(String value, MultiTermQuery.RewriteMethod method, QueryShardContext context) { + return wildcardQuery(value, method, false, true, context); + } + + /** + * return a wildcard query + * + * @param value the pattern + * @param method rewrite method + * @param caseInsensitive should ignore case; note, only used if there is no analyzer, else we use the analyzer rules + * @param normalizeIfAnalyzed force normalize casing if an analyzer is used + * @param context the query shard context + */ + public Query wildcardQuery( + String value, + MultiTermQuery.RewriteMethod method, + boolean caseInsensitive, + boolean normalizeIfAnalyzed, + QueryShardContext context + ) { failIfNotIndexed(); if (context.allowExpensiveQueries() == false) { throw new OpenSearchException( @@ -162,7 +188,7 @@ public Query wildcardQuery(String value, MultiTermQuery.RewriteMethod method, bo } Term term; - if (getTextSearchInfo().getSearchAnalyzer() != null) { + if (getTextSearchInfo().getSearchAnalyzer() != null && normalizeIfAnalyzed) { value = normalizeWildcardPattern(name(), value, getTextSearchInfo().getSearchAnalyzer()); term = new Term(name(), value); } else { diff --git a/server/src/main/java/org/opensearch/index/query/MatchBoolPrefixQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/MatchBoolPrefixQueryBuilder.java index f8f84c52309d5..f901fac22d7ae 100644 --- 
a/server/src/main/java/org/opensearch/index/query/MatchBoolPrefixQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/MatchBoolPrefixQueryBuilder.java @@ -175,12 +175,19 @@ public String minimumShouldMatch() { return this.minimumShouldMatch; } + @Deprecated /** Sets the fuzziness used when evaluated to a fuzzy query type. Defaults to "AUTO". */ public MatchBoolPrefixQueryBuilder fuzziness(Object fuzziness) { this.fuzziness = Fuzziness.build(fuzziness); return this; } + /** Sets the fuzziness used when evaluated to a fuzzy query type. Defaults to "AUTO". */ + public MatchBoolPrefixQueryBuilder fuzziness(Fuzziness fuzziness) { + this.fuzziness = fuzziness; + return this; + } + /** Gets the fuzziness used when evaluated to a fuzzy query type. */ public Fuzziness fuzziness() { return this.fuzziness; @@ -348,19 +355,16 @@ public static MatchBoolPrefixQueryBuilder fromXContent(XContentParser parser) th } } - MatchBoolPrefixQueryBuilder queryBuilder = new MatchBoolPrefixQueryBuilder(fieldName, value); - queryBuilder.analyzer(analyzer); - queryBuilder.operator(operator); - queryBuilder.minimumShouldMatch(minimumShouldMatch); - queryBuilder.boost(boost); - queryBuilder.queryName(queryName); - if (fuzziness != null) { - queryBuilder.fuzziness(fuzziness); - } - queryBuilder.prefixLength(prefixLength); - queryBuilder.maxExpansions(maxExpansion); - queryBuilder.fuzzyTranspositions(fuzzyTranspositions); - queryBuilder.fuzzyRewrite(fuzzyRewrite); + MatchBoolPrefixQueryBuilder queryBuilder = new MatchBoolPrefixQueryBuilder(fieldName, value).analyzer(analyzer) + .operator(operator) + .minimumShouldMatch(minimumShouldMatch) + .boost(boost) + .queryName(queryName) + .fuzziness(fuzziness) + .prefixLength(prefixLength) + .maxExpansions(maxExpansion) + .fuzzyTranspositions(fuzzyTranspositions) + .fuzzyRewrite(fuzzyRewrite); return queryBuilder; } diff --git a/server/src/main/java/org/opensearch/index/query/MatchQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/MatchQueryBuilder.java index 380e8722daca9..8dbe9392bdd95 100644 --- a/server/src/main/java/org/opensearch/index/query/MatchQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/MatchQueryBuilder.java @@ -208,12 +208,19 @@ public String analyzer() { return this.analyzer; } + @Deprecated /** Sets the fuzziness used when evaluated to a fuzzy query type. Defaults to "AUTO". */ public MatchQueryBuilder fuzziness(Object fuzziness) { this.fuzziness = Fuzziness.build(fuzziness); return this; } + /** Sets the fuzziness used when evaluated to a fuzzy query type. Defaults to "AUTO". */ + public MatchQueryBuilder fuzziness(Fuzziness fuzziness) { + this.fuzziness = fuzziness; + return this; + } + /** Gets the fuzziness used when evaluated to a fuzzy query type. 
*/ public Fuzziness fuzziness() { return this.fuzziness; @@ -565,9 +572,7 @@ public static MatchQueryBuilder fromXContent(XContentParser parser) throws IOExc matchQuery.operator(operator); matchQuery.analyzer(analyzer); matchQuery.minimumShouldMatch(minimumShouldMatch); - if (fuzziness != null) { - matchQuery.fuzziness(fuzziness); - } + matchQuery.fuzziness(fuzziness); matchQuery.fuzzyRewrite(fuzzyRewrite); matchQuery.prefixLength(prefixLength); matchQuery.fuzzyTranspositions(fuzzyTranspositions); diff --git a/server/src/main/java/org/opensearch/index/query/MultiMatchQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/MultiMatchQueryBuilder.java index fe3bcd81e72be..2270c3675fa11 100644 --- a/server/src/main/java/org/opensearch/index/query/MultiMatchQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/MultiMatchQueryBuilder.java @@ -397,6 +397,7 @@ public int slop() { return slop; } + @Deprecated /** * Sets the fuzziness used when evaluated to a fuzzy query type. Defaults to "AUTO". */ @@ -407,6 +408,14 @@ public MultiMatchQueryBuilder fuzziness(Object fuzziness) { return this; } + /** + * Sets the fuzziness used when evaluated to a fuzzy query type. Defaults to "AUTO". + */ + public MultiMatchQueryBuilder fuzziness(Fuzziness fuzziness) { + this.fuzziness = fuzziness; + return this; + } + public Fuzziness fuzziness() { return fuzziness; } diff --git a/server/src/main/java/org/opensearch/index/query/QueryStringQueryBuilder.java b/server/src/main/java/org/opensearch/index/query/QueryStringQueryBuilder.java index 32337f5df34c5..4ee790291f453 100644 --- a/server/src/main/java/org/opensearch/index/query/QueryStringQueryBuilder.java +++ b/server/src/main/java/org/opensearch/index/query/QueryStringQueryBuilder.java @@ -79,6 +79,7 @@ public class QueryStringQueryBuilder extends AbstractQueryBuilder current, Boole } else { // We don't apply prefix on synonyms final TermAndBoost[] termAndBoosts = current.stream() - .map(t -> new TermAndBoost(t, BoostAttribute.DEFAULT_BOOST)) + .map(t -> new TermAndBoost(t.bytes(), BoostAttribute.DEFAULT_BOOST)) .toArray(TermAndBoost[]::new); - q.add(newSynonymQuery(termAndBoosts), operator); + q.add(newSynonymQuery(field, termAndBoosts), operator); } } @@ -782,9 +782,9 @@ public Query next() { } else { // We don't apply prefix on synonyms final TermAndBoost[] termAndBoosts = Arrays.stream(terms) - .map(t -> new TermAndBoost(t, BoostAttribute.DEFAULT_BOOST)) + .map(t -> new TermAndBoost(t.bytes(), BoostAttribute.DEFAULT_BOOST)) .toArray(TermAndBoost[]::new); - queryPos = newSynonymQuery(termAndBoosts); + queryPos = newSynonymQuery(field, termAndBoosts); } } if (queryPos != null) { diff --git a/server/src/main/java/org/opensearch/index/search/MultiMatchQuery.java b/server/src/main/java/org/opensearch/index/search/MultiMatchQuery.java index c2254a56d8fd1..241f05af2c512 100644 --- a/server/src/main/java/org/opensearch/index/search/MultiMatchQuery.java +++ b/server/src/main/java/org/opensearch/index/search/MultiMatchQuery.java @@ -218,10 +218,10 @@ private class BlendedQueryBuilder extends MatchQueryBuilder { } @Override - protected Query newSynonymQuery(TermAndBoost[] terms) { + protected Query newSynonymQuery(String field, TermAndBoost[] terms) { BytesRef[] values = new BytesRef[terms.length]; for (int i = 0; i < terms.length; i++) { - values[i] = terms[i].term.bytes(); + values[i] = terms[i].term; } return blendTerms(context, values, commonTermsCutoff, tieBreaker, lenient, blendedFields); } diff --git 
a/server/src/main/java/org/opensearch/index/search/QueryStringQueryParser.java b/server/src/main/java/org/opensearch/index/search/QueryStringQueryParser.java index 6d59e861eb32f..9a121fe55a7e7 100644 --- a/server/src/main/java/org/opensearch/index/search/QueryStringQueryParser.java +++ b/server/src/main/java/org/opensearch/index/search/QueryStringQueryParser.java @@ -729,7 +729,8 @@ private Query getWildcardQuerySingle(String field, String termStr) throws ParseE if (getAllowLeadingWildcard() == false && (termStr.startsWith("*") || termStr.startsWith("?"))) { throw new ParseException("'*' or '?' not allowed as first character in WildcardQuery"); } - return currentFieldType.wildcardQuery(termStr, getMultiTermRewriteMethod(), context); + // query string query is always normalized + return currentFieldType.normalizedWildcardQuery(termStr, getMultiTermRewriteMethod(), context); } catch (RuntimeException e) { if (lenient) { return newLenientFieldQuery(field, e); diff --git a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java index 701dec069d946..a40048e7b9781 100644 --- a/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java +++ b/server/src/main/java/org/opensearch/index/seqno/ReplicationTracker.java @@ -622,7 +622,9 @@ public synchronized void renewPeerRecoveryRetentionLeases() { * If this shard copy is tracked then we got here here via a rolling upgrade from an older version that doesn't * create peer recovery retention leases for every shard copy. */ - assert checkpoints.get(shardRouting.allocationId().getId()).tracked == false + assert (checkpoints.get(shardRouting.allocationId().getId()).tracked + && checkpoints.get(shardRouting.allocationId().getId()).replicated == false) + || checkpoints.get(shardRouting.allocationId().getId()).tracked == false || hasAllPeerRecoveryRetentionLeases == false; return false; } @@ -680,20 +682,29 @@ public static class CheckpointState implements Writeable { */ long globalCheckpoint; /** - * whether this shard is treated as in-sync and thus contributes to the global checkpoint calculation + * When a shard is in-sync, it is capable of being promoted as the primary during a failover. An in-sync shard + * contributes to global checkpoint calculation on the primary iff {@link CheckpointState#replicated} is true. */ boolean inSync; /** - * whether this shard is tracked in the replication group, i.e., should receive document updates from the primary. + * whether this shard is tracked in the replication group and has localTranslog, i.e., should receive document updates + * from the primary. Tracked shards with localTranslog would have corresponding retention leases on the primary shard's + * {@link ReplicationTracker}. */ boolean tracked; - public CheckpointState(long localCheckpoint, long globalCheckpoint, boolean inSync, boolean tracked) { + /** + * Whether the replication requests to the primary are replicated to the concerned shard or not. 
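+     * Shard copies that are not replicated to do not contribute to the primary's global checkpoint
+     * computation (see {@code computeGlobalCheckpoint}).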
+ */ + boolean replicated; + + public CheckpointState(long localCheckpoint, long globalCheckpoint, boolean inSync, boolean tracked, boolean replicated) { this.localCheckpoint = localCheckpoint; this.globalCheckpoint = globalCheckpoint; this.inSync = inSync; this.tracked = tracked; + this.replicated = replicated; } public CheckpointState(StreamInput in) throws IOException { @@ -701,6 +712,11 @@ public CheckpointState(StreamInput in) throws IOException { this.globalCheckpoint = in.readZLong(); this.inSync = in.readBoolean(); this.tracked = in.readBoolean(); + if (in.getVersion().onOrAfter(Version.CURRENT)) { + this.replicated = in.readBoolean(); + } else { + this.replicated = true; + } } @Override @@ -709,13 +725,14 @@ public void writeTo(StreamOutput out) throws IOException { out.writeZLong(globalCheckpoint); out.writeBoolean(inSync); out.writeBoolean(tracked); + out.writeBoolean(replicated); } /** * Returns a full copy of this object */ public CheckpointState copy() { - return new CheckpointState(localCheckpoint, globalCheckpoint, inSync, tracked); + return new CheckpointState(localCheckpoint, globalCheckpoint, inSync, tracked, replicated); } public long getLocalCheckpoint() { @@ -737,6 +754,8 @@ public String toString() { + inSync + ", tracked=" + tracked + + ", replicated=" + + replicated + '}'; } @@ -750,7 +769,8 @@ public boolean equals(Object o) { if (localCheckpoint != that.localCheckpoint) return false; if (globalCheckpoint != that.globalCheckpoint) return false; if (inSync != that.inSync) return false; - return tracked == that.tracked; + if (tracked != that.tracked) return false; + return replicated == that.replicated; } @Override @@ -759,6 +779,7 @@ public int hashCode() { result = 31 * result + Long.hashCode(globalCheckpoint); result = 31 * result + Boolean.hashCode(inSync); result = 31 * result + Boolean.hashCode(tracked); + result = 31 * result + Boolean.hashCode(replicated); return result; } } @@ -774,7 +795,7 @@ public synchronized ObjectLongMap getInSyncGlobalCheckpoints() { final ObjectLongMap globalCheckpoints = new ObjectLongHashMap<>(checkpoints.size()); // upper bound on the size checkpoints.entrySet() .stream() - .filter(e -> e.getValue().inSync) + .filter(e -> e.getValue().inSync && e.getValue().replicated) .forEach(e -> globalCheckpoints.put(e.getKey(), e.getValue().globalCheckpoint)); return globalCheckpoints; } @@ -833,6 +854,9 @@ private boolean invariant() { // the current shard is marked as in-sync when the global checkpoint tracker operates in primary mode assert !primaryMode || checkpoints.get(shardAllocationId).inSync; + // the current shard is marked as tracked when the global checkpoint tracker operates in primary mode + assert !primaryMode || checkpoints.get(shardAllocationId).tracked; + // the routing table and replication group is set when the global checkpoint tracker operates in primary mode assert !primaryMode || (routingTable != null && replicationGroup != null) : "primary mode but routing table is " + routingTable @@ -902,7 +926,8 @@ private boolean invariant() { if (primaryMode && indexSettings.isSoftDeleteEnabled() && hasAllPeerRecoveryRetentionLeases) { // all tracked shard copies have a corresponding peer-recovery retention lease for (final ShardRouting shardRouting : routingTable.assignedShards()) { - if (checkpoints.get(shardRouting.allocationId().getId()).tracked && !indexSettings().isRemoteTranslogStoreEnabled()) { + final CheckpointState cps = checkpoints.get(shardRouting.allocationId().getId()); + if (cps.tracked && cps.replicated) { 
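+                    // only tracked copies that are replicated to are expected to hold a peer recovery retention lease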
assert retentionLeases.contains(getPeerRecoveryRetentionLeaseId(shardRouting)) : "no retention lease for tracked shard [" + shardRouting + "] in " + retentionLeases; assert PEER_RECOVERY_RETENTION_LEASE_SOURCE.equals( @@ -926,7 +951,11 @@ private static long inSyncCheckpointStates( Function reducer ) { final OptionalLong value = reducer.apply( - checkpoints.values().stream().filter(cps -> cps.inSync).mapToLong(function).filter(v -> v != SequenceNumbers.UNASSIGNED_SEQ_NO) + checkpoints.values() + .stream() + .filter(cps -> cps.inSync && cps.replicated) + .mapToLong(function) + .filter(v -> v != SequenceNumbers.UNASSIGNED_SEQ_NO) ); return value.isPresent() ? value.getAsLong() : SequenceNumbers.UNASSIGNED_SEQ_NO; } @@ -1028,6 +1057,11 @@ private ReplicationGroup calculateReplicationGroup() { } else { newVersion = replicationGroup.getVersion() + 1; } + + assert indexSettings().isRemoteTranslogStoreEnabled() + || checkpoints.entrySet().stream().filter(e -> e.getValue().tracked).allMatch(e -> e.getValue().replicated) + : "In absence of remote translog store, all tracked shards must have replication mode as LOGICAL_REPLICATION"; + return new ReplicationGroup( routingTable, checkpoints.entrySet().stream().filter(e -> e.getValue().inSync).map(Map.Entry::getKey).collect(Collectors.toSet()), @@ -1122,10 +1156,11 @@ public synchronized void activatePrimaryMode(final long localCheckpoint) { } /** - * Creates a peer recovery retention lease for this shard, if one does not already exist and this shard is the sole shard copy in the - * replication group. If one does not already exist and yet there are other shard copies in this group then we must have just done - * a rolling upgrade from a version before {@code LegacyESVersion#V_7_4_0}, in which case the missing leases should be created - * asynchronously by the caller using {@link ReplicationTracker#createMissingPeerRecoveryRetentionLeases(ActionListener)}. + * Creates a peer recovery retention lease for this shard, if one does not already exist and this shard is the sole + * shard copy with local translog in the replication group. If one does not already exist and yet there are other + * shard copies in this group then we must have just done a rolling upgrade from a version before {@code LegacyESVersion#V_7_4_0}, + * in which case the missing leases should be created asynchronously by the caller using + * {@link ReplicationTracker#createMissingPeerRecoveryRetentionLeases(ActionListener)}. 
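+     * With a remote translog store, the primary is the only copy with a local translog, so it creates a
+     * retention lease only for itself.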
*/ private void addPeerRecoveryRetentionLeaseForSolePrimary() { assert primaryMode; @@ -1134,7 +1169,8 @@ private void addPeerRecoveryRetentionLeaseForSolePrimary() { final ShardRouting primaryShard = routingTable.primaryShard(); final String leaseId = getPeerRecoveryRetentionLeaseId(primaryShard); if (retentionLeases.get(leaseId) == null) { - if (replicationGroup.getReplicationTargets().equals(Collections.singletonList(primaryShard))) { + if (replicationGroup.getReplicationTargets().equals(Collections.singletonList(primaryShard)) + || indexSettings().isRemoteTranslogStoreEnabled()) { assert primaryShard.allocationId().getId().equals(shardAllocationId) : routingTable.assignedShards() + " vs " + shardAllocationId; @@ -1197,6 +1233,12 @@ public synchronized void updateFromClusterManager( boolean removedEntries = checkpoints.keySet() .removeIf(aid -> !inSyncAllocationIds.contains(aid) && !initializingAllocationIds.contains(aid)); + final ShardRouting primary = routingTable.primaryShard(); + final String primaryAllocationId = primary.allocationId().getId(); + final String primaryTargetAllocationId = primary.relocating() + ? primary.getTargetRelocatingShard().allocationId().getId() + : null; + if (primaryMode) { // add new initializingIds that are missing locally. These are fresh shard copies - and not in-sync for (String initializingId : initializingAllocationIds) { @@ -1207,7 +1249,16 @@ public synchronized void updateFromClusterManager( + " as in-sync but it does not exist locally"; final long localCheckpoint = SequenceNumbers.UNASSIGNED_SEQ_NO; final long globalCheckpoint = localCheckpoint; - checkpoints.put(initializingId, new CheckpointState(localCheckpoint, globalCheckpoint, inSync, inSync)); + checkpoints.put( + initializingId, + new CheckpointState( + localCheckpoint, + globalCheckpoint, + inSync, + inSync, + isReplicated(initializingId, primaryAllocationId, primaryTargetAllocationId) + ) + ); } } if (removedEntries) { @@ -1217,12 +1268,30 @@ public synchronized void updateFromClusterManager( for (String initializingId : initializingAllocationIds) { final long localCheckpoint = SequenceNumbers.UNASSIGNED_SEQ_NO; final long globalCheckpoint = localCheckpoint; - checkpoints.put(initializingId, new CheckpointState(localCheckpoint, globalCheckpoint, false, false)); + checkpoints.put( + initializingId, + new CheckpointState( + localCheckpoint, + globalCheckpoint, + false, + false, + isReplicated(initializingId, primaryAllocationId, primaryTargetAllocationId) + ) + ); } for (String inSyncId : inSyncAllocationIds) { final long localCheckpoint = SequenceNumbers.UNASSIGNED_SEQ_NO; final long globalCheckpoint = localCheckpoint; - checkpoints.put(inSyncId, new CheckpointState(localCheckpoint, globalCheckpoint, true, true)); + checkpoints.put( + inSyncId, + new CheckpointState( + localCheckpoint, + globalCheckpoint, + true, + true, + isReplicated(inSyncId, primaryAllocationId, primaryTargetAllocationId) + ) + ); } } appliedClusterStateVersion = applyingClusterStateVersion; @@ -1237,6 +1306,26 @@ public synchronized void updateFromClusterManager( assert invariant(); } + /** + * Returns whether the requests are replicated considering the remote translog existence, current/primary/primary target allocation ids. + * + * @param allocationId given allocation id + * @param primaryAllocationId primary allocation id + * @param primaryTargetAllocationId primary target allocation id + * @return the replication mode. 
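+     * (i.e. {@code true} when requests sent to the primary are also replicated to the shard copy with the given allocation id)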
+ */
+    private boolean isReplicated(String allocationId, String primaryAllocationId, String primaryTargetAllocationId) {
+        // If the remote translog store is enabled, requests are replicated only to the primary and its
+        // relocation target, so the given allocation id must match one of those two.
+        // Otherwise the index uses a local translog and requests are replicated to all shards in the
+        // replication group.
+        if (indexSettings().isRemoteTranslogStoreEnabled()) {
+            return (allocationId.equals(primaryAllocationId) || allocationId.equals(primaryTargetAllocationId));
+        }
+        return true;
+    }
+
     /**
      * Notifies the tracker of the current allocation IDs in the cluster state.
      * @param applyingClusterStateVersion the cluster state version being applied when updating the allocation IDs from the cluster-manager
@@ -1298,13 +1387,14 @@ public synchronized void markAllocationIdAsInSync(final String allocationId, fin
                 updateLocalCheckpoint(allocationId, cps, localCheckpoint);
                 // if it was already in-sync (because of a previously failed recovery attempt), global checkpoint must have been
                 // stuck from advancing
-                assert !cps.inSync || (cps.localCheckpoint >= getGlobalCheckpoint()) : "shard copy "
+                assert !cps.inSync || cps.localCheckpoint >= getGlobalCheckpoint() || cps.replicated == false : "shard copy "
                     + allocationId
                     + " that's already in-sync should have a local checkpoint "
                     + cps.localCheckpoint
                     + " that's above the global checkpoint "
-                    + getGlobalCheckpoint();
-                if (cps.localCheckpoint < getGlobalCheckpoint()) {
+                    + getGlobalCheckpoint()
+                    + " or it's not replicated";
+                if (cps.replicated && cps.localCheckpoint < getGlobalCheckpoint()) {
                     pendingInSync.add(allocationId);
                     try {
                         while (true) {
@@ -1375,7 +1465,7 @@ public synchronized void updateLocalCheckpoint(final String allocationId, final
             logger.trace("marked [{}] as in-sync", allocationId);
             notifyAllWaiters();
         }
-        if (increasedLocalCheckpoint && pending == false) {
+        if (cps.replicated && increasedLocalCheckpoint && pending == false) {
             updateGlobalCheckpointOnPrimary();
         }
         assert invariant();
@@ -1395,7 +1485,7 @@ private static long computeGlobalCheckpoint(
             return fallback;
         }
         for (final CheckpointState cps : localCheckpoints) {
-            if (cps.inSync) {
+            if (cps.inSync && cps.replicated) {
                 if (cps.localCheckpoint == SequenceNumbers.UNASSIGNED_SEQ_NO) {
                     // unassigned in-sync replica
                     return fallback;
diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java
index b2f48ccdd389c..204bf4204511e 100644
--- a/server/src/main/java/org/opensearch/indices/IndicesService.java
+++ b/server/src/main/java/org/opensearch/indices/IndicesService.java
@@ -82,6 +82,7 @@
 import org.opensearch.common.util.concurrent.OpenSearchExecutors;
 import org.opensearch.common.util.concurrent.OpenSearchRejectedExecutionException;
 import org.opensearch.common.util.concurrent.OpenSearchThreadPoolExecutor;
+import org.opensearch.common.util.FeatureFlags;
 import org.opensearch.common.util.iterable.Iterables;
 import org.opensearch.common.util.set.Sets;
 import org.opensearch.common.xcontent.LoggingDeprecationHandler;
@@ -142,6 +143,7 @@
 import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher;
 import org.opensearch.node.Node;
 import org.opensearch.plugins.IndexStorePlugin;
+import org.opensearch.extensions.ExtensionsManager;
 import
org.opensearch.plugins.PluginsService; import org.opensearch.repositories.RepositoriesService; import org.opensearch.script.ScriptService; @@ -227,6 +229,7 @@ public class IndicesService extends AbstractLifecycleComponent */ private final Settings settings; private final PluginsService pluginsService; + private final ExtensionsManager extensionsManager; private final NodeEnvironment nodeEnv; private final NamedXContentRegistry xContentRegistry; private final TimeValue shardsClosedTimeout; @@ -299,6 +302,120 @@ public IndicesService( this.settings = settings; this.threadPool = threadPool; this.pluginsService = pluginsService; + this.extensionsManager = null; + this.nodeEnv = nodeEnv; + this.xContentRegistry = xContentRegistry; + this.valuesSourceRegistry = valuesSourceRegistry; + this.shardsClosedTimeout = settings.getAsTime(INDICES_SHARDS_CLOSED_TIMEOUT, new TimeValue(1, TimeUnit.DAYS)); + this.analysisRegistry = analysisRegistry; + this.indexNameExpressionResolver = indexNameExpressionResolver; + this.indicesRequestCache = new IndicesRequestCache(settings); + this.indicesQueryCache = new IndicesQueryCache(settings); + this.mapperRegistry = mapperRegistry; + this.namedWriteableRegistry = namedWriteableRegistry; + indexingMemoryController = new IndexingMemoryController( + settings, + threadPool, + // ensure we pull an iter with new shards - flatten makes a copy + () -> Iterables.flatten(this).iterator() + ); + this.indexScopedSettings = indexScopedSettings; + this.circuitBreakerService = circuitBreakerService; + this.bigArrays = bigArrays; + this.scriptService = scriptService; + this.clusterService = clusterService; + this.client = client; + this.idFieldDataEnabled = INDICES_ID_FIELD_DATA_ENABLED_SETTING.get(clusterService.getSettings()); + clusterService.getClusterSettings().addSettingsUpdateConsumer(INDICES_ID_FIELD_DATA_ENABLED_SETTING, this::setIdFieldDataEnabled); + this.indicesFieldDataCache = new IndicesFieldDataCache(settings, new IndexFieldDataCache.Listener() { + @Override + public void onRemoval(ShardId shardId, String fieldName, boolean wasEvicted, long sizeInBytes) { + assert sizeInBytes >= 0 : "When reducing circuit breaker, it should be adjusted with a number higher or " + + "equal to 0 and not [" + + sizeInBytes + + "]"; + circuitBreakerService.getBreaker(CircuitBreaker.FIELDDATA).addWithoutBreaking(-sizeInBytes); + } + }); + this.cleanInterval = INDICES_CACHE_CLEAN_INTERVAL_SETTING.get(settings); + this.cacheCleaner = new CacheCleaner(indicesFieldDataCache, indicesRequestCache, logger, threadPool, this.cleanInterval); + this.metaStateService = metaStateService; + this.engineFactoryProviders = engineFactoryProviders; + + this.directoryFactories = directoryFactories; + this.recoveryStateFactories = recoveryStateFactories; + // doClose() is called when shutting down a node, yet there might still be ongoing requests + // that we need to wait for before closing some resources such as the caches. 
In order to + // avoid closing these resources while ongoing requests are still being processed, we use a + // ref count which will only close them when both this service and all index services are + // actually closed + indicesRefCount = new AbstractRefCounted("indices") { + @Override + protected void closeInternal() { + try { + IOUtils.close( + analysisRegistry, + indexingMemoryController, + indicesFieldDataCache, + cacheCleaner, + indicesRequestCache, + indicesQueryCache + ); + } catch (IOException e) { + throw new UncheckedIOException(e); + } finally { + closeLatch.countDown(); + } + } + }; + + final String nodeName = Objects.requireNonNull(Node.NODE_NAME_SETTING.get(settings)); + nodeWriteDanglingIndicesInfo = WRITE_DANGLING_INDICES_INFO_SETTING.get(settings); + danglingIndicesThreadPoolExecutor = nodeWriteDanglingIndicesInfo + ? OpenSearchExecutors.newScaling( + nodeName + "/" + DANGLING_INDICES_UPDATE_THREAD_NAME, + 1, + 1, + 0, + TimeUnit.MILLISECONDS, + daemonThreadFactory(nodeName, DANGLING_INDICES_UPDATE_THREAD_NAME), + threadPool.getThreadContext() + ) + : null; + + this.allowExpensiveQueries = ALLOW_EXPENSIVE_QUERIES.get(clusterService.getSettings()); + clusterService.getClusterSettings().addSettingsUpdateConsumer(ALLOW_EXPENSIVE_QUERIES, this::setAllowExpensiveQueries); + this.remoteDirectoryFactory = remoteDirectoryFactory; + } + + public IndicesService( + Settings settings, + PluginsService pluginsService, + ExtensionsManager extensionsManager, + NodeEnvironment nodeEnv, + NamedXContentRegistry xContentRegistry, + AnalysisRegistry analysisRegistry, + IndexNameExpressionResolver indexNameExpressionResolver, + MapperRegistry mapperRegistry, + NamedWriteableRegistry namedWriteableRegistry, + ThreadPool threadPool, + IndexScopedSettings indexScopedSettings, + CircuitBreakerService circuitBreakerService, + BigArrays bigArrays, + ScriptService scriptService, + ClusterService clusterService, + Client client, + MetaStateService metaStateService, + Collection>> engineFactoryProviders, + Map directoryFactories, + ValuesSourceRegistry valuesSourceRegistry, + Map recoveryStateFactories, + IndexStorePlugin.RemoteDirectoryFactory remoteDirectoryFactory + ) { + this.settings = settings; + this.threadPool = threadPool; + this.pluginsService = pluginsService; + this.extensionsManager = extensionsManager; this.nodeEnv = nodeEnv; this.xContentRegistry = xContentRegistry; this.valuesSourceRegistry = valuesSourceRegistry; @@ -721,6 +838,9 @@ private synchronized IndexService createIndexService( indexModule.addIndexOperationListener(operationListener); } pluginsService.onIndexModule(indexModule); + if (FeatureFlags.isEnabled(FeatureFlags.EXTENSIONS)) { + extensionsManager.onIndexModule(indexModule); + } for (IndexEventListener listener : builtInListeners) { indexModule.addIndexEventListener(listener); } diff --git a/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java b/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java index 15a9bf9e4c492..83f4e0c7cbed9 100644 --- a/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java +++ b/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java @@ -37,6 +37,7 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.opensearch.ResourceAlreadyExistsException; import org.opensearch.action.ActionListener; +import org.opensearch.action.StepListener; import org.opensearch.cluster.ClusterChangedEvent; import 
org.opensearch.cluster.ClusterState; import org.opensearch.cluster.ClusterStateApplier; @@ -45,11 +46,12 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.ShardRoutingState; +import org.opensearch.cluster.routing.RoutingTable; import org.opensearch.cluster.routing.IndexShardRoutingTable; -import org.opensearch.cluster.routing.RecoverySource.Type; import org.opensearch.cluster.routing.RoutingNode; -import org.opensearch.cluster.routing.RoutingTable; -import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.cluster.routing.RecoverySource.Type; import org.opensearch.cluster.service.ClusterService; import org.opensearch.common.Nullable; import org.opensearch.common.component.AbstractLifecycleComponent; @@ -82,8 +84,11 @@ import org.opensearch.indices.recovery.RecoveryListener; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.indices.replication.SegmentReplicationSourceService; +import org.opensearch.indices.replication.SegmentReplicationState; import org.opensearch.indices.replication.SegmentReplicationTargetService; +import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; +import org.opensearch.indices.replication.common.ReplicationFailedException; import org.opensearch.indices.replication.common.ReplicationState; import org.opensearch.repositories.RepositoriesService; import org.opensearch.search.SearchService; @@ -143,6 +148,8 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple private final Consumer globalCheckpointSyncer; private final RetentionLeaseSyncer retentionLeaseSyncer; + private final SegmentReplicationTargetService segmentReplicationTargetService; + private final SegmentReplicationCheckpointPublisher checkpointPublisher; @Inject @@ -217,6 +224,7 @@ public IndicesClusterStateService( indexEventListeners.add(segmentReplicationTargetService); indexEventListeners.add(segmentReplicationSourceService); } + this.segmentReplicationTargetService = segmentReplicationTargetService; this.builtInIndexListener = Collections.unmodifiableList(indexEventListeners); this.indicesService = indicesService; this.clusterService = clusterService; @@ -773,8 +781,83 @@ public synchronized void handleRecoveryFailure(ShardRouting shardRouting, boolea } public void handleRecoveryDone(ReplicationState state, ShardRouting shardRouting, long primaryTerm) { - RecoveryState RecState = (RecoveryState) state; - shardStateAction.shardStarted(shardRouting, primaryTerm, "after " + RecState.getRecoverySource(), SHARD_STATE_ACTION_LISTENER); + RecoveryState recoveryState = (RecoveryState) state; + AllocatedIndex indexService = indicesService.indexService(shardRouting.shardId().getIndex()); + StepListener forceSegRepListener = new StepListener<>(); + // For Segment Replication enabled indices, we want replica shards to start a replication event to fetch latest segments before + // it is marked as Started. 
+ if (indexService.getIndexSettings().isSegRepEnabled()) { + forceSegmentReplication(indexService, shardRouting, forceSegRepListener); + } else { + forceSegRepListener.onResponse(null); + } + forceSegRepListener.whenComplete( + v -> shardStateAction.shardStarted( + shardRouting, + primaryTerm, + "after " + recoveryState.getRecoverySource(), + SHARD_STATE_ACTION_LISTENER + ), + e -> handleRecoveryFailure(shardRouting, true, e) + ); + } + + /** + * Forces a round of Segment Replication with empty checkpoint, so that replicas could fetch latest segment files from primary. + */ + private void forceSegmentReplication( + AllocatedIndex indexService, + ShardRouting shardRouting, + StepListener forceSegRepListener + ) { + IndexShard indexShard = (IndexShard) indexService.getShardOrNull(shardRouting.id()); + if (indexShard != null + && indexShard.indexSettings().isSegRepEnabled() + && shardRouting.primary() == false + && shardRouting.state() == ShardRoutingState.INITIALIZING + && indexShard.state() == IndexShardState.POST_RECOVERY) { + segmentReplicationTargetService.startReplication( + ReplicationCheckpoint.empty(shardRouting.shardId()), + indexShard, + new SegmentReplicationTargetService.SegmentReplicationListener() { + @Override + public void onReplicationDone(SegmentReplicationState state) { + logger.trace( + () -> new ParameterizedMessage( + "[shardId {}] [replication id {}] Replication complete, timing data: {}", + indexShard.shardId().getId(), + state.getReplicationId(), + state.getTimingData() + ) + ); + forceSegRepListener.onResponse(null); + } + + @Override + public void onReplicationFailure( + SegmentReplicationState state, + ReplicationFailedException e, + boolean sendShardFailure + ) { + logger.trace( + () -> new ParameterizedMessage( + "[shardId {}] [replication id {}] Replication failed, timing data: {}", + indexShard.shardId().getId(), + state.getReplicationId(), + state.getTimingData() + ) + ); + if (sendShardFailure == true) { + logger.error("replication failure", e); + indexShard.failShard("replication failure", e); + } + forceSegRepListener.onFailure(e); + } + } + ); + } else { + forceSegRepListener.onResponse(null); + } } private void failAndRemoveShard( diff --git a/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java b/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java index cc51082639cdb..d2fc354cf9298 100644 --- a/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java +++ b/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java @@ -40,6 +40,8 @@ import java.io.IOException; import java.util.Objects; +import org.opensearch.action.support.replication.ReplicationMode; + /** * Replication action responsible for publishing checkpoint to a replica shard. 
* @@ -93,6 +95,14 @@ protected void doExecute(Task task, PublishCheckpointRequest request, ActionList assert false : "use PublishCheckpointAction#publish"; } + @Override + protected ReplicationMode getReplicationMode(IndexShard indexShard) { + if (indexShard.isRemoteTranslogEnabled()) { + return ReplicationMode.FULL_REPLICATION; + } + return super.getReplicationMode(indexShard); + } + /** * Publish checkpoint request to shard */ diff --git a/server/src/main/java/org/opensearch/node/Node.java b/server/src/main/java/org/opensearch/node/Node.java index 546ee8f8028d0..e23b9dbacf6ea 100644 --- a/server/src/main/java/org/opensearch/node/Node.java +++ b/server/src/main/java/org/opensearch/node/Node.java @@ -43,6 +43,8 @@ import org.opensearch.indices.replication.SegmentReplicationSourceFactory; import org.opensearch.indices.replication.SegmentReplicationTargetService; import org.opensearch.indices.replication.SegmentReplicationSourceService; +import org.opensearch.extensions.ExtensionsManager; +import org.opensearch.extensions.NoopExtensionsManager; import org.opensearch.search.backpressure.SearchBackpressureService; import org.opensearch.search.backpressure.settings.SearchBackpressureSettings; import org.opensearch.tasks.TaskResourceTrackingService; @@ -343,6 +345,7 @@ public static class DiscoverySettings { private final Environment environment; private final NodeEnvironment nodeEnvironment; private final PluginsService pluginsService; + private final ExtensionsManager extensionsManager; private final NodeClient client; private final Collection pluginLifecycleComponents; private final LocalNodeFactory localNodeFactory; @@ -427,6 +430,13 @@ protected Node( initialEnvironment.pluginsDir(), classpathPlugins ); + + if (FeatureFlags.isEnabled(FeatureFlags.EXTENSIONS)) { + this.extensionsManager = new ExtensionsManager(tmpSettings, initialEnvironment.extensionDir()); + } else { + this.extensionsManager = new NoopExtensionsManager(); + } + final Settings settings = pluginsService.updatedSettings(); final Set additionalRoles = pluginsService.filterPlugins(Plugin.class) @@ -659,29 +669,58 @@ protected Node( repositoriesServiceReference::get ); - final IndicesService indicesService = new IndicesService( - settings, - pluginsService, - nodeEnvironment, - xContentRegistry, - analysisModule.getAnalysisRegistry(), - clusterModule.getIndexNameExpressionResolver(), - indicesModule.getMapperRegistry(), - namedWriteableRegistry, - threadPool, - settingsModule.getIndexScopedSettings(), - circuitBreakerService, - bigArrays, - scriptService, - clusterService, - client, - metaStateService, - engineFactoryProviders, - Map.copyOf(directoryFactories), - searchModule.getValuesSourceRegistry(), - recoveryStateFactories, - remoteDirectoryFactory - ); + final IndicesService indicesService; + + if (FeatureFlags.isEnabled(FeatureFlags.EXTENSIONS)) { + indicesService = new IndicesService( + settings, + pluginsService, + extensionsManager, + nodeEnvironment, + xContentRegistry, + analysisModule.getAnalysisRegistry(), + clusterModule.getIndexNameExpressionResolver(), + indicesModule.getMapperRegistry(), + namedWriteableRegistry, + threadPool, + settingsModule.getIndexScopedSettings(), + circuitBreakerService, + bigArrays, + scriptService, + clusterService, + client, + metaStateService, + engineFactoryProviders, + Map.copyOf(directoryFactories), + searchModule.getValuesSourceRegistry(), + recoveryStateFactories, + remoteDirectoryFactory + ); + } else { + indicesService = new IndicesService( + settings, + pluginsService, + 
nodeEnvironment, + xContentRegistry, + analysisModule.getAnalysisRegistry(), + clusterModule.getIndexNameExpressionResolver(), + indicesModule.getMapperRegistry(), + namedWriteableRegistry, + threadPool, + settingsModule.getIndexScopedSettings(), + circuitBreakerService, + bigArrays, + scriptService, + clusterService, + client, + metaStateService, + engineFactoryProviders, + Map.copyOf(directoryFactories), + searchModule.getValuesSourceRegistry(), + recoveryStateFactories, + remoteDirectoryFactory + ); + } final AliasValidator aliasValidator = new AliasValidator(); @@ -794,6 +833,10 @@ protected Node( settingsModule.getClusterSettings(), taskHeaders ); + if (FeatureFlags.isEnabled(FeatureFlags.EXTENSIONS)) { + this.extensionsManager.setTransportService(transportService); + this.extensionsManager.setClusterService(clusterService); + } final GatewayMetaState gatewayMetaState = new GatewayMetaState(); final ResponseCollectorService responseCollectorService = new ResponseCollectorService(clusterService); final SearchTransportService searchTransportService = new SearchTransportService( @@ -1207,6 +1250,9 @@ public Node start() throws NodeValidationException { assert clusterService.localNode().equals(localNodeFactory.getNode()) : "clusterService has a different local node than the factory provided"; transportService.acceptIncomingRequests(); + if (FeatureFlags.isEnabled(FeatureFlags.EXTENSIONS)) { + extensionsManager.initialize(); + } discovery.startInitialJoin(); final TimeValue initialStateTimeout = DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.get(settings()); configureNodeAndClusterIdStateListener(clusterService); diff --git a/server/src/main/java/org/opensearch/plugins/PluginsService.java b/server/src/main/java/org/opensearch/plugins/PluginsService.java index bff880e5a41d7..c336bf156f40c 100644 --- a/server/src/main/java/org/opensearch/plugins/PluginsService.java +++ b/server/src/main/java/org/opensearch/plugins/PluginsService.java @@ -305,6 +305,7 @@ public Collection> getGuiceServiceClasses() } public void onIndexModule(IndexModule indexModule) { + logger.info("PluginService:onIndexModule index:" + indexModule.getIndex()); for (Tuple plugin : plugins) { plugin.v2().onIndexModule(indexModule); } diff --git a/server/src/main/java/org/opensearch/search/fetch/FetchPhase.java b/server/src/main/java/org/opensearch/search/fetch/FetchPhase.java index 4803184fa3abe..d82af3e55ee6d 100644 --- a/server/src/main/java/org/opensearch/search/fetch/FetchPhase.java +++ b/server/src/main/java/org/opensearch/search/fetch/FetchPhase.java @@ -157,7 +157,7 @@ public void execute(SearchContext context) { // So we do a little hack here and pretend we're going to do merges in order to // get better sequential access. 
SequentialStoredFieldsLeafReader lf = (SequentialStoredFieldsLeafReader) currentReaderContext.reader(); - fieldReader = lf.getSequentialStoredFieldsReader()::visitDocument; + fieldReader = lf.getSequentialStoredFieldsReader()::document; } else { fieldReader = currentReaderContext.reader()::document; } diff --git a/server/src/main/java/org/opensearch/search/lookup/SourceLookup.java b/server/src/main/java/org/opensearch/search/lookup/SourceLookup.java index fe95b78c72b82..b074f62652bab 100644 --- a/server/src/main/java/org/opensearch/search/lookup/SourceLookup.java +++ b/server/src/main/java/org/opensearch/search/lookup/SourceLookup.java @@ -137,7 +137,7 @@ public void setSegmentAndDocument(LeafReaderContext context, int docId) { // So we do a little hack here and pretend we're going to do merges in order to // get better sequential access. SequentialStoredFieldsLeafReader lf = (SequentialStoredFieldsLeafReader) context.reader(); - fieldReader = lf.getSequentialStoredFieldsReader()::visitDocument; + fieldReader = lf.getSequentialStoredFieldsReader()::document; } else { fieldReader = context.reader()::document; } diff --git a/server/src/main/java/org/opensearch/transport/TransportService.java b/server/src/main/java/org/opensearch/transport/TransportService.java index b9bf035a7fa77..1d94c5600818f 100644 --- a/server/src/main/java/org/opensearch/transport/TransportService.java +++ b/server/src/main/java/org/opensearch/transport/TransportService.java @@ -397,6 +397,11 @@ public void connectToNode(DiscoveryNode node) throws ConnectTransportException { connectToNode(node, (ConnectionProfile) null); } + // We skip node validation for extensibility, as the extension node and the OpenSearch local node will have different ephemeral ids + public void connectToExtensionNode(final DiscoveryNode node) { + PlainActionFuture.get(fut -> connectToExtensionNode(node, (ConnectionProfile) null, ActionListener.map(fut, x -> null))); + } + /** * Connect to the specified node with the given connection profile * * @@ -407,6 +412,10 @@ public void connectToNode(final DiscoveryNode node, ConnectionProfile connection PlainActionFuture.get(fut -> connectToNode(node, connectionProfile, ActionListener.map(fut, x -> null))); } + public void connectToExtensionNode(final DiscoveryNode node, ConnectionProfile connectionProfile) { + PlainActionFuture.get(fut -> connectToExtensionNode(node, connectionProfile, ActionListener.map(fut, x -> null))); + } + /** * Connect to the specified node with the given connection profile. * The ActionListener will be called on the calling thread or the generic thread pool. @@ -418,6 +427,10 @@ public void connectToNode(DiscoveryNode node, ActionListener listener) thr connectToNode(node, null, listener); } + public void connectToExtensionNode(DiscoveryNode node, ActionListener listener) throws ConnectTransportException { + connectToExtensionNode(node, null, listener); + } + /** * Connect to the specified node with the given connection profile. * The ActionListener will be called on the calling thread or the generic thread pool.
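For orientation, the connectToExtensionNode overloads above mirror the existing connectToNode entry points but route through a validator (shown in the next hunk) that performs the transport handshake without asserting that the remote's identity matches the expected node, since an extension's ephemeral id will differ from the local node's view of it. A minimal usage sketch follows; the node name, id, and port are illustrative placeholders, not values from this patch.

import java.net.InetAddress;
import java.util.Collections;

import org.opensearch.Version;
import org.opensearch.cluster.node.DiscoveryNode;
import org.opensearch.common.transport.TransportAddress;
import org.opensearch.transport.TransportService;

final class ExtensionConnectSketch {
    // Hypothetical caller: connects (blocking) to an extension listening on localhost:9301.
    // Unlike connectToNode, a mismatched remote ephemeral id will not fail the connection.
    static void connect(TransportService transportService) {
        DiscoveryNode extensionNode = new DiscoveryNode(
            "sample-extension",        // node name: illustrative
            "sample-extension-id",     // node id: illustrative
            new TransportAddress(InetAddress.getLoopbackAddress(), 9301),
            Collections.emptyMap(),    // attributes
            Collections.emptySet(),    // roles
            Version.CURRENT
        );
        transportService.connectToExtensionNode(extensionNode);
    }
}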
@@ -434,14 +447,35 @@ public void connectToNode(final DiscoveryNode node, ConnectionProfile connection connectionManager.connectToNode(node, connectionProfile, connectionValidator(node), listener); } + public void connectToExtensionNode(final DiscoveryNode node, ConnectionProfile connectionProfile, ActionListener listener) { + if (isLocalNode(node)) { + listener.onResponse(null); + return; + } + connectionManager.connectToNode(node, connectionProfile, extensionConnectionValidator(node), listener); + } + public ConnectionManager.ConnectionValidator connectionValidator(DiscoveryNode node) { return (newConnection, actualProfile, listener) -> { // We don't validate cluster names to allow for CCS connections. handshake(newConnection, actualProfile.getHandshakeTimeout().millis(), cn -> true, ActionListener.map(listener, resp -> { final DiscoveryNode remote = resp.discoveryNode; + if (node.equals(remote) == false) { throw new ConnectTransportException(node, "handshake failed. unexpected remote node " + remote); } + + return null; + })); + }; + } + + public ConnectionManager.ConnectionValidator extensionConnectionValidator(DiscoveryNode node) { + return (newConnection, actualProfile, listener) -> { + // We don't validate cluster names to allow for CCS connections. + handshake(newConnection, actualProfile.getHandshakeTimeout().millis(), cn -> true, ActionListener.map(listener, resp -> { + final DiscoveryNode remote = resp.discoveryNode; + logger.info("Connection validation was skipped"); return null; })); }; @@ -731,6 +765,7 @@ public final void sendRequest( final TransportResponseHandler handler ) { try { + logger.info("Action: " + action); final TransportResponseHandler delegate; if (request.getParentTask().isSet()) { // TODO: capture the connection instead so that we can cancel child tasks on the remote connections. 
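The security.policy hunk below grants two reflection permissions to the server codebase. As a rough illustration of what those grants permit when a security manager is installed (an assumed example, not code from this patch): enumerating declared members is checked against RuntimePermission("accessDeclaredMembers"), and setAccessible(true) is checked against ReflectPermission("suppressAccessChecks").

import java.lang.reflect.Field;

final class ReflectionGrantSketch {
    // Hypothetical helper: reads a private field reflectively.
    static Object readPrivateField(Object target, String fieldName) throws ReflectiveOperationException {
        // Checked against RuntimePermission("accessDeclaredMembers") under a security manager
        Field field = target.getClass().getDeclaredField(fieldName);
        // Checked against ReflectPermission("suppressAccessChecks")
        field.setAccessible(true);
        return field.get(target);
    }
}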
diff --git a/server/src/main/resources/org/opensearch/bootstrap/security.policy b/server/src/main/resources/org/opensearch/bootstrap/security.policy index cc040177f96a5..36e5086931032 100644 --- a/server/src/main/resources/org/opensearch/bootstrap/security.policy +++ b/server/src/main/resources/org/opensearch/bootstrap/security.policy @@ -115,6 +115,9 @@ grant { permission jdk.net.NetworkPermission "getOption.TCP_KEEPCOUNT"; permission jdk.net.NetworkPermission "setOption.TCP_KEEPCOUNT"; + permission java.lang.RuntimePermission "accessDeclaredMembers"; + permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; + // Allow read access to all system properties permission java.util.PropertyPermission "*", "read"; diff --git a/server/src/test/java/org/opensearch/ExceptionSerializationTests.java b/server/src/test/java/org/opensearch/ExceptionSerializationTests.java index 559963b0e0b68..a601d20af5a3f 100644 --- a/server/src/test/java/org/opensearch/ExceptionSerializationTests.java +++ b/server/src/test/java/org/opensearch/ExceptionSerializationTests.java @@ -56,6 +56,7 @@ import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.ShardRoutingState; import org.opensearch.cluster.routing.TestShardRouting; +import org.opensearch.cluster.routing.UnsupportedWeightedRoutingStateException; import org.opensearch.cluster.service.ClusterManagerThrottlingException; import org.opensearch.common.ParsingException; import org.opensearch.common.Strings; @@ -864,6 +865,7 @@ public void testIds() { ids.put(164, NodeDecommissionedException.class); ids.put(165, ClusterManagerThrottlingException.class); ids.put(166, SnapshotInUseDeletionException.class); + ids.put(167, UnsupportedWeightedRoutingStateException.class); Map, Integer> reverse = new HashMap<>(); for (Map.Entry> entry : ids.entrySet()) { diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/configuration/VotingConfigExclusionsHelperTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/configuration/VotingConfigExclusionsHelperTests.java new file mode 100644 index 0000000000000..f33781064345d --- /dev/null +++ b/server/src/test/java/org/opensearch/action/admin/cluster/configuration/VotingConfigExclusionsHelperTests.java @@ -0,0 +1,123 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.action.admin.cluster.configuration; + +import org.junit.BeforeClass; +import org.opensearch.Version; +import org.opensearch.cluster.ClusterName; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.coordination.CoordinationMetadata; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodeRole; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.common.Strings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.Set; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.emptySet; +import static java.util.Collections.singleton; +import static org.opensearch.action.admin.cluster.configuration.VotingConfigExclusionsHelper.addExclusionAndGetState; +import static org.opensearch.action.admin.cluster.configuration.VotingConfigExclusionsHelper.clearExclusionsAndGetState; +import static org.opensearch.action.admin.cluster.configuration.VotingConfigExclusionsHelper.resolveVotingConfigExclusionsAndCheckMaximum; + +public class VotingConfigExclusionsHelperTests extends OpenSearchTestCase { + + private static DiscoveryNode localNode, otherNode1, otherNode2, otherDataNode; + private static CoordinationMetadata.VotingConfigExclusion localNodeExclusion, otherNode1Exclusion, otherNode2Exclusion; + private static ClusterState initialClusterState; + + public void testAddExclusionAndGetState() { + ClusterState updatedState = addExclusionAndGetState(initialClusterState, Set.of(localNodeExclusion), 2); + assertTrue(updatedState.coordinationMetadata().getVotingConfigExclusions().contains(localNodeExclusion)); + assertEquals(1, updatedState.coordinationMetadata().getVotingConfigExclusions().size()); + } + + public void testResolveVotingConfigExclusions() { + AddVotingConfigExclusionsRequest request = new AddVotingConfigExclusionsRequest( + Strings.EMPTY_ARRAY, + new String[] { "other1" }, + Strings.EMPTY_ARRAY, + TimeValue.timeValueSeconds(30) + ); + Set votingConfigExclusions = resolveVotingConfigExclusionsAndCheckMaximum( + request, + initialClusterState, + 10 + ); + assertEquals(1, votingConfigExclusions.size()); + assertTrue(votingConfigExclusions.contains(otherNode1Exclusion)); + } + + public void testResolveVotingConfigExclusionFailsWhenLimitExceeded() { + AddVotingConfigExclusionsRequest request = new AddVotingConfigExclusionsRequest( + Strings.EMPTY_ARRAY, + new String[] { "other1", "other2" }, + Strings.EMPTY_ARRAY, + TimeValue.timeValueSeconds(30) + ); + expectThrows(IllegalArgumentException.class, () -> resolveVotingConfigExclusionsAndCheckMaximum(request, initialClusterState, 1)); + } + + public void testClearExclusionAndGetState() { + ClusterState updatedState = addExclusionAndGetState(initialClusterState, Set.of(localNodeExclusion), 2); + assertTrue(updatedState.coordinationMetadata().getVotingConfigExclusions().contains(localNodeExclusion)); + updatedState = clearExclusionsAndGetState(updatedState); + assertTrue(updatedState.coordinationMetadata().getVotingConfigExclusions().isEmpty()); + } + + @BeforeClass + public static void createBaseClusterState() { + localNode = makeDiscoveryNode("local"); + localNodeExclusion = new CoordinationMetadata.VotingConfigExclusion(localNode); + otherNode1 = makeDiscoveryNode("other1"); + otherNode1Exclusion = new CoordinationMetadata.VotingConfigExclusion(otherNode1); + otherNode2 = 
makeDiscoveryNode("other2"); + otherNode2Exclusion = new CoordinationMetadata.VotingConfigExclusion(otherNode2); + otherDataNode = new DiscoveryNode("data", "data", buildNewFakeTransportAddress(), emptyMap(), emptySet(), Version.CURRENT); + final CoordinationMetadata.VotingConfiguration allNodesConfig = CoordinationMetadata.VotingConfiguration.of( + localNode, + otherNode1, + otherNode2 + ); + initialClusterState = ClusterState.builder(new ClusterName("cluster")) + .nodes( + new DiscoveryNodes.Builder().add(localNode) + .add(otherNode1) + .add(otherNode2) + .add(otherDataNode) + .localNodeId(localNode.getId()) + .clusterManagerNodeId(localNode.getId()) + ) + .metadata( + Metadata.builder() + .coordinationMetadata( + CoordinationMetadata.builder() + .lastAcceptedConfiguration(allNodesConfig) + .lastCommittedConfiguration(allNodesConfig) + .build() + ) + ) + .build(); + } + + private static DiscoveryNode makeDiscoveryNode(String name) { + return new DiscoveryNode( + name, + name, + buildNewFakeTransportAddress(), + emptyMap(), + singleton(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE), + Version.CURRENT + ); + } +} diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/shards/routing/weighted/put/ClusterPutWeightedRoutingRequestTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/shards/routing/weighted/put/ClusterPutWeightedRoutingRequestTests.java index 186e7e8638f17..cdec66d6683eb 100644 --- a/server/src/test/java/org/opensearch/action/admin/cluster/shards/routing/weighted/put/ClusterPutWeightedRoutingRequestTests.java +++ b/server/src/test/java/org/opensearch/action/admin/cluster/shards/routing/weighted/put/ClusterPutWeightedRoutingRequestTests.java @@ -35,15 +35,6 @@ public void testValidate_ValuesAreProper() { assertNull(actionRequestValidationException); } - public void testValidate_TwoZonesWithZeroWeight() { - String reqString = "{\"us-east-1c\" : \"0\", \"us-east-1b\":\"0\",\"us-east-1a\":\"1\"}"; - ClusterPutWeightedRoutingRequest request = new ClusterPutWeightedRoutingRequest("zone"); - request.setWeightedRouting(new BytesArray(reqString), XContentType.JSON); - ActionRequestValidationException actionRequestValidationException = request.validate(); - assertNotNull(actionRequestValidationException); - assertTrue(actionRequestValidationException.getMessage().contains("More than one [2] value has weight set as " + "0")); - } - public void testValidate_MissingWeights() { String reqString = "{}"; ClusterPutWeightedRoutingRequest request = new ClusterPutWeightedRoutingRequest("zone"); diff --git a/server/src/test/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java b/server/src/test/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java index 72c7b5168fe15..a7ffde04314c3 100644 --- a/server/src/test/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java +++ b/server/src/test/java/org/opensearch/action/admin/indices/close/TransportVerifyShardBeforeCloseActionTests.java @@ -32,10 +32,16 @@ package org.opensearch.action.admin.indices.close; import org.apache.lucene.util.SetOnce; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.mockito.ArgumentCaptor; import org.opensearch.action.ActionListener; import org.opensearch.action.admin.indices.flush.FlushRequest; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.PlainActionFuture; +import 
org.opensearch.action.support.replication.FanoutReplicationProxy; import org.opensearch.action.support.replication.PendingReplicationActions; import org.opensearch.action.support.replication.ReplicationOperation; import org.opensearch.action.support.replication.ReplicationResponse; @@ -65,11 +71,6 @@ import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.TransportResponse; import org.opensearch.transport.TransportService; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.BeforeClass; -import org.mockito.ArgumentCaptor; import java.util.Collections; import java.util.List; @@ -77,21 +78,21 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; -import static org.mockito.Mockito.doNothing; -import static org.opensearch.action.support.replication.ClusterStateCreationUtils.state; -import static org.opensearch.test.ClusterServiceUtils.createClusterService; -import static org.opensearch.test.ClusterServiceUtils.setState; import static org.hamcrest.Matchers.arrayWithSize; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; import static org.mockito.Mockito.any; +import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; +import static org.opensearch.action.support.replication.ClusterStateCreationUtils.state; +import static org.opensearch.test.ClusterServiceUtils.createClusterService; +import static org.opensearch.test.ClusterServiceUtils.setState; public class TransportVerifyShardBeforeCloseActionTests extends OpenSearchTestCase { @@ -290,7 +291,8 @@ public void testUnavailableShardsMarkedAsStale() throws Exception { "test", primaryTerm, TimeValue.timeValueMillis(20), - TimeValue.timeValueSeconds(60) + TimeValue.timeValueSeconds(60), + new FanoutReplicationProxy<>() ); operation.execute(); diff --git a/server/src/test/java/org/opensearch/action/resync/TransportResyncReplicationActionTests.java b/server/src/test/java/org/opensearch/action/resync/TransportResyncReplicationActionTests.java index 2ebca16519258..acf46e2a63333 100644 --- a/server/src/test/java/org/opensearch/action/resync/TransportResyncReplicationActionTests.java +++ b/server/src/test/java/org/opensearch/action/resync/TransportResyncReplicationActionTests.java @@ -31,10 +31,13 @@ package org.opensearch.action.resync; +import org.junit.AfterClass; +import org.junit.BeforeClass; import org.opensearch.Version; import org.opensearch.action.ActionListener; import org.opensearch.action.support.ActionFilters; import org.opensearch.action.support.PlainActionFuture; +import org.opensearch.action.support.replication.PendingReplicationActions; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.action.shard.ShardStateAction; import org.opensearch.cluster.block.ClusterBlocks; @@ -66,30 +69,29 @@ import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.nio.MockNioTransport; -import org.junit.AfterClass; -import org.junit.BeforeClass; import java.nio.charset.Charset; import java.util.Collections; import java.util.HashSet; +import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import 
static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; -import static org.opensearch.action.support.replication.ClusterStateCreationUtils.state; -import static org.opensearch.test.ClusterServiceUtils.createClusterService; -import static org.opensearch.test.ClusterServiceUtils.setState; -import static org.opensearch.transport.TransportService.NOOP_TRANSPORT_INTERCEPTOR; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.nullValue; import static org.mockito.Mockito.any; import static org.mockito.Mockito.anyString; -import static org.mockito.Mockito.eq; import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; +import static org.opensearch.action.support.replication.ClusterStateCreationUtils.state; +import static org.opensearch.test.ClusterServiceUtils.createClusterService; +import static org.opensearch.test.ClusterServiceUtils.setState; +import static org.opensearch.transport.TransportService.NOOP_TRANSPORT_INTERCEPTOR; public class TransportResyncReplicationActionTests extends OpenSearchTestCase { @@ -156,23 +158,26 @@ public void testResyncDoesNotBlockOnPrimaryAction() throws Exception { final AtomicInteger acquiredPermits = new AtomicInteger(); final IndexShard indexShard = mock(IndexShard.class); + final PendingReplicationActions replicationActions = new PendingReplicationActions(shardId, threadPool); when(indexShard.indexSettings()).thenReturn(new IndexSettings(indexMetadata, Settings.EMPTY)); when(indexShard.shardId()).thenReturn(shardId); when(indexShard.routingEntry()).thenReturn(primaryShardRouting); when(indexShard.getPendingPrimaryTerm()).thenReturn(primaryTerm); when(indexShard.getOperationPrimaryTerm()).thenReturn(primaryTerm); when(indexShard.getActiveOperationsCount()).then(i -> acquiredPermits.get()); + when(indexShard.getPendingReplicationActions()).thenReturn(replicationActions); doAnswer(invocation -> { ActionListener callback = (ActionListener) invocation.getArguments()[0]; acquiredPermits.incrementAndGet(); callback.onResponse(acquiredPermits::decrementAndGet); return null; }).when(indexShard).acquirePrimaryOperationPermit(any(ActionListener.class), anyString(), any(), eq(true)); + Set trackedAllocationIds = shardRoutingTable.getAllAllocationIds(); when(indexShard.getReplicationGroup()).thenReturn( new ReplicationGroup( shardRoutingTable, clusterService.state().metadata().index(index).inSyncAllocationIds(shardId.id()), - shardRoutingTable.getAllAllocationIds(), + trackedAllocationIds, 0 ) ); diff --git a/server/src/test/java/org/opensearch/action/support/replication/ReplicationOperationTests.java b/server/src/test/java/org/opensearch/action/support/replication/ReplicationOperationTests.java index 8a4cdfc953bf8..3a689e356bbdf 100644 --- a/server/src/test/java/org/opensearch/action/support/replication/ReplicationOperationTests.java +++ b/server/src/test/java/org/opensearch/action/support/replication/ReplicationOperationTests.java @@ -45,6 +45,7 @@ import org.opensearch.cluster.metadata.IndexMetadata; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.cluster.routing.AllocationId; import org.opensearch.cluster.routing.IndexShardRoutingTable; import org.opensearch.cluster.routing.ShardRouting; import org.opensearch.cluster.routing.ShardRoutingState; @@ -80,14 +81,18 @@ import 
java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Supplier; +import java.util.stream.Collectors; +import java.util.stream.IntStream; -import static org.opensearch.action.support.replication.ClusterStateCreationUtils.state; -import static org.opensearch.action.support.replication.ClusterStateCreationUtils.stateWithActivePrimary; import static org.hamcrest.Matchers.arrayWithSize; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; +import static org.opensearch.action.support.replication.ClusterStateCreationUtils.state; +import static org.opensearch.action.support.replication.ClusterStateCreationUtils.stateWithActivePrimary; +import static org.opensearch.action.support.replication.ReplicationOperation.RetryOnPrimaryException; +import static org.opensearch.cluster.routing.TestShardRouting.newShardRouting; public class ReplicationOperationTests extends OpenSearchTestCase { @@ -157,7 +162,14 @@ public void testReplication() throws Exception { final TestReplicaProxy replicasProxy = new TestReplicaProxy(simulatedFailures); final TestPrimary primary = new TestPrimary(primaryShard, () -> replicationGroup, threadPool); - final TestReplicationOperation op = new TestReplicationOperation(request, primary, listener, replicasProxy, primaryTerm); + final TestReplicationOperation op = new TestReplicationOperation( + request, + primary, + listener, + replicasProxy, + primaryTerm, + new FanoutReplicationProxy<>() + ); op.execute(); assertThat("request was not processed on primary", request.processedOnPrimary.get(), equalTo(true)); assertThat(request.processedOnReplicas, equalTo(expectedReplicas)); @@ -179,6 +191,199 @@ public void testReplication() throws Exception { assertThat(primary.knownGlobalCheckpoints, equalTo(replicasProxy.generatedGlobalCheckpoints)); } + public void testReplicationWithRemoteTranslogEnabled() throws Exception { + Set initializingIds = new HashSet<>(); + IntStream.range(0, randomIntBetween(2, 5)).forEach(x -> initializingIds.add(AllocationId.newInitializing())); + Set activeIds = new HashSet<>(); + IntStream.range(0, randomIntBetween(2, 5)).forEach(x -> activeIds.add(AllocationId.newInitializing())); + + AllocationId primaryId = activeIds.iterator().next(); + + ShardId shardId = new ShardId("test", "_na_", 0); + IndexShardRoutingTable.Builder builder = new IndexShardRoutingTable.Builder(shardId); + final ShardRouting primaryShard = newShardRouting( + shardId, + nodeIdFromAllocationId(primaryId), + null, + true, + ShardRoutingState.STARTED, + primaryId + ); + initializingIds.forEach( + aId -> builder.addShard(newShardRouting(shardId, nodeIdFromAllocationId(aId), null, false, ShardRoutingState.INITIALIZING, aId)) + ); + activeIds.stream() + .filter(aId -> !aId.equals(primaryId)) + .forEach( + aId -> builder.addShard(newShardRouting(shardId, nodeIdFromAllocationId(aId), null, false, ShardRoutingState.STARTED, aId)) + ); + builder.addShard(primaryShard); + IndexShardRoutingTable routingTable = builder.build(); + + Set inSyncAllocationIds = activeIds.stream().map(AllocationId::getId).collect(Collectors.toSet()); + ReplicationGroup replicationGroup = new ReplicationGroup(routingTable, inSyncAllocationIds, inSyncAllocationIds, 0); + List replicationTargets = replicationGroup.getReplicationTargets(); + assertEquals(inSyncAllocationIds.size(), replicationTargets.size()); + assertTrue( + 
replicationTargets.stream().map(sh -> sh.allocationId().getId()).collect(Collectors.toSet()).containsAll(inSyncAllocationIds) + ); + + Request request = new Request(shardId); + PlainActionFuture listener = new PlainActionFuture<>(); + Map simulatedFailures = new HashMap<>(); + TestReplicaProxy replicasProxy = new TestReplicaProxy(simulatedFailures); + TestPrimary primary = new TestPrimary(primaryShard, () -> replicationGroup, threadPool); + final TestReplicationOperation op = new TestReplicationOperation( + request, + primary, + listener, + replicasProxy, + 0, + new ReplicationModeAwareProxy<>(ReplicationMode.NO_REPLICATION) + ); + op.execute(); + assertTrue("request was not processed on primary", request.processedOnPrimary.get()); + assertEquals(0, request.processedOnReplicas.size()); + assertEquals(0, replicasProxy.failedReplicas.size()); + assertEquals(0, replicasProxy.markedAsStaleCopies.size()); + assertTrue("post replication operations not run on primary", request.runPostReplicationActionsOnPrimary.get()); + assertTrue("listener is not marked as done", listener.isDone()); + + ShardInfo shardInfo = listener.actionGet().getShardInfo(); + assertEquals(1 + initializingIds.size(), shardInfo.getTotal()); + } + + public void testPrimaryToPrimaryReplicationWithRemoteTranslogEnabled() throws Exception { + Set initializingIds = new HashSet<>(); + IntStream.range(0, randomIntBetween(2, 5)).forEach(x -> initializingIds.add(AllocationId.newInitializing())); + Set activeIds = new HashSet<>(); + IntStream.range(0, randomIntBetween(2, 5)).forEach(x -> activeIds.add(AllocationId.newInitializing())); + + AllocationId primaryId = AllocationId.newRelocation(AllocationId.newInitializing()); + AllocationId relocationTargetId = AllocationId.newTargetRelocation(primaryId); + + ShardId shardId = new ShardId("test", "_na_", 0); + IndexShardRoutingTable.Builder builder = new IndexShardRoutingTable.Builder(shardId); + final ShardRouting primaryShard = newShardRouting( + shardId, + nodeIdFromAllocationId(primaryId), + nodeIdFromAllocationId(relocationTargetId), + true, + ShardRoutingState.RELOCATING, + primaryId + ); + initializingIds.forEach( + aId -> builder.addShard(newShardRouting(shardId, nodeIdFromAllocationId(aId), null, false, ShardRoutingState.INITIALIZING, aId)) + ); + activeIds.forEach( + aId -> builder.addShard(newShardRouting(shardId, nodeIdFromAllocationId(aId), null, false, ShardRoutingState.STARTED, aId)) + ); + builder.addShard(primaryShard); + IndexShardRoutingTable routingTable = builder.build(); + + // Add the primary and its relocation target to activeIds + activeIds.add(primaryId); + activeIds.add(relocationTargetId); + + Set inSyncAllocationIds = activeIds.stream().map(AllocationId::getId).collect(Collectors.toSet()); + ReplicationGroup replicationGroup = new ReplicationGroup(routingTable, inSyncAllocationIds, inSyncAllocationIds, 0); + List replicationTargets = replicationGroup.getReplicationTargets(); + assertEquals(inSyncAllocationIds.size(), replicationTargets.size()); + assertTrue( + replicationTargets.stream().map(sh -> sh.allocationId().getId()).collect(Collectors.toSet()).containsAll(inSyncAllocationIds) + ); + + Request request = new Request(shardId); + PlainActionFuture listener = new PlainActionFuture<>(); + Map simulatedFailures = new HashMap<>(); + TestReplicaProxy replicasProxy = new TestReplicaProxy(simulatedFailures); + TestPrimary primary = new TestPrimary(primaryShard, () -> replicationGroup, threadPool); + final TestReplicationOperation op = new TestReplicationOperation( + 
request, + primary, + listener, + replicasProxy, + 0, + new ReplicationModeAwareProxy<>(ReplicationMode.NO_REPLICATION) + ); + op.execute(); + assertTrue("request was not processed on primary", request.processedOnPrimary.get()); + assertEquals(1, request.processedOnReplicas.size()); + assertEquals(0, replicasProxy.failedReplicas.size()); + assertEquals(0, replicasProxy.markedAsStaleCopies.size()); + assertTrue("post replication operations not run on primary", request.runPostReplicationActionsOnPrimary.get()); + assertTrue("listener is not marked as done", listener.isDone()); + + ShardInfo shardInfo = listener.actionGet().getShardInfo(); + assertEquals(2 + initializingIds.size(), shardInfo.getTotal()); + } + + public void testForceReplicationWithRemoteTranslogEnabled() throws Exception { + Set initializingIds = new HashSet<>(); + IntStream.range(0, randomIntBetween(2, 5)).forEach(x -> initializingIds.add(AllocationId.newInitializing())); + Set activeIds = new HashSet<>(); + IntStream.range(0, randomIntBetween(2, 5)).forEach(x -> activeIds.add(AllocationId.newInitializing())); + + AllocationId primaryId = activeIds.iterator().next(); + + ShardId shardId = new ShardId("test", "_na_", 0); + IndexShardRoutingTable.Builder builder = new IndexShardRoutingTable.Builder(shardId); + final ShardRouting primaryShard = newShardRouting( + shardId, + nodeIdFromAllocationId(primaryId), + null, + true, + ShardRoutingState.STARTED, + primaryId + ); + initializingIds.forEach( + aId -> builder.addShard(newShardRouting(shardId, nodeIdFromAllocationId(aId), null, false, ShardRoutingState.INITIALIZING, aId)) + ); + activeIds.stream() + .filter(aId -> !aId.equals(primaryId)) + .forEach( + aId -> builder.addShard(newShardRouting(shardId, nodeIdFromAllocationId(aId), null, false, ShardRoutingState.STARTED, aId)) + ); + builder.addShard(primaryShard); + IndexShardRoutingTable routingTable = builder.build(); + + Set inSyncAllocationIds = activeIds.stream().map(AllocationId::getId).collect(Collectors.toSet()); + ReplicationGroup replicationGroup = new ReplicationGroup(routingTable, inSyncAllocationIds, inSyncAllocationIds, 0); + List replicationTargets = replicationGroup.getReplicationTargets(); + assertEquals(inSyncAllocationIds.size(), replicationTargets.size()); + assertTrue( + replicationTargets.stream().map(sh -> sh.allocationId().getId()).collect(Collectors.toSet()).containsAll(inSyncAllocationIds) + ); + + Request request = new Request(shardId); + PlainActionFuture listener = new PlainActionFuture<>(); + Map simulatedFailures = new HashMap<>(); + TestReplicaProxy replicasProxy = new TestReplicaProxy(simulatedFailures); + TestPrimary primary = new TestPrimary(primaryShard, () -> replicationGroup, threadPool); + final TestReplicationOperation op = new TestReplicationOperation( + request, + primary, + listener, + replicasProxy, + 0, + new FanoutReplicationProxy<>() + ); + op.execute(); + assertTrue("request was not processed on primary", request.processedOnPrimary.get()); + assertEquals(activeIds.size() - 1, request.processedOnReplicas.size()); + assertEquals(0, replicasProxy.failedReplicas.size()); + assertEquals(0, replicasProxy.markedAsStaleCopies.size()); + assertTrue("post replication operations not run on primary", request.runPostReplicationActionsOnPrimary.get()); + assertTrue("listener is not marked as done", listener.isDone()); + + ShardInfo shardInfo = listener.actionGet().getShardInfo(); + assertEquals(activeIds.size() + initializingIds.size(), shardInfo.getTotal()); + } + + static String 
nodeIdFromAllocationId(final AllocationId allocationId) { + return "n-" + allocationId.getId().substring(0, 8); + } + public void testRetryTransientReplicationFailure() throws Exception { final String index = "test"; final ShardId shardId = new ShardId(index, "_na_", 0); @@ -242,7 +447,8 @@ public void testRetryTransientReplicationFailure() throws Exception { replicasProxy, primaryTerm, TimeValue.timeValueMillis(20), - TimeValue.timeValueSeconds(60) + TimeValue.timeValueSeconds(60), + new FanoutReplicationProxy<>() ); op.execute(); assertThat("request was not processed on primary", request.processedOnPrimary.get(), equalTo(true)); @@ -379,7 +585,14 @@ public void failShard(String message, Exception exception) { assertTrue(primaryFailed.compareAndSet(false, true)); } }; - final TestReplicationOperation op = new TestReplicationOperation(request, primary, listener, replicasProxy, primaryTerm); + final TestReplicationOperation op = new TestReplicationOperation( + request, + primary, + listener, + replicasProxy, + primaryTerm, + new FanoutReplicationProxy<>() + ); op.execute(); assertThat("request was not processed on primary", request.processedOnPrimary.get(), equalTo(true)); @@ -389,7 +602,7 @@ public void failShard(String message, Exception exception) { } else { assertFalse(primaryFailed.get()); } - assertListenerThrows("should throw exception to trigger retry", listener, ReplicationOperation.RetryOnPrimaryException.class); + assertListenerThrows("should throw exception to trigger retry", listener, RetryOnPrimaryException.class); } public void testAddedReplicaAfterPrimaryOperation() throws Exception { @@ -438,7 +651,14 @@ public void perform(Request request, ActionListener listener) { Request request = new Request(shardId); PlainActionFuture listener = new PlainActionFuture<>(); - final TestReplicationOperation op = new TestReplicationOperation(request, primary, listener, new TestReplicaProxy(), primaryTerm); + final TestReplicationOperation op = new TestReplicationOperation( + request, + primary, + listener, + new TestReplicaProxy(), + primaryTerm, + new FanoutReplicationProxy<>() + ); op.execute(); assertThat("request was not processed on primary", request.processedOnPrimary.get(), equalTo(true)); @@ -493,7 +713,8 @@ public void testWaitForActiveShards() throws Exception { logger, threadPool, "test", - primaryTerm + primaryTerm, + new FanoutReplicationProxy<>() ); if (passesActiveShardCheck) { @@ -554,7 +775,14 @@ public void updateLocalCheckpointForShard(String allocationId, long checkpoint) final PlainActionFuture listener = new PlainActionFuture<>(); final ReplicationOperation.Replicas replicas = new TestReplicaProxy(Collections.emptyMap()); - TestReplicationOperation operation = new TestReplicationOperation(request, primary, listener, replicas, primaryTerm); + TestReplicationOperation operation = new TestReplicationOperation( + request, + primary, + listener, + replicas, + primaryTerm, + new FanoutReplicationProxy<>() + ); operation.execute(); assertThat(primaryFailed.get(), equalTo(fatal)); @@ -841,7 +1069,8 @@ class TestReplicationOperation extends ReplicationOperation replicas, long primaryTerm, TimeValue initialRetryBackoffBound, - TimeValue retryTimeout + TimeValue retryTimeout, + ReplicationProxy replicationProxy ) { this( request, @@ -853,7 +1082,8 @@ class TestReplicationOperation extends ReplicationOperation primary, ActionListener listener, Replicas replicas, - long primaryTerm + long primaryTerm, + ReplicationProxy replicationProxy ) { - this(request, primary, listener, 
replicas, ReplicationOperationTests.this.logger, threadPool, "test", primaryTerm); + this( + request, + primary, + listener, + replicas, + ReplicationOperationTests.this.logger, + threadPool, + "test", + primaryTerm, + replicationProxy + ); } TestReplicationOperation( @@ -875,7 +1116,8 @@ class TestReplicationOperation extends ReplicationOperation replicationProxy ) { this( request, @@ -887,7 +1129,8 @@ class TestReplicationOperation extends ReplicationOperation replicationProxy ) { - super(request, primary, listener, replicas, logger, threadPool, opType, primaryTerm, initialRetryBackoffBound, retryTimeout); + super( + request, + primary, + listener, + replicas, + logger, + threadPool, + opType, + primaryTerm, + initialRetryBackoffBound, + retryTimeout, + replicationProxy + ); } } diff --git a/server/src/test/java/org/opensearch/action/support/replication/TransportReplicationActionTests.java b/server/src/test/java/org/opensearch/action/support/replication/TransportReplicationActionTests.java index 696958c340375..bde483e171d1e 100644 --- a/server/src/test/java/org/opensearch/action/support/replication/TransportReplicationActionTests.java +++ b/server/src/test/java/org/opensearch/action/support/replication/TransportReplicationActionTests.java @@ -33,6 +33,12 @@ package org.opensearch.action.support.replication; import org.apache.lucene.store.AlreadyClosedException; +import org.hamcrest.Matcher; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; import org.opensearch.OpenSearchException; import org.opensearch.Version; import org.opensearch.action.ActionListener; @@ -99,12 +105,6 @@ import org.opensearch.transport.TransportResponse; import org.opensearch.transport.TransportService; import org.opensearch.transport.nio.MockNioTransport; -import org.hamcrest.Matcher; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.Before; -import org.junit.BeforeClass; import java.io.IOException; import java.util.Collections; @@ -121,11 +121,6 @@ import java.util.stream.Collectors; import static java.util.Collections.singleton; -import static org.opensearch.action.support.replication.ClusterStateCreationUtils.state; -import static org.opensearch.action.support.replication.ClusterStateCreationUtils.stateWithActivePrimary; -import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_WAIT_FOR_ACTIVE_SHARDS; -import static org.opensearch.test.ClusterServiceUtils.createClusterService; -import static org.opensearch.test.ClusterServiceUtils.setState; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.Matchers.arrayWithSize; import static org.hamcrest.Matchers.equalTo; @@ -138,12 +133,17 @@ import static org.mockito.Mockito.anyInt; import static org.mockito.Mockito.anyLong; import static org.mockito.Mockito.anyString; -import static org.mockito.Mockito.eq; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; +import static org.opensearch.action.support.replication.ClusterStateCreationUtils.state; +import static org.opensearch.action.support.replication.ClusterStateCreationUtils.stateWithActivePrimary; +import static org.opensearch.cluster.metadata.IndexMetadata.SETTING_WAIT_FOR_ACTIVE_SHARDS; +import static 
org.opensearch.test.ClusterServiceUtils.createClusterService; +import static org.opensearch.test.ClusterServiceUtils.setState; public class TransportReplicationActionTests extends OpenSearchTestCase { @@ -950,7 +950,8 @@ public void testSeqNoIsSetOnPrimary() { Set inSyncIds = randomBoolean() ? singleton(routingEntry.allocationId().getId()) : clusterService.state().metadata().index(index).inSyncAllocationIds(0); - ReplicationGroup replicationGroup = new ReplicationGroup(shardRoutingTable, inSyncIds, shardRoutingTable.getAllAllocationIds(), 0); + Set trackedAllocationIds = shardRoutingTable.getAllAllocationIds(); + ReplicationGroup replicationGroup = new ReplicationGroup(shardRoutingTable, inSyncIds, trackedAllocationIds, 0); when(shard.getReplicationGroup()).thenReturn(replicationGroup); PendingReplicationActions replicationActions = new PendingReplicationActions(shardId, threadPool); replicationActions.accept(replicationGroup); diff --git a/server/src/test/java/org/opensearch/cluster/decommission/DecommissionControllerTests.java b/server/src/test/java/org/opensearch/cluster/decommission/DecommissionControllerTests.java index 3d78ea7c6b423..08cec8310db7d 100644 --- a/server/src/test/java/org/opensearch/cluster/decommission/DecommissionControllerTests.java +++ b/server/src/test/java/org/opensearch/cluster/decommission/DecommissionControllerTests.java @@ -53,7 +53,6 @@ import static java.util.Collections.emptySet; import static java.util.Collections.singletonMap; import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.sameInstance; @@ -130,47 +129,6 @@ public void shutdownThreadPoolAndClusterService() { threadPool.shutdown(); } - public void testAddNodesToVotingConfigExclusion() throws InterruptedException { - final CountDownLatch countDownLatch = new CountDownLatch(2); - - ClusterStateObserver clusterStateObserver = new ClusterStateObserver(clusterService, null, logger, threadPool.getThreadContext()); - clusterStateObserver.waitForNextChange(new AdjustConfigurationForExclusions(countDownLatch)); - Set nodesToRemoveFromVotingConfig = Collections.singleton(randomFrom("node1", "node6", "node11")); - decommissionController.excludeDecommissionedNodesFromVotingConfig(nodesToRemoveFromVotingConfig, new ActionListener() { - @Override - public void onResponse(Void unused) { - countDownLatch.countDown(); - } - - @Override - public void onFailure(Exception e) { - fail("unexpected failure occurred while removing node from voting config " + e); - } - }); - assertTrue(countDownLatch.await(30, TimeUnit.SECONDS)); - clusterService.getClusterApplierService().state().getVotingConfigExclusions().forEach(vce -> { - assertTrue(nodesToRemoveFromVotingConfig.contains(vce.getNodeId())); - assertEquals(nodesToRemoveFromVotingConfig.size(), 1); - }); - } - - public void testClearVotingConfigExclusions() throws InterruptedException { - final CountDownLatch countDownLatch = new CountDownLatch(1); - decommissionController.clearVotingConfigExclusion(new ActionListener() { - @Override - public void onResponse(Void unused) { - countDownLatch.countDown(); - } - - @Override - public void onFailure(Exception e) { - fail("unexpected failure occurred while clearing voting config exclusion" + e); - } - }, false); - assertTrue(countDownLatch.await(30, TimeUnit.SECONDS)); - 
assertThat(clusterService.getClusterApplierService().state().getVotingConfigExclusions(), empty()); - } - public void testNodesRemovedForDecommissionRequestSuccessfulResponse() throws InterruptedException { final CountDownLatch countDownLatch = new CountDownLatch(1); Set nodesToBeRemoved = new HashSet<>(); diff --git a/server/src/test/java/org/opensearch/cluster/decommission/DecommissionHelperTests.java b/server/src/test/java/org/opensearch/cluster/decommission/DecommissionHelperTests.java new file mode 100644 index 0000000000000..ab2d8218ec97d --- /dev/null +++ b/server/src/test/java/org/opensearch/cluster/decommission/DecommissionHelperTests.java @@ -0,0 +1,142 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.cluster.decommission; + +import org.junit.BeforeClass; +import org.opensearch.Version; +import org.opensearch.cluster.ClusterName; +import org.opensearch.cluster.ClusterState; +import org.opensearch.cluster.coordination.CoordinationMetadata; +import org.opensearch.cluster.metadata.Metadata; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.node.DiscoveryNodeRole; +import org.opensearch.cluster.node.DiscoveryNodes; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.test.OpenSearchTestCase; + +import java.util.Set; + +import static java.util.Collections.emptySet; +import static java.util.Collections.singleton; +import static java.util.Collections.singletonMap; +import static org.opensearch.cluster.decommission.DecommissionHelper.addVotingConfigExclusionsForNodesToBeDecommissioned; +import static org.opensearch.cluster.decommission.DecommissionHelper.deleteDecommissionAttributeInClusterState; +import static org.opensearch.cluster.decommission.DecommissionHelper.filterNodesWithDecommissionAttribute; +import static org.opensearch.cluster.decommission.DecommissionHelper.nodeCommissioned; +import static org.opensearch.cluster.decommission.DecommissionHelper.registerDecommissionAttributeInClusterState; + +public class DecommissionHelperTests extends OpenSearchTestCase { + + private static DiscoveryNode node1, node2, node3, dataNode; + private static ClusterState initialClusterState; + + public void testRegisterAndDeleteDecommissionAttributeInClusterState() { + DecommissionAttribute decommissionAttribute = new DecommissionAttribute("zone", "zone2"); + ClusterState updatedState = registerDecommissionAttributeInClusterState(initialClusterState, decommissionAttribute); + assertEquals(decommissionAttribute, updatedState.metadata().decommissionAttributeMetadata().decommissionAttribute()); + updatedState = deleteDecommissionAttributeInClusterState(updatedState); + assertNull(updatedState.metadata().decommissionAttributeMetadata()); + } + + public void testAddVotingConfigExclusionsForNodesToBeDecommissioned() { + Set nodeIdToBeExcluded = Set.of("node2"); + ClusterState updatedState = addVotingConfigExclusionsForNodesToBeDecommissioned( + initialClusterState, + nodeIdToBeExcluded, + TimeValue.timeValueMinutes(1), + 10 + ); + CoordinationMetadata.VotingConfigExclusion expectedExclusion = new CoordinationMetadata.VotingConfigExclusion(node2); + assertTrue(updatedState.coordinationMetadata().getVotingConfigExclusions().contains(expectedExclusion)); + assertEquals(1, updatedState.coordinationMetadata().getVotingConfigExclusions().size()); +
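+ // Only node2 was asked to be excluded, so exactly one exclusion is expected above. The TimeValue and the trailing 10 passed to + // addVotingConfigExclusionsForNodesToBeDecommissioned are presumably the exclusion timeout and the maximum number of exclusions + // allowed; neither is exercised further by this test.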
} + + public void testFilterNodes() { + DecommissionAttribute decommissionAttribute = new DecommissionAttribute("zone", "zone1"); + Set filteredNodes = filterNodesWithDecommissionAttribute(initialClusterState, decommissionAttribute, true); + assertTrue(filteredNodes.contains(node1)); + assertEquals(1, filteredNodes.size()); + filteredNodes = filterNodesWithDecommissionAttribute(initialClusterState, decommissionAttribute, false); + assertTrue(filteredNodes.contains(node1)); + assertTrue(filteredNodes.contains(dataNode)); + assertEquals(2, filteredNodes.size()); + } + + public void testNodeCommissioned() { + DecommissionAttribute decommissionAttribute = new DecommissionAttribute("zone", "zone1"); + DecommissionStatus decommissionStatus = randomFrom( + DecommissionStatus.IN_PROGRESS, + DecommissionStatus.DRAINING, + DecommissionStatus.SUCCESSFUL + ); + DecommissionAttributeMetadata decommissionAttributeMetadata = new DecommissionAttributeMetadata( + decommissionAttribute, + decommissionStatus + ); + Metadata metadata = Metadata.builder().putCustom(DecommissionAttributeMetadata.TYPE, decommissionAttributeMetadata).build(); + assertTrue(nodeCommissioned(node2, metadata)); + assertFalse(nodeCommissioned(node1, metadata)); + DecommissionStatus commissionStatus = randomFrom(DecommissionStatus.FAILED, DecommissionStatus.INIT); + decommissionAttributeMetadata = new DecommissionAttributeMetadata(decommissionAttribute, commissionStatus); + metadata = Metadata.builder().putCustom(DecommissionAttributeMetadata.TYPE, decommissionAttributeMetadata).build(); + assertTrue(nodeCommissioned(node2, metadata)); + assertTrue(nodeCommissioned(node1, metadata)); + metadata = Metadata.builder().removeCustom(DecommissionAttributeMetadata.TYPE).build(); + assertTrue(nodeCommissioned(node2, metadata)); + assertTrue(nodeCommissioned(node1, metadata)); + } + + @BeforeClass + public static void createBaseClusterState() { + node1 = makeDiscoveryNode("node1", "zone1"); + node2 = makeDiscoveryNode("node2", "zone2"); + node3 = makeDiscoveryNode("node3", "zone3"); + dataNode = new DiscoveryNode( + "data", + "data", + buildNewFakeTransportAddress(), + singletonMap("zone", "zone1"), + emptySet(), + Version.CURRENT + ); + final CoordinationMetadata.VotingConfiguration allNodesConfig = CoordinationMetadata.VotingConfiguration.of(node1, node2, node3); + initialClusterState = ClusterState.builder(new ClusterName("cluster")) + .nodes( + new DiscoveryNodes.Builder().add(node1) + .add(node2) + .add(node3) + .add(dataNode) + .localNodeId(node1.getId()) + .clusterManagerNodeId(node1.getId()) + ) + .metadata( + Metadata.builder() + .coordinationMetadata( + CoordinationMetadata.builder() + .lastAcceptedConfiguration(allNodesConfig) + .lastCommittedConfiguration(allNodesConfig) + .build() + ) + ) + .build(); + } + + private static DiscoveryNode makeDiscoveryNode(String name, String zone) { + return new DiscoveryNode( + name, + name, + buildNewFakeTransportAddress(), + singletonMap("zone", zone), + singleton(DiscoveryNodeRole.CLUSTER_MANAGER_ROLE), + Version.CURRENT + ); + } +} diff --git a/server/src/test/java/org/opensearch/cluster/decommission/DecommissionServiceTests.java b/server/src/test/java/org/opensearch/cluster/decommission/DecommissionServiceTests.java index 7fe58d75932a1..95980991d22b0 100644 --- a/server/src/test/java/org/opensearch/cluster/decommission/DecommissionServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/decommission/DecommissionServiceTests.java @@ -11,14 +11,12 @@ import org.hamcrest.Matchers; 
import org.junit.After; import org.junit.Before; -import org.mockito.ArgumentCaptor; import org.mockito.Mockito; import org.opensearch.Version; import org.opensearch.action.ActionListener; import org.opensearch.action.admin.cluster.decommission.awareness.delete.DeleteDecommissionStateResponse; import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionRequest; import org.opensearch.action.admin.cluster.decommission.awareness.put.DecommissionResponse; -import org.opensearch.action.admin.cluster.configuration.ClearVotingConfigExclusionsRequest; import org.opensearch.cluster.ClusterName; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.coordination.CoordinationMetadata; @@ -39,7 +37,6 @@ import org.opensearch.test.transport.MockTransport; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; -import org.opensearch.transport.TransportResponseHandler; import org.opensearch.transport.TransportService; import java.util.Collections; @@ -48,6 +45,7 @@ import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; import static java.util.Collections.emptySet; import static java.util.Collections.singletonMap; @@ -313,22 +311,35 @@ public void testDrainNodesWithDecommissionedAttributeWithNoDelay() { } - public void testClearClusterDecommissionState() throws InterruptedException { + public void testRecommissionAction() throws InterruptedException { final CountDownLatch countDownLatch = new CountDownLatch(1); DecommissionAttribute decommissionAttribute = new DecommissionAttribute("zone", "zone-2"); DecommissionAttributeMetadata decommissionAttributeMetadata = new DecommissionAttributeMetadata( decommissionAttribute, DecommissionStatus.SUCCESSFUL ); - ClusterState state = ClusterState.builder(new ClusterName("test")) - .metadata(Metadata.builder().putCustom(DecommissionAttributeMetadata.TYPE, decommissionAttributeMetadata).build()) - .build(); + final ClusterState.Builder builder = builder(clusterService.state()); + setState( + clusterService, + builder.metadata( + Metadata.builder(clusterService.state().metadata()) + .decommissionAttributeMetadata(decommissionAttributeMetadata) + .coordinationMetadata( + CoordinationMetadata.builder() + .addVotingConfigExclusion( + new CoordinationMetadata.VotingConfigExclusion(clusterService.state().nodes().get("node6")) + ) + .build() + ) + .build() + ) + ); + AtomicReference clusterStateAtomicReference = new AtomicReference<>(); ActionListener listener = new ActionListener<>() { @Override public void onResponse(DeleteDecommissionStateResponse decommissionResponse) { - DecommissionAttributeMetadata metadata = clusterService.state().metadata().custom(DecommissionAttributeMetadata.TYPE); - assertNull(metadata); + clusterStateAtomicReference.set(clusterService.state()); countDownLatch.countDown(); } @@ -338,59 +349,11 @@ public void onFailure(Exception e) { countDownLatch.countDown(); } }; - - this.decommissionService.deleteDecommissionState(listener); - + this.decommissionService.startRecommissionAction(listener); // Decommission Attribute should be removed. 
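+ // The listener above stashes the post-recommission cluster state in clusterStateAtomicReference, and the latch below keeps the test + // from asserting before the asynchronous recommission action completes. The final assertions then verify both expected effects: the + // decommission attribute metadata is gone and no voting config exclusions remain.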
assertTrue(countDownLatch.await(30, TimeUnit.SECONDS)); - } - - public void testDeleteDecommissionAttributeClearVotingExclusion() { - TransportService mockTransportService = Mockito.mock(TransportService.class); - Mockito.when(mockTransportService.getLocalNode()).thenReturn(Mockito.mock(DiscoveryNode.class)); - DecommissionService decommissionService = new DecommissionService( - Settings.EMPTY, - clusterSettings, - clusterService, - mockTransportService, - threadPool, - allocationService - ); - decommissionService.startRecommissionAction(Mockito.mock(ActionListener.class)); - - ArgumentCaptor clearVotingConfigExclusionsRequestArgumentCaptor = ArgumentCaptor.forClass( - ClearVotingConfigExclusionsRequest.class - ); - Mockito.verify(mockTransportService) - .sendRequest( - Mockito.any(DiscoveryNode.class), - Mockito.anyString(), - clearVotingConfigExclusionsRequestArgumentCaptor.capture(), - Mockito.any(TransportResponseHandler.class) - ); - - ClearVotingConfigExclusionsRequest request = clearVotingConfigExclusionsRequestArgumentCaptor.getValue(); - assertFalse(request.getWaitForRemoval()); - } - - public void testClusterUpdateTaskForDeletingDecommission() throws InterruptedException { - final CountDownLatch countDownLatch = new CountDownLatch(1); - ActionListener listener = new ActionListener<>() { - @Override - public void onResponse(DeleteDecommissionStateResponse response) { - assertTrue(response.isAcknowledged()); - assertNull(clusterService.state().metadata().decommissionAttributeMetadata()); - countDownLatch.countDown(); - } - - @Override - public void onFailure(Exception e) { - fail("On Failure shouldn't have been called"); - countDownLatch.countDown(); - } - }; - decommissionService.deleteDecommissionState(listener); - assertTrue(countDownLatch.await(30, TimeUnit.SECONDS)); + assertNull(clusterStateAtomicReference.get().metadata().decommissionAttributeMetadata()); + assertEquals(0, clusterStateAtomicReference.get().coordinationMetadata().getVotingConfigExclusions().size()); } private void setWeightedRoutingWeights(Map weights) { diff --git a/server/src/test/java/org/opensearch/cluster/routing/WeightedRoutingServiceTests.java b/server/src/test/java/org/opensearch/cluster/routing/WeightedRoutingServiceTests.java index 91b8703cacf5c..89d9555fe225b 100644 --- a/server/src/test/java/org/opensearch/cluster/routing/WeightedRoutingServiceTests.java +++ b/server/src/test/java/org/opensearch/cluster/routing/WeightedRoutingServiceTests.java @@ -295,6 +295,38 @@ public void testVerifyAwarenessAttribute_ValidAttributeName() { } } + public void testAddWeightedRoutingFailsWhenWeightsNotSetForAllDiscoveredZones() throws InterruptedException { + ClusterPutWeightedRoutingRequestBuilder request = new ClusterPutWeightedRoutingRequestBuilder( + client, + ClusterAddWeightedRoutingAction.INSTANCE + ); + Map weights = Map.of("zone_A", 1.0, "zone_C", 1.0); + WeightedRouting weightedRouting = new WeightedRouting("zone", weights); + request.setWeightedRouting(weightedRouting); + final CountDownLatch countDownLatch = new CountDownLatch(1); + final AtomicReference exceptionReference = new AtomicReference<>(); + ActionListener listener = new ActionListener() { + @Override + public void onResponse(ClusterStateUpdateResponse clusterStateUpdateResponse) { + countDownLatch.countDown(); + } + + @Override + public void onFailure(Exception e) { + exceptionReference.set(e); + countDownLatch.countDown(); + } + }; + weightedRoutingService.registerWeightedRoutingMetadata(request.request(), listener); + 
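+ // The weights above intentionally omit zone_B, so registration is expected to fail; the listener captures the exception in + // exceptionReference, and the latch below gates the assertions on the failure type and message.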
assertTrue(countDownLatch.await(30, TimeUnit.SECONDS)); + MatcherAssert.assertThat("Expected onFailure to be called", exceptionReference.get(), notNullValue()); + MatcherAssert.assertThat(exceptionReference.get(), instanceOf(UnsupportedWeightedRoutingStateException.class)); + MatcherAssert.assertThat( + exceptionReference.get().getMessage(), + containsString("weight for [zone_B] is not set and it is part of forced awareness value or a node has this attribute.") + ); + } + public void testAddWeightedRoutingFailsWhenDecommissionOngoing() throws InterruptedException { Map weights = Map.of("zone_A", 1.0, "zone_B", 1.0, "zone_C", 1.0); DecommissionStatus status = randomFrom(DecommissionStatus.INIT, DecommissionStatus.IN_PROGRESS, DecommissionStatus.SUCCESSFUL); @@ -327,8 +359,11 @@ public void onFailure(Exception e) { weightedRoutingService.registerWeightedRoutingMetadata(request.request(), listener); assertTrue(countDownLatch.await(30, TimeUnit.SECONDS)); MatcherAssert.assertThat("Expected onFailure to be called", exceptionReference.get(), notNullValue()); - MatcherAssert.assertThat(exceptionReference.get(), instanceOf(IllegalStateException.class)); - MatcherAssert.assertThat(exceptionReference.get().getMessage(), containsString("a decommission action is ongoing with status")); + MatcherAssert.assertThat(exceptionReference.get(), instanceOf(UnsupportedWeightedRoutingStateException.class)); + MatcherAssert.assertThat( + exceptionReference.get().getMessage(), + containsString("weight for [zone_A] must be set to [0.0] as it is under decommission action") + ); } public void testAddWeightedRoutingPassesWhenDecommissionFailed() throws InterruptedException { @@ -362,4 +397,36 @@ public void onFailure(Exception e) {} weightedRoutingService.registerWeightedRoutingMetadata(request.request(), listener); assertTrue(countDownLatch.await(30, TimeUnit.SECONDS)); } + + public void testAddWeightedRoutingPassesWhenWeightOfDecommissionedAttributeStillZero() throws InterruptedException { + Map weights = Map.of("zone_A", 0.0, "zone_B", 1.0, "zone_C", 1.0); + DecommissionStatus status = DecommissionStatus.SUCCESSFUL; + ClusterState state = clusterService.state(); + state = setWeightedRoutingWeights(state, weights); + state = setDecommissionAttribute(state, status); + ClusterState.Builder builder = ClusterState.builder(state); + ClusterServiceUtils.setState(clusterService, builder); + + ClusterPutWeightedRoutingRequestBuilder request = new ClusterPutWeightedRoutingRequestBuilder( + client, + ClusterAddWeightedRoutingAction.INSTANCE + ); + Map updatedWeights = Map.of("zone_A", 0.0, "zone_B", 2.0, "zone_C", 1.0); + WeightedRouting updatedWeightedRouting = new WeightedRouting("zone", updatedWeights); + request.setWeightedRouting(updatedWeightedRouting); + final CountDownLatch countDownLatch = new CountDownLatch(1); + ActionListener listener = new ActionListener() { + @Override + public void onResponse(ClusterStateUpdateResponse clusterStateUpdateResponse) { + assertTrue(clusterStateUpdateResponse.isAcknowledged()); + countDownLatch.countDown(); + } + + @Override + public void onFailure(Exception e) {} + }; + weightedRoutingService.registerWeightedRoutingMetadata(request.request(), listener); + assertTrue(countDownLatch.await(30, TimeUnit.SECONDS)); + assertEquals(updatedWeightedRouting, clusterService.state().metadata().weightedRoutingMetadata().getWeightedRouting()); + } } diff --git a/server/src/test/java/org/opensearch/cluster/service/ClusterManagerTaskThrottlerTests.java 
b/server/src/test/java/org/opensearch/cluster/service/ClusterManagerTaskThrottlerTests.java index d20fed5c37361..c5e706e50c298 100644 --- a/server/src/test/java/org/opensearch/cluster/service/ClusterManagerTaskThrottlerTests.java +++ b/server/src/test/java/org/opensearch/cluster/service/ClusterManagerTaskThrottlerTests.java @@ -114,6 +114,15 @@ public void testValidateSettingsForDifferentVersion() { Settings newSettings = Settings.builder().put("cluster_manager.throttling.thresholds.put-mapping.value", newLimit).build(); assertThrows(IllegalArgumentException.class, () -> throttler.validateSetting(newSettings)); + + // validate for empty setting, it shouldn't throw exception + Settings emptySettings = Settings.builder().build(); + try { + throttler.validateSetting(emptySettings); + } catch (Exception e) { + // it shouldn't throw exception + throw new AssertionError(e); + } } public void testValidateSettingsForTaskWihtoutRetryOnDataNode() { diff --git a/server/src/test/java/org/opensearch/common/bytes/ByteBuffersBytesReferenceTests.java b/server/src/test/java/org/opensearch/common/bytes/ByteBuffersBytesReferenceTests.java new file mode 100644 index 0000000000000..4665a8e113ff2 --- /dev/null +++ b/server/src/test/java/org/opensearch/common/bytes/ByteBuffersBytesReferenceTests.java @@ -0,0 +1,79 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.common.bytes; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; + +import org.hamcrest.Matchers; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.Arrays; +import java.util.Collection; +import java.util.function.Function; + +public class ByteBuffersBytesReferenceTests extends AbstractBytesReferenceTestCase { + @ParametersFactory + public static Collection allocator() { + return Arrays.asList( + new Object[] { (Function) ByteBuffer::allocateDirect }, + new Object[] { (Function) ByteBuffer::allocate } + ); + } + + private final Function allocator; + + public ByteBuffersBytesReferenceTests(Function allocator) { + this.allocator = allocator; + } + + @Override + protected BytesReference newBytesReference(int length) throws IOException { + return newBytesReference(length, randomInt(length)); + } + + @Override + protected BytesReference newBytesReferenceWithOffsetOfZero(int length) throws IOException { + return newBytesReference(length, 0); + } + + private BytesReference newBytesReference(int length, int offset) throws IOException { + // we know bytes stream output always creates a paged bytes reference, we use it to create randomized content + final ByteBuffer buffer = allocator.apply(length + offset); + for (int i = 0; i < length + offset; i++) { + buffer.put((byte) random().nextInt(1 << 8)); + } + assertEquals(length + offset, buffer.limit()); + buffer.flip().position(offset); + + BytesReference ref = BytesReference.fromByteBuffer(buffer); + assertEquals(length, ref.length()); + assertTrue(ref instanceof BytesArray); + assertThat(ref.length(), Matchers.equalTo(length)); + return ref; + } + + public void testArray() throws IOException { + int[] sizes = { 0, randomInt(PAGE_SIZE), PAGE_SIZE, randomIntBetween(2, PAGE_SIZE * randomIntBetween(2, 5)) }; + + for (int i = 0; i < sizes.length; i++) { + BytesArray pbr = (BytesArray) newBytesReference(sizes[i]); + byte[] array = pbr.array(); + assertNotNull(array); + 
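+ // A BytesArray may start at a non-zero offset into its backing array, so the logical length is compared against array.length minus + // the offset below, and a second array() call is checked to return the same backing instance rather than a copy.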
assertEquals(sizes[i], array.length - pbr.offset()); + assertSame(array, pbr.array()); + } + } + + public void testArrayOffset() throws IOException { + int length = randomInt(PAGE_SIZE * randomIntBetween(2, 5)); + BytesArray pbr = (BytesArray) newBytesReferenceWithOffsetOfZero(length); + assertEquals(0, pbr.offset()); + } +} diff --git a/server/src/test/java/org/opensearch/common/settings/SettingsModuleTests.java b/server/src/test/java/org/opensearch/common/settings/SettingsModuleTests.java index a8306107aaccc..8b53e5fe51635 100644 --- a/server/src/test/java/org/opensearch/common/settings/SettingsModuleTests.java +++ b/server/src/test/java/org/opensearch/common/settings/SettingsModuleTests.java @@ -34,6 +34,7 @@ import org.opensearch.common.inject.ModuleTestCase; import org.opensearch.common.settings.Setting.Property; +import org.opensearch.common.util.FeatureFlagTests; import org.hamcrest.Matchers; import java.util.Arrays; @@ -237,4 +238,47 @@ public void testOldMaxClauseCountSetting() { ex.getMessage() ); } + + public void testDynamicNodeSettingsRegistration() { + FeatureFlagTests.enableFeature(); + Settings settings = Settings.builder().put("some.custom.setting", "2.0").build(); + SettingsModule module = new SettingsModule(settings, Setting.floatSetting("some.custom.setting", 1.0f, Property.NodeScope)); + assertNotNull(module.getClusterSettings().get("some.custom.setting")); + // For unregistered setting the value is expected to be null + assertNull(module.getClusterSettings().get("some.custom.setting2")); + assertInstanceBinding(module, Settings.class, (s) -> s == settings); + + assertTrue(module.registerDynamicSetting(Setting.floatSetting("some.custom.setting2", 1.0f, Property.NodeScope))); + assertNotNull(module.getClusterSettings().get("some.custom.setting2")); + // verify if some.custom.setting still exists + assertNotNull(module.getClusterSettings().get("some.custom.setting")); + + // verify exception is thrown when setting registration fails + expectThrows( + SettingsException.class, + () -> module.registerDynamicSetting(Setting.floatSetting("some.custom.setting", 1.0f, Property.NodeScope)) + ); + } + + public void testDynamicIndexSettingsRegistration() { + FeatureFlagTests.enableFeature(); + Settings settings = Settings.builder().put("some.custom.setting", "2.0").build(); + SettingsModule module = new SettingsModule(settings, Setting.floatSetting("some.custom.setting", 1.0f, Property.NodeScope)); + assertNotNull(module.getClusterSettings().get("some.custom.setting")); + // For unregistered setting the value is expected to be null + assertNull(module.getIndexScopedSettings().get("index.custom.setting2")); + assertInstanceBinding(module, Settings.class, (s) -> s == settings); + + assertTrue(module.registerDynamicSetting(Setting.floatSetting("index.custom.setting2", 1.0f, Property.IndexScope))); + assertNotNull(module.getIndexScopedSettings().get("index.custom.setting2")); + + // verify if some.custom.setting still exists + assertNotNull(module.getClusterSettings().get("some.custom.setting")); + + // verify exception is thrown when setting registration fails + expectThrows( + SettingsException.class, + () -> module.registerDynamicSetting(Setting.floatSetting("index.custom.setting2", 1.0f, Property.IndexScope)) + ); + } } diff --git a/server/src/test/java/org/opensearch/common/unit/FuzzinessTests.java b/server/src/test/java/org/opensearch/common/unit/FuzzinessTests.java index 71e2b322c2307..9b8eb79112953 100644 --- 
a/server/src/test/java/org/opensearch/common/unit/FuzzinessTests.java +++ b/server/src/test/java/org/opensearch/common/unit/FuzzinessTests.java @@ -174,6 +174,12 @@ public void testSerializationCustomAuto() throws IOException { assertNotSame(original, deserializedFuzziness); assertEquals(original, deserializedFuzziness); assertEquals(original.asString(), deserializedFuzziness.asString()); + + original = Fuzziness.customAuto(4, 7); + deserializedFuzziness = doSerializeRoundtrip(original); + assertNotSame(original, deserializedFuzziness); + assertEquals(original, deserializedFuzziness); + assertEquals(original.asString(), deserializedFuzziness.asString()); } private static Fuzziness doSerializeRoundtrip(Fuzziness in) throws IOException { @@ -205,5 +211,11 @@ public void testAsDistanceString() { assertEquals(1, fuzziness.asDistance("abcdef")); assertEquals(2, fuzziness.asDistance("abcdefg")); + fuzziness = Fuzziness.customAuto(5, 7); + assertEquals(0, fuzziness.asDistance("")); + assertEquals(0, fuzziness.asDistance("abcd")); + assertEquals(1, fuzziness.asDistance("abcde")); + assertEquals(1, fuzziness.asDistance("abcdef")); + assertEquals(2, fuzziness.asDistance("abcdefg")); } } diff --git a/server/src/test/java/org/opensearch/common/util/FeatureFlagTests.java b/server/src/test/java/org/opensearch/common/util/FeatureFlagTests.java index a4f2b242564e2..05ede515e042c 100644 --- a/server/src/test/java/org/opensearch/common/util/FeatureFlagTests.java +++ b/server/src/test/java/org/opensearch/common/util/FeatureFlagTests.java @@ -22,6 +22,7 @@ public class FeatureFlagTests extends OpenSearchTestCase { public static void enableFeature() { AccessController.doPrivileged((PrivilegedAction) () -> System.setProperty(FeatureFlags.REPLICATION_TYPE, "true")); AccessController.doPrivileged((PrivilegedAction) () -> System.setProperty(FeatureFlags.REMOTE_STORE, "true")); + AccessController.doPrivileged((PrivilegedAction) () -> System.setProperty(FeatureFlags.EXTENSIONS, "true")); } public void testReplicationTypeFeatureFlag() { @@ -47,4 +48,5 @@ public void testRemoteStoreFeatureFlag() { assertNotNull(System.getProperty(remoteStoreFlag)); assertTrue(FeatureFlags.isEnabled(remoteStoreFlag)); } + } diff --git a/server/src/test/java/org/opensearch/extensions/ExtensionsManagerTests.java b/server/src/test/java/org/opensearch/extensions/ExtensionsManagerTests.java new file mode 100644 index 0000000000000..cbd86378c0fac --- /dev/null +++ b/server/src/test/java/org/opensearch/extensions/ExtensionsManagerTests.java @@ -0,0 +1,418 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.extensions; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.emptySet; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyBoolean; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.mock; +import static org.opensearch.test.ClusterServiceUtils.createClusterService; + +import java.io.IOException; +import java.net.InetAddress; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.security.AccessControlException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.concurrent.TimeUnit; + +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.LogManager; +import org.junit.After; +import org.junit.Before; +import org.opensearch.Version; +import org.opensearch.action.admin.cluster.state.ClusterStateResponse; +import org.opensearch.cluster.ClusterSettingsResponse; +import org.opensearch.cluster.LocalNodeResponse; +import org.opensearch.cluster.metadata.IndexMetadata; +import org.opensearch.cluster.metadata.IndexNameExpressionResolver; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.io.PathUtils; +import org.opensearch.common.io.stream.NamedWriteableRegistry; +import org.opensearch.common.network.NetworkService; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.transport.TransportAddress; +import org.opensearch.common.util.FeatureFlagTests; +import org.opensearch.common.util.PageCacheRecycler; +import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.env.Environment; +import org.opensearch.env.TestEnvironment; +import org.opensearch.index.IndexModule; +import org.opensearch.index.IndexSettings; +import org.opensearch.index.analysis.AnalysisRegistry; +import org.opensearch.index.engine.EngineConfigFactory; +import org.opensearch.index.engine.InternalEngineFactory; +import org.opensearch.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.plugins.PluginInfo; +import org.opensearch.test.IndexSettingsModule; +import org.opensearch.test.MockLogAppender; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.transport.MockTransportService; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.ConnectTransportException; +import org.opensearch.transport.Transport; +import org.opensearch.transport.TransportService; +import org.opensearch.transport.nio.MockNioTransport; + +public class ExtensionsManagerTests extends OpenSearchTestCase { + + private TransportService transportService; + private ClusterService clusterService; + private MockNioTransport transport; + private final ThreadPool threadPool = new TestThreadPool(ExtensionsManagerTests.class.getSimpleName()); + private final Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) + .build(); + + @Before + public void setup() throws Exception { + FeatureFlagTests.enableFeature(); + Settings settings = Settings.builder().put("cluster.name", 
"test").build(); + transport = new MockNioTransport( + settings, + Version.CURRENT, + threadPool, + new NetworkService(Collections.emptyList()), + PageCacheRecycler.NON_RECYCLING_INSTANCE, + new NamedWriteableRegistry(Collections.emptyList()), + new NoneCircuitBreakerService() + ); + transportService = new MockTransportService( + settings, + transport, + threadPool, + TransportService.NOOP_TRANSPORT_INTERCEPTOR, + (boundAddress) -> new DiscoveryNode( + "test_node", + "test_node", + boundAddress.publishAddress(), + emptyMap(), + emptySet(), + Version.CURRENT + ), + null, + Collections.emptySet() + ); + clusterService = createClusterService(threadPool); + } + + @After + public void tearDown() throws Exception { + super.tearDown(); + transportService.close(); + ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS); + } + + public void testExtensionsDiscovery() throws Exception { + Path extensionDir = createTempDir(); + + List extensionsYmlLines = Arrays.asList( + "extensions:", + " - name: firstExtension", + " uniqueId: uniqueid1", + " hostName: 'myIndependentPluginHost1'", + " hostAddress: '127.0.0.0'", + " port: '9300'", + " version: '0.0.7'", + " description: Fake description 1", + " opensearchVersion: '3.0.0'", + " javaVersion: '14'", + " className: fakeClass1", + " customFolderName: fakeFolder1", + " hasNativeController: false", + " - name: secondExtension", + " uniqueId: 'uniqueid2'", + " hostName: 'myIndependentPluginHost2'", + " hostAddress: '127.0.0.1'", + " port: '9301'", + " version: '3.14.16'", + " description: Fake description 2", + " opensearchVersion: '2.0.0'", + " javaVersion: '17'", + " className: fakeClass2", + " customFolderName: fakeFolder2", + " hasNativeController: true" + ); + Files.write(extensionDir.resolve("extensions.yml"), extensionsYmlLines, StandardCharsets.UTF_8); + + ExtensionsManager extensionsManager = new ExtensionsManager(settings, extensionDir); + + List expectedUninitializedExtensions = new ArrayList(); + + expectedUninitializedExtensions.add( + new DiscoveryExtensionNode( + "firstExtension", + "uniqueid1", + "uniqueid1", + "myIndependentPluginHost1", + "127.0.0.0", + new TransportAddress(InetAddress.getByName("127.0.0.0"), 9300), + new HashMap(), + Version.fromString("3.0.0"), + new PluginInfo( + "firstExtension", + "Fake description 1", + "0.0.7", + Version.fromString("3.0.0"), + "14", + "fakeClass1", + new ArrayList(), + false + ) + ) + ); + + expectedUninitializedExtensions.add( + new DiscoveryExtensionNode( + "secondExtension", + "uniqueid2", + "uniqueid2", + "myIndependentPluginHost2", + "127.0.0.1", + new TransportAddress(TransportAddress.META_ADDRESS, 9301), + new HashMap(), + Version.fromString("2.0.0"), + new PluginInfo( + "secondExtension", + "Fake description 2", + "3.14.16", + Version.fromString("2.0.0"), + "17", + "fakeClass2", + new ArrayList(), + true + ) + ) + ); + assertEquals(expectedUninitializedExtensions, extensionsManager.getUninitializedExtensions()); + } + + public void testNonAccessibleDirectory() throws Exception { + AccessControlException e = expectThrows( + + AccessControlException.class, + () -> new ExtensionsManager(settings, PathUtils.get("")) + ); + assertEquals("access denied (\"java.io.FilePermission\" \"\" \"read\")", e.getMessage()); + } + + public void testNoExtensionsFile() throws Exception { + Path extensionDir = createTempDir(); + + Settings settings = Settings.builder().build(); + + try (MockLogAppender mockLogAppender = MockLogAppender.createForLoggers(LogManager.getLogger(ExtensionsManager.class))) { + + 
mockLogAppender.addExpectation( + new MockLogAppender.SeenEventExpectation( + "No Extensions File Present", + "org.opensearch.extensions.ExtensionsManager", + Level.INFO, + "Extensions.yml file is not present. No extensions will be loaded." + ) + ); + + new ExtensionsManager(settings, extensionDir); + + mockLogAppender.assertAllExpectationsMatched(); + } + } + + public void testEmptyExtensionsFile() throws Exception { + Path extensionDir = createTempDir(); + + List extensionsYmlLines = Arrays.asList(); + Files.write(extensionDir.resolve("extensions.yml"), extensionsYmlLines, StandardCharsets.UTF_8); + + Settings settings = Settings.builder().build(); + + expectThrows(IOException.class, () -> new ExtensionsManager(settings, extensionDir)); + } + + public void testInitialize() throws Exception { + Path extensionDir = createTempDir(); + + List extensionsYmlLines = Arrays.asList( + "extensions:", + " - name: firstExtension", + " uniqueId: uniqueid1", + " hostName: 'myIndependentPluginHost1'", + " hostAddress: '127.0.0.0'", + " port: '9300'", + " version: '0.0.7'", + " description: Fake description 1", + " opensearchVersion: '3.0.0'", + " javaVersion: '14'", + " className: fakeClass1", + " customFolderName: fakeFolder1", + " hasNativeController: false", + " - name: secondExtension", + " uniqueId: 'uniqueid2'", + " hostName: 'myIndependentPluginHost2'", + " hostAddress: '127.0.0.1'", + " port: '9301'", + " version: '3.14.16'", + " description: Fake description 2", + " opensearchVersion: '2.0.0'", + " javaVersion: '17'", + " className: fakeClass2", + " customFolderName: fakeFolder2", + " hasNativeController: true" + ); + Files.write(extensionDir.resolve("extensions.yml"), extensionsYmlLines, StandardCharsets.UTF_8); + + ExtensionsManager extensionsManager = new ExtensionsManager(settings, extensionDir); + + transportService.start(); + transportService.acceptIncomingRequests(); + extensionsManager.setTransportService(transportService); + + expectThrows(ConnectTransportException.class, () -> extensionsManager.initialize()); + + } + + public void testHandleExtensionRequest() throws Exception { + + Path extensionDir = createTempDir(); + + ExtensionsManager extensionsManager = new ExtensionsManager(settings, extensionDir); + + extensionsManager.setTransportService(transportService); + extensionsManager.setClusterService(clusterService); + ExtensionRequest clusterStateRequest = new ExtensionRequest(ExtensionsManager.RequestType.REQUEST_EXTENSION_CLUSTER_STATE); + assertEquals(extensionsManager.handleExtensionRequest(clusterStateRequest).getClass(), ClusterStateResponse.class); + + ExtensionRequest clusterSettingRequest = new ExtensionRequest(ExtensionsManager.RequestType.REQUEST_EXTENSION_CLUSTER_SETTINGS); + assertEquals(extensionsManager.handleExtensionRequest(clusterSettingRequest).getClass(), ClusterSettingsResponse.class); + + ExtensionRequest localNodeRequest = new ExtensionRequest(ExtensionsManager.RequestType.REQUEST_EXTENSION_LOCAL_NODE); + assertEquals(extensionsManager.handleExtensionRequest(localNodeRequest).getClass(), LocalNodeResponse.class); + + ExtensionRequest exceptionRequest = new ExtensionRequest(ExtensionsManager.RequestType.GET_SETTINGS); + Exception exception = expectThrows(IllegalStateException.class, () -> extensionsManager.handleExtensionRequest(exceptionRequest)); + assertEquals(exception.getMessage(), "Handler not present for the provided request: " + exceptionRequest.getRequestType()); + } + + public void testRegisterHandler() throws Exception { + Path extensionDir = 
createTempDir(); + + ExtensionsManager extensionsManager = new ExtensionsManager(settings, extensionDir); + + TransportService mockTransportService = spy( + new TransportService( + Settings.EMPTY, + mock(Transport.class), + null, + TransportService.NOOP_TRANSPORT_INTERCEPTOR, + x -> null, + null, + Collections.emptySet() + ) + ); + + extensionsManager.setTransportService(mockTransportService); + verify(mockTransportService, times(3)).registerRequestHandler(anyString(), anyString(), anyBoolean(), anyBoolean(), any(), any()); + + } + + public void testOnIndexModule() throws Exception { + + Path extensionDir = createTempDir(); + + List extensionsYmlLines = Arrays.asList( + "extensions:", + " - name: firstExtension", + " uniqueId: uniqueid1", + " hostName: 'myIndependentPluginHost1'", + " hostAddress: '127.0.0.0'", + " port: '9300'", + " version: '0.0.7'", + " description: Fake description 1", + " opensearchVersion: '3.0.0'", + " javaVersion: '14'", + " className: fakeClass1", + " customFolderName: fakeFolder1", + " hasNativeController: false", + " - name: secondExtension", + " uniqueId: 'uniqueid2'", + " hostName: 'myIndependentPluginHost2'", + " hostAddress: '127.0.0.1'", + " port: '9301'", + " version: '3.14.16'", + " description: Fake description 2", + " opensearchVersion: '2.0.0'", + " javaVersion: '17'", + " className: fakeClass2", + " customFolderName: fakeFolder2", + " hasNativeController: true" + ); + Files.write(extensionDir.resolve("extensions.yml"), extensionsYmlLines, StandardCharsets.UTF_8); + + ExtensionsManager extensionsManager = new ExtensionsManager(settings, extensionDir); + + transportService.start(); + transportService.acceptIncomingRequests(); + extensionsManager.setTransportService(transportService); + + Environment environment = TestEnvironment.newEnvironment(settings); + AnalysisRegistry emptyAnalysisRegistry = new AnalysisRegistry( + environment, + emptyMap(), + emptyMap(), + emptyMap(), + emptyMap(), + emptyMap(), + emptyMap(), + emptyMap(), + emptyMap(), + emptyMap() + ); + + IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("test_index", settings); + IndexModule indexModule = new IndexModule( + indexSettings, + emptyAnalysisRegistry, + new InternalEngineFactory(), + new EngineConfigFactory(indexSettings), + Collections.emptyMap(), + () -> true, + new IndexNameExpressionResolver(new ThreadContext(Settings.EMPTY)), + Collections.emptyMap() + ); + + try (MockLogAppender mockLogAppender = MockLogAppender.createForLoggers(LogManager.getLogger(ExtensionsManager.class))) { + + mockLogAppender.addExpectation( + new MockLogAppender.SeenEventExpectation( + "IndicesModuleRequest Failure", + "org.opensearch.extensions.ExtensionsManager", + Level.ERROR, + "IndicesModuleRequest failed" + ) + ); + + extensionsManager.onIndexModule(indexModule); + mockLogAppender.assertAllExpectationsMatched(); + } + } + +} diff --git a/server/src/test/java/org/opensearch/index/ShardIndexingPressureConcurrentExecutionTests.java b/server/src/test/java/org/opensearch/index/ShardIndexingPressureConcurrentExecutionTests.java index faab2f405010a..8757458e3317e 100644 --- a/server/src/test/java/org/opensearch/index/ShardIndexingPressureConcurrentExecutionTests.java +++ b/server/src/test/java/org/opensearch/index/ShardIndexingPressureConcurrentExecutionTests.java @@ -269,7 +269,13 @@ public void testCoordinatingPrimaryThreadedUpdateToShardLimitsAndRejections() th nodeStats = shardIndexingPressure.stats(); IndexingPressurePerShardStats shardStoreStats = 
shardIndexingPressure.shardStats().getIndexingPressureShardStats(shardId1); - assertNull(shardStoreStats); + // If the rejection count equals NUM_THREADS, rejections happened through the last request, so we still get the shardStoreStats + // updated by that request. Otherwise the shard's stats have moved to the cold store and null is returned. + if (rejectionCount.get() == NUM_THREADS) { + assertEquals(10, shardStoreStats.getCurrentPrimaryAndCoordinatingLimits()); + } else { + assertNull(shardStoreStats); + } shardStats = shardIndexingPressure.coldStats(); if (randomBoolean) { assertEquals(rejectionCount.get(), nodeStats.getCoordinatingRejections()); @@ -331,7 +337,13 @@ public void testReplicaThreadedUpdateToShardLimitsAndRejections() throws Excepti assertEquals(0, nodeStats.getCurrentReplicaBytes()); IndexingPressurePerShardStats shardStoreStats = shardIndexingPressure.shardStats().getIndexingPressureShardStats(shardId1); - assertNull(shardStoreStats); + // If the rejection count equals NUM_THREADS, rejections happened through the last request, so we still get the shardStoreStats + // updated by that request. Otherwise the shard's stats have moved to the cold store and null is returned. + if (rejectionCount.get() == NUM_THREADS) { + assertEquals(15, shardStoreStats.getCurrentReplicaLimits()); + } else { + assertNull(shardStoreStats); + } shardStats = shardIndexingPressure.coldStats(); assertEquals(rejectionCount.get(), shardStats.getIndexingPressureShardStats(shardId1).getReplicaNodeLimitsBreachedRejections()); diff --git a/server/src/test/java/org/opensearch/index/codec/CodecTests.java b/server/src/test/java/org/opensearch/index/codec/CodecTests.java index 0a6338333bffc..bc50525412954 100644 --- a/server/src/test/java/org/opensearch/index/codec/CodecTests.java +++ b/server/src/test/java/org/opensearch/index/codec/CodecTests.java @@ -34,7 +34,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.lucene.codecs.Codec; -import org.apache.lucene.codecs.lucene94.Lucene94Codec; +import org.apache.lucene.codecs.lucene95.Lucene95Codec; import org.apache.lucene.codecs.lucene90.Lucene90StoredFieldsFormat; import org.apache.lucene.document.Document; import org.apache.lucene.index.DirectoryReader; @@ -65,21 +65,21 @@ public class CodecTests extends OpenSearchTestCase { public void testResolveDefaultCodecs() throws Exception { CodecService codecService = createCodecService(); assertThat(codecService.codec("default"), instanceOf(PerFieldMappingPostingFormatCodec.class)); - assertThat(codecService.codec("default"), instanceOf(Lucene94Codec.class)); + assertThat(codecService.codec("default"), instanceOf(Lucene95Codec.class)); } public void testDefault() throws Exception { Codec codec = createCodecService().codec("default"); - assertStoredFieldsCompressionEquals(Lucene94Codec.Mode.BEST_SPEED, codec); + assertStoredFieldsCompressionEquals(Lucene95Codec.Mode.BEST_SPEED, codec); } public void testBestCompression() throws Exception { Codec codec = createCodecService().codec("best_compression"); - assertStoredFieldsCompressionEquals(Lucene94Codec.Mode.BEST_COMPRESSION, codec); + assertStoredFieldsCompressionEquals(Lucene95Codec.Mode.BEST_COMPRESSION, codec); } // write some docs with it, inspect .si to see this was the used compression - private void assertStoredFieldsCompressionEquals(Lucene94Codec.Mode expected, Codec actual) throws Exception { + private void assertStoredFieldsCompressionEquals(Lucene95Codec.Mode expected, Codec actual) throws
Exception { Directory dir = newDirectory(); IndexWriterConfig iwc = newIndexWriterConfig(null); iwc.setCodec(actual); @@ -91,7 +91,7 @@ private void assertStoredFieldsCompressionEquals(Lucene94Codec.Mode expected, Co SegmentReader sr = (SegmentReader) ir.leaves().get(0).reader(); String v = sr.getSegmentInfo().info.getAttribute(Lucene90StoredFieldsFormat.MODE_KEY); assertNotNull(v); - assertEquals(expected, Lucene94Codec.Mode.valueOf(v)); + assertEquals(expected, Lucene95Codec.Mode.valueOf(v)); ir.close(); dir.close(); } diff --git a/server/src/test/java/org/opensearch/index/engine/CompletionStatsCacheTests.java b/server/src/test/java/org/opensearch/index/engine/CompletionStatsCacheTests.java index 575997dc2609e..d960fa910fde6 100644 --- a/server/src/test/java/org/opensearch/index/engine/CompletionStatsCacheTests.java +++ b/server/src/test/java/org/opensearch/index/engine/CompletionStatsCacheTests.java @@ -32,7 +32,7 @@ package org.opensearch.index.engine; import org.apache.lucene.codecs.PostingsFormat; -import org.apache.lucene.codecs.lucene94.Lucene94Codec; +import org.apache.lucene.codecs.lucene95.Lucene95Codec; import org.apache.lucene.document.Document; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexWriter; @@ -70,7 +70,7 @@ public void testExceptionsAreNotCached() { public void testCompletionStatsCache() throws IOException, InterruptedException { final IndexWriterConfig indexWriterConfig = newIndexWriterConfig(); final PostingsFormat postingsFormat = new Completion90PostingsFormat(); - indexWriterConfig.setCodec(new Lucene94Codec() { + indexWriterConfig.setCodec(new Lucene95Codec() { @Override public PostingsFormat getPostingsFormatForField(String field) { return postingsFormat; // all fields are suggest fields diff --git a/server/src/test/java/org/opensearch/index/query/MatchBoolPrefixQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/MatchBoolPrefixQueryBuilderTests.java index 8a6b09b1ea925..0e3fd0efe7804 100644 --- a/server/src/test/java/org/opensearch/index/query/MatchBoolPrefixQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/MatchBoolPrefixQueryBuilderTests.java @@ -179,6 +179,11 @@ public void testIllegalValues() { } } + public void testDefaultFuzziness() { + MatchBoolPrefixQueryBuilder matchBoolPrefixQueryBuilder = new MatchBoolPrefixQueryBuilder(TEXT_FIELD_NAME, "text").fuzziness(null); + assertNull(matchBoolPrefixQueryBuilder.fuzziness()); + } + public void testFromSimpleJson() throws IOException { final String simple = "{" + "\"match_bool_prefix\": {" + "\"fieldName\": \"fieldValue\"" + "}" + "}"; final String expected = "{" diff --git a/server/src/test/java/org/opensearch/index/query/MatchQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/MatchQueryBuilderTests.java index d6cd157d6f84e..2e18b1829d542 100644 --- a/server/src/test/java/org/opensearch/index/query/MatchQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/MatchQueryBuilderTests.java @@ -326,6 +326,11 @@ public void testFuzzinessOnNonStringField() throws Exception { query.toQuery(context); // no exception } + public void testDefaultFuzziness() { + MatchQueryBuilder matchQueryBuilder = new MatchQueryBuilder("text", TEXT_FIELD_NAME).fuzziness(null); + assertNull(matchQueryBuilder.fuzziness()); + } + public void testExactOnUnsupportedField() throws Exception { MatchQueryBuilder query = new MatchQueryBuilder(GEO_POINT_FIELD_NAME, "2,3"); QueryShardContext context = 
createShardContext(); diff --git a/server/src/test/java/org/opensearch/index/query/MultiMatchQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/MultiMatchQueryBuilderTests.java index ec04ee8fd3d6d..735bb4d47fe47 100644 --- a/server/src/test/java/org/opensearch/index/query/MultiMatchQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/MultiMatchQueryBuilderTests.java @@ -365,6 +365,11 @@ public void testFuzzinessNotAllowedTypes() throws IOException { } } + public void testDefaultFuzziness() { + MultiMatchQueryBuilder multiMatchQueryBuilder = new MultiMatchQueryBuilder("text", TEXT_FIELD_NAME).fuzziness(null); + assertNull(multiMatchQueryBuilder.fuzziness()); + } + public void testQueryParameterArrayException() { String json = "{\n" + " \"multi_match\" : {\n" diff --git a/server/src/test/java/org/opensearch/index/query/PrefixQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/PrefixQueryBuilderTests.java index 48b309ea4eca3..8f4f70e96e2b4 100644 --- a/server/src/test/java/org/opensearch/index/query/PrefixQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/PrefixQueryBuilderTests.java @@ -130,7 +130,7 @@ public void testNumeric() throws Exception { QueryShardContext context = createShardContext(); QueryShardException e = expectThrows(QueryShardException.class, () -> query.toQuery(context)); assertEquals( - "Can only use prefix queries on keyword, text and wildcard fields - not on [mapped_int] which is of type [integer]", + "Can only use prefix queries on keyword and text fields - not on [mapped_int] which is of type [integer]", e.getMessage() ); } diff --git a/server/src/test/java/org/opensearch/index/query/QueryStringQueryBuilderTests.java b/server/src/test/java/org/opensearch/index/query/QueryStringQueryBuilderTests.java index 393d4cb3f2121..ad39cd831a30c 100644 --- a/server/src/test/java/org/opensearch/index/query/QueryStringQueryBuilderTests.java +++ b/server/src/test/java/org/opensearch/index/query/QueryStringQueryBuilderTests.java @@ -593,7 +593,7 @@ public void testToQueryWildcardWithIndexedPrefixes() throws Exception { assertThat(query, equalTo(expectedQuery)); } - public void testToQueryWilcardQueryWithSynonyms() throws Exception { + public void testToQueryWildcardQueryWithSynonyms() throws Exception { for (Operator op : Operator.values()) { BooleanClause.Occur defaultOp = op.toBooleanClauseOccur(); QueryStringQueryParser queryParser = new QueryStringQueryParser(createShardContext(), TEXT_FIELD_NAME); @@ -803,7 +803,7 @@ public void testToQueryRegExpQueryMaxDeterminizedStatesParsing() throws Exceptio assertThat(e.getMessage(), containsString("would require more than 10 effort")); } - public void testToQueryFuzzyQueryAutoFuziness() throws Exception { + public void testToQueryFuzzyQueryAutoFuzziness() throws Exception { for (int i = 0; i < 3; i++) { final int len; final int expectedEdits; @@ -828,7 +828,6 @@ public void testToQueryFuzzyQueryAutoFuziness() throws Exception { String queryString = new String(bytes); for (int j = 0; j < 2; j++) { Query query = queryStringQuery(queryString + (j == 0 ? 
"~" : "~auto")).defaultField(TEXT_FIELD_NAME) - .fuzziness(Fuzziness.AUTO) .toQuery(createShardContext()); assertThat(query, instanceOf(FuzzyQuery.class)); FuzzyQuery fuzzyQuery = (FuzzyQuery) query; @@ -868,12 +867,17 @@ public void testFuzzyNumeric() throws Exception { query.toQuery(context); // no exception } + public void testDefaultFuzziness() { + QueryStringQueryBuilder queryStringQueryBuilder = new QueryStringQueryBuilder(TEXT_FIELD_NAME).fuzziness(null); + assertNull(queryStringQueryBuilder.fuzziness()); + } + public void testPrefixNumeric() throws Exception { QueryStringQueryBuilder query = queryStringQuery("12*").defaultField(INT_FIELD_NAME); QueryShardContext context = createShardContext(); QueryShardException e = expectThrows(QueryShardException.class, () -> query.toQuery(context)); assertEquals( - "Can only use prefix queries on keyword, text and wildcard fields - not on [mapped_int] which is of type [integer]", + "Can only use prefix queries on keyword and text fields - not on [mapped_int] which is of type [integer]", e.getMessage() ); query.lenient(true); diff --git a/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTestCase.java b/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTestCase.java index 34a2d1189d234..17a6bfc8fbd82 100644 --- a/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTestCase.java +++ b/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTestCase.java @@ -43,6 +43,7 @@ import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.IndexSettingsModule; +import java.util.Collections; import java.util.Set; import java.util.function.LongConsumer; import java.util.function.LongSupplier; @@ -55,12 +56,13 @@ public abstract class ReplicationTrackerTestCase extends OpenSearchTestCase { ReplicationTracker newTracker( final AllocationId allocationId, final LongConsumer updatedGlobalCheckpoint, - final LongSupplier currentTimeMillisSupplier + final LongSupplier currentTimeMillisSupplier, + final Settings settings ) { return new ReplicationTracker( new ShardId("test", "_na_", 0), allocationId.getId(), - IndexSettingsModule.newIndexSettings("test", Settings.EMPTY), + IndexSettingsModule.newIndexSettings("test", settings), randomNonNegativeLong(), UNASSIGNED_SEQ_NO, updatedGlobalCheckpoint, @@ -70,6 +72,14 @@ ReplicationTracker newTracker( ); } + ReplicationTracker newTracker( + final AllocationId allocationId, + final LongConsumer updatedGlobalCheckpoint, + final LongSupplier currentTimeMillisSupplier + ) { + return newTracker(allocationId, updatedGlobalCheckpoint, currentTimeMillisSupplier, Settings.EMPTY); + } + static final Supplier OPS_BASED_RECOVERY_ALWAYS_REASONABLE = () -> SafeCommitInfo.EMPTY; static String nodeIdFromAllocationId(final AllocationId allocationId) { @@ -77,6 +87,14 @@ static String nodeIdFromAllocationId(final AllocationId allocationId) { } static IndexShardRoutingTable routingTable(final Set initializingIds, final AllocationId primaryId) { + return routingTable(initializingIds, Collections.singleton(primaryId), primaryId); + } + + static IndexShardRoutingTable routingTable( + final Set initializingIds, + final Set activeIds, + final AllocationId primaryId + ) { final ShardId shardId = new ShardId("test", "_na_", 0); final ShardRouting primaryShard = TestShardRouting.newShardRouting( shardId, @@ -86,11 +104,17 @@ static IndexShardRoutingTable routingTable(final Set initializingI ShardRoutingState.STARTED, primaryId ); - return routingTable(initializingIds, 
primaryShard); + return routingTable(initializingIds, activeIds, primaryShard); } - static IndexShardRoutingTable routingTable(final Set initializingIds, final ShardRouting primaryShard) { + static IndexShardRoutingTable routingTable( + final Set initializingIds, + final Set activeIds, + final ShardRouting primaryShard + ) { + assert initializingIds != null && activeIds != null; assert !initializingIds.contains(primaryShard.allocationId()); + assert activeIds.contains(primaryShard.allocationId()); final ShardId shardId = new ShardId("test", "_na_", 0); final IndexShardRoutingTable.Builder builder = new IndexShardRoutingTable.Builder(shardId); for (final AllocationId initializingId : initializingIds) { @@ -105,6 +129,21 @@ static IndexShardRoutingTable routingTable(final Set initializingI ) ); } + for (final AllocationId activeId : activeIds) { + if (activeId.equals(primaryShard.allocationId())) { + continue; + } + builder.addShard( + TestShardRouting.newShardRouting( + shardId, + nodeIdFromAllocationId(activeId), + null, + false, + ShardRoutingState.STARTED, + activeId + ) + ); + } builder.addShard(primaryShard); diff --git a/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTests.java b/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTests.java index 66c484cd40cce..8ea64e71fb9dc 100644 --- a/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTests.java +++ b/server/src/test/java/org/opensearch/index/seqno/ReplicationTrackerTests.java @@ -437,6 +437,10 @@ public void testWaitForAllocationIdToBeInSync() throws Exception { private AtomicLong updatedGlobalCheckpoint = new AtomicLong(UNASSIGNED_SEQ_NO); + private ReplicationTracker newTracker(final AllocationId allocationId, Settings settings) { + return newTracker(allocationId, updatedGlobalCheckpoint::set, () -> 0L, settings); + } + private ReplicationTracker newTracker(final AllocationId allocationId) { return newTracker(allocationId, updatedGlobalCheckpoint::set, () -> 0L); } @@ -966,7 +970,11 @@ private static FakeClusterState initialState() { relocatingId ); - return new FakeClusterState(initialClusterStateVersion, activeAllocationIds, routingTable(initializingAllocationIds, primaryShard)); + return new FakeClusterState( + initialClusterStateVersion, + activeAllocationIds, + routingTable(initializingAllocationIds, Collections.singleton(primaryShard.allocationId()), primaryShard) + ); } private static void randomLocalCheckpointUpdate(ReplicationTracker gcp) { @@ -1007,6 +1015,7 @@ private static FakeClusterState randomUpdateClusterState(Set allocationI remainingInSyncIds.isEmpty() ? 
clusterState.inSyncIds : remainingInSyncIds, routingTable( Sets.difference(Sets.union(initializingIdsExceptRelocationTargets, initializingIdsToAdd), initializingIdsToRemove), + Collections.singleton(clusterState.routingTable.primaryShard().allocationId()), clusterState.routingTable.primaryShard() ) ); @@ -1046,9 +1055,20 @@ private static void markAsTrackingAndInSyncQuietly( final ReplicationTracker tracker, final String allocationId, final long localCheckpoint + ) { + markAsTrackingAndInSyncQuietly(tracker, allocationId, localCheckpoint, true); + } + + private static void markAsTrackingAndInSyncQuietly( + final ReplicationTracker tracker, + final String allocationId, + final long localCheckpoint, + final boolean addPRRL ) { try { - addPeerRecoveryRetentionLease(tracker, allocationId); + if (addPRRL) { + addPeerRecoveryRetentionLease(tracker, allocationId); + } tracker.initiateTracking(allocationId); tracker.markAllocationIdAsInSync(allocationId, localCheckpoint); } catch (final InterruptedException e) { @@ -1252,4 +1272,695 @@ public void testPeerRecoveryRetentionLeaseCreationAndRenewal() { ); } + /** + * This test checks that the global checkpoint update mechanism is honored and relies only on the shards that have + * translog stored locally. + */ + public void testGlobalCheckpointUpdateWithRemoteTranslogEnabled() { + final long initialClusterStateVersion = randomNonNegativeLong(); + Map activeWithCheckpoints = randomAllocationsWithLocalCheckpoints(1, 5); + Set active = new HashSet<>(activeWithCheckpoints.keySet()); + Map allocations = new HashMap<>(activeWithCheckpoints); + Map initializingWithCheckpoints = randomAllocationsWithLocalCheckpoints(0, 5); + Set initializing = new HashSet<>(initializingWithCheckpoints.keySet()); + allocations.putAll(initializingWithCheckpoints); + assertThat(allocations.size(), equalTo(active.size() + initializing.size())); + + final AllocationId primaryId = active.iterator().next(); + Settings settings = Settings.builder().put("index.remote_store.translog.enabled", "true").build(); + final ReplicationTracker tracker = newTracker(primaryId, settings); + assertThat(tracker.getGlobalCheckpoint(), equalTo(UNASSIGNED_SEQ_NO)); + + long primaryLocalCheckpoint = activeWithCheckpoints.get(primaryId); + + logger.info("--> using allocations"); + allocations.keySet().forEach(aId -> { + final String type; + if (active.contains(aId)) { + type = "active"; + } else if (initializing.contains(aId)) { + type = "init"; + } else { + throw new IllegalStateException(aId + " not found in any map"); + } + logger.info(" - [{}], local checkpoint [{}], [{}]", aId, allocations.get(aId), type); + }); + + tracker.updateFromClusterManager(initialClusterStateVersion, ids(active), routingTable(initializing, primaryId)); + tracker.activatePrimaryMode(NO_OPS_PERFORMED); + assertThat(tracker.getReplicationGroup().getReplicationTargets().size(), equalTo(1)); + initializing.forEach(aId -> markAsTrackingAndInSyncQuietly(tracker, aId.getId(), NO_OPS_PERFORMED, false)); + assertThat(tracker.getReplicationGroup().getReplicationTargets().size(), equalTo(1 + initializing.size())); + Set replicationTargets = tracker.getReplicationGroup() + .getReplicationTargets() + .stream() + .map(ShardRouting::allocationId) + .collect(Collectors.toSet()); + assertTrue(replicationTargets.containsAll(initializing)); + allocations.keySet().forEach(aId -> updateLocalCheckpoint(tracker, aId.getId(), allocations.get(aId))); + + assertEquals(tracker.getGlobalCheckpoint(), primaryLocalCheckpoint); + + // increment 
checkpoints + active.forEach(aId -> allocations.put(aId, allocations.get(aId) + 1 + randomInt(4))); + initializing.forEach(aId -> allocations.put(aId, allocations.get(aId) + 1 + randomInt(4))); + allocations.keySet().forEach(aId -> updateLocalCheckpoint(tracker, aId.getId(), allocations.get(aId))); + + final long minLocalCheckpointAfterUpdates = allocations.values().stream().min(Long::compareTo).orElse(UNASSIGNED_SEQ_NO); + + // now insert an unknown active/in-sync id; the checkpoint shouldn't change but a refresh should be requested. + final AllocationId extraId = AllocationId.newInitializing(); + + // first check that adding it without the cluster-manager blessing doesn't change anything. + updateLocalCheckpoint(tracker, extraId.getId(), minLocalCheckpointAfterUpdates + 1 + randomInt(4)); + assertNull(tracker.checkpoints.get(extraId.getId())); + expectThrows(IllegalStateException.class, () -> tracker.initiateTracking(extraId.getId())); + + Set newInitializing = new HashSet<>(initializing); + newInitializing.add(extraId); + tracker.updateFromClusterManager(initialClusterStateVersion + 1, ids(active), routingTable(newInitializing, primaryId)); + + tracker.initiateTracking(extraId.getId()); + + // now notify for the new id + if (randomBoolean()) { + updateLocalCheckpoint(tracker, extraId.getId(), minLocalCheckpointAfterUpdates + 1 + randomInt(4)); + markAsTrackingAndInSyncQuietly(tracker, extraId.getId(), randomInt((int) minLocalCheckpointAfterUpdates), false); + } else { + markAsTrackingAndInSyncQuietly(tracker, extraId.getId(), minLocalCheckpointAfterUpdates + 1 + randomInt(4), false); + } + } + + public void testUpdateFromClusterManagerWithRemoteTranslogEnabled() { + final long initialClusterStateVersion = randomNonNegativeLong(); + Map activeWithCheckpoints = randomAllocationsWithLocalCheckpoints(2, 5); + Set active = new HashSet<>(activeWithCheckpoints.keySet()); + Map allocations = new HashMap<>(activeWithCheckpoints); + Map initializingWithCheckpoints = randomAllocationsWithLocalCheckpoints(0, 5); + Set initializing = new HashSet<>(initializingWithCheckpoints.keySet()); + allocations.putAll(initializingWithCheckpoints); + assertThat(allocations.size(), equalTo(active.size() + initializing.size())); + + final AllocationId primaryId = active.iterator().next(); + Settings settings = Settings.builder().put("index.remote_store.translog.enabled", "true").build(); + final ReplicationTracker tracker = newTracker(primaryId, settings); + assertThat(tracker.getGlobalCheckpoint(), equalTo(UNASSIGNED_SEQ_NO)); + + long primaryLocalCheckpoint = activeWithCheckpoints.get(primaryId); + + logger.info("--> using allocations"); + allocations.keySet().forEach(aId -> { + final String type; + if (active.contains(aId)) { + type = "active"; + } else if (initializing.contains(aId)) { + type = "init"; + } else { + throw new IllegalStateException(aId + " not found in any map"); + } + logger.info(" - [{}], local checkpoint [{}], [{}]", aId, allocations.get(aId), type); + }); + + tracker.updateFromClusterManager(initialClusterStateVersion, ids(active), routingTable(initializing, active, primaryId)); + tracker.activatePrimaryMode(NO_OPS_PERFORMED); + assertEquals(tracker.getReplicationGroup().getReplicationTargets().size(), active.size()); + initializing.forEach(aId -> markAsTrackingAndInSyncQuietly(tracker, aId.getId(), NO_OPS_PERFORMED, false)); + assertEquals(tracker.getReplicationGroup().getReplicationTargets().size(), active.size() + initializing.size()); + Set replicationTargets =
tracker.getReplicationGroup() + .getReplicationTargets() + .stream() + .map(ShardRouting::allocationId) + .collect(Collectors.toSet()); + assertTrue(replicationTargets.containsAll(initializing)); + assertTrue(replicationTargets.containsAll(active)); + allocations.keySet().forEach(aId -> updateLocalCheckpoint(tracker, aId.getId(), allocations.get(aId))); + + assertEquals(tracker.getGlobalCheckpoint(), primaryLocalCheckpoint); + + // increment checkpoints + active.forEach(aId -> allocations.put(aId, allocations.get(aId) + 1 + randomInt(4))); + initializing.forEach(aId -> allocations.put(aId, allocations.get(aId) + 1 + randomInt(4))); + allocations.keySet().forEach(aId -> updateLocalCheckpoint(tracker, aId.getId(), allocations.get(aId))); + + final long minLocalCheckpointAfterUpdates = allocations.values().stream().min(Long::compareTo).orElse(UNASSIGNED_SEQ_NO); + + // now insert an unknown active/in-sync id; the checkpoint shouldn't change but a refresh should be requested. + final AllocationId extraId = AllocationId.newInitializing(); + + // first check that adding it without the cluster-manager blessing doesn't change anything. + updateLocalCheckpoint(tracker, extraId.getId(), minLocalCheckpointAfterUpdates + 1 + randomInt(4)); + assertNull(tracker.checkpoints.get(extraId.getId())); + expectThrows(IllegalStateException.class, () -> tracker.initiateTracking(extraId.getId())); + + Set newInitializing = new HashSet<>(initializing); + newInitializing.add(extraId); + tracker.updateFromClusterManager(initialClusterStateVersion + 1, ids(active), routingTable(newInitializing, primaryId)); + + tracker.initiateTracking(extraId.getId()); + + // now notify for the new id + if (randomBoolean()) { + updateLocalCheckpoint(tracker, extraId.getId(), minLocalCheckpointAfterUpdates + 1 + randomInt(4)); + markAsTrackingAndInSyncQuietly(tracker, extraId.getId(), randomInt((int) minLocalCheckpointAfterUpdates), false); + } else { + markAsTrackingAndInSyncQuietly(tracker, extraId.getId(), minLocalCheckpointAfterUpdates + 1 + randomInt(4), false); + } + } + + /** + * This test checks that updateGlobalCheckpointOnReplica with remote translog does not violate any of the invariants. + */ + public void testUpdateGlobalCheckpointOnReplicaWithRemoteTranslogEnabled() { + final AllocationId active = AllocationId.newInitializing(); + Settings settings = Settings.builder().put("index.remote_store.translog.enabled", "true").build(); + final ReplicationTracker tracker = newTracker(active, settings); + final long globalCheckpoint = randomLongBetween(NO_OPS_PERFORMED, Long.MAX_VALUE - 1); + tracker.updateGlobalCheckpointOnReplica(globalCheckpoint, "test"); + assertEquals(updatedGlobalCheckpoint.get(), globalCheckpoint); + final long nonUpdate = randomLongBetween(NO_OPS_PERFORMED, globalCheckpoint); + updatedGlobalCheckpoint.set(UNASSIGNED_SEQ_NO); + tracker.updateGlobalCheckpointOnReplica(nonUpdate, "test"); + assertEquals(updatedGlobalCheckpoint.get(), UNASSIGNED_SEQ_NO); + final long update = randomLongBetween(globalCheckpoint, Long.MAX_VALUE); + tracker.updateGlobalCheckpointOnReplica(update, "test"); + assertEquals(updatedGlobalCheckpoint.get(), update); + } + + public void testMarkAllocationIdAsInSyncWithRemoteTranslogEnabled() throws Exception { + final long initialClusterStateVersion = randomNonNegativeLong(); + Map activeWithCheckpoints = randomAllocationsWithLocalCheckpoints(1, 1); + Set active = new HashSet<>(activeWithCheckpoints.keySet()); + Map initializingWithCheckpoints = randomAllocationsWithLocalCheckpoints(1,
1); + Set initializing = new HashSet<>(initializingWithCheckpoints.keySet()); + final AllocationId primaryId = active.iterator().next(); + final AllocationId replicaId = initializing.iterator().next(); + Settings settings = Settings.builder().put("index.remote_store.translog.enabled", "true").build(); + final ReplicationTracker tracker = newTracker(primaryId, settings); + tracker.updateFromClusterManager(initialClusterStateVersion, ids(active), routingTable(initializing, primaryId)); + final long localCheckpoint = randomLongBetween(0, Long.MAX_VALUE - 1); + tracker.activatePrimaryMode(localCheckpoint); + tracker.initiateTracking(replicaId.getId()); + tracker.markAllocationIdAsInSync(replicaId.getId(), randomLongBetween(NO_OPS_PERFORMED, localCheckpoint - 1)); + assertFalse(tracker.pendingInSync()); + final long updatedLocalCheckpoint = randomLongBetween(1 + localCheckpoint, Long.MAX_VALUE); + updatedGlobalCheckpoint.set(UNASSIGNED_SEQ_NO); + tracker.updateLocalCheckpoint(primaryId.getId(), updatedLocalCheckpoint); + assertEquals(updatedGlobalCheckpoint.get(), updatedLocalCheckpoint); + tracker.updateLocalCheckpoint(replicaId.getId(), localCheckpoint); + assertEquals(updatedGlobalCheckpoint.get(), updatedLocalCheckpoint); + tracker.markAllocationIdAsInSync(replicaId.getId(), updatedLocalCheckpoint); + assertEquals(updatedGlobalCheckpoint.get(), updatedLocalCheckpoint); + } + + public void testMissingActiveIdsDoesNotPreventAdvanceWithRemoteTranslogEnabled() { + final Map active = randomAllocationsWithLocalCheckpoints(2, 5); + final Map initializing = randomAllocationsWithLocalCheckpoints(0, 5); + final Map assigned = new HashMap<>(); + assigned.putAll(active); + assigned.putAll(initializing); + AllocationId primaryId = active.keySet().iterator().next(); + Settings settings = Settings.builder().put("index.remote_store.translog.enabled", "true").build(); + final ReplicationTracker tracker = newTracker(primaryId, settings); + tracker.updateFromClusterManager(randomNonNegativeLong(), ids(active.keySet()), routingTable(initializing.keySet(), primaryId)); + tracker.activatePrimaryMode(NO_OPS_PERFORMED); + List initializingRandomSubset = randomSubsetOf(initializing.keySet()); + initializingRandomSubset.forEach(k -> markAsTrackingAndInSyncQuietly(tracker, k.getId(), NO_OPS_PERFORMED)); + final AllocationId missingActiveID = randomFrom(active.keySet()); + assigned.entrySet() + .stream() + .filter(e -> !e.getKey().equals(missingActiveID)) + .forEach(e -> updateLocalCheckpoint(tracker, e.getKey().getId(), e.getValue())); + long primaryLocalCheckpoint = active.get(primaryId); + + assertEquals(1 + initializingRandomSubset.size(), tracker.getReplicationGroup().getReplicationTargets().size()); + if (missingActiveID.equals(primaryId) == false) { + assertEquals(tracker.getGlobalCheckpoint(), primaryLocalCheckpoint); + assertEquals(updatedGlobalCheckpoint.get(), primaryLocalCheckpoint); + } + // now update all knowledge of all shards + assigned.forEach((aid, localCP) -> updateLocalCheckpoint(tracker, aid.getId(), 10 + localCP)); + assertEquals(tracker.getGlobalCheckpoint(), 10 + primaryLocalCheckpoint); + assertEquals(updatedGlobalCheckpoint.get(), 10 + primaryLocalCheckpoint); + } + + public void testMissingInSyncIdsDoesNotPreventAdvanceWithRemoteTranslogEnabled() { + final Map active = randomAllocationsWithLocalCheckpoints(1, 5); + final Map initializing = randomAllocationsWithLocalCheckpoints(2, 5); + logger.info("active: {}, initializing: {}", active, initializing); + + AllocationId primaryId = 
active.keySet().iterator().next(); + Settings settings = Settings.builder().put("index.remote_store.translog.enabled", "true").build(); + final ReplicationTracker tracker = newTracker(primaryId, settings); + tracker.updateFromClusterManager(randomNonNegativeLong(), ids(active.keySet()), routingTable(initializing.keySet(), primaryId)); + tracker.activatePrimaryMode(NO_OPS_PERFORMED); + randomSubsetOf(randomIntBetween(1, initializing.size() - 1), initializing.keySet()).forEach( + aId -> markAsTrackingAndInSyncQuietly(tracker, aId.getId(), NO_OPS_PERFORMED) + ); + long primaryLocalCheckpoint = active.get(primaryId); + + active.forEach((aid, localCP) -> updateLocalCheckpoint(tracker, aid.getId(), localCP)); + + assertEquals(tracker.getGlobalCheckpoint(), primaryLocalCheckpoint); + assertEquals(updatedGlobalCheckpoint.get(), primaryLocalCheckpoint); + + // update again + initializing.forEach((aid, localCP) -> updateLocalCheckpoint(tracker, aid.getId(), localCP)); + assertEquals(tracker.getGlobalCheckpoint(), primaryLocalCheckpoint); + assertEquals(updatedGlobalCheckpoint.get(), primaryLocalCheckpoint); + } + + public void testInSyncIdsAreIgnoredIfNotValidatedByClusterManagerWithRemoteTranslogEnabled() { + final Map active = randomAllocationsWithLocalCheckpoints(1, 5); + final Map initializing = randomAllocationsWithLocalCheckpoints(1, 5); + final Map nonApproved = randomAllocationsWithLocalCheckpoints(1, 5); + final AllocationId primaryId = active.keySet().iterator().next(); + Settings settings = Settings.builder().put("index.remote_store.translog.enabled", "true").build(); + final ReplicationTracker tracker = newTracker(primaryId, settings); + tracker.updateFromClusterManager(randomNonNegativeLong(), ids(active.keySet()), routingTable(initializing.keySet(), primaryId)); + tracker.activatePrimaryMode(NO_OPS_PERFORMED); + initializing.keySet().forEach(k -> markAsTrackingAndInSyncQuietly(tracker, k.getId(), NO_OPS_PERFORMED)); + nonApproved.keySet() + .forEach( + k -> expectThrows(IllegalStateException.class, () -> markAsTrackingAndInSyncQuietly(tracker, k.getId(), NO_OPS_PERFORMED)) + ); + + List> allocations = Arrays.asList(active, initializing, nonApproved); + Collections.shuffle(allocations, random()); + allocations.forEach(a -> a.forEach((aid, localCP) -> updateLocalCheckpoint(tracker, aid.getId(), localCP))); + + assertNotEquals(UNASSIGNED_SEQ_NO, tracker.getGlobalCheckpoint()); + } + + public void testInSyncIdsAreRemovedIfNotValidatedByClusterManagerWithRemoteTranslogEnabled() { + final long initialClusterStateVersion = randomNonNegativeLong(); + final Map activeToStay = randomAllocationsWithLocalCheckpoints(1, 5); + final Map initializingToStay = randomAllocationsWithLocalCheckpoints(1, 5); + final Map activeToBeRemoved = randomAllocationsWithLocalCheckpoints(1, 5); + final Map initializingToBeRemoved = randomAllocationsWithLocalCheckpoints(1, 5); + final Set active = Sets.union(activeToStay.keySet(), activeToBeRemoved.keySet()); + final Set initializing = Sets.union(initializingToStay.keySet(), initializingToBeRemoved.keySet()); + final Map allocations = new HashMap<>(); + final AllocationId primaryId = active.iterator().next(); + if (activeToBeRemoved.containsKey(primaryId)) { + activeToStay.put(primaryId, activeToBeRemoved.remove(primaryId)); + } + allocations.putAll(activeToStay); + if (randomBoolean()) { + allocations.putAll(activeToBeRemoved); + } + allocations.putAll(initializingToStay); + if (randomBoolean()) { + allocations.putAll(initializingToBeRemoved); + } + Settings settings 
= Settings.builder().put("index.remote_store.translog.enabled", "true").build(); + final ReplicationTracker tracker = newTracker(primaryId, settings); + tracker.updateFromClusterManager(initialClusterStateVersion, ids(active), routingTable(initializing, primaryId)); + tracker.activatePrimaryMode(NO_OPS_PERFORMED); + if (randomBoolean()) { + initializingToStay.keySet().forEach(k -> markAsTrackingAndInSyncQuietly(tracker, k.getId(), NO_OPS_PERFORMED)); + } else { + initializing.forEach(k -> markAsTrackingAndInSyncQuietly(tracker, k.getId(), NO_OPS_PERFORMED)); + } + if (randomBoolean()) { + allocations.forEach((aid, localCP) -> updateLocalCheckpoint(tracker, aid.getId(), localCP)); + } + + // now remove shards + if (randomBoolean()) { + tracker.updateFromClusterManager( + initialClusterStateVersion + 1, + ids(activeToStay.keySet()), + routingTable(initializingToStay.keySet(), primaryId) + ); + allocations.forEach((aid, ckp) -> updateLocalCheckpoint(tracker, aid.getId(), ckp + 10L)); + } else { + allocations.forEach((aid, ckp) -> updateLocalCheckpoint(tracker, aid.getId(), ckp + 10L)); + tracker.updateFromClusterManager( + initialClusterStateVersion + 2, + ids(activeToStay.keySet()), + routingTable(initializingToStay.keySet(), primaryId) + ); + } + + final long checkpoint = activeToStay.get(primaryId) + 10; + assertEquals(tracker.getGlobalCheckpoint(), checkpoint); + } + + public void testUpdateAllocationIdsFromClusterManagerWithRemoteTranslogEnabled() throws Exception { + final long initialClusterStateVersion = randomNonNegativeLong(); + final int numberOfActiveAllocationsIds = randomIntBetween(2, 16); + final int numberOfInitializingIds = randomIntBetween(2, 16); + final Tuple, Set> activeAndInitializingAllocationIds = randomActiveAndInitializingAllocationIds( + numberOfActiveAllocationsIds, + numberOfInitializingIds + ); + final Set activeAllocationIds = activeAndInitializingAllocationIds.v1(); + final Set initializingIds = activeAndInitializingAllocationIds.v2(); + AllocationId primaryId = activeAllocationIds.iterator().next(); + IndexShardRoutingTable routingTable = routingTable(initializingIds, primaryId); + Settings settings = Settings.builder().put("index.remote_store.translog.enabled", "true").build(); + final ReplicationTracker tracker = newTracker(primaryId, settings); + tracker.updateFromClusterManager(initialClusterStateVersion, ids(activeAllocationIds), routingTable); + tracker.activatePrimaryMode(NO_OPS_PERFORMED); + assertThat(tracker.getReplicationGroup().getInSyncAllocationIds(), equalTo(ids(activeAllocationIds))); + assertThat(tracker.getReplicationGroup().getRoutingTable(), equalTo(routingTable)); + + // first we assert that the in-sync and tracking sets are set up correctly + assertTrue(activeAllocationIds.stream().allMatch(a -> tracker.getTrackedLocalCheckpointForShard(a.getId()).inSync)); + assertTrue( + activeAllocationIds.stream() + .filter(a -> a.equals(primaryId) == false) + .allMatch( + a -> tracker.getTrackedLocalCheckpointForShard(a.getId()).getLocalCheckpoint() == SequenceNumbers.UNASSIGNED_SEQ_NO + ) + ); + assertTrue(initializingIds.stream().noneMatch(a -> tracker.getTrackedLocalCheckpointForShard(a.getId()).inSync)); + assertTrue( + initializingIds.stream() + .filter(a -> a.equals(primaryId) == false) + .allMatch( + a -> tracker.getTrackedLocalCheckpointForShard(a.getId()).getLocalCheckpoint() == SequenceNumbers.UNASSIGNED_SEQ_NO + ) + ); + + // now we will remove some allocation IDs from these and ensure that they propagate through + final Set 
removingActiveAllocationIds = new HashSet<>(randomSubsetOf(activeAllocationIds)); + removingActiveAllocationIds.remove(primaryId); + final Set newActiveAllocationIds = activeAllocationIds.stream() + .filter(a -> !removingActiveAllocationIds.contains(a)) + .collect(Collectors.toSet()); + final List removingInitializingAllocationIds = randomSubsetOf(initializingIds); + final Set newInitializingAllocationIds = initializingIds.stream() + .filter(a -> !removingInitializingAllocationIds.contains(a)) + .collect(Collectors.toSet()); + routingTable = routingTable(newInitializingAllocationIds, primaryId); + tracker.updateFromClusterManager(initialClusterStateVersion + 1, ids(newActiveAllocationIds), routingTable); + assertTrue(newActiveAllocationIds.stream().allMatch(a -> tracker.getTrackedLocalCheckpointForShard(a.getId()).inSync)); + assertTrue(removingActiveAllocationIds.stream().allMatch(a -> tracker.getTrackedLocalCheckpointForShard(a.getId()) == null)); + assertTrue(newInitializingAllocationIds.stream().noneMatch(a -> tracker.getTrackedLocalCheckpointForShard(a.getId()).inSync)); + assertTrue(removingInitializingAllocationIds.stream().allMatch(a -> tracker.getTrackedLocalCheckpointForShard(a.getId()) == null)); + assertThat( + tracker.getReplicationGroup().getInSyncAllocationIds(), + equalTo(ids(Sets.difference(Sets.union(activeAllocationIds, newActiveAllocationIds), removingActiveAllocationIds))) + ); + assertThat(tracker.getReplicationGroup().getRoutingTable(), equalTo(routingTable)); + + /* + * Now we will add an allocation ID to each of active and initializing and ensure they propagate through. Using different lengths + * than we have been using above ensures that we can not collide with a previous allocation ID + */ + newInitializingAllocationIds.add(AllocationId.newInitializing()); + tracker.updateFromClusterManager( + initialClusterStateVersion + 2, + ids(newActiveAllocationIds), + routingTable(newInitializingAllocationIds, primaryId) + ); + assertTrue(newActiveAllocationIds.stream().allMatch(a -> tracker.getTrackedLocalCheckpointForShard(a.getId()).inSync)); + assertTrue( + newActiveAllocationIds.stream() + .filter(a -> a.equals(primaryId) == false) + .allMatch( + a -> tracker.getTrackedLocalCheckpointForShard(a.getId()).getLocalCheckpoint() == SequenceNumbers.UNASSIGNED_SEQ_NO + ) + ); + assertTrue(newInitializingAllocationIds.stream().noneMatch(a -> tracker.getTrackedLocalCheckpointForShard(a.getId()).inSync)); + assertTrue( + newInitializingAllocationIds.stream() + .allMatch( + a -> tracker.getTrackedLocalCheckpointForShard(a.getId()).getLocalCheckpoint() == SequenceNumbers.UNASSIGNED_SEQ_NO + ) + ); + + // the tracking allocation IDs should play no role in determining the global checkpoint + final Map activeLocalCheckpoints = newActiveAllocationIds.stream() + .collect(Collectors.toMap(Function.identity(), a -> randomIntBetween(1, 1024))); + activeLocalCheckpoints.forEach((a, l) -> updateLocalCheckpoint(tracker, a.getId(), l)); + final Map initializingLocalCheckpoints = newInitializingAllocationIds.stream() + .collect(Collectors.toMap(Function.identity(), a -> randomIntBetween(1, 1024))); + initializingLocalCheckpoints.forEach((a, l) -> updateLocalCheckpoint(tracker, a.getId(), l)); + assertTrue( + activeLocalCheckpoints.entrySet() + .stream() + .allMatch(e -> tracker.getTrackedLocalCheckpointForShard(e.getKey().getId()).getLocalCheckpoint() == e.getValue()) + ); + assertTrue( + initializingLocalCheckpoints.entrySet() + .stream() + .allMatch(e -> 
tracker.getTrackedLocalCheckpointForShard(e.getKey().getId()).getLocalCheckpoint() == e.getValue()) + ); + final long primaryLocalCheckpoint = activeLocalCheckpoints.get(primaryId); + assertThat(tracker.getGlobalCheckpoint(), equalTo(primaryLocalCheckpoint)); + assertThat(updatedGlobalCheckpoint.get(), equalTo(primaryLocalCheckpoint)); + final long minimumInitializingLocalCheckpoint = (long) initializingLocalCheckpoints.values().stream().min(Integer::compareTo).get(); + + // now we are going to add a new allocation ID and bring it in sync, which should move it to the in-sync allocation IDs + final long localCheckpoint = randomIntBetween( + 0, + Math.toIntExact(Math.min(primaryLocalCheckpoint, minimumInitializingLocalCheckpoint) - 1) + ); + + // using a different length than we have been using above ensures that we can not collide with a previous allocation ID + final AllocationId newSyncingAllocationId = AllocationId.newInitializing(); + newInitializingAllocationIds.add(newSyncingAllocationId); + tracker.updateFromClusterManager( + initialClusterStateVersion + 3, + ids(newActiveAllocationIds), + routingTable(newInitializingAllocationIds, primaryId) + ); + addPeerRecoveryRetentionLease(tracker, newSyncingAllocationId); + final CyclicBarrier barrier = new CyclicBarrier(2); + final Thread thread = new Thread(() -> { + try { + barrier.await(); + tracker.initiateTracking(newSyncingAllocationId.getId()); + tracker.markAllocationIdAsInSync(newSyncingAllocationId.getId(), localCheckpoint); + barrier.await(); + } catch (final BrokenBarrierException | InterruptedException e) { + throw new RuntimeException(e); + } + }); + + thread.start(); + + barrier.await(); + + assertBusy(() -> { + assertFalse(tracker.pendingInSync.contains(newSyncingAllocationId.getId())); + assertTrue(tracker.getTrackedLocalCheckpointForShard(newSyncingAllocationId.getId()).inSync); + }); + + tracker.updateLocalCheckpoint(newSyncingAllocationId.getId(), randomIntBetween(Math.toIntExact(primaryLocalCheckpoint), 1024)); + + barrier.await(); + + assertFalse(tracker.pendingInSync.contains(newSyncingAllocationId.getId())); + assertTrue(tracker.getTrackedLocalCheckpointForShard(newSyncingAllocationId.getId()).inSync); + + /* + * The new in-sync allocation ID is in the in-sync set now, yet the cluster-manager does not know this; the allocation ID should still be in + * the in-sync set even if we receive a cluster state update that does not reflect this.
+ * + */ + tracker.updateFromClusterManager( + initialClusterStateVersion + 4, + ids(newActiveAllocationIds), + routingTable(newInitializingAllocationIds, primaryId) + ); + assertTrue(tracker.getTrackedLocalCheckpointForShard(newSyncingAllocationId.getId()).inSync); + assertFalse(tracker.pendingInSync.contains(newSyncingAllocationId.getId())); + } + + public void testPrimaryContextHandoffWithRemoteTranslogEnabled() throws IOException { + Settings settings = Settings.builder().put("index.remote_store.translog.enabled", "true").build(); + final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("test", settings); + final ShardId shardId = new ShardId("test", "_na_", 0); + + FakeClusterState clusterState = initialState(); + final AllocationId aId = clusterState.routingTable.primaryShard().allocationId(); + final LongConsumer onUpdate = updatedGlobalCheckpoint -> {}; + final long primaryTerm = randomNonNegativeLong(); + final long globalCheckpoint = UNASSIGNED_SEQ_NO; + final BiConsumer> onNewRetentionLease = (leases, listener) -> {}; + ReplicationTracker oldPrimary = new ReplicationTracker( + shardId, + aId.getId(), + indexSettings, + primaryTerm, + globalCheckpoint, + onUpdate, + () -> 0L, + onNewRetentionLease, + OPS_BASED_RECOVERY_ALWAYS_REASONABLE + ); + ReplicationTracker newPrimary = new ReplicationTracker( + shardId, + aId.getRelocationId(), + indexSettings, + primaryTerm, + globalCheckpoint, + onUpdate, + () -> 0L, + onNewRetentionLease, + OPS_BASED_RECOVERY_ALWAYS_REASONABLE + ); + + Set allocationIds = new HashSet<>(Arrays.asList(oldPrimary.shardAllocationId, newPrimary.shardAllocationId)); + + clusterState.apply(oldPrimary); + clusterState.apply(newPrimary); + + oldPrimary.activatePrimaryMode(randomIntBetween(Math.toIntExact(NO_OPS_PERFORMED), 10)); + addPeerRecoveryRetentionLease(oldPrimary, newPrimary.shardAllocationId); + newPrimary.updateRetentionLeasesOnReplica(oldPrimary.getRetentionLeases()); + + final int numUpdates = randomInt(10); + for (int i = 0; i < numUpdates; i++) { + if (rarely()) { + clusterState = randomUpdateClusterState(allocationIds, clusterState); + clusterState.apply(oldPrimary); + clusterState.apply(newPrimary); + } + if (randomBoolean()) { + randomLocalCheckpointUpdate(oldPrimary); + } + if (randomBoolean()) { + randomMarkInSync(oldPrimary, newPrimary); + } + } + + // simulate transferring the global checkpoint to the new primary after finalizing recovery before the handoff + markAsTrackingAndInSyncQuietly( + oldPrimary, + newPrimary.shardAllocationId, + Math.max(SequenceNumbers.NO_OPS_PERFORMED, oldPrimary.getGlobalCheckpoint() + randomInt(5)) + ); + oldPrimary.updateGlobalCheckpointForShard(newPrimary.shardAllocationId, oldPrimary.getGlobalCheckpoint()); + ReplicationTracker.PrimaryContext primaryContext = oldPrimary.startRelocationHandoff(newPrimary.shardAllocationId); + + if (randomBoolean()) { + // cluster state update after primary context handoff + if (randomBoolean()) { + clusterState = randomUpdateClusterState(allocationIds, clusterState); + clusterState.apply(oldPrimary); + clusterState.apply(newPrimary); + } + + // abort handoff, check that we can continue updates and retry handoff + oldPrimary.abortRelocationHandoff(); + + if (rarely()) { + clusterState = randomUpdateClusterState(allocationIds, clusterState); + clusterState.apply(oldPrimary); + clusterState.apply(newPrimary); + } + if (randomBoolean()) { + randomLocalCheckpointUpdate(oldPrimary); + } + if (randomBoolean()) { + randomMarkInSync(oldPrimary, newPrimary); + } + + // 
do another handoff + primaryContext = oldPrimary.startRelocationHandoff(newPrimary.shardAllocationId); + } + + // send primary context through the wire + BytesStreamOutput output = new BytesStreamOutput(); + primaryContext.writeTo(output); + StreamInput streamInput = output.bytes().streamInput(); + primaryContext = new ReplicationTracker.PrimaryContext(streamInput); + switch (randomInt(3)) { + case 0: { + // apply cluster state update on old primary while primary context is being transferred + clusterState = randomUpdateClusterState(allocationIds, clusterState); + clusterState.apply(oldPrimary); + // activate new primary + newPrimary.activateWithPrimaryContext(primaryContext); + // apply cluster state update on new primary so that the states on old and new primary are comparable + clusterState.apply(newPrimary); + break; + } + case 1: { + // apply cluster state update on new primary while primary context is being transferred + clusterState = randomUpdateClusterState(allocationIds, clusterState); + clusterState.apply(newPrimary); + // activate new primary + newPrimary.activateWithPrimaryContext(primaryContext); + // apply cluster state update on old primary so that the states on old and new primary are comparable + clusterState.apply(oldPrimary); + break; + } + case 2: { + // apply cluster state update on both copies while primary context is being transferred + clusterState = randomUpdateClusterState(allocationIds, clusterState); + clusterState.apply(oldPrimary); + clusterState.apply(newPrimary); + newPrimary.activateWithPrimaryContext(primaryContext); + break; + } + case 3: { + // no cluster state update + newPrimary.activateWithPrimaryContext(primaryContext); + break; + } + } + + assertTrue(oldPrimary.primaryMode); + assertTrue(newPrimary.primaryMode); + assertThat(newPrimary.appliedClusterStateVersion, equalTo(oldPrimary.appliedClusterStateVersion)); + /* + * We can not assert on shared knowledge of the global checkpoint between the old primary and the new primary as the new primary + * will update its global checkpoint state without the old primary learning of it, and the old primary could have updated its + * global checkpoint state after the primary context was transferred. 
+ */ + Map oldPrimaryCheckpointsCopy = new HashMap<>(oldPrimary.checkpoints); + oldPrimaryCheckpointsCopy.remove(oldPrimary.shardAllocationId); + oldPrimaryCheckpointsCopy.remove(newPrimary.shardAllocationId); + Map newPrimaryCheckpointsCopy = new HashMap<>(newPrimary.checkpoints); + newPrimaryCheckpointsCopy.remove(oldPrimary.shardAllocationId); + newPrimaryCheckpointsCopy.remove(newPrimary.shardAllocationId); + assertThat(newPrimaryCheckpointsCopy, equalTo(oldPrimaryCheckpointsCopy)); + // we can however assert that shared knowledge of the local checkpoint and in-sync status is equal + assertThat( + oldPrimary.checkpoints.get(oldPrimary.shardAllocationId).localCheckpoint, + equalTo(newPrimary.checkpoints.get(oldPrimary.shardAllocationId).localCheckpoint) + ); + assertThat( + oldPrimary.checkpoints.get(newPrimary.shardAllocationId).localCheckpoint, + equalTo(newPrimary.checkpoints.get(newPrimary.shardAllocationId).localCheckpoint) + ); + assertThat( + oldPrimary.checkpoints.get(oldPrimary.shardAllocationId).inSync, + equalTo(newPrimary.checkpoints.get(oldPrimary.shardAllocationId).inSync) + ); + assertThat( + oldPrimary.checkpoints.get(newPrimary.shardAllocationId).inSync, + equalTo(newPrimary.checkpoints.get(newPrimary.shardAllocationId).inSync) + ); + assertThat(newPrimary.getGlobalCheckpoint(), equalTo(oldPrimary.getGlobalCheckpoint())); + assertThat(newPrimary.routingTable, equalTo(oldPrimary.routingTable)); + assertThat(newPrimary.replicationGroup, equalTo(oldPrimary.replicationGroup)); + + assertFalse(oldPrimary.relocated); + oldPrimary.completeRelocationHandoff(); + assertFalse(oldPrimary.primaryMode); + assertTrue(oldPrimary.relocated); + } + + public void testIllegalStateExceptionIfUnknownAllocationIdWithRemoteTranslogEnabled() { + final AllocationId active = AllocationId.newInitializing(); + final AllocationId initializing = AllocationId.newInitializing(); + Settings settings = Settings.builder().put("index.remote_store.translog.enabled", "true").build(); + final ReplicationTracker tracker = newTracker(active, settings); + tracker.updateFromClusterManager( + randomNonNegativeLong(), + Collections.singleton(active.getId()), + routingTable(Collections.singleton(initializing), active) + ); + tracker.activatePrimaryMode(NO_OPS_PERFORMED); + + expectThrows(IllegalStateException.class, () -> tracker.initiateTracking(randomAlphaOfLength(10))); + expectThrows(IllegalStateException.class, () -> tracker.markAllocationIdAsInSync(randomAlphaOfLength(10), randomNonNegativeLong())); + } + } diff --git a/server/src/test/java/org/opensearch/index/store/remote/file/OnDemandBlockSnapshotIndexInputTests.java b/server/src/test/java/org/opensearch/index/store/remote/file/OnDemandBlockSnapshotIndexInputTests.java index 6f3387a935c03..9104ab1a6882b 100644 --- a/server/src/test/java/org/opensearch/index/store/remote/file/OnDemandBlockSnapshotIndexInputTests.java +++ b/server/src/test/java/org/opensearch/index/store/remote/file/OnDemandBlockSnapshotIndexInputTests.java @@ -53,7 +53,6 @@ public class OnDemandBlockSnapshotIndexInputTests extends OpenSearchTestCase { @Before public void init() { - assumeFalse("Awaiting Windows fix https://github.com/opensearch-project/OpenSearch/issues/5396", Constants.WINDOWS); transferManager = mock(TransferManager.class); lockFactory = SimpleFSLockFactory.INSTANCE; path = LuceneTestCase.createTempDir("OnDemandBlockSnapshotIndexInputTests"); @@ -69,6 +68,7 @@ public void clean() { } public void testVariousBlockSize() throws Exception { + assumeFalse("Awaiting Windows fix 
https://github.com/opensearch-project/OpenSearch/issues/5396", Constants.WINDOWS); int fileSize = 29360128; int blockSizeShift; diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index ff4005d9bcedf..663c325db12c2 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -156,6 +156,7 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.transport.TransportAddress; import org.opensearch.common.util.BigArrays; +import org.opensearch.common.util.FeatureFlags; import org.opensearch.common.util.PageCacheRecycler; import org.opensearch.common.util.concurrent.AbstractRunnable; import org.opensearch.common.util.concurrent.PrioritizedOpenSearchThreadPoolExecutor; @@ -191,6 +192,7 @@ import org.opensearch.ingest.IngestService; import org.opensearch.monitor.StatusInfo; import org.opensearch.node.ResponseCollectorService; +import org.opensearch.extensions.ExtensionsManager; import org.opensearch.plugins.PluginsService; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.Repository; @@ -1795,40 +1797,79 @@ public void onFailure(final Exception e) { ); final BigArrays bigArrays = new BigArrays(new PageCacheRecycler(settings), null, "test"); final MapperRegistry mapperRegistry = new IndicesModule(Collections.emptyList()).getMapperRegistry(); - indicesService = new IndicesService( - settings, - mock(PluginsService.class), - nodeEnv, - namedXContentRegistry, - new AnalysisRegistry( - environment, - emptyMap(), - emptyMap(), - emptyMap(), - emptyMap(), + if (FeatureFlags.isEnabled(FeatureFlags.EXTENSIONS)) { + indicesService = new IndicesService( + settings, + mock(PluginsService.class), + mock(ExtensionsManager.class), + nodeEnv, + namedXContentRegistry, + new AnalysisRegistry( + environment, + emptyMap(), + emptyMap(), + emptyMap(), + emptyMap(), + emptyMap(), + emptyMap(), + emptyMap(), + emptyMap(), + emptyMap() + ), + indexNameExpressionResolver, + mapperRegistry, + namedWriteableRegistry, + threadPool, + indexScopedSettings, + new NoneCircuitBreakerService(), + bigArrays, + scriptService, + clusterService, + client, + new MetaStateService(nodeEnv, namedXContentRegistry), + Collections.emptyList(), emptyMap(), + null, emptyMap(), + new RemoteSegmentStoreDirectoryFactory(() -> repositoriesService) + ); + } else { + indicesService = new IndicesService( + settings, + mock(PluginsService.class), + nodeEnv, + namedXContentRegistry, + new AnalysisRegistry( + environment, + emptyMap(), + emptyMap(), + emptyMap(), + emptyMap(), + emptyMap(), + emptyMap(), + emptyMap(), + emptyMap(), + emptyMap() + ), + indexNameExpressionResolver, + mapperRegistry, + namedWriteableRegistry, + threadPool, + indexScopedSettings, + new NoneCircuitBreakerService(), + bigArrays, + scriptService, + clusterService, + client, + new MetaStateService(nodeEnv, namedXContentRegistry), + Collections.emptyList(), emptyMap(), + null, emptyMap(), - emptyMap() - ), - indexNameExpressionResolver, - mapperRegistry, - namedWriteableRegistry, - threadPool, - indexScopedSettings, - new NoneCircuitBreakerService(), - bigArrays, - scriptService, - clusterService, - client, - new MetaStateService(nodeEnv, namedXContentRegistry), - Collections.emptyList(), - emptyMap(), - null, - emptyMap(), - new RemoteSegmentStoreDirectoryFactory(() -> repositoriesService) - ); 
+ new RemoteSegmentStoreDirectoryFactory(() -> repositoriesService) + ); + } + final RecoverySettings recoverySettings = new RecoverySettings(settings, clusterSettings); snapshotShardsService = new SnapshotShardsService( settings, diff --git a/server/src/test/java/org/opensearch/transport/TransportServiceHandshakeTests.java b/server/src/test/java/org/opensearch/transport/TransportServiceHandshakeTests.java index 8463d9268e760..c0af5d6e76c59 100644 --- a/server/src/test/java/org/opensearch/transport/TransportServiceHandshakeTests.java +++ b/server/src/test/java/org/opensearch/transport/TransportServiceHandshakeTests.java @@ -41,12 +41,15 @@ import org.opensearch.common.settings.Settings; import org.opensearch.common.util.PageCacheRecycler; import org.opensearch.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.test.MockLogAppender; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.test.VersionUtils; import org.opensearch.test.transport.MockTransportService; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; import org.opensearch.transport.nio.MockNioTransport; +import org.apache.logging.log4j.Level; +import org.apache.logging.log4j.LogManager; import org.junit.After; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -223,6 +226,36 @@ public void testNodeConnectWithDifferentNodeId() { assertFalse(handleA.transportService.nodeConnected(discoveryNode)); } + public void testNodeConnectWithDifferentNodeIDSkipValidation() throws IllegalAccessException { + Settings settings = Settings.builder().put("cluster.name", "test").build(); + NetworkHandle handleA = startServices("TS_A", settings, Version.CURRENT); + NetworkHandle handleB = startServices("TS_B", settings, Version.CURRENT); + DiscoveryNode discoveryNode = new DiscoveryNode( + randomAlphaOfLength(10), + handleB.discoveryNode.getAddress(), + emptyMap(), + emptySet(), + handleB.discoveryNode.getVersion() + ); + try (MockLogAppender mockLogAppender = MockLogAppender.createForLoggers(LogManager.getLogger(TransportService.class))) { + + mockLogAppender.addExpectation( + new MockLogAppender.SeenEventExpectation( + "Validation Skipped", + "org.opensearch.transport.TransportService", + Level.INFO, + "Connection validation was skipped" + ) + ); + + handleA.transportService.connectToExtensionNode(discoveryNode, TestProfiles.LIGHT_PROFILE); + + mockLogAppender.assertAllExpectationsMatched(); + + assertTrue(handleA.transportService.nodeConnected(discoveryNode)); + } + } + private static class NetworkHandle { private TransportService transportService; private DiscoveryNode discoveryNode; diff --git a/server/src/test/resources/config/extensions.yml b/server/src/test/resources/config/extensions.yml new file mode 100644 index 0000000000000..6264e9630ad60 --- /dev/null +++ b/server/src/test/resources/config/extensions.yml @@ -0,0 +1,13 @@ +extensions: + - name: firstExtension + uniqueId: uniqueid1 + hostName: 'myIndependentPluginHost1' + hostAddress: '127.0.0.0' + port: '9300' + version: '3.0.0' + - name: "secondExtension" + uniqueId: 'uniqueid2' + hostName: 'myIndependentPluginHost2' + hostAddress: '127.0.0.1' + port: '9301' + version: '2.0.0' diff --git a/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java b/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java index b3f062aef4fbe..92c80ac1799ef 100644 --- 
a/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/replication/OpenSearchIndexLevelReplicationTestCase.java @@ -52,6 +52,7 @@ import org.opensearch.action.support.ActionTestUtils; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.action.support.WriteRequest; +import org.opensearch.action.support.replication.FanoutReplicationProxy; import org.opensearch.action.support.replication.PendingReplicationActions; import org.opensearch.action.support.replication.ReplicatedWriteRequest; import org.opensearch.action.support.replication.ReplicationOperation; @@ -727,7 +728,8 @@ public void execute() { opType, primaryTerm, TimeValue.timeValueMillis(20), - TimeValue.timeValueSeconds(60) + TimeValue.timeValueSeconds(60), + new FanoutReplicationProxy<>() ).execute(); } catch (Exception e) { listener.onFailure(e);
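The *WithRemoteTranslogEnabled tests above repeatedly assert assertEquals(tracker.getGlobalCheckpoint(), primaryLocalCheckpoint): once index.remote_store.translog.enabled is set, replicas no longer hold the global checkpoint back. Below is a minimal sketch of that rule, assuming a hypothetical computeGlobalCheckpoint helper with simplified types; it is not the actual ReplicationTracker logic.

import java.util.Map;

// Sketch only: the checkpoint rule exercised by the *WithRemoteTranslogEnabled
// tests. Names and signatures here are hypothetical, not the real API.
final class GlobalCheckpointRuleSketch {

    static final long UNASSIGNED_SEQ_NO = -2L;

    // With a remote translog, operations are durable in the remote store, so
    // only the primary's own local checkpoint gates the global checkpoint.
    // Without it, the classic rule applies: the minimum local checkpoint
    // across all in-sync copies.
    static long computeGlobalCheckpoint(
        boolean remoteTranslogEnabled,
        String primaryAllocationId,
        Map<String, Long> inSyncLocalCheckpoints
    ) {
        if (remoteTranslogEnabled) {
            return inSyncLocalCheckpoints.getOrDefault(primaryAllocationId, UNASSIGNED_SEQ_NO);
        }
        return inSyncLocalCheckpoints.values().stream()
            .mapToLong(Long::longValue)
            .min()
            .orElse(UNASSIGNED_SEQ_NO);
    }
}

Under such a rule, a test like testMissingActiveIdsDoesNotPreventAdvanceWithRemoteTranslogEnabled can hold: checkpoint knowledge of a non-primary active shard may be missing entirely and the global checkpoint still advances with the primary's local checkpoint.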
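The last hunk threads a FanoutReplicationProxy<> into ReplicationOperation, making the fan-out strategy an explicit collaborator of the operation. Only FanoutReplicationProxy appears in this diff; the interface shape and the no-op variant below are illustrative assumptions about how such a strategy could be modeled, not the real OpenSearch API.

import java.util.List;
import java.util.function.Consumer;

// Hypothetical proxy contract: the operation hands over its replication
// targets plus a per-target action, and the proxy decides which targets
// actually receive the operation.
interface ReplicationProxySketch<Target> {
    void fanOut(List<Target> targets, Consumer<Target> replicateTo);
}

// Replicate every operation to every target: the pre-existing behavior.
final class FanoutReplicationProxySketch<Target> implements ReplicationProxySketch<Target> {
    @Override
    public void fanOut(List<Target> targets, Consumer<Target> replicateTo) {
        targets.forEach(replicateTo);
    }
}

// Skip per-operation fan-out, e.g. when replicas are kept up to date by
// copying segments or reading the remote store instead of replaying each
// operation on every copy.
final class NoOpReplicationProxySketch<Target> implements ReplicationProxySketch<Target> {
    @Override
    public void fanOut(List<Target> targets, Consumer<Target> replicateTo) {
        // intentionally empty: no per-operation replication
    }
}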