From 96b45ca1035c5a6e75e137a29da27f0e2d7a06db Mon Sep 17 00:00:00 2001
From: Andriy Redko
Date: Thu, 19 May 2022 17:20:26 -0400
Subject: [PATCH 01/16] [REMOVE] Cleanup deprecated thread pool types (FIXED_AUTO_QUEUE_SIZE) (#3369)

Signed-off-by: Andriy Redko
---
 .../main/java/org/opensearch/threadpool/ThreadPool.java | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/server/src/main/java/org/opensearch/threadpool/ThreadPool.java b/server/src/main/java/org/opensearch/threadpool/ThreadPool.java
index 77682a7946c8f..cc8d81d2a7b4b 100644
--- a/server/src/main/java/org/opensearch/threadpool/ThreadPool.java
+++ b/server/src/main/java/org/opensearch/threadpool/ThreadPool.java
@@ -118,7 +118,6 @@ public enum ThreadPoolType {
     DIRECT("direct"),
     FIXED("fixed"),
     RESIZABLE("resizable"),
-    FIXED_AUTO_QUEUE_SIZE("fixed_auto_queue_size"),
     SCALING("scaling");

     private final String type;
@@ -696,7 +695,13 @@ public Info(String name, ThreadPoolType type, int min, int max, @Nullable TimeVa
     public Info(StreamInput in) throws IOException {
         name = in.readString();
-        type = ThreadPoolType.fromType(in.readString());
+        final String typeStr = in.readString();
+        // OpenSearch 3.0.0 and later does not know about the "fixed_auto_queue_size" thread pool type. Convert it to RESIZABLE.
+        if (typeStr.equalsIgnoreCase("fixed_auto_queue_size")) {
+            type = ThreadPoolType.RESIZABLE;
+        } else {
+            type = ThreadPoolType.fromType(typeStr);
+        }
         min = in.readInt();
         max = in.readInt();
         keepAlive = in.readOptionalTimeValue();

From 6699624dde2ee214b8f40951a054e2dae0a45170 Mon Sep 17 00:00:00 2001
From: Suraj Singh
Date: Fri, 20 May 2022 07:29:55 -0700
Subject: [PATCH 02/16] [Type removal] _type removal from tests of yaml tests (#3406)

* [Type removal] _type removal from tests of yaml tests

Signed-off-by: Suraj Singh

* Fix spotless failures

Signed-off-by: Suraj Singh

* Fix assertion failures

Signed-off-by: Suraj Singh

* Fix assertion failures in DoSectionTests

Signed-off-by: Suraj Singh
---
 .../section/ClientYamlTestSuiteTests.java | 105 ++++--------------
 .../rest/yaml/section/DoSectionTests.java | 52 +++------
 .../rest-api-spec/test/suite1/10_basic.yml | 4 -
 3 files changed, 40 insertions(+), 121 deletions(-)

diff --git a/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java b/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java
index f995e18d0f2df..40421ef43ab6b 100644
--- a/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java
+++ b/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/ClientYamlTestSuiteTests.java
@@ -32,8 +32,6 @@

 package org.opensearch.test.rest.yaml.section;

-import org.opensearch.LegacyESVersion;
-import org.opensearch.Version;
 import org.opensearch.client.NodeSelector;
 import org.opensearch.common.ParsingException;
 import org.opensearch.common.xcontent.XContentLocation;
@@ -76,23 +74,9 @@ public void testParseTestSetupTeardownAndSections() throws Exception {
             + " indices.get_mapping:\n"
             + " index: test_index\n"
             + "\n"
-            + " - match: {test_index.test_type.properties.text.type: string}\n"
-            + " - match: {test_index.test_type.properties.text.analyzer: whitespace}\n"
+            + " - match: {test_index.properties.text.type: string}\n"
+            + " - match: {test_index.properties.text.analyzer: whitespace}\n"
             + "\n"
-            + "---\n"
-            + "\"Get type mapping - pre 6.0\":\n"
-            + "\n"
-            + " - skip:\n"
-            + " version: \"6.0.0 - \"\n"
-            + " reason: \"for newer versions the index 
name is always returned\"\n" - + "\n" - + " - do:\n" - + " indices.get_mapping:\n" - + " index: test_index\n" - + " type: test_type\n" - + "\n" - + " - match: {test_type.properties.text.type: string}\n" - + " - match: {test_type.properties.text.analyzer: whitespace}\n" ); ClientYamlTestSuite restTestSuite = ClientYamlTestSuite.parse(getTestClass().getName(), getTestName(), parser); @@ -135,7 +119,7 @@ public void testParseTestSetupTeardownAndSections() throws Exception { assertThat(restTestSuite.getTeardownSection().isEmpty(), equalTo(true)); } - assertThat(restTestSuite.getTestSections().size(), equalTo(2)); + assertThat(restTestSuite.getTestSections().size(), equalTo(1)); assertThat(restTestSuite.getTestSections().get(0).getName(), equalTo("Get index mapping")); assertThat(restTestSuite.getTestSections().get(0).getSkipSection().isEmpty(), equalTo(true)); @@ -147,36 +131,13 @@ public void testParseTestSetupTeardownAndSections() throws Exception { assertThat(doSection.getApiCallSection().getParams().get("index"), equalTo("test_index")); assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(1), instanceOf(MatchAssertion.class)); MatchAssertion matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(1); - assertThat(matchAssertion.getField(), equalTo("test_index.test_type.properties.text.type")); + assertThat(matchAssertion.getField(), equalTo("test_index.properties.text.type")); assertThat(matchAssertion.getExpectedValue().toString(), equalTo("string")); assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(2), instanceOf(MatchAssertion.class)); matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(2); - assertThat(matchAssertion.getField(), equalTo("test_index.test_type.properties.text.analyzer")); + assertThat(matchAssertion.getField(), equalTo("test_index.properties.text.analyzer")); assertThat(matchAssertion.getExpectedValue().toString(), equalTo("whitespace")); - assertThat(restTestSuite.getTestSections().get(1).getName(), equalTo("Get type mapping - pre 6.0")); - assertThat(restTestSuite.getTestSections().get(1).getSkipSection().isEmpty(), equalTo(false)); - assertThat( - restTestSuite.getTestSections().get(1).getSkipSection().getReason(), - equalTo("for newer versions the index name is always returned") - ); - assertThat(restTestSuite.getTestSections().get(1).getSkipSection().getLowerVersion(), equalTo(LegacyESVersion.fromString("6.0.0"))); - assertThat(restTestSuite.getTestSections().get(1).getSkipSection().getUpperVersion(), equalTo(Version.CURRENT)); - assertThat(restTestSuite.getTestSections().get(1).getExecutableSections().size(), equalTo(3)); - assertThat(restTestSuite.getTestSections().get(1).getExecutableSections().get(0), instanceOf(DoSection.class)); - doSection = (DoSection) restTestSuite.getTestSections().get(1).getExecutableSections().get(0); - assertThat(doSection.getApiCallSection().getApi(), equalTo("indices.get_mapping")); - assertThat(doSection.getApiCallSection().getParams().size(), equalTo(2)); - assertThat(doSection.getApiCallSection().getParams().get("index"), equalTo("test_index")); - assertThat(doSection.getApiCallSection().getParams().get("type"), equalTo("test_type")); - assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(1), instanceOf(MatchAssertion.class)); - matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(1).getExecutableSections().get(1); - 
assertThat(matchAssertion.getField(), equalTo("test_type.properties.text.type")); - assertThat(matchAssertion.getExpectedValue().toString(), equalTo("string")); - assertThat(restTestSuite.getTestSections().get(1).getExecutableSections().get(2), instanceOf(MatchAssertion.class)); - matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(1).getExecutableSections().get(2); - assertThat(matchAssertion.getField(), equalTo("test_type.properties.text.analyzer")); - assertThat(matchAssertion.getExpectedValue().toString(), equalTo("whitespace")); } public void testParseTestSingleTestSection() throws Exception { @@ -188,24 +149,20 @@ public void testParseTestSingleTestSection() throws Exception { + " - do:\n" + " index:\n" + " index: test-weird-index-中文\n" - + " type: weird.type\n" + " id: 1\n" + " body: { foo: bar }\n" + "\n" + " - is_true: ok\n" + " - match: { _index: test-weird-index-中文 }\n" - + " - match: { _type: weird.type }\n" + " - match: { _id: \"1\"}\n" + " - match: { _version: 1}\n" + "\n" + " - do:\n" + " get:\n" + " index: test-weird-index-中文\n" - + " type: weird.type\n" + " id: 1\n" + "\n" + " - match: { _index: test-weird-index-中文 }\n" - + " - match: { _type: weird.type }\n" + " - match: { _id: \"1\"}\n" + " - match: { _version: 1}\n" + " - match: { _source: { foo: bar }}" @@ -222,12 +179,12 @@ public void testParseTestSingleTestSection() throws Exception { assertThat(restTestSuite.getTestSections().get(0).getName(), equalTo("Index with ID")); assertThat(restTestSuite.getTestSections().get(0).getSkipSection().isEmpty(), equalTo(true)); - assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().size(), equalTo(12)); + assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().size(), equalTo(10)); assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(0), instanceOf(DoSection.class)); DoSection doSection = (DoSection) restTestSuite.getTestSections().get(0).getExecutableSections().get(0); assertThat(doSection.getCatch(), nullValue()); assertThat(doSection.getApiCallSection().getApi(), equalTo("index")); - assertThat(doSection.getApiCallSection().getParams().size(), equalTo(3)); + assertThat(doSection.getApiCallSection().getParams().size(), equalTo(2)); assertThat(doSection.getApiCallSection().hasBody(), equalTo(true)); assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(1), instanceOf(IsTrueAssertion.class)); IsTrueAssertion trueAssertion = (IsTrueAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(1); @@ -238,40 +195,32 @@ public void testParseTestSingleTestSection() throws Exception { assertThat(matchAssertion.getExpectedValue().toString(), equalTo("test-weird-index-中文")); assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(3), instanceOf(MatchAssertion.class)); matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(3); - assertThat(matchAssertion.getField(), equalTo("_type")); - assertThat(matchAssertion.getExpectedValue().toString(), equalTo("weird.type")); - assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(4), instanceOf(MatchAssertion.class)); - matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(4); assertThat(matchAssertion.getField(), equalTo("_id")); assertThat(matchAssertion.getExpectedValue().toString(), equalTo("1")); - assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(5), 
instanceOf(MatchAssertion.class)); - matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(5); + assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(4), instanceOf(MatchAssertion.class)); + matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(4); assertThat(matchAssertion.getField(), equalTo("_version")); assertThat(matchAssertion.getExpectedValue().toString(), equalTo("1")); - assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(6), instanceOf(DoSection.class)); - doSection = (DoSection) restTestSuite.getTestSections().get(0).getExecutableSections().get(6); + assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(5), instanceOf(DoSection.class)); + doSection = (DoSection) restTestSuite.getTestSections().get(0).getExecutableSections().get(5); assertThat(doSection.getCatch(), nullValue()); assertThat(doSection.getApiCallSection().getApi(), equalTo("get")); - assertThat(doSection.getApiCallSection().getParams().size(), equalTo(3)); + assertThat(doSection.getApiCallSection().getParams().size(), equalTo(2)); assertThat(doSection.getApiCallSection().hasBody(), equalTo(false)); - assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(7), instanceOf(MatchAssertion.class)); - matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(7); + assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(6), instanceOf(MatchAssertion.class)); + matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(6); assertThat(matchAssertion.getField(), equalTo("_index")); assertThat(matchAssertion.getExpectedValue().toString(), equalTo("test-weird-index-中文")); - assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(8), instanceOf(MatchAssertion.class)); - matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(8); - assertThat(matchAssertion.getField(), equalTo("_type")); - assertThat(matchAssertion.getExpectedValue().toString(), equalTo("weird.type")); - assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(9), instanceOf(MatchAssertion.class)); - matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(9); + assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(7), instanceOf(MatchAssertion.class)); + matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(7); assertThat(matchAssertion.getField(), equalTo("_id")); assertThat(matchAssertion.getExpectedValue().toString(), equalTo("1")); - assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(10), instanceOf(MatchAssertion.class)); - matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(10); + assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(8), instanceOf(MatchAssertion.class)); + matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(8); assertThat(matchAssertion.getField(), equalTo("_version")); assertThat(matchAssertion.getExpectedValue().toString(), equalTo("1")); - assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(11), instanceOf(MatchAssertion.class)); - matchAssertion = (MatchAssertion) 
restTestSuite.getTestSections().get(0).getExecutableSections().get(11); + assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(9), instanceOf(MatchAssertion.class)); + matchAssertion = (MatchAssertion) restTestSuite.getTestSections().get(0).getExecutableSections().get(9); assertThat(matchAssertion.getField(), equalTo("_source")); assertThat(matchAssertion.getExpectedValue(), instanceOf(Map.class)); assertThat(((Map) matchAssertion.getExpectedValue()).get("foo").toString(), equalTo("bar")); @@ -287,14 +236,12 @@ public void testParseTestMultipleTestSections() throws Exception { + " catch: missing\n" + " update:\n" + " index: test_1\n" - + " type: test\n" + " id: 1\n" + " body: { doc: { foo: bar } }\n" + "\n" + " - do:\n" + " update:\n" + " index: test_1\n" - + " type: test\n" + " id: 1\n" + " body: { doc: { foo: bar } }\n" + " ignore: 404\n" @@ -307,7 +254,6 @@ public void testParseTestMultipleTestSections() throws Exception { + " catch: missing\n" + " update:\n" + " index: test_1\n" - + " type: test\n" + " id: 1\n" + " body:\n" + " script: \"ctx._source.foo = bar\"\n" @@ -316,7 +262,6 @@ public void testParseTestMultipleTestSections() throws Exception { + " - do:\n" + " update:\n" + " index: test_1\n" - + " type: test\n" + " id: 1\n" + " ignore: 404\n" + " body:\n" @@ -341,13 +286,13 @@ public void testParseTestMultipleTestSections() throws Exception { DoSection doSection = (DoSection) restTestSuite.getTestSections().get(0).getExecutableSections().get(0); assertThat(doSection.getCatch(), equalTo("missing")); assertThat(doSection.getApiCallSection().getApi(), equalTo("update")); - assertThat(doSection.getApiCallSection().getParams().size(), equalTo(3)); + assertThat(doSection.getApiCallSection().getParams().size(), equalTo(2)); assertThat(doSection.getApiCallSection().hasBody(), equalTo(true)); assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(1), instanceOf(DoSection.class)); doSection = (DoSection) restTestSuite.getTestSections().get(0).getExecutableSections().get(1); assertThat(doSection.getCatch(), nullValue()); assertThat(doSection.getApiCallSection().getApi(), equalTo("update")); - assertThat(doSection.getApiCallSection().getParams().size(), equalTo(4)); + assertThat(doSection.getApiCallSection().getParams().size(), equalTo(3)); assertThat(doSection.getApiCallSection().hasBody(), equalTo(true)); assertThat(restTestSuite.getTestSections().get(1).getName(), equalTo("Missing document (script)")); @@ -358,13 +303,13 @@ public void testParseTestMultipleTestSections() throws Exception { doSection = (DoSection) restTestSuite.getTestSections().get(1).getExecutableSections().get(0); assertThat(doSection.getCatch(), equalTo("missing")); assertThat(doSection.getApiCallSection().getApi(), equalTo("update")); - assertThat(doSection.getApiCallSection().getParams().size(), equalTo(3)); + assertThat(doSection.getApiCallSection().getParams().size(), equalTo(2)); assertThat(doSection.getApiCallSection().hasBody(), equalTo(true)); assertThat(restTestSuite.getTestSections().get(0).getExecutableSections().get(1), instanceOf(DoSection.class)); doSection = (DoSection) restTestSuite.getTestSections().get(1).getExecutableSections().get(1); assertThat(doSection.getCatch(), nullValue()); assertThat(doSection.getApiCallSection().getApi(), equalTo("update")); - assertThat(doSection.getApiCallSection().getParams().size(), equalTo(4)); + assertThat(doSection.getApiCallSection().getParams().size(), equalTo(3)); assertThat(doSection.getApiCallSection().hasBody(), 
equalTo(true)); } @@ -378,7 +323,6 @@ public void testParseTestDuplicateTestSections() throws Exception { + " catch: missing\n" + " update:\n" + " index: test_1\n" - + " type: test\n" + " id: 1\n" + " body: { doc: { foo: bar } }\n" + "\n" @@ -390,7 +334,6 @@ public void testParseTestDuplicateTestSections() throws Exception { + " catch: missing\n" + " update:\n" + " index: test_1\n" - + " type: test\n" + " id: 1\n" + " body:\n" + " script: \"ctx._source.foo = bar\"\n" diff --git a/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/DoSectionTests.java b/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/DoSectionTests.java index 53cae686e3cac..1fb08934c8b8b 100644 --- a/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/DoSectionTests.java +++ b/test/framework/src/test/java/org/opensearch/test/rest/yaml/section/DoSectionTests.java @@ -173,19 +173,15 @@ public void testIgnoreTypesWarnings() { } public void testParseDoSectionNoBody() throws Exception { - parser = createParser( - YamlXContent.yamlXContent, - "get:\n" + " index: test_index\n" + " type: test_type\n" + " id: 1" - ); + parser = createParser(YamlXContent.yamlXContent, "get:\n" + " index: test_index\n" + " id: 1"); DoSection doSection = DoSection.parse(parser); ApiCallSection apiCallSection = doSection.getApiCallSection(); assertThat(apiCallSection, notNullValue()); assertThat(apiCallSection.getApi(), equalTo("get")); - assertThat(apiCallSection.getParams().size(), equalTo(3)); + assertThat(apiCallSection.getParams().size(), equalTo(2)); assertThat(apiCallSection.getParams().get("index"), equalTo("test_index")); - assertThat(apiCallSection.getParams().get("type"), equalTo("test_type")); assertThat(apiCallSection.getParams().get("id"), equalTo("1")); assertThat(apiCallSection.hasBody(), equalTo(false)); } @@ -204,19 +200,15 @@ public void testParseDoSectionNoParamsNoBody() throws Exception { public void testParseDoSectionWithJsonBody() throws Exception { String body = "{ \"include\": { \"field1\": \"v1\", \"field2\": \"v2\" }, \"count\": 1 }"; - parser = createParser( - YamlXContent.yamlXContent, - "index:\n" + " index: test_1\n" + " type: test\n" + " id: 1\n" + " body: " + body - ); + parser = createParser(YamlXContent.yamlXContent, "index:\n" + " index: test_1\n" + " id: 1\n" + " body: " + body); DoSection doSection = DoSection.parse(parser); ApiCallSection apiCallSection = doSection.getApiCallSection(); assertThat(apiCallSection, notNullValue()); assertThat(apiCallSection.getApi(), equalTo("index")); - assertThat(apiCallSection.getParams().size(), equalTo(3)); + assertThat(apiCallSection.getParams().size(), equalTo(2)); assertThat(apiCallSection.getParams().get("index"), equalTo("test_1")); - assertThat(apiCallSection.getParams().get("type"), equalTo("test")); assertThat(apiCallSection.getParams().get("id"), equalTo("1")); assertThat(apiCallSection.hasBody(), equalTo(true)); @@ -225,9 +217,9 @@ public void testParseDoSectionWithJsonBody() throws Exception { public void testParseDoSectionWithJsonMultipleBodiesAsLongString() throws Exception { String bodies[] = new String[] { - "{ \"index\": { \"_index\":\"test_index\", \"_type\":\"test_type\", \"_id\":\"test_id\" } }\n", + "{ \"index\": { \"_index\":\"test_index\", \"_id\":\"test_id\" } }\n", "{ \"f1\":\"v1\", \"f2\":42 }\n", - "{ \"index\": { \"_index\":\"test_index2\", \"_type\":\"test_type2\", \"_id\":\"test_id2\" } }\n", + "{ \"index\": { \"_index\":\"test_index2\", \"_id\":\"test_id2\" } }\n", "{ \"f1\":\"v2\", \"f2\":47 }\n" }; 
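        // Each "index" action line above is immediately followed by its source
        // document, so the four strings form two action/source pairs; with mapping
        // types removed, the action metadata carries only "_index" and "_id".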
parser = createParser( YamlXContent.yamlXContent, @@ -284,21 +276,19 @@ public void testParseDoSectionWithYamlMultipleBodies() throws Exception { + " body:\n" + " - index:\n" + " _index: test_index\n" - + " _type: test_type\n" + " _id: test_id\n" + " - f1: v1\n" + " f2: 42\n" + " - index:\n" + " _index: test_index2\n" - + " _type: test_type2\n" + " _id: test_id2\n" + " - f1: v2\n" + " f2: 47" ); String[] bodies = new String[4]; - bodies[0] = "{\"index\": {\"_index\": \"test_index\", \"_type\": \"test_type\", \"_id\": \"test_id\"}}"; + bodies[0] = "{\"index\": {\"_index\": \"test_index\", \"_id\": \"test_id\"}}"; bodies[1] = "{ \"f1\":\"v1\", \"f2\": 42 }"; - bodies[2] = "{\"index\": {\"_index\": \"test_index2\", \"_type\": \"test_type2\", \"_id\": \"test_id2\"}}"; + bodies[2] = "{\"index\": {\"_index\": \"test_index2\", \"_id\": \"test_id2\"}}"; bodies[3] = "{ \"f1\":\"v2\", \"f2\": 47 }"; DoSection doSection = DoSection.parse(parser); @@ -322,13 +312,10 @@ public void testParseDoSectionWithYamlBodyMultiGet() throws Exception { "mget:\n" + " body:\n" + " docs:\n" - + " - { _index: test_2, _type: test, _id: 1}\n" - + " - { _index: test_1, _type: none, _id: 1}" + + " - { _index: test_2, _id: 1}\n" + + " - { _index: test_1, _id: 1}" ); - String body = "{ \"docs\": [ " - + "{\"_index\": \"test_2\", \"_type\":\"test\", \"_id\":1}, " - + "{\"_index\": \"test_1\", \"_type\":\"none\", \"_id\":1} " - + "]}"; + String body = "{ \"docs\": [ " + "{\"_index\": \"test_2\", \"_id\":1}, " + "{\"_index\": \"test_1\", \"_id\":1} " + "]}"; DoSection doSection = DoSection.parse(parser); ApiCallSection apiCallSection = doSection.getApiCallSection(); @@ -346,7 +333,6 @@ public void testParseDoSectionWithBodyStringified() throws Exception { YamlXContent.yamlXContent, "index:\n" + " index: test_1\n" - + " type: test\n" + " id: 1\n" + " body: \"{ \\\"_source\\\": true, \\\"query\\\": { \\\"match_all\\\": {} } }\"" ); @@ -356,9 +342,8 @@ public void testParseDoSectionWithBodyStringified() throws Exception { assertThat(apiCallSection, notNullValue()); assertThat(apiCallSection.getApi(), equalTo("index")); - assertThat(apiCallSection.getParams().size(), equalTo(3)); + assertThat(apiCallSection.getParams().size(), equalTo(2)); assertThat(apiCallSection.getParams().get("index"), equalTo("test_1")); - assertThat(apiCallSection.getParams().get("type"), equalTo("test")); assertThat(apiCallSection.getParams().get("id"), equalTo("1")); assertThat(apiCallSection.hasBody(), equalTo(true)); assertThat(apiCallSection.getBodies().size(), equalTo(1)); @@ -444,16 +429,15 @@ public void testParseDoSectionWithoutClientCallSection() throws Exception { public void testParseDoSectionMultivaluedField() throws Exception { parser = createParser( YamlXContent.yamlXContent, - "indices.get_field_mapping:\n" + " index: test_index\n" + " type: test_type\n" + " field: [ text , text1 ]" + "indices.get_field_mapping:\n" + " index: test_index\n" + " field: [ text , text1 ]" ); DoSection doSection = DoSection.parse(parser); assertThat(doSection.getCatch(), nullValue()); assertThat(doSection.getApiCallSection(), notNullValue()); assertThat(doSection.getApiCallSection().getApi(), equalTo("indices.get_field_mapping")); - assertThat(doSection.getApiCallSection().getParams().size(), equalTo(3)); + assertThat(doSection.getApiCallSection().getParams().size(), equalTo(2)); assertThat(doSection.getApiCallSection().getParams().get("index"), equalTo("test_index")); - assertThat(doSection.getApiCallSection().getParams().get("type"), equalTo("test_type")); 
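        // The multivalued YAML parameter [ text , text1 ] is parsed into a single
        // comma-separated value, which the next assertion verifies.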
assertThat(doSection.getApiCallSection().getParams().get("field"), equalTo("text,text1")); assertThat(doSection.getApiCallSection().hasBody(), equalTo(false)); assertThat(doSection.getApiCallSection().getBodies().size(), equalTo(0)); @@ -464,7 +448,6 @@ public void testParseDoSectionExpectedWarnings() throws Exception { YamlXContent.yamlXContent, "indices.get_field_mapping:\n" + " index: test_index\n" - + " type: test_type\n" + "warnings:\n" + " - some test warning they are typically pretty long\n" + " - some other test warning sometimes they have [in] them" @@ -474,9 +457,8 @@ public void testParseDoSectionExpectedWarnings() throws Exception { assertThat(doSection.getCatch(), nullValue()); assertThat(doSection.getApiCallSection(), notNullValue()); assertThat(doSection.getApiCallSection().getApi(), equalTo("indices.get_field_mapping")); - assertThat(doSection.getApiCallSection().getParams().size(), equalTo(2)); + assertThat(doSection.getApiCallSection().getParams().size(), equalTo(1)); assertThat(doSection.getApiCallSection().getParams().get("index"), equalTo("test_index")); - assertThat(doSection.getApiCallSection().getParams().get("type"), equalTo("test_type")); assertThat(doSection.getApiCallSection().hasBody(), equalTo(false)); assertThat(doSection.getApiCallSection().getBodies().size(), equalTo(0)); assertThat( @@ -502,7 +484,6 @@ public void testParseDoSectionAllowedWarnings() throws Exception { YamlXContent.yamlXContent, "indices.get_field_mapping:\n" + " index: test_index\n" - + " type: test_type\n" + "allowed_warnings:\n" + " - some test warning they are typically pretty long\n" + " - some other test warning sometimes they have [in] them" @@ -512,9 +493,8 @@ public void testParseDoSectionAllowedWarnings() throws Exception { assertThat(doSection.getCatch(), nullValue()); assertThat(doSection.getApiCallSection(), notNullValue()); assertThat(doSection.getApiCallSection().getApi(), equalTo("indices.get_field_mapping")); - assertThat(doSection.getApiCallSection().getParams().size(), equalTo(2)); + assertThat(doSection.getApiCallSection().getParams().size(), equalTo(1)); assertThat(doSection.getApiCallSection().getParams().get("index"), equalTo("test_index")); - assertThat(doSection.getApiCallSection().getParams().get("type"), equalTo("test_type")); assertThat(doSection.getApiCallSection().hasBody(), equalTo(false)); assertThat(doSection.getApiCallSection().getBodies().size(), equalTo(0)); assertThat( diff --git a/test/framework/src/test/resources/rest-api-spec/test/suite1/10_basic.yml b/test/framework/src/test/resources/rest-api-spec/test/suite1/10_basic.yml index 0689f714d6416..c5fde76e94cc2 100644 --- a/test/framework/src/test/resources/rest-api-spec/test/suite1/10_basic.yml +++ b/test/framework/src/test/resources/rest-api-spec/test/suite1/10_basic.yml @@ -4,18 +4,15 @@ - do: index: index: test_1 - type: test id: 中文 body: { "foo": "Hello: 中文" } - do: get: index: test_1 - type: test id: 中文 - match: { _index: test_1 } - - match: { _type: test } - match: { _id: 中文 } - match: { _source: { foo: "Hello: 中文" } } @@ -26,6 +23,5 @@ id: 中文 - match: { _index: test_1 } - - match: { _type: test } - match: { _id: 中文 } - match: { _source: { foo: "Hello: 中文" } } From acf7da78f99eb8b3c8013287718720a69f2fa8c2 Mon Sep 17 00:00:00 2001 From: Rabi Panda Date: Fri, 20 May 2022 13:48:58 -0700 Subject: [PATCH 03/16] Add release notes for version 2.0.0 (#3410) Signed-off-by: Rabi Panda --- .../opensearch.release-notes-2.0.0.md | 189 ++++++++++++++++++ 1 file changed, 189 insertions(+) create mode 100644 
release-notes/opensearch.release-notes-2.0.0.md diff --git a/release-notes/opensearch.release-notes-2.0.0.md b/release-notes/opensearch.release-notes-2.0.0.md new file mode 100644 index 0000000000000..8880d7a7bddf9 --- /dev/null +++ b/release-notes/opensearch.release-notes-2.0.0.md @@ -0,0 +1,189 @@ +## 2022-05-19 Version 2.0.0 Release Notes + +### Breaking Changes in 2.0 + +#### Remove Mapping types +* [Type removal] Remove redundant _type in pipeline simulate action ([#3371](https://github.com/opensearch-project/OpenSearch/pull/3371)) +* [Type removal] Remove _type deprecation from script and conditional processor ([#3239](https://github.com/opensearch-project/OpenSearch/pull/3239)) +* [Type removal] Remove _type from _bulk yaml test, scripts, unused constants ([#3372](https://github.com/opensearch-project/OpenSearch/pull/3372)) +* [Type removal] _type removal from mocked responses of scroll hit tests ([#3377](https://github.com/opensearch-project/OpenSearch/pull/3377)) +* [Remove] TypeFieldMapper ([#3196](https://github.com/opensearch-project/OpenSearch/pull/3196)) +* [Type Removal] Remove TypeFieldMapper usage, remove support of `_type` in searches and from LeafFieldsLookup ([#3016](https://github.com/opensearch-project/OpenSearch/pull/3016)) +* [Type removal] Remove _type support in NOOP bulk indexing from client benchmark ([#3076](https://github.com/opensearch-project/OpenSearch/pull/3076)) +* [Type removal] Remove deprecation warning on use of _type in doc scripts ([#2564](https://github.com/opensearch-project/OpenSearch/pull/2564)) +* [Remove] AliasesExistAction ([#3149](https://github.com/opensearch-project/OpenSearch/pull/3149)) +* [Remove] TypesExist Action ([#3139](https://github.com/opensearch-project/OpenSearch/pull/3139)) +* [Remove] Type from nested fields using new metadata field mapper([#3004](https://github.com/opensearch-project/OpenSearch/pull/3004)) +* [Remove] types from rest-api-spec endpoints ([#2689](https://github.com/opensearch-project/OpenSearch/pull/2689)) +* [Remove] Types from PutIndexTemplateRequest and builder to reduce mapping to a string ([#2510](https://github.com/opensearch-project/OpenSearch/pull/2510)) +* [Remove] Type from Percolate query API ([#2490](https://github.com/opensearch-project/OpenSearch/pull/2490)) +* [Remove] types from CreateIndexRequest and companion Builder's mapping method ([#2498](https://github.com/opensearch-project/OpenSearch/pull/2498)) +* [Remove] Type from PutIndexTemplateRequest and PITRB ([#2497](https://github.com/opensearch-project/OpenSearch/pull/2497)) +* [Remove] Type metadata from ingest documents ([#2491](https://github.com/opensearch-project/OpenSearch/pull/2491)) +* [Remove] type from CIR.mapping and CIRB.mapping ([#2478](https://github.com/opensearch-project/OpenSearch/pull/2478)) +* [Remove] types based addMapping method from CreateIndexRequest and Builder ([#2460](https://github.com/opensearch-project/OpenSearch/pull/2460)) +* [Remove] type from TaskResults index and IndexMetadata.getMappings ([#2469](https://github.com/opensearch-project/OpenSearch/pull/2469)) +* [Remove] Type query ([#2448](https://github.com/opensearch-project/OpenSearch/pull/2448)) +* [Remove] Type from TermsLookUp ([#2459](https://github.com/opensearch-project/OpenSearch/pull/2459)) +* [Remove] types from Uid and remaining types/Uid from translog ([#2450](https://github.com/opensearch-project/OpenSearch/pull/2450)) +* [Remove] types from translog ([#2439](https://github.com/opensearch-project/OpenSearch/pull/2439)) +* [Remove] Type mapping 
end-points from RestMultiSearchTemplateAction ([#2433](https://github.com/opensearch-project/OpenSearch/pull/2433)) +* [Remove] Multiple Types from IndexTemplateMetadata ([#2400](https://github.com/opensearch-project/OpenSearch/pull/2400)) + +#### Upgrades +* [Upgrade] Lucene 9.1 release ([#2560](https://github.com/opensearch-project/OpenSearch/pull/2560)) +* [Upgrade] ICU4j from 68.2 to 70.1 ([#2504](https://github.com/opensearch-project/OpenSearch/pull/2504)) + +#### Deprecations +* Deprecate setting 'cluster.no_master_block' and introduce the alternative setting 'cluster.no_cluster_manager_block' ([#2453](https://github.com/opensearch-project/OpenSearch/pull/2453)) +* Deprecate setting 'cluster.service.slow_master_task_logging_threshold' and introduce the alternative setting 'cluster.service.slow_cluster_manager_task_logging_threshold' ([#2451](https://github.com/opensearch-project/OpenSearch/pull/2451)) +* Deprecate setting 'cluster.initial_master_nodes' and introduce the alternative setting 'cluster.initial_cluster_manager_nodes' ([#2463](https://github.com/opensearch-project/OpenSearch/pull/2463)) +* Deprecated reserved node id '_must_join_elected_master_' that used by DetachClusterCommand and replace with '_must_join_elected_cluster_manager_' ([#3138](https://github.com/opensearch-project/OpenSearch/pull/3138)) + +### Security Fixes +* [CVE-2020-36518] Update jackson-databind to 2.13.2.2 ([#2599](https://github.com/opensearch-project/OpenSearch/pull/2599)) + +### Features/Enhancements +* Removing hard coded value of max concurrent shard requests ([#3364](https://github.com/opensearch-project/OpenSearch/pull/3364)) +* Update generated ANTLR lexer/parser to match runtime version ([#3297](https://github.com/opensearch-project/OpenSearch/pull/3297)) +* Rename BecomeMasterTask to BecomeClusterManagerTask in JoinTaskExecutor ([#3099](https://github.com/opensearch-project/OpenSearch/pull/3099)) +* Replace 'master' terminology with 'cluster manager' in log messages in 'server/src/main' directory - Part 2 ([#3174](https://github.com/opensearch-project/OpenSearch/pull/3174)) +* Remove deprecation warning of using REST API request parameter 'master_timeout' ([#2920](https://github.com/opensearch-project/OpenSearch/pull/2920)) +* Add deprecated API for creating History Ops Snapshot from translog ([#2886](https://github.com/opensearch-project/OpenSearch/pull/2886)) +* Add request parameter 'cluster_manager_timeout' and deprecate 'master_timeout' - in Ingest APIs and Script APIs ([#2682](https://github.com/opensearch-project/OpenSearch/pull/2682)) +* Change deprecation message for API parameter value 'master_node' of parameter 'metric' ([#2880](https://github.com/opensearch-project/OpenSearch/pull/2880)) +* Add request parameter 'cluster_manager_timeout' and deprecate 'master_timeout' - in Snapshot APIs ([#2680](https://github.com/opensearch-project/OpenSearch/pull/2680)) +* Add request parameter 'cluster_manager_timeout' and deprecate 'master_timeout' - in Index Template APIs ([#2678](https://github.com/opensearch-project/OpenSearch/pull/2678)) +* Change deprecation message for REST API parameter 'master_timeout' to specify the version of removal ([#2863](https://github.com/opensearch-project/OpenSearch/pull/2863)) +* Decouple IndexSettings from IncludeExclude ([#2860](https://github.com/opensearch-project/OpenSearch/pull/2860)) +* Remove endpoint_suffix dependency on account key ([#2485](https://github.com/opensearch-project/OpenSearch/pull/2485)) +* Replace remaining 'blacklist' with 
'denylist' in internal class and method names ([#2784](https://github.com/opensearch-project/OpenSearch/pull/2784)) +* Make discovered_master field optional on the client to support compatibility for opensearch client with odfe ([#2641](https://github.com/opensearch-project/OpenSearch/pull/2641)) +* Add request parameter 'cluster_manager_timeout' and deprecate 'master_timeout' - in Index APIs except index template APIs ([#2660](https://github.com/opensearch-project/OpenSearch/pull/2660)) +* Add request parameter 'cluster_manager_timeout' and deprecate 'master_timeout' - in Cluster APIs ([#2658](https://github.com/opensearch-project/OpenSearch/pull/2658)) +* Make Rest-High-Rest-Level tests allow deprecation warning temporarily, during deprecation of request parameter 'master_timeout' ([#2702](https://github.com/opensearch-project/OpenSearch/pull/2702)) +* Add request parameter 'cluster_manager_timeout' as the alternative for 'master_timeout', and deprecate 'master_timeout' - in CAT APIs ([#2717](https://github.com/opensearch-project/OpenSearch/pull/2717)) +* Add mapping method back referenced in other repos ([#2636](https://github.com/opensearch-project/OpenSearch/pull/2636)) +* Replaced "master" terminology in Log message ([#2575](https://github.com/opensearch-project/OpenSearch/pull/2575)) +* Introduce QueryPhaseSearcher extension point (SearchPlugin) ([#1931](https://github.com/opensearch-project/OpenSearch/pull/1931)) +* Support for geo_bounding_box queries on geo_shape fields ([#2506](https://github.com/opensearch-project/OpenSearch/pull/2506)) +* Updating repository commons logging version ([#2541](https://github.com/opensearch-project/OpenSearch/pull/2541)) +* Support for geo_distance queries on geo_shape fields ([#2516](https://github.com/opensearch-project/OpenSearch/pull/2516)) +* Add 'cluster_manager_node' into ClusterState Metric as an alternative to 'master_node' ([#2415](https://github.com/opensearch-project/OpenSearch/pull/2415)) +* Add a new node role 'cluster_manager' as the alternative for 'master' role and deprecate 'master' role ([#2424](https://github.com/opensearch-project/OpenSearch/pull/2424)) +* Replace 'master' with 'cluster_manager' in 'GET Cat Nodes' API ([#2441](https://github.com/opensearch-project/OpenSearch/pull/2441)) +* Replace 'discovered_master' with 'discovered_cluster_manager' in 'GET Cat Health' API ([#2438](https://github.com/opensearch-project/OpenSearch/pull/2438)) +* Add a field discovered_cluster_manager in get cluster health api ([#2437](https://github.com/opensearch-project/OpenSearch/pull/2437)) +* Add request parameter 'cluster_manager_timeout' as the alternative for 'master_timeout', and deprecate 'master_timeout' - in CAT Nodes API ([#2435](https://github.com/opensearch-project/OpenSearch/pull/2435)) +* Add a new REST API endpoint 'GET _cat/cluster_manager' as the replacement of 'GET _cat/master' ([#2404](https://github.com/opensearch-project/OpenSearch/pull/2404)) +* Add default for EnginePlugin.getEngineFactory ([#2419](https://github.com/opensearch-project/OpenSearch/pull/2419)) + +### Bug Fixes +* Fixing PublishTests tests (running against unclean build folders) ([#3253](https://github.com/opensearch-project/OpenSearch/pull/3253)) +* Fixing Scaled float field mapper to respect ignoreMalformed setting ([#2918](https://github.com/opensearch-project/OpenSearch/pull/2918)) +* Fixing plugin installation URL to consume build qualifier ([#3193](https://github.com/opensearch-project/OpenSearch/pull/3193)) +* Fix minimum index compatibility error 
message ([#3159](https://github.com/opensearch-project/OpenSearch/pull/3159)) +* Added explicit 'null' check for response listener to prevent obscure NullPointerException issues ([#3048](https://github.com/opensearch-project/OpenSearch/pull/3048)) +* Adding a null pointer check to fix index_prefix query ([#2879](https://github.com/opensearch-project/OpenSearch/pull/2879)) +* Bugfix to guard against stack overflow errors caused by very large reg-ex input ([#2816](https://github.com/opensearch-project/OpenSearch/pull/2816)) +* Fix InboundDecoder version compat check ([#2570](https://github.com/opensearch-project/OpenSearch/pull/2570)) +* ignore_malformed parameter on ip_range data_type throws mapper_parsing_exception ([#2429](https://github.com/opensearch-project/OpenSearch/pull/2429)) +* Discrepancy in result from _validate/query API and actual query validity ([#2416](https://github.com/opensearch-project/OpenSearch/pull/2416)) + +### Build & Infrastructure +* Allow to configure POM for ZIP publication ([#3252](https://github.com/opensearch-project/OpenSearch/pull/3252)) +* Gradle plugin `opensearch.pluginzip` Add implicit dependency. ([#3189](https://github.com/opensearch-project/OpenSearch/pull/3189)) +* Gradle custom java zippublish plugin ([#2988](https://github.com/opensearch-project/OpenSearch/pull/2988)) +* Added Adoptium JDK8 support and updated DistroTestPlugin JDK version used by Gradle ([#3324](https://github.com/opensearch-project/OpenSearch/pull/3324)) +* Update bundled JDK to 17.0.3+7 ([#3093](https://github.com/opensearch-project/OpenSearch/pull/3093)) +* Use G1GC on JDK11+ ([#2964](https://github.com/opensearch-project/OpenSearch/pull/2964)) +* Removed java11 source folders since JDK-11 is the baseline now ([#2898](https://github.com/opensearch-project/OpenSearch/pull/2898)) +* Changed JAVA_HOME to jdk-17 ([#2656](https://github.com/opensearch-project/OpenSearch/pull/2656)) +* Fix build-tools/reaper source/target compatibility to be JDK-11 ([#2596](https://github.com/opensearch-project/OpenSearch/pull/2596)) +* Adding workflow to create documentation related issues in documentation-website repo ([#2929](https://github.com/opensearch-project/OpenSearch/pull/2929)) +* Fix issue that deprecated setting 'cluster.initial_master_nodes' is not identified in node bootstrap check ([#2779](https://github.com/opensearch-project/OpenSearch/pull/2779)) +* Replace blacklist in Gradle build environment configuration ([#2752](https://github.com/opensearch-project/OpenSearch/pull/2752)) +* Update ThirdPartyAuditTask to check for and list pointless exclusions. ([#2760](https://github.com/opensearch-project/OpenSearch/pull/2760)) +* Add Shadow jar publication to lang-painless module. 
([#2681](https://github.com/opensearch-project/OpenSearch/pull/2681)) +* Add 1.3.2 to main causing gradle check failures ([#2679](https://github.com/opensearch-project/OpenSearch/pull/2679)) +* Added jenkinsfile to run gradle check in OpenSearch ([#2166](https://github.com/opensearch-project/OpenSearch/pull/2166)) +* Gradle check retry ([#2638](https://github.com/opensearch-project/OpenSearch/pull/2638)) +* Override Default Distribution Download Url with Custom Distribution Url when it is passed from Plugin ([#2420](https://github.com/opensearch-project/OpenSearch/pull/2420)) + +### Documentation +* [Javadocs] add remaining internal classes and reenable missingJavadoc on server ([#3296](https://github.com/opensearch-project/OpenSearch/pull/3296)) +* [Javadocs] add to o.o.cluster ([#3170](https://github.com/opensearch-project/OpenSearch/pull/3170)) +* [Javadocs] add to o.o.bootstrap, cli, and client ([#3163](https://github.com/opensearch-project/OpenSearch/pull/3163)) +* [Javadocs] add to o.o.search.rescore,searchafter,slice, sort, and suggest ([#3264](https://github.com/opensearch-project/OpenSearch/pull/3264)) +* [Javadocs] add to o.o.transport ([#3220](https://github.com/opensearch-project/OpenSearch/pull/3220)) +* [Javadocs] add to o.o.action, index, and transport ([#3277](https://github.com/opensearch-project/OpenSearch/pull/3277)) +* [Javadocs] add to internal classes in o.o.http, indices, and search ([#3288](https://github.com/opensearch-project/OpenSearch/pull/3288)) +* [Javadocs] Add to remaining o.o.action classes ([#3182](https://github.com/opensearch-project/OpenSearch/pull/3182)) +* [Javadocs] add to o.o.rest, snapshots, and tasks packages ([#3219](https://github.com/opensearch-project/OpenSearch/pull/3219)) +* [Javadocs] add to o.o.common ([#3289](https://github.com/opensearch-project/OpenSearch/pull/3289)) +* [Javadocs] add to o.o.dfs,fetch,internal,lookup,profile, and query packages ([#3261](https://github.com/opensearch-project/OpenSearch/pull/3261)) +* [Javadocs] add to o.o.search.aggs, builder, and collapse packages ([#3254](https://github.com/opensearch-project/OpenSearch/pull/3254)) +* [Javadocs] add to o.o.index and indices ([#3209](https://github.com/opensearch-project/OpenSearch/pull/3209)) +* [Javadocs] add to o.o.monitor,persistance,plugins,repo,script,threadpool,usage,watcher ([#3186](https://github.com/opensearch-project/OpenSearch/pull/3186)) +* [Javadocs] Add to o.o.disovery, env, gateway, http, ingest, lucene and node pkgs ([#3185](https://github.com/opensearch-project/OpenSearch/pull/3185)) +* [Javadocs] add to o.o.action.admin ([#3155](https://github.com/opensearch-project/OpenSearch/pull/3155)) +* [Javadocs] Add missing package-info.java files to server ([#3128](https://github.com/opensearch-project/OpenSearch/pull/3128)) + +### Maintenance +* Bump re2j from 1.1 to 1.6 in /plugins/repository-hdfs ([#3337](https://github.com/opensearch-project/OpenSearch/pull/3337)) +* Bump google-oauth-client from 1.33.1 to 1.33.2 in /plugins/discovery-gce ([#2828](https://github.com/opensearch-project/OpenSearch/pull/2828)) +* Bump protobuf-java-util from 3.19.3 to 3.20.0 in /plugins/repository-gcs ([#2834](https://github.com/opensearch-project/OpenSearch/pull/2834)) +* Bump cdi-api from 1.2 to 2.0 in /qa/wildfly ([#2835](https://github.com/opensearch-project/OpenSearch/pull/2835)) +* Bump azure-core from 1.26.0 to 1.27.0 in /plugins/repository-azure ([#2837](https://github.com/opensearch-project/OpenSearch/pull/2837)) +* Bump asm-analysis from 9.2 to 9.3 in 
/test/logger-usage ([#2829](https://github.com/opensearch-project/OpenSearch/pull/2829)) +* Bump protobuf-java from 3.19.3 to 3.20.0 in /plugins/repository-hdfs ([#2836](https://github.com/opensearch-project/OpenSearch/pull/2836)) +* Bump joni from 2.1.41 to 2.1.43 in /libs/grok ([#2832](https://github.com/opensearch-project/OpenSearch/pull/2832)) +* Bump geoip2 from 2.16.1 to 3.0.1 in /modules/ingest-geoip ([#2646](https://github.com/opensearch-project/OpenSearch/pull/2646)) +* Bump jettison from 1.1 to 1.4.1 in /plugins/discovery-azure-classic ([#2614](https://github.com/opensearch-project/OpenSearch/pull/2614)) +* Bump google-oauth-client from 1.31.0 to 1.33.1 in /plugins/repository-gcs ([#2616](https://github.com/opensearch-project/OpenSearch/pull/2616)) +* Bump jboss-annotations-api_1.2_spec in /qa/wildfly ([#2615](https://github.com/opensearch-project/OpenSearch/pull/2615)) +* Bump forbiddenapis in /buildSrc/src/testKit/thirdPartyAudit ([#2611](https://github.com/opensearch-project/OpenSearch/pull/2611)) +* Bump json-schema-validator from 1.0.67 to 1.0.68 in /buildSrc ([#2610](https://github.com/opensearch-project/OpenSearch/pull/2610)) +* Bump htrace-core4 from 4.1.0-incubating to 4.2.0-incubating in /plugins/repository-hdfs ([#2618](https://github.com/opensearch-project/OpenSearch/pull/2618)) +* Bump asm-tree from 7.2 to 9.2 in /modules/lang-painless ([#2617](https://github.com/opensearch-project/OpenSearch/pull/2617)) +* Bump antlr4 from 4.5.3 to 4.9.3 in /modules/lang-painless ([#2537](https://github.com/opensearch-project/OpenSearch/pull/2537)) +* Bump commons-lang3 from 3.7 to 3.12.0 in /plugins/repository-hdfs ([#2552](https://github.com/opensearch-project/OpenSearch/pull/2552)) +* Bump gson from 2.8.9 to 2.9.0 in /plugins/repository-gcs ([#2550](https://github.com/opensearch-project/OpenSearch/pull/2550)) +* Bump google-oauth-client from 1.31.0 to 1.33.1 in /plugins/discovery-gce ([#2524](https://github.com/opensearch-project/OpenSearch/pull/2524)) +* Bump google-cloud-core from 1.93.3 to 2.5.10 in /plugins/repository-gcs ([#2536](https://github.com/opensearch-project/OpenSearch/pull/2536)) +* Bump wiremock-jre8-standalone from 2.23.2 to 2.32.0 in /buildSrc ([#2525](https://github.com/opensearch-project/OpenSearch/pull/2525)) +* Bump com.gradle.enterprise from 3.8.1 to 3.9 ([#2523](https://github.com/opensearch-project/OpenSearch/pull/2523)) +* Bump commons-io from 2.7 to 2.11.0 in /plugins/discovery-azure-classic ([#2527](https://github.com/opensearch-project/OpenSearch/pull/2527)) +* Bump asm-analysis from 7.1 to 9.2 in /test/logger-usage ([#2273](https://github.com/opensearch-project/OpenSearch/pull/2273)) +* Bump asm-commons from 7.2 to 9.2 in /modules/lang-painless ([#2234](https://github.com/opensearch-project/OpenSearch/pull/2234)) +* Bump jna from 5.5.0 to 5.10.0 in /buildSrc ([#2512](https://github.com/opensearch-project/OpenSearch/pull/2512)) +* Bump jsr305 from 1.3.9 to 3.0.2 in /plugins/discovery-gce ([#2137](https://github.com/opensearch-project/OpenSearch/pull/2137)) +* Bump json-schema-validator from 1.0.36 to 1.0.67 in /buildSrc ([#2454](https://github.com/opensearch-project/OpenSearch/pull/2454)) +* Bump woodstox-core from 6.1.1 to 6.2.8 in /plugins/repository-azure ([#2456](https://github.com/opensearch-project/OpenSearch/pull/2456)) +* Bump commons-lang3 from 3.4 to 3.12.0 in /plugins/repository-azure ([#2455](https://github.com/opensearch-project/OpenSearch/pull/2455)) +* Update azure-storage-blob to 12.15.0 
([#2774](https://github.com/opensearch-project/OpenSearch/pull/2774))
+* Move Jackson-databind to 2.13.2 ([#2548](https://github.com/opensearch-project/OpenSearch/pull/2548))
+* Add trademark notice ([#2473](https://github.com/opensearch-project/OpenSearch/pull/2473))
+* adds ToC ([#2546](https://github.com/opensearch-project/OpenSearch/pull/2546))
+* Sync maintainers with actual permissions. ([#3127](https://github.com/opensearch-project/OpenSearch/pull/3127))
+
+### Refactoring
+* [Remove] remaining AllFieldMapper references ([#3007](https://github.com/opensearch-project/OpenSearch/pull/3007))
+* Clear up some confusing code in IndexShardHotSpotTests ([#1534](https://github.com/opensearch-project/OpenSearch/pull/1534))
+* [Remove] ShrinkAction, ShardUpgradeRequest, UpgradeSettingsRequestBuilder ([#3169](https://github.com/opensearch-project/OpenSearch/pull/3169))
+* [Rename] ESTestCase stragglers to OpenSearchTestCase ([#3053](https://github.com/opensearch-project/OpenSearch/pull/3053))
+* [Remove] MainResponse version override cluster setting ([#3031](https://github.com/opensearch-project/OpenSearch/pull/3031))
+* [Version] Don't spoof major for 3.0+ clusters ([#2722](https://github.com/opensearch-project/OpenSearch/pull/2722))
+* Centralize codes related to 'master_timeout' deprecation for easier removal - in CAT Nodes API ([#2670](https://github.com/opensearch-project/OpenSearch/pull/2670))
+* Rename reference to project OpenSearch was forked from ([#2483](https://github.com/opensearch-project/OpenSearch/pull/2483))
+* Remove the IndexCommitRef class ([#2421](https://github.com/opensearch-project/OpenSearch/pull/2421))
+* Refactoring gated and ref-counted interfaces and their implementations ([#2396](https://github.com/opensearch-project/OpenSearch/pull/2396))
+* [Refactor] LuceneChangesSnapshot to use accurate ops history ([#2452](https://github.com/opensearch-project/OpenSearch/pull/2452))
+
+### Tests
+* Add type mapping removal bwc tests for indexing, searching, snapshots ([#2901](https://github.com/opensearch-project/OpenSearch/pull/2901))
+* Removing SLM check in tests for OpenSearch versions ([#2604](https://github.com/opensearch-project/OpenSearch/pull/2604))
+* [Unmute] NumberFieldTypeTests ([#2531](https://github.com/opensearch-project/OpenSearch/pull/2531))
+* Use Hamcrest matchers and assertThat() in ReindexRenamedSettingTests ([#2503](https://github.com/opensearch-project/OpenSearch/pull/2503))
+* [Unmute] IndexPrimaryRelocationIT ([#2488](https://github.com/opensearch-project/OpenSearch/pull/2488))
+* Fixing PluginsServiceTests (post Lucene 9 update) ([#2484](https://github.com/opensearch-project/OpenSearch/pull/2484))

From 535850210743e5361c50ab5fe8f58abeec76f96a Mon Sep 17 00:00:00 2001
From: Nick Knize
Date: Fri, 20 May 2022 16:29:51 -0500
Subject: [PATCH 04/16] [Upgrade] Lucene-9.2.0-snapshot-ba8c3a8 (#3416)

Upgrades to the latest snapshot of Lucene 9.2.0 in preparation for the GA release.
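Lucene 9.2 adds a docValueCount() accessor to SortedSetDocValues (returning a
long at this snapshot), which is why this change has to implement it in
GlobalOrdinalMapping, MultiOrdinals, and the MissingValues wrappers below. As a
rough sketch of the delegation pattern, assuming only the Lucene API that the
patch itself exercises (the class name is illustrative, not part of the
change):

    import java.io.IOException;

    import org.apache.lucene.index.SortedSetDocValues;
    import org.apache.lucene.util.BytesRef;

    /** Forwards every call to a delegate; only docValueCount() is new in 9.2. */
    final class ForwardingSortedSetDocValues extends SortedSetDocValues {
        private final SortedSetDocValues in;

        ForwardingSortedSetDocValues(SortedSetDocValues in) {
            this.in = in;
        }

        @Override
        public long docValueCount() {
            // New in Lucene 9.2: number of ordinals for the current document.
            return in.docValueCount();
        }

        @Override
        public long nextOrd() throws IOException {
            return in.nextOrd();
        }

        @Override
        public BytesRef lookupOrd(long ord) throws IOException {
            return in.lookupOrd(ord);
        }

        @Override
        public long getValueCount() {
            return in.getValueCount();
        }

        @Override
        public boolean advanceExact(int target) throws IOException {
            return in.advanceExact(target);
        }

        @Override
        public int docID() {
            return in.docID();
        }

        @Override
        public int nextDoc() throws IOException {
            return in.nextDoc();
        }

        @Override
        public int advance(int target) throws IOException {
            return in.advance(target);
        }

        @Override
        public long cost() {
            return in.cost();
        }
    }

Wrappers that remap ordinals cannot always delegate: MultiOrdinals below
derives the count from its current offsets (currentEndOffset - currentOffset)
instead of forwarding.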
Signed-off-by: Nicholas Walter Knize --- .../forbidden/opensearch-test-signatures.txt | 1 - buildSrc/version.properties | 2 +- .../opensearch/core/internal/io/IOUtilsTests.java | 11 ++++------- ...ucene-expressions-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 + ...ucene-expressions-9.2.0-snapshot-f4f1f70.jar.sha1 | 1 - ...cene-analysis-icu-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 + ...cene-analysis-icu-9.2.0-snapshot-f4f1f70.jar.sha1 | 1 - ...analysis-kuromoji-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 + ...analysis-kuromoji-9.2.0-snapshot-f4f1f70.jar.sha1 | 1 - ...ene-analysis-nori-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 + ...ene-analysis-nori-9.2.0-snapshot-f4f1f70.jar.sha1 | 1 - ...analysis-phonetic-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 + ...analysis-phonetic-9.2.0-snapshot-f4f1f70.jar.sha1 | 1 - ...-analysis-smartcn-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 + ...-analysis-smartcn-9.2.0-snapshot-f4f1f70.jar.sha1 | 1 - ...-analysis-stempel-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 + ...-analysis-stempel-9.2.0-snapshot-f4f1f70.jar.sha1 | 1 - ...alysis-morfologik-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 + ...alysis-morfologik-9.2.0-snapshot-f4f1f70.jar.sha1 | 1 - ...e-analysis-common-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 + ...e-analysis-common-9.2.0-snapshot-f4f1f70.jar.sha1 | 1 - ...e-backward-codecs-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 + ...e-backward-codecs-9.2.0-snapshot-f4f1f70.jar.sha1 | 1 - .../lucene-core-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 + .../lucene-core-9.2.0-snapshot-f4f1f70.jar.sha1 | 1 - .../lucene-grouping-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 + .../lucene-grouping-9.2.0-snapshot-f4f1f70.jar.sha1 | 1 - ...ucene-highlighter-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 + ...ucene-highlighter-9.2.0-snapshot-f4f1f70.jar.sha1 | 1 - .../lucene-join-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 + .../lucene-join-9.2.0-snapshot-f4f1f70.jar.sha1 | 1 - .../lucene-memory-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 + .../lucene-memory-9.2.0-snapshot-f4f1f70.jar.sha1 | 1 - .../lucene-misc-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 + .../lucene-misc-9.2.0-snapshot-f4f1f70.jar.sha1 | 1 - .../lucene-queries-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 + .../lucene-queries-9.2.0-snapshot-f4f1f70.jar.sha1 | 1 - ...ucene-queryparser-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 + ...ucene-queryparser-9.2.0-snapshot-f4f1f70.jar.sha1 | 1 - .../lucene-sandbox-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 + .../lucene-sandbox-9.2.0-snapshot-f4f1f70.jar.sha1 | 1 - ...ne-spatial-extras-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 + ...ne-spatial-extras-9.2.0-snapshot-f4f1f70.jar.sha1 | 1 - .../lucene-spatial3d-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 + .../lucene-spatial3d-9.2.0-snapshot-f4f1f70.jar.sha1 | 1 - .../lucene-suggest-9.2.0-snapshot-ba8c3a8.jar.sha1 | 1 + .../lucene-suggest-9.2.0-snapshot-f4f1f70.jar.sha1 | 1 - .../java/org/opensearch/common/lucene/Lucene.java | 2 +- .../org/opensearch/index/codec/CodecService.java | 8 ++++---- .../codec/PerFieldMappingPostingFormatCodec.java | 4 ++-- .../fielddata/ordinals/GlobalOrdinalMapping.java | 4 ++++ .../index/fielddata/ordinals/MultiOrdinals.java | 5 +++++ .../search/aggregations/support/MissingValues.java | 10 ++++++++++ .../java/org/opensearch/index/codec/CodecTests.java | 12 ++++++------ .../index/engine/CompletionStatsCacheTests.java | 4 ++-- .../org/opensearch/search/MultiValueModeTests.java | 5 +++++ .../bucket/range/BinaryRangeAggregatorTests.java | 4 ++++ .../aggregations/support/IncludeExcludeTests.java | 4 ++++ .../aggregations/support/MissingValuesTests.java | 10 ++++++++++ .../indices/analysis/AnalysisFactoryTestCase.java | 1 + 60 files changed, 85 
insertions(+), 46 deletions(-) create mode 100644 modules/lang-expression/licenses/lucene-expressions-9.2.0-snapshot-ba8c3a8.jar.sha1 delete mode 100644 modules/lang-expression/licenses/lucene-expressions-9.2.0-snapshot-f4f1f70.jar.sha1 create mode 100644 plugins/analysis-icu/licenses/lucene-analysis-icu-9.2.0-snapshot-ba8c3a8.jar.sha1 delete mode 100644 plugins/analysis-icu/licenses/lucene-analysis-icu-9.2.0-snapshot-f4f1f70.jar.sha1 create mode 100644 plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.2.0-snapshot-ba8c3a8.jar.sha1 delete mode 100644 plugins/analysis-kuromoji/licenses/lucene-analysis-kuromoji-9.2.0-snapshot-f4f1f70.jar.sha1 create mode 100644 plugins/analysis-nori/licenses/lucene-analysis-nori-9.2.0-snapshot-ba8c3a8.jar.sha1 delete mode 100644 plugins/analysis-nori/licenses/lucene-analysis-nori-9.2.0-snapshot-f4f1f70.jar.sha1 create mode 100644 plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.2.0-snapshot-ba8c3a8.jar.sha1 delete mode 100644 plugins/analysis-phonetic/licenses/lucene-analysis-phonetic-9.2.0-snapshot-f4f1f70.jar.sha1 create mode 100644 plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.2.0-snapshot-ba8c3a8.jar.sha1 delete mode 100644 plugins/analysis-smartcn/licenses/lucene-analysis-smartcn-9.2.0-snapshot-f4f1f70.jar.sha1 create mode 100644 plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.2.0-snapshot-ba8c3a8.jar.sha1 delete mode 100644 plugins/analysis-stempel/licenses/lucene-analysis-stempel-9.2.0-snapshot-f4f1f70.jar.sha1 create mode 100644 plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.2.0-snapshot-ba8c3a8.jar.sha1 delete mode 100644 plugins/analysis-ukrainian/licenses/lucene-analysis-morfologik-9.2.0-snapshot-f4f1f70.jar.sha1 create mode 100644 server/licenses/lucene-analysis-common-9.2.0-snapshot-ba8c3a8.jar.sha1 delete mode 100644 server/licenses/lucene-analysis-common-9.2.0-snapshot-f4f1f70.jar.sha1 create mode 100644 server/licenses/lucene-backward-codecs-9.2.0-snapshot-ba8c3a8.jar.sha1 delete mode 100644 server/licenses/lucene-backward-codecs-9.2.0-snapshot-f4f1f70.jar.sha1 create mode 100644 server/licenses/lucene-core-9.2.0-snapshot-ba8c3a8.jar.sha1 delete mode 100644 server/licenses/lucene-core-9.2.0-snapshot-f4f1f70.jar.sha1 create mode 100644 server/licenses/lucene-grouping-9.2.0-snapshot-ba8c3a8.jar.sha1 delete mode 100644 server/licenses/lucene-grouping-9.2.0-snapshot-f4f1f70.jar.sha1 create mode 100644 server/licenses/lucene-highlighter-9.2.0-snapshot-ba8c3a8.jar.sha1 delete mode 100644 server/licenses/lucene-highlighter-9.2.0-snapshot-f4f1f70.jar.sha1 create mode 100644 server/licenses/lucene-join-9.2.0-snapshot-ba8c3a8.jar.sha1 delete mode 100644 server/licenses/lucene-join-9.2.0-snapshot-f4f1f70.jar.sha1 create mode 100644 server/licenses/lucene-memory-9.2.0-snapshot-ba8c3a8.jar.sha1 delete mode 100644 server/licenses/lucene-memory-9.2.0-snapshot-f4f1f70.jar.sha1 create mode 100644 server/licenses/lucene-misc-9.2.0-snapshot-ba8c3a8.jar.sha1 delete mode 100644 server/licenses/lucene-misc-9.2.0-snapshot-f4f1f70.jar.sha1 create mode 100644 server/licenses/lucene-queries-9.2.0-snapshot-ba8c3a8.jar.sha1 delete mode 100644 server/licenses/lucene-queries-9.2.0-snapshot-f4f1f70.jar.sha1 create mode 100644 server/licenses/lucene-queryparser-9.2.0-snapshot-ba8c3a8.jar.sha1 delete mode 100644 server/licenses/lucene-queryparser-9.2.0-snapshot-f4f1f70.jar.sha1 create mode 100644 server/licenses/lucene-sandbox-9.2.0-snapshot-ba8c3a8.jar.sha1 delete mode 100644 
server/licenses/lucene-sandbox-9.2.0-snapshot-f4f1f70.jar.sha1 create mode 100644 server/licenses/lucene-spatial-extras-9.2.0-snapshot-ba8c3a8.jar.sha1 delete mode 100644 server/licenses/lucene-spatial-extras-9.2.0-snapshot-f4f1f70.jar.sha1 create mode 100644 server/licenses/lucene-spatial3d-9.2.0-snapshot-ba8c3a8.jar.sha1 delete mode 100644 server/licenses/lucene-spatial3d-9.2.0-snapshot-f4f1f70.jar.sha1 create mode 100644 server/licenses/lucene-suggest-9.2.0-snapshot-ba8c3a8.jar.sha1 delete mode 100644 server/licenses/lucene-suggest-9.2.0-snapshot-f4f1f70.jar.sha1 diff --git a/buildSrc/src/main/resources/forbidden/opensearch-test-signatures.txt b/buildSrc/src/main/resources/forbidden/opensearch-test-signatures.txt index 03dead38bd8b4..43568b3209baf 100644 --- a/buildSrc/src/main/resources/forbidden/opensearch-test-signatures.txt +++ b/buildSrc/src/main/resources/forbidden/opensearch-test-signatures.txt @@ -19,7 +19,6 @@ com.carrotsearch.randomizedtesting.annotations.Seed @ Don't commit hardcoded see com.carrotsearch.randomizedtesting.annotations.Repeat @ Don't commit hardcoded repeats org.apache.lucene.codecs.Codec#setDefault(org.apache.lucene.codecs.Codec) @ Use the SuppressCodecs("*") annotation instead -org.apache.lucene.tests.util.LuceneTestCase$Slow @ Don't write slow tests org.junit.Ignore @ Use AwaitsFix instead org.apache.lucene.tests.util.LuceneTestCase$Nightly @ We don't run nightly tests at this point! com.carrotsearch.randomizedtesting.annotations.Nightly @ We don't run nightly tests at this point! diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 121f88dd0aac0..7a8a9531ebda8 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -1,5 +1,5 @@ opensearch = 3.0.0 -lucene = 9.2.0-snapshot-f4f1f70 +lucene = 9.2.0-snapshot-ba8c3a8 bundled_jdk_vendor = adoptium bundled_jdk = 17.0.3+7 diff --git a/libs/core/src/test/java/org/opensearch/core/internal/io/IOUtilsTests.java b/libs/core/src/test/java/org/opensearch/core/internal/io/IOUtilsTests.java index f1c8642b73044..e1f3cb7520a7e 100644 --- a/libs/core/src/test/java/org/opensearch/core/internal/io/IOUtilsTests.java +++ b/libs/core/src/test/java/org/opensearch/core/internal/io/IOUtilsTests.java @@ -40,7 +40,6 @@ import java.io.Closeable; import java.io.IOException; import java.io.OutputStream; -import java.net.URI; import java.nio.channels.FileChannel; import java.nio.charset.StandardCharsets; import java.nio.file.AccessDeniedException; @@ -172,10 +171,8 @@ public void runTestRm(final boolean exception) throws IOException { for (int i = 0; i < numberOfLocations; i++) { if (exception && randomBoolean()) { final Path location = createTempDir(); - final FileSystem fs = new AccessDeniedWhileDeletingFileSystem(location.getFileSystem()).getFileSystem( - URI.create("file:///") - ); - final Path wrapped = new FilterPath(location, fs); + final FilterFileSystemProvider ffsp = new AccessDeniedWhileDeletingFileSystem(location.getFileSystem()); + final Path wrapped = ffsp.wrapPath(location); locations[i] = wrapped.resolve(randomAlphaOfLength(8)); Files.createDirectory(locations[i]); locationsThrowingException.add(locations[i]); @@ -256,8 +253,8 @@ public FileChannel newFileChannel(final Path path, final Set codecs = MapBuilder.newMapBuilder(); if (mapperService == null) { - codecs.put(DEFAULT_CODEC, new Lucene91Codec()); - codecs.put(BEST_COMPRESSION_CODEC, new Lucene91Codec(Mode.BEST_COMPRESSION)); + codecs.put(DEFAULT_CODEC, new Lucene92Codec()); + codecs.put(BEST_COMPRESSION_CODEC, new 
Lucene92Codec(Mode.BEST_COMPRESSION)); } else { codecs.put(DEFAULT_CODEC, new PerFieldMappingPostingFormatCodec(Mode.BEST_SPEED, mapperService, logger)); codecs.put(BEST_COMPRESSION_CODEC, new PerFieldMappingPostingFormatCodec(Mode.BEST_COMPRESSION, mapperService, logger)); diff --git a/server/src/main/java/org/opensearch/index/codec/PerFieldMappingPostingFormatCodec.java b/server/src/main/java/org/opensearch/index/codec/PerFieldMappingPostingFormatCodec.java index 52e940a25ddd6..fd0c66983208a 100644 --- a/server/src/main/java/org/opensearch/index/codec/PerFieldMappingPostingFormatCodec.java +++ b/server/src/main/java/org/opensearch/index/codec/PerFieldMappingPostingFormatCodec.java @@ -36,7 +36,7 @@ import org.apache.lucene.codecs.Codec; import org.apache.lucene.codecs.DocValuesFormat; import org.apache.lucene.codecs.PostingsFormat; -import org.apache.lucene.codecs.lucene91.Lucene91Codec; +import org.apache.lucene.codecs.lucene92.Lucene92Codec; import org.apache.lucene.codecs.lucene90.Lucene90DocValuesFormat; import org.opensearch.common.lucene.Lucene; import org.opensearch.index.mapper.CompletionFieldMapper; @@ -53,7 +53,7 @@ * * @opensearch.internal */ -public class PerFieldMappingPostingFormatCodec extends Lucene91Codec { +public class PerFieldMappingPostingFormatCodec extends Lucene92Codec { private final Logger logger; private final MapperService mapperService; private final DocValuesFormat dvFormat = new Lucene90DocValuesFormat(); diff --git a/server/src/main/java/org/opensearch/index/fielddata/ordinals/GlobalOrdinalMapping.java b/server/src/main/java/org/opensearch/index/fielddata/ordinals/GlobalOrdinalMapping.java index 8fc6eb1a74056..884e0d66ffd8d 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/ordinals/GlobalOrdinalMapping.java +++ b/server/src/main/java/org/opensearch/index/fielddata/ordinals/GlobalOrdinalMapping.java @@ -112,4 +112,8 @@ public long cost() { return values.cost(); } + @Override + public long docValueCount() { + return values.docValueCount(); + } } diff --git a/server/src/main/java/org/opensearch/index/fielddata/ordinals/MultiOrdinals.java b/server/src/main/java/org/opensearch/index/fielddata/ordinals/MultiOrdinals.java index 6131bc33841b6..6e3f83690a872 100644 --- a/server/src/main/java/org/opensearch/index/fielddata/ordinals/MultiOrdinals.java +++ b/server/src/main/java/org/opensearch/index/fielddata/ordinals/MultiOrdinals.java @@ -229,5 +229,10 @@ public long nextOrd() throws IOException { public BytesRef lookupOrd(long ord) { return values.lookupOrd(ord); } + + @Override + public long docValueCount() { + return currentEndOffset - currentOffset; + } } } diff --git a/server/src/main/java/org/opensearch/search/aggregations/support/MissingValues.java b/server/src/main/java/org/opensearch/search/aggregations/support/MissingValues.java index 6e0778f9a0a2d..179e4f18a1ea1 100644 --- a/server/src/main/java/org/opensearch/search/aggregations/support/MissingValues.java +++ b/server/src/main/java/org/opensearch/search/aggregations/support/MissingValues.java @@ -310,6 +310,11 @@ public boolean advanceExact(int doc) throws IOException { return true; } + @Override + public long docValueCount() { + return values.docValueCount(); + } + @Override public String toString() { return "anon AbstractSortedDocValues of [" + super.toString() + "]"; @@ -340,6 +345,11 @@ public long getValueCount() { return 1 + values.getValueCount(); } + @Override + public long docValueCount() { + return values.docValueCount(); + } + @Override public long nextOrd() throws IOException { 
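// ---- Editor's aside (illustrative sketch, not part of this patch) ---------------------------
// The docValueCount() overrides added throughout this commit implement the per-document
// value-count accessor that this Lucene 9.2 snapshot declares on SortedSetDocValues (returning
// long here, matching the overrides above). A minimal, hypothetical consumer of that contract:
// advanceExact(doc) positions the iterator, docValueCount() reports how many ordinals the
// current document carries, and nextOrd() is then called exactly that many times.
//
//     import org.apache.lucene.index.SortedSetDocValues;
//     import org.apache.lucene.util.BytesRef;
//
//     static void consumeOrds(SortedSetDocValues values, int doc) throws java.io.IOException {
//         if (values.advanceExact(doc)) {                 // position on the target document
//             final long count = values.docValueCount();  // per-document ordinal count
//             for (long i = 0; i < count; i++) {
//                 final long ord = values.nextOrd();           // next ordinal for this doc
//                 final BytesRef term = values.lookupOrd(ord); // resolve ordinal to its term
//             }
//         }
//     }
// ----------------------------------------------------------------------------------------------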
if (hasOrds) { diff --git a/server/src/test/java/org/opensearch/index/codec/CodecTests.java b/server/src/test/java/org/opensearch/index/codec/CodecTests.java index 94b78da402b44..0275066f9af1b 100644 --- a/server/src/test/java/org/opensearch/index/codec/CodecTests.java +++ b/server/src/test/java/org/opensearch/index/codec/CodecTests.java @@ -34,7 +34,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.lucene.codecs.Codec; -import org.apache.lucene.codecs.lucene91.Lucene91Codec; +import org.apache.lucene.codecs.lucene92.Lucene92Codec; import org.apache.lucene.codecs.lucene90.Lucene90StoredFieldsFormat; import org.apache.lucene.document.Document; import org.apache.lucene.index.DirectoryReader; @@ -65,21 +65,21 @@ public class CodecTests extends OpenSearchTestCase { public void testResolveDefaultCodecs() throws Exception { CodecService codecService = createCodecService(); assertThat(codecService.codec("default"), instanceOf(PerFieldMappingPostingFormatCodec.class)); - assertThat(codecService.codec("default"), instanceOf(Lucene91Codec.class)); + assertThat(codecService.codec("default"), instanceOf(Lucene92Codec.class)); } public void testDefault() throws Exception { Codec codec = createCodecService().codec("default"); - assertStoredFieldsCompressionEquals(Lucene91Codec.Mode.BEST_SPEED, codec); + assertStoredFieldsCompressionEquals(Lucene92Codec.Mode.BEST_SPEED, codec); } public void testBestCompression() throws Exception { Codec codec = createCodecService().codec("best_compression"); - assertStoredFieldsCompressionEquals(Lucene91Codec.Mode.BEST_COMPRESSION, codec); + assertStoredFieldsCompressionEquals(Lucene92Codec.Mode.BEST_COMPRESSION, codec); } // write some docs with it, inspect .si to see this was the used compression - private void assertStoredFieldsCompressionEquals(Lucene91Codec.Mode expected, Codec actual) throws Exception { + private void assertStoredFieldsCompressionEquals(Lucene92Codec.Mode expected, Codec actual) throws Exception { Directory dir = newDirectory(); IndexWriterConfig iwc = newIndexWriterConfig(null); iwc.setCodec(actual); @@ -91,7 +91,7 @@ private void assertStoredFieldsCompressionEquals(Lucene91Codec.Mode expected, Co SegmentReader sr = (SegmentReader) ir.leaves().get(0).reader(); String v = sr.getSegmentInfo().info.getAttribute(Lucene90StoredFieldsFormat.MODE_KEY); assertNotNull(v); - assertEquals(expected, Lucene91Codec.Mode.valueOf(v)); + assertEquals(expected, Lucene92Codec.Mode.valueOf(v)); ir.close(); dir.close(); } diff --git a/server/src/test/java/org/opensearch/index/engine/CompletionStatsCacheTests.java b/server/src/test/java/org/opensearch/index/engine/CompletionStatsCacheTests.java index 66b066b907100..340811352a203 100644 --- a/server/src/test/java/org/opensearch/index/engine/CompletionStatsCacheTests.java +++ b/server/src/test/java/org/opensearch/index/engine/CompletionStatsCacheTests.java @@ -32,7 +32,7 @@ package org.opensearch.index.engine; import org.apache.lucene.codecs.PostingsFormat; -import org.apache.lucene.codecs.lucene91.Lucene91Codec; +import org.apache.lucene.codecs.lucene92.Lucene92Codec; import org.apache.lucene.document.Document; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexWriter; @@ -70,7 +70,7 @@ public void testExceptionsAreNotCached() { public void testCompletionStatsCache() throws IOException, InterruptedException { final IndexWriterConfig indexWriterConfig = newIndexWriterConfig(); final PostingsFormat postingsFormat = new Completion90PostingsFormat(); - 
indexWriterConfig.setCodec(new Lucene91Codec() { + indexWriterConfig.setCodec(new Lucene92Codec() { @Override public PostingsFormat getPostingsFormatForField(String field) { return postingsFormat; // all fields are suggest fields diff --git a/server/src/test/java/org/opensearch/search/MultiValueModeTests.java b/server/src/test/java/org/opensearch/search/MultiValueModeTests.java index bfb4466e53e43..525621c02fd32 100644 --- a/server/src/test/java/org/opensearch/search/MultiValueModeTests.java +++ b/server/src/test/java/org/opensearch/search/MultiValueModeTests.java @@ -763,6 +763,11 @@ public BytesRef lookupOrd(long ord) { public long getValueCount() { return 1 << 20; } + + @Override + public long docValueCount() { + return array[doc].length; + } }; verifySortedSet(multiValues, numDocs); final FixedBitSet rootDocs = randomRootDocs(numDocs); diff --git a/server/src/test/java/org/opensearch/search/aggregations/bucket/range/BinaryRangeAggregatorTests.java b/server/src/test/java/org/opensearch/search/aggregations/bucket/range/BinaryRangeAggregatorTests.java index 9c2578a2378cc..ea4dc09e6a601 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/bucket/range/BinaryRangeAggregatorTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/bucket/range/BinaryRangeAggregatorTests.java @@ -83,6 +83,10 @@ public long getValueCount() { return terms.length; } + @Override + public long docValueCount() { + return ords.length; + } } private void doTestSortedSetRangeLeafCollector(int maxNumValuesPerDoc) throws Exception { diff --git a/server/src/test/java/org/opensearch/search/aggregations/support/IncludeExcludeTests.java b/server/src/test/java/org/opensearch/search/aggregations/support/IncludeExcludeTests.java index d0995abd07f32..51f135ec0b56b 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/support/IncludeExcludeTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/support/IncludeExcludeTests.java @@ -98,6 +98,10 @@ public long getValueCount() { return 1; } + @Override + public long docValueCount() { + return 1; + } }; IncludeExclude inexcl = new IncludeExclude(new TreeSet<>(Collections.singleton(new BytesRef("foo"))), null); OrdinalsFilter filter = inexcl.convertToOrdinalsFilter(DocValueFormat.RAW); diff --git a/server/src/test/java/org/opensearch/search/aggregations/support/MissingValuesTests.java b/server/src/test/java/org/opensearch/search/aggregations/support/MissingValuesTests.java index 598c1323fc13f..0eca61d825a2d 100644 --- a/server/src/test/java/org/opensearch/search/aggregations/support/MissingValuesTests.java +++ b/server/src/test/java/org/opensearch/search/aggregations/support/MissingValuesTests.java @@ -155,6 +155,11 @@ public long nextOrd() { return NO_MORE_ORDS; } } + + @Override + public long docValueCount() { + return ords[doc].length; + } }; final BytesRef existingMissing = RandomPicks.randomFrom(random(), values); @@ -257,6 +262,11 @@ public BytesRef lookupOrd(long ord) throws IOException { return values[Math.toIntExact(ord)]; } + @Override + public long docValueCount() { + throw new UnsupportedOperationException(); + } + @Override public long getValueCount() { return values.length; diff --git a/test/framework/src/main/java/org/opensearch/indices/analysis/AnalysisFactoryTestCase.java b/test/framework/src/main/java/org/opensearch/indices/analysis/AnalysisFactoryTestCase.java index fd762289caddb..27f3312626e48 100644 --- a/test/framework/src/main/java/org/opensearch/indices/analysis/AnalysisFactoryTestCase.java +++ 
b/test/framework/src/main/java/org/opensearch/indices/analysis/AnalysisFactoryTestCase.java @@ -221,6 +221,7 @@ public abstract class AnalysisFactoryTestCase extends OpenSearchTestCase { .put("spanishpluralstem", Void.class) // LUCENE-10352 .put("daitchmokotoffsoundex", Void.class) + .put("persianstem", Void.class) .immutableMap(); static final Map> KNOWN_CHARFILTERS = new MapBuilder>() From 4a87906362a03517f3c90b9953126ba835739f50 Mon Sep 17 00:00:00 2001 From: Rabi Panda Date: Fri, 20 May 2022 15:29:56 -0700 Subject: [PATCH 05/16] Fix release notes for 2.0.0-rc1 version (#3418) This change removes some old commits from the 2.0.0-rc1 release notes. These commits were already released as part of 1.x releases. Add back some missing type removal commits to the 2.0.0 release notes Signed-off-by: Rabi Panda --- .../opensearch.release-notes-2.0.0-rc1.md | 640 +++--------------- .../opensearch.release-notes-2.0.0.md | 23 +- 2 files changed, 112 insertions(+), 551 deletions(-) diff --git a/release-notes/opensearch.release-notes-2.0.0-rc1.md b/release-notes/opensearch.release-notes-2.0.0-rc1.md index 5171424203c62..26a721d013bb6 100644 --- a/release-notes/opensearch.release-notes-2.0.0-rc1.md +++ b/release-notes/opensearch.release-notes-2.0.0-rc1.md @@ -45,63 +45,39 @@ #### Upgrades - -* [Upgrade] 1.2 BWC to Lucene 8.10.1 ([#1460](https://github.com/opensearch-project/OpenSearch/pull/1460)) -* [Upgrade] Lucene 9.1 release (#2560) ([#2565](https://github.com/opensearch-project/OpenSearch/pull/2565)) -* [Upgrade] Lucene 9.1.0-snapshot-ea989fe8f30 ([#2487](https://github.com/opensearch-project/OpenSearch/pull/2487)) -* [Upgrade] Lucene 9.0.0 release ([#1109](https://github.com/opensearch-project/OpenSearch/pull/1109)) -* Set target and source compatibility to 11, required by Lucene 9. ([#2407](https://github.com/opensearch-project/OpenSearch/pull/2407)) -* Upgrade to Lucene 8.10.1 ([#1440](https://github.com/opensearch-project/OpenSearch/pull/1440)) -* Upgrade to Lucene 8.9 ([#1080](https://github.com/opensearch-project/OpenSearch/pull/1080)) -* Update lucene version to 8.8.2 ([#557](https://github.com/opensearch-project/OpenSearch/pull/557)) -* Support Gradle 7. Fixing 'eclipse' plugin dependencies ([#1648](https://github.com/opensearch-project/OpenSearch/pull/1648)) -* Update to Gradle 7.3.3 ([#1803](https://github.com/opensearch-project/OpenSearch/pull/1803)) -* Support Gradle 7. More reliable tasks dependencies for Maven plugins publishing ([#1630](https://github.com/opensearch-project/OpenSearch/pull/1630)) -* Support Gradle 7. 
Fixing publishing to Maven Local for plugins ([#1624](https://github.com/opensearch-project/OpenSearch/pull/1624)) -* Support Gradle 7 ([#1609](https://github.com/opensearch-project/OpenSearch/pull/1609)) +* [Upgrade] Lucene 9.1 release ([#2560](https://github.com/opensearch-project/OpenSearch/pull/2560)) +* [Upgrade] ICU4j from 68.2 to 70.1 ([#2504](https://github.com/opensearch-project/OpenSearch/pull/2504)) #### Deprecations - -* [Remove] Deprecated Synced Flush API ([#1761](https://github.com/opensearch-project/OpenSearch/pull/1761)) -* Remove deprecated search.remote settings ([#1870](https://github.com/opensearch-project/OpenSearch/pull/1870)) -* [Remove] Default Mapping ([#2151](https://github.com/opensearch-project/OpenSearch/pull/2151)) -* Remove Deprecated SimpleFS ([#1639](https://github.com/opensearch-project/OpenSearch/pull/1639)) -* [Remove] Deprecated Zen1 Discovery ([#1216](https://github.com/opensearch-project/OpenSearch/pull/1216)) -* Remove LegacyESVersion.V_6_8_x constants ([#1869](https://github.com/opensearch-project/OpenSearch/pull/1869)) -* Remove LegacyESVersion.V_6_7_x constants ([#1807](https://github.com/opensearch-project/OpenSearch/pull/1807)) -* Remove LegacyESVersion.V_6_6_x constants ([#1804](https://github.com/opensearch-project/OpenSearch/pull/1804)) -* Remove LegacyESVersion.V_6_5_x constants ([#1794](https://github.com/opensearch-project/OpenSearch/pull/1794)) -* Remove deprecated transport client ([#1781](https://github.com/opensearch-project/OpenSearch/pull/1781)) -* Remove LegacyVersion.v6.4.x constants ([#1787](https://github.com/opensearch-project/OpenSearch/pull/1787)) -* Remove LegacyESVersion.V_6_3_x constants ([#1691](https://github.com/opensearch-project/OpenSearch/pull/1691)) -* Remove LegacyESVersion.V_6_2_x constants ([#1686](https://github.com/opensearch-project/OpenSearch/pull/1686)) -* Remove LegacyESVersion.V_6_1_x constants ([#1681](https://github.com/opensearch-project/OpenSearch/pull/1681)) -* Remove 6.0.* version constants ([#1658](https://github.com/opensearch-project/OpenSearch/pull/1658)) -* [Remove] 6x skip from yml ([#2153](https://github.com/opensearch-project/OpenSearch/pull/2153)) +* Deprecate setting 'cluster.no_master_block' and introduce the alternative setting 'cluster.no_cluster_manager_block' ([#2453](https://github.com/opensearch-project/OpenSearch/pull/2453)) +* Deprecate setting 'cluster.service.slow_master_task_logging_threshold' and introduce the alternative setting 'cluster.service.slow_cluster_manager_task_logging_threshold' ([#2451](https://github.com/opensearch-project/OpenSearch/pull/2451)) +* Deprecate setting 'cluster.initial_master_nodes' and introduce the alternative setting 'cluster.initial_cluster_manager_nodes' ([#2463](https://github.com/opensearch-project/OpenSearch/pull/2463)) ### Security Fixes - -* [CVE] Upgrade dependencies for Azure related plugins to mitigate CVEs ([#688](https://github.com/opensearch-project/OpenSearch/pull/688)) -* [CVE] Upgrade dependencies to mitigate CVEs ([#657](https://github.com/opensearch-project/OpenSearch/pull/657)) -* [CVE-2018-11765] Upgrade hadoop dependencies for hdfs plugin ([#654](https://github.com/opensearch-project/OpenSearch/pull/654)) -* [CVE-2020-7692] Upgrade google-oauth clients for goolge cloud plugins ([#662](https://github.com/opensearch-project/OpenSearch/pull/662)) -* [CVE-2020-36518] Update jackson-databind to 2.13.2.2 (#2599) ([#2647](https://github.com/opensearch-project/OpenSearch/pull/2647)) -* Remove old ES libraries used in reindex due to 
CVEs ([#1359](https://github.com/opensearch-project/OpenSearch/pull/1359)) +* [CVE-2020-36518] Update jackson-databind to 2.13.2.2 ([#2599](https://github.com/opensearch-project/OpenSearch/pull/2599)) ### Features/Enhancements - -* Allowing custom folder name for plugin installation ([#848](https://github.com/opensearch-project/OpenSearch/pull/848)) -* A CLI tool to assist during an upgrade to OpenSearch. ([#846](https://github.com/opensearch-project/OpenSearch/pull/846)) -* Enable adding experimental features through sandbox modules ([#691](https://github.com/opensearch-project/OpenSearch/pull/691)) -* Rank feature - unknown field linear ([#983](https://github.com/opensearch-project/OpenSearch/pull/983)) -* [FEATURE] Add OPENSEARCH_JAVA_HOME env to override JAVA_HOME ([#2001](https://github.com/opensearch-project/OpenSearch/pull/2001)) -* Add request parameter 'cluster_manager_timeout' and deprecate 'master_timeout' - in Ingest APIs and Script APIs (#2682) ([#2891](https://github.com/opensearch-project/OpenSearch/pull/2891)) -* Change deprecation message for API parameter value 'master_node' of parameter 'metric' (#2880) ([#2882](https://github.com/opensearch-project/OpenSearch/pull/2882)) -* Add request parameter 'cluster_manager_timeout' and deprecate 'master_timeout' - in Snapshot APIs (#2680) ([#2871](https://github.com/opensearch-project/OpenSearch/pull/2871)) -* Add request parameter 'cluster_manager_timeout' and deprecate 'master_timeout' - in Index Template APIs (#2678) ([#2867](https://github.com/opensearch-project/OpenSearch/pull/2867)) -* Add request parameter 'cluster_manager_timeout' and deprecate 'master_timeout' - in Index APIs except index template APIs (#2660) ([#2771](https://github.com/opensearch-project/OpenSearch/pull/2771)) -* Add request parameter 'cluster_manager_timeout' and deprecate 'master_timeout' - in Cluster APIs (#2658) ([#2755](https://github.com/opensearch-project/OpenSearch/pull/2755)) -* [Backport 2.0] Add request parameter 'cluster_manager_timeout' as the alternative for 'master_timeout', and deprecate 'master_timeout' - in CAT APIs ([#2717](https://github.com/opensearch-project/OpenSearch/pull/2717)) +* Remove deprecation warning of using REST API request parameter 'master_timeout' ([#2920](https://github.com/opensearch-project/OpenSearch/pull/2920)) +* Add deprecated API for creating History Ops Snapshot from translog ([#2886](https://github.com/opensearch-project/OpenSearch/pull/2886)) +* Add request parameter 'cluster_manager_timeout' and deprecate 'master_timeout' - in Ingest APIs and Script APIs ([#2682](https://github.com/opensearch-project/OpenSearch/pull/2682)) +* Change deprecation message for API parameter value 'master_node' of parameter 'metric' ([#2880](https://github.com/opensearch-project/OpenSearch/pull/2880)) +* Add request parameter 'cluster_manager_timeout' and deprecate 'master_timeout' - in Snapshot APIs ([#2680](https://github.com/opensearch-project/OpenSearch/pull/2680)) +* Add request parameter 'cluster_manager_timeout' and deprecate 'master_timeout' - in Index Template APIs ([#2678](https://github.com/opensearch-project/OpenSearch/pull/2678)) +* Change deprecation message for REST API parameter 'master_timeout' to specify the version of removal ([#2863](https://github.com/opensearch-project/OpenSearch/pull/2863)) +* Decouple IndexSettings from IncludeExclude ([#2860](https://github.com/opensearch-project/OpenSearch/pull/2860)) +* Remove endpoint_suffix dependency on account key 
([#2485](https://github.com/opensearch-project/OpenSearch/pull/2485)) +* Replace remaining 'blacklist' with 'denylist' in internal class and method names ([#2784](https://github.com/opensearch-project/OpenSearch/pull/2784)) +* Make discovered_master field optional on the client to support compatibility for opensearch client with odfe ([#2641](https://github.com/opensearch-project/OpenSearch/pull/2641)) +* Add request parameter 'cluster_manager_timeout' and deprecate 'master_timeout' - in Index APIs except index template APIs ([#2660](https://github.com/opensearch-project/OpenSearch/pull/2660)) +* Add request parameter 'cluster_manager_timeout' and deprecate 'master_timeout' - in Cluster APIs ([#2658](https://github.com/opensearch-project/OpenSearch/pull/2658)) +* Make Rest-High-Rest-Level tests allow deprecation warning temporarily, during deprecation of request parameter 'master_timeout' ([#2702](https://github.com/opensearch-project/OpenSearch/pull/2702)) +* Add request parameter 'cluster_manager_timeout' as the alternative for 'master_timeout', and deprecate 'master_timeout' - in CAT APIs ([#2717](https://github.com/opensearch-project/OpenSearch/pull/2717)) +* Add mapping method back referenced in other repos ([#2636](https://github.com/opensearch-project/OpenSearch/pull/2636)) +* Replaced "master" terminology in Log message ([#2575](https://github.com/opensearch-project/OpenSearch/pull/2575)) +* Introduce QueryPhaseSearcher extension point (SearchPlugin) ([#1931](https://github.com/opensearch-project/OpenSearch/pull/1931)) +* Support for geo_bounding_box queries on geo_shape fields ([#2506](https://github.com/opensearch-project/OpenSearch/pull/2506)) +* Updating repository commons logging version ([#2541](https://github.com/opensearch-project/OpenSearch/pull/2541)) +* Support for geo_distance queries on geo_shape fields ([#2516](https://github.com/opensearch-project/OpenSearch/pull/2516)) * Add 'cluster_manager_node' into ClusterState Metric as an alternative to 'master_node' ([#2415](https://github.com/opensearch-project/OpenSearch/pull/2415)) * Add a new node role 'cluster_manager' as the alternative for 'master' role and deprecate 'master' role ([#2424](https://github.com/opensearch-project/OpenSearch/pull/2424)) * Replace 'master' with 'cluster_manager' in 'GET Cat Nodes' API ([#2441](https://github.com/opensearch-project/OpenSearch/pull/2441)) @@ -109,520 +85,84 @@ * Add a field discovered_cluster_manager in get cluster health api ([#2437](https://github.com/opensearch-project/OpenSearch/pull/2437)) * Add request parameter 'cluster_manager_timeout' as the alternative for 'master_timeout', and deprecate 'master_timeout' - in CAT Nodes API ([#2435](https://github.com/opensearch-project/OpenSearch/pull/2435)) * Add a new REST API endpoint 'GET _cat/cluster_manager' as the replacement of 'GET _cat/master' ([#2404](https://github.com/opensearch-project/OpenSearch/pull/2404)) -* Deprecate setting 'cluster.no_master_block' and introduce the alternative setting 'cluster.no_cluster_manager_block' ([#2453](https://github.com/opensearch-project/OpenSearch/pull/2453)) -* Deprecate setting 'cluster.service.slow_master_task_logging_threshold' and introduce the alternative setting 'cluster.service.slow_cluster_manager_task_logging_threshold' ([#2451](https://github.com/opensearch-project/OpenSearch/pull/2451)) -* Deprecate setting 'cluster.initial_master_nodes' and introduce the alternative setting 'cluster.initial_cluster_manager_nodes' 
([#2463](https://github.com/opensearch-project/OpenSearch/pull/2463)) -* Replace remaining 'blacklist' with 'denylist' in internal class and method names (#2784) ([#2813](https://github.com/opensearch-project/OpenSearch/pull/2813)) -* Centralize codes related to 'master_timeout' deprecation for eaiser removal - in CAT Nodes API (#2670) ([#2696](https://github.com/opensearch-project/OpenSearch/pull/2696)) -* Make Rest-High-Rest-Level tests allow deprecation warning temporarily, during deprecation of request parameter 'master_timeout' (#2702) ([#2741](https://github.com/opensearch-project/OpenSearch/pull/2741)) -* Replaced "master" terminology in Log message (#2575) ([#2594](https://github.com/opensearch-project/OpenSearch/pull/2594)) -* Deprecate setting 'reindex.remote.whitelist' and introduce the alternative setting 'reindex.remote.allowlist' ([#2221](https://github.com/opensearch-project/OpenSearch/pull/2221)) -* Replace exclusionary words whitelist and blacklist in the places that won't impact backwards compatibility ([#2178](https://github.com/opensearch-project/OpenSearch/pull/2178)) -* Support for geo_bounding_box queries on geo_shape fields ([#2506](https://github.com/opensearch-project/OpenSearch/pull/2506)) -* Support for geo_distance queries on geo_shape fields ([#2516](https://github.com/opensearch-project/OpenSearch/pull/2516)) -* Add '_name' field support to score functions and provide it back in explanation response ([#2244](https://github.com/opensearch-project/OpenSearch/pull/2244)) -* Add support of SOCKS proxies for S3 repository ([#2160](https://github.com/opensearch-project/OpenSearch/pull/2160)) -* Case Insensitive Support in Regexp Interval ([#2237](https://github.com/opensearch-project/OpenSearch/pull/2237)) -* Support unordered non-overlapping intervals ([#2103](https://github.com/opensearch-project/OpenSearch/pull/2103)) -* Support _first and _last parameter for missing bucket ordering in composite aggregation ([#1942](https://github.com/opensearch-project/OpenSearch/pull/1942)) -* Concurrent Searching (Experimental): modify profiling implementation to support concurrent data collection ([#1673](https://github.com/opensearch-project/OpenSearch/pull/1673)) -* Changes to support retrieval of operations from translog based on specified range ([#1210](https://github.com/opensearch-project/OpenSearch/pull/1210)) -* Support for translog pruning based on retention leases ([#1038](https://github.com/opensearch-project/OpenSearch/pull/1038)) -* Support for bwc tests for plugins ([#1051](https://github.com/opensearch-project/OpenSearch/pull/1051)) -* Part 1: Support for cancel_after_timeinterval parameter in search and msearch request ([#986](https://github.com/opensearch-project/OpenSearch/pull/986)) -* alt bash path support ([#1047](https://github.com/opensearch-project/OpenSearch/pull/1047)) -* Support Data Streams in OpenSearch ([#690](https://github.com/opensearch-project/OpenSearch/pull/690)) -* Support for Heap after GC stats (correction after backport to 1.2.0) ([#1315](https://github.com/opensearch-project/OpenSearch/pull/1315)) -* Support for Heap after GC stats ([#1265](https://github.com/opensearch-project/OpenSearch/pull/1265)) -* Add deprecated API for creating History Ops Snapshot from translog (#2886) ([#2917](https://github.com/opensearch-project/OpenSearch/pull/2917)) -* Introduce QueryPhaseSearcher extension point (SearchPlugin) ([#1931](https://github.com/opensearch-project/OpenSearch/pull/1931)) * Add default for EnginePlugin.getEngineFactory 
([#2419](https://github.com/opensearch-project/OpenSearch/pull/2419)) -* Add valuesField in PercentilesAggregationBuilder streamInput constructor ([#2308](https://github.com/opensearch-project/OpenSearch/pull/2308)) -* Reintroduce negative epoch_millis #1991 ([#2232](https://github.com/opensearch-project/OpenSearch/pull/2232)) -* Install plugin command help ([#2193](https://github.com/opensearch-project/OpenSearch/pull/2193)) -* Always use Lucene index in peer recovery ([#2077](https://github.com/opensearch-project/OpenSearch/pull/2077)) -* Add Factory to enable Lucene ConcatenateGraphFilter (#1278) ([#2152](https://github.com/opensearch-project/OpenSearch/pull/2152)) -* Add proxy settings for GCS repository ([#2096](https://github.com/opensearch-project/OpenSearch/pull/2096)) -* Add proxy username and password settings for Azure repository ([#2098](https://github.com/opensearch-project/OpenSearch/pull/2098)) -* Add regexp interval source ([#1917](https://github.com/opensearch-project/OpenSearch/pull/1917)) -* Delay the request size calculation until required by the indexing pressure framework ([#1592](https://github.com/opensearch-project/OpenSearch/pull/1592)) -* Enabling Sort Optimization to make use of Lucene ([#1974](https://github.com/opensearch-project/OpenSearch/pull/1974)) -* Add max_expansions option to wildcard interval ([#1916](https://github.com/opensearch-project/OpenSearch/pull/1916)) -* Prefer adaptive replica selection over awareness attribute based routing ([#1107](https://github.com/opensearch-project/OpenSearch/pull/1107)) -* Prioritize primary shard movement during shard allocation ([#1445](https://github.com/opensearch-project/OpenSearch/pull/1445)) -* Enforce soft deletes ([#1903](https://github.com/opensearch-project/OpenSearch/pull/1903)) -* Make SortBuilders pluggable ([#1856](https://github.com/opensearch-project/OpenSearch/pull/1856)) -* Use try-with-resources with MockLogAppender ([#1595](https://github.com/opensearch-project/OpenSearch/pull/1595)) -* Bridging the gap in network overhead measurement in the profiler ([#1360](https://github.com/opensearch-project/OpenSearch/pull/1360)) -* Adding a cancelled field to tell if a cancellable task is cancelled ([#1732](https://github.com/opensearch-project/OpenSearch/pull/1732)) -* Avoid logging duplicate deprecation warnings multiple times ([#1660](https://github.com/opensearch-project/OpenSearch/pull/1660)) -* Added more detailed logging for SSLHandshakeException ([#1602](https://github.com/opensearch-project/OpenSearch/pull/1602)) -* Rename field_masking_span to span_field_masking ([#1606](https://github.com/opensearch-project/OpenSearch/pull/1606)) -* Giving informative error messages for double slashes in API call URLs ([#1568](https://github.com/opensearch-project/OpenSearch/pull/1568)) -* Renaming slave to replica in filebeat-6.0.template.json file. 
([#1569](https://github.com/opensearch-project/OpenSearch/pull/1569)) -* Enable RestHighLevel-Client to set parameter require_alias for bulk index and reindex requests ([#1533](https://github.com/opensearch-project/OpenSearch/pull/1533)) -* Improve leader node-left logging to indicate timeout/coordination state rejection ([#1584](https://github.com/opensearch-project/OpenSearch/pull/1584)) -* Added logic to allow {dot} files on startup ([#1437](https://github.com/opensearch-project/OpenSearch/pull/1437)) -* remove codeQL warning about implicit narrowing conversion in compound assignment ([#1403](https://github.com/opensearch-project/OpenSearch/pull/1403)) -* Make TranslogDeletionPolicy abstract for extension ([#1456](https://github.com/opensearch-project/OpenSearch/pull/1456)) -* Remove deprecated settings and logic for translog pruning by retention lease. ([#1416](https://github.com/opensearch-project/OpenSearch/pull/1416)) -* Adjust CodeCache size to eliminate JVM warnings (and crashes) ([#1426](https://github.com/opensearch-project/OpenSearch/pull/1426)) -* Add extension point for custom TranslogDeletionPolicy in EnginePlugin. ([#1404](https://github.com/opensearch-project/OpenSearch/pull/1404)) -* Update node attribute check to version update (1.2) check for shard indexing pressure serialization. ([#1395](https://github.com/opensearch-project/OpenSearch/pull/1395)) -* Add EngineConfig extensions to EnginePlugin ([#1387](https://github.com/opensearch-project/OpenSearch/pull/1387)) -* Add Shard Level Indexing Pressure ([#1336](https://github.com/opensearch-project/OpenSearch/pull/1336)) -* Making GeneralScriptException an Implementation of OpensearchWrapperException ([#1066](https://github.com/opensearch-project/OpenSearch/pull/1066)) -* Handle shard over allocation during partial zone/rack or independent node failures ([#1149](https://github.com/opensearch-project/OpenSearch/pull/1149)) -* Introduce FS Health HEALTHY threshold to fail stuck node ([#1167](https://github.com/opensearch-project/OpenSearch/pull/1167)) -* Drop mocksocket in favour of custom security manager checks (tests only) ([#1205](https://github.com/opensearch-project/OpenSearch/pull/1205)) -* Improving the Grok circular reference check to prevent stack overflow ([#1079](https://github.com/opensearch-project/OpenSearch/pull/1079)) -* Introduce replaceRoutes() method and 2 new constructors to RestHandler.java ([#947](https://github.com/opensearch-project/OpenSearch/pull/947)) -* Fail fast when BytesRestResponse ctor throws exception ([#923](https://github.com/opensearch-project/OpenSearch/pull/923)) -* Restricting logs permissions ([#966](https://github.com/opensearch-project/OpenSearch/pull/966)) -* Avoid override of routes() in BaseRestHandler to respect the default behavior defined in RestHandler ([#889](https://github.com/opensearch-project/OpenSearch/pull/889)) -* Replacing docs-beta links with /docs ([#957](https://github.com/opensearch-project/OpenSearch/pull/957)) -* Adding broken links checker ([#877](https://github.com/opensearch-project/OpenSearch/pull/877)) -* Pass interceptor to super constructor ([#876](https://github.com/opensearch-project/OpenSearch/pull/876)) -* Add 'tagline' back to MainResponse in server that was removed in PR #427 ([#913](https://github.com/opensearch-project/OpenSearch/pull/913)) -* Remove distribution from main response in compatibility mode ([#898](https://github.com/opensearch-project/OpenSearch/pull/898)) -* Replace metadata keys in OpenSearchException during serialization and 
deserialization ([#905](https://github.com/opensearch-project/OpenSearch/pull/905)) -* Add cluster setting to spoof version number returned from MainResponse ([#847](https://github.com/opensearch-project/OpenSearch/pull/847)) -* Add URL for lucene snapshots ([#858](https://github.com/opensearch-project/OpenSearch/pull/858)) -* Decouple throttling limits for new and old indices. ([#778](https://github.com/opensearch-project/OpenSearch/pull/778)) -* Verbose plugin not found exception ([#849](https://github.com/opensearch-project/OpenSearch/pull/849)) -* Enable BWC checks ([#796](https://github.com/opensearch-project/OpenSearch/pull/796)) -* Add a method to use fallback setting to set the memory size ([#755](https://github.com/opensearch-project/OpenSearch/pull/755)) -* An allocation constraint mechanism, that de-prioritizes nodes from getting picked for allocation if they breach certain constraints ([#680](https://github.com/opensearch-project/OpenSearch/pull/680)) -* Create group settings with fallback. ([#743](https://github.com/opensearch-project/OpenSearch/pull/743)) -* Add timeout on cat/stats API ([#552](https://github.com/opensearch-project/OpenSearch/pull/552)) -* Make allocation decisions at node level first for pending task optimi… ([#534](https://github.com/opensearch-project/OpenSearch/pull/534)) -* Decouples primaries_recoveries limit from concurrent recoveries limit. ([#546](https://github.com/opensearch-project/OpenSearch/pull/546)) -* Merging javadoc feature branch changes to main ([#715](https://github.com/opensearch-project/OpenSearch/pull/715)) -* Add read_only block argument to opensearch-node unsafe-bootstrap command ([#599](https://github.com/opensearch-project/OpenSearch/pull/599)) -* Catch runtime exceptions to make class loader race conditions easier to debug. ([#608](https://github.com/opensearch-project/OpenSearch/pull/608)) -* Remove URL content from Reindex error response ([#630](https://github.com/opensearch-project/OpenSearch/pull/630)) -* Standardize int, long, double and float Setting constructors. ([#665](https://github.com/opensearch-project/OpenSearch/pull/665)) -* Add Remote Reindex SPI extension ([#547](https://github.com/opensearch-project/OpenSearch/pull/547)) -* Make default number of shards configurable ([#625](https://github.com/opensearch-project/OpenSearch/pull/625)) -* Converted all .asciidoc to .md. ([#658](https://github.com/opensearch-project/OpenSearch/pull/658)) -* Make -Dtests.output=always actually work. ([#648](https://github.com/opensearch-project/OpenSearch/pull/648)) -* Handle inefficiencies while fetching the delayed unassigned shards during cluster health ([#588](https://github.com/opensearch-project/OpenSearch/pull/588)) -* Replace elastic.co with opensearch.org ([#611](https://github.com/opensearch-project/OpenSearch/pull/611)) -* Speedup lang-painless tests ([#605](https://github.com/opensearch-project/OpenSearch/pull/605)) -* Speedup snapshot stale indices delete ([#613](https://github.com/opensearch-project/OpenSearch/pull/613)) -* Speed ups to test suite and precommit tasks. ([#580](https://github.com/opensearch-project/OpenSearch/pull/580)) -* [Versioning] Rebase to OpenSearch version 1.0.0 ([#555](https://github.com/opensearch-project/OpenSearch/pull/555)) -* Prevent setting maxParallelForks=0 on single-cpu machines ([#558](https://github.com/opensearch-project/OpenSearch/pull/558)) -* Use alternate example data in OpenSearch test cases. 
([#454](https://github.com/opensearch-project/OpenSearch/pull/454)) ### Bug Fixes - -* Adding a null pointer check to fix index_prefix query (#2879) ([#2903](https://github.com/opensearch-project/OpenSearch/pull/2903)) -* Fix issue that deprecated setting 'cluster.initial_master_nodes' is not identified in node bootstrap check (#2779) ([#2794](https://github.com/opensearch-project/OpenSearch/pull/2794)) -* [Bug] Fix InboundDecoder version compat check (#2570) ([#2573](https://github.com/opensearch-project/OpenSearch/pull/2573)) -* Fixing PluginsServiceTests (post Lucene 9 update) ([#2484](https://github.com/opensearch-project/OpenSearch/pull/2484)) -* Fixing the --release flag usage for javac (#2343) ([#2352](https://github.com/opensearch-project/OpenSearch/pull/2352)) -* Fix flaky test case - string profiler via global ordinals ([#2226](https://github.com/opensearch-project/OpenSearch/pull/2226)) -* Fixing the indentation in version.yml ([#2163](https://github.com/opensearch-project/OpenSearch/pull/2163)) -* Fixing org.opensearch.monitor.os.OsProbeTests::testLogWarnCpuMessageOnlyOnes when CGroups are not available ([#2101](https://github.com/opensearch-project/OpenSearch/pull/2101)) -* Fix integration tests failure ([#2067](https://github.com/opensearch-project/OpenSearch/pull/2067)) -* Another attempt to fix o.o.transport.netty4.OpenSearchLoggingHandlerIT fails w/ stack overflow ([#2051](https://github.com/opensearch-project/OpenSearch/pull/2051)) -* Fix AssertionError message ([#2044](https://github.com/opensearch-project/OpenSearch/pull/2044)) -* Fix composite aggregation failed test cases introduce by missing_order parameter (#1942) ([#2005](https://github.com/opensearch-project/OpenSearch/pull/2005)) -* Fixing allocation filters to persist existing state on settings update ([#1718](https://github.com/opensearch-project/OpenSearch/pull/1718)) -* Fix more failing tests as a result of renaming ([#457](https://github.com/opensearch-project/OpenSearch/pull/457)) -* Fix failing rest-api-spec tests as part of renaming. ([#451](https://github.com/opensearch-project/OpenSearch/pull/451)) -* Fix multiple failing server tests. ([#453](https://github.com/opensearch-project/OpenSearch/pull/453)) -* [TEST] Fix FsHealthServiceTest by increasing the timeout period before checking the FS health after restoring the FS status ([#1813](https://github.com/opensearch-project/OpenSearch/pull/1813)) -* [BUG] Wait for outstanding requests to complete in LastSuccessfulSett… ([#1939](https://github.com/opensearch-project/OpenSearch/pull/1939)) -* [Bug] Wait for outstanding requests to complete ([#1925](https://github.com/opensearch-project/OpenSearch/pull/1925)) -* [BUG] Serialization bugs can cause node drops ([#1885](https://github.com/opensearch-project/OpenSearch/pull/1885)) -* [BUG] Docker distribution builds are failing. 
Switching to http://vault.centos.org ([#2024](https://github.com/opensearch-project/OpenSearch/pull/2024)) -* [BUG] SymbolicLinkPreservingUntarTransform fails on Windows ([#1433](https://github.com/opensearch-project/OpenSearch/pull/1433)) -* [BUG] ConcurrentSnapshotsIT#testAssertMultipleSnapshotsAndPrimaryFailOver fails intermittently ([#1311](https://github.com/opensearch-project/OpenSearch/pull/1311)) -* [Bug] Fix InstallPluginCommand to use proper key signatures ([#1233](https://github.com/opensearch-project/OpenSearch/pull/1233)) -* [Bug] Fix mixed cluster support for OpenSearch 2+ ([#1191](https://github.com/opensearch-project/OpenSearch/pull/1191)) -* [BUG] Fix cat.health test failures in pre 1.0.0 mixed cluster test ([#928](https://github.com/opensearch-project/OpenSearch/pull/928)) -* [BUG] Fix versioning issues discovered through version bump ([#884](https://github.com/opensearch-project/OpenSearch/pull/884)) -* [BUG] fix MainResponse to spoof version number for legacy clients ([#708](https://github.com/opensearch-project/OpenSearch/pull/708)) -* [Bug] Fix gradle build on Windows failing from a recent change ([#758](https://github.com/opensearch-project/OpenSearch/pull/758)) -* Apply fix for health API response to distinguish no master ([#656](https://github.com/opensearch-project/OpenSearch/pull/656)) -* Rename translog pruning setting to CCR specific setting and addressed Bug in the test case ([#1243](https://github.com/opensearch-project/OpenSearch/pull/1243)) -* fix gradle check fail due to renameing -min in #1094 ([#1289](https://github.com/opensearch-project/OpenSearch/pull/1289)) -* Added explicit 'null' check for response listener to prevent obscure NullPointerException issues (#3048) ([#3050](https://github.com/opensearch-project/OpenSearch/pull/3050)) -* [Backport] [2.0] Bugfix to guard against stack overflow errors caused by very large reg-ex input ([#2816](https://github.com/opensearch-project/OpenSearch/pull/2816)) -* [Bug] Change 1.0.0 version check in PluginInfo -* TEST BUG: MergeSchedulerSettingsTests fails always on small machines ([#559](https://github.com/opensearch-project/OpenSearch/pull/559)) -* Fix bwcVersions after bumping version 1.3.1 ([#2532](https://github.com/opensearch-project/OpenSearch/pull/2532)) -* Fixing bwcVersions and bwc builds (#2430) - adding 1.4.0 into main bwcVersions -* Fixing invalid Java code example in JavaDoc ([#2008](https://github.com/opensearch-project/OpenSearch/pull/2008)) -* Fixing org.opensearch.common.network.InetAddressesTests.testForStringIPv6WithScopeIdInput ([#1913](https://github.com/opensearch-project/OpenSearch/pull/1913)) -* Fix o.o.transport.netty4.OpenSearchLoggingHandlerIT stack overflow test failure ([#1900](https://github.com/opensearch-project/OpenSearch/pull/1900)) -* Fix verifyVersions gradle task and cleanup bwcVersions ([#1878](https://github.com/opensearch-project/OpenSearch/pull/1878)) -* Attempt to fix :test:fixtures:s3-fixture:composeUp fails due to HTTP connection issue ([#1866](https://github.com/opensearch-project/OpenSearch/pull/1866)) -* Fixing build failures after Flavor Serialization backport ([#1867](https://github.com/opensearch-project/OpenSearch/pull/1867)) -* Fixing auto backport workflow ([#1845](https://github.com/opensearch-project/OpenSearch/pull/1845)) -* Upgrade and fix link checker to 1.2. 
([#1811](https://github.com/opensearch-project/OpenSearch/pull/1811)) -* link checker fix - only run on opensearch-project/OpenSearch ([#1719](https://github.com/opensearch-project/OpenSearch/pull/1719)) -* Fixing .gitattributes for binary content, removing *.class files ([#1717](https://github.com/opensearch-project/OpenSearch/pull/1717)) -* Fix unit test testFailsHealthOnHungIOBeyondHealthyTimeout() by incresing the max waiting time before assertion ([#1692](https://github.com/opensearch-project/OpenSearch/pull/1692)) -* Fixing bwc test for repository-multi-version ([#1441](https://github.com/opensearch-project/OpenSearch/pull/1441)) -* Fixing support for a multi-node cluster via "gradle run" ([#1455](https://github.com/opensearch-project/OpenSearch/pull/1455)) -* Fix windows build (mostly) ([#1412](https://github.com/opensearch-project/OpenSearch/pull/1412)) -* Fixing post merge 3rd party audit issues ([#1384](https://github.com/opensearch-project/OpenSearch/pull/1384)) -* Minor fix for the flaky test to reduce concurrency (#1361) ([#1364](https://github.com/opensearch-project/OpenSearch/pull/1364)) -* Fixing org.opensearch.repositories.azure.AzureBlobContainerRetriesTests and org.opensearch.action.admin.cluster.node.stats.NodeStatsTests ([#1390](https://github.com/opensearch-project/OpenSearch/pull/1390)) -* Fix failure in SearchCancellationIT.testMSearchChildReqCancellationWithHybridTimeout ([#1103](https://github.com/opensearch-project/OpenSearch/pull/1103)) -* Fix failing test caused by versioning change. ([#598](https://github.com/opensearch-project/OpenSearch/pull/598)) -* fixed broken anchor link. ([#436](https://github.com/opensearch-project/OpenSearch/pull/436)) -* [Rename] fix painless test ([#446](https://github.com/opensearch-project/OpenSearch/pull/446)) -* Fix name of the log appender. ([#445](https://github.com/opensearch-project/OpenSearch/pull/445)) -* [Rename] Fixing lingering rename and ./gradlew run will start ([#443](https://github.com/opensearch-project/OpenSearch/pull/443)) -* Fixed copyright to OpenSearch ([#1175](https://github.com/opensearch-project/OpenSearch/pull/1175)) -* Fix defects in code-coverage.gralde to generate code coverage report properly ([#1214](https://github.com/opensearch-project/OpenSearch/pull/1214)) -* Fix failure in SearchCancellationIT.testMSearchChildReqCancellationWithHybridTimeout ([#1103](https://github.com/opensearch-project/OpenSearch/pull/1103)) -* Fix Snapshot pattern in DistributionDownloader. ([#916](https://github.com/opensearch-project/OpenSearch/pull/916)) -* Fix stragglers from renaming to OpenSearch work. ([#483](https://github.com/opensearch-project/OpenSearch/pull/483)) -* Fix rename issues and failing repository-hdfs tests. ([#518](https://github.com/opensearch-project/OpenSearch/pull/518)) -* Fix build-tools integ test failures. ([#465](https://github.com/opensearch-project/OpenSearch/pull/465)) -* Fix a few more renaming issues. 
([#464](https://github.com/opensearch-project/OpenSearch/pull/464)) -* Fix org.opensearch.index.reindex.ReindexRestClientSslTests#testClientSucceedsWithCertificateAuthorities - javax.net.ssl.SSLPeerUnverifiedException ([#1212](https://github.com/opensearch-project/OpenSearch/pull/1212)) -* Fix opensearch-env always sources the environment from hardcoded file ([#875](https://github.com/opensearch-project/OpenSearch/pull/875)) -* Fix resource leak issues suggested by Amazon CodeGuru ([#816](https://github.com/opensearch-project/OpenSearch/pull/816)) -* Fix arm architecture translation issue ([#809](https://github.com/opensearch-project/OpenSearch/pull/809)) -* Fix Javadoc errors in `client/sniffer` ([#802](https://github.com/opensearch-project/OpenSearch/pull/802)) -* [BWC] fix mixedCluster and rolling upgrades ([#775](https://github.com/opensearch-project/OpenSearch/pull/775)) -* Fix #649: Properly escape @ in JavaDoc. ([#651](https://github.com/opensearch-project/OpenSearch/pull/651)) -* Fix snapshot deletion task getting stuck in the event of exceptions ([#629](https://github.com/opensearch-project/OpenSearch/pull/629)) -* Fix failing test caused by versioning change. ([#598](https://github.com/opensearch-project/OpenSearch/pull/598)) -* Use the correct domain to fix failing integration tests. ([#519](https://github.com/opensearch-project/OpenSearch/pull/519)) -* Change OpenSearch Version to OpenSearch version to fix failed test case org.opensearch.plugins.ListPluginsCommandTests.testPluginWithNativeController ([#460](https://github.com/opensearch-project/OpenSearch/pull/460)) -* [Rename] Fix env variables and old es maven repo ([#439](https://github.com/opensearch-project/OpenSearch/pull/439)) +* Added explicit 'null' check for response listener to prevent obscure NullPointerException issues ([#3048](https://github.com/opensearch-project/OpenSearch/pull/3048)) +* Adding a null pointer check to fix index_prefix query ([#2879](https://github.com/opensearch-project/OpenSearch/pull/2879)) +* Bugfix to guard against stack overflow errors caused by very large reg-ex input ([#2816](https://github.com/opensearch-project/OpenSearch/pull/2816)) +* Fix InboundDecoder version compat check ([#2570](https://github.com/opensearch-project/OpenSearch/pull/2570)) * ignore_malformed parameter on ip_range data_type throws mapper_parsing_exception ([#2429](https://github.com/opensearch-project/OpenSearch/pull/2429)) * Discrepancy in result from _validate/query API and actual query validity ([#2416](https://github.com/opensearch-project/OpenSearch/pull/2416)) -* MapperService has to be passed in as null for EnginePlugins CodecService constructor ([#2177](https://github.com/opensearch-project/OpenSearch/pull/2177)) -* Adding shards per node constraint for predictability to testClusterGr… ([#2110](https://github.com/opensearch-project/OpenSearch/pull/2110)) -* Mapping update for “date_range” field type is not idempotent ([#2094](https://github.com/opensearch-project/OpenSearch/pull/2094)) -* Use Version.compareMajor instead of using equals operator ([#1876](https://github.com/opensearch-project/OpenSearch/pull/1876)) -* Execution failed for task ':test:fixtures:azure/s3/hdfs/gcs-fixture:composeDown' ([#1824](https://github.com/opensearch-project/OpenSearch/pull/1824)) -* RestIntegTestTask fails because of missed log4j-core dependency ([#1815](https://github.com/opensearch-project/OpenSearch/pull/1815)) -* Start MockLogAppender before adding to static context 
([#1587](https://github.com/opensearch-project/OpenSearch/pull/1587))
-* Use a non-default port for upgrade-cli unit tests ([#1512](https://github.com/opensearch-project/OpenSearch/pull/1512))
-* Close first engine instance before creating second ([#1457](https://github.com/opensearch-project/OpenSearch/pull/1457))
-* Avoid crashing on using the index.lifecycle.name in the API body ([#1060](https://github.com/opensearch-project/OpenSearch/pull/1060))
-* Max scroll limit breach to throw an OpenSearchRejectedExecutionException ([#1054](https://github.com/opensearch-project/OpenSearch/pull/1054))
-* Extract excludes into a file, fix the link checker by adding http://site.icu-project.org/. ([#1189](https://github.com/opensearch-project/OpenSearch/pull/1189))
-* Prevent /_cat/master from getting tripped by the CB ([#1036](https://github.com/opensearch-project/OpenSearch/pull/1036))
-* Excluding missed broken links from link checker ([#1010](https://github.com/opensearch-project/OpenSearch/pull/1010))
-* Excluding links from link checker ([#995](https://github.com/opensearch-project/OpenSearch/pull/995))
-* Version checks are incorrectly returning versions < 1.0.0. ([#797](https://github.com/opensearch-project/OpenSearch/pull/797))
-* Make `:server:check` pass successfully ([#471](https://github.com/opensearch-project/OpenSearch/pull/471))
-* Correct the regex pattern for class path in testDieWithDignity() ([#466](https://github.com/opensearch-project/OpenSearch/pull/466))
-* Change ESLoggingHandler to OpenSearchLoggingHandler to pass failing test case org.opensearch.transport.netty4.OpenSearchLoggingHandlerIT.testLoggingHandler due to renaming ([#461](https://github.com/opensearch-project/OpenSearch/pull/461))
-
-### Infrastructure
-
-* Using Github App token to trigger CI for version increment PRs ([#2157](https://github.com/opensearch-project/OpenSearch/pull/2157))
-* Using Github App to trigger CI for auto-backport ([#2071](https://github.com/opensearch-project/OpenSearch/pull/2071))
-* Remove precommit and wrapper validation workflows for gradle as we migrate it to internal CI tools ([#452](https://github.com/opensearch-project/OpenSearch/pull/452))
-* Updated the url for docker distribution ([#2325](https://github.com/opensearch-project/OpenSearch/pull/2325))
-* Recommend Docker 3.6.0. ([#1427](https://github.com/opensearch-project/OpenSearch/pull/1427))
-* docker build: use OSS `log4j2.properties` ([#878](https://github.com/opensearch-project/OpenSearch/pull/878))
-* [DOCKER] add apt update to test fixture krb5kdc ([#565](https://github.com/opensearch-project/OpenSearch/pull/565))
-* Cleanup `default` flavor stragglers from docker distributions. ([#481](https://github.com/opensearch-project/OpenSearch/pull/481))
-* Replace blacklist in Gradle build environment configuration (#2752) ([#2781](https://github.com/opensearch-project/OpenSearch/pull/2781))
-* Add 1.3.2 to main causing gradle check failures (#2679) ([#2684](https://github.com/opensearch-project/OpenSearch/pull/2684))
-* Added jenkinsfile to run gradle check in OpenSearch (#2166) ([#2629](https://github.com/opensearch-project/OpenSearch/pull/2629))
-* Gradle check retry (#2638) ([#2661](https://github.com/opensearch-project/OpenSearch/pull/2661))
-* Move Gradle wrapper and precommit checks into OpenSearch repo. ([#1664](https://github.com/opensearch-project/OpenSearch/pull/1664))
-* Enabling missingJavadoc validation in gradle check ([#721](https://github.com/opensearch-project/OpenSearch/pull/721))
-* Removing Jenkinsfile (not used), replaced by opensearch-build/jenkins/opensearch/Jenkinsfile ([#1408](https://github.com/opensearch-project/OpenSearch/pull/1408))
-* Changed JAVA_HOME to jdk-17 (#2656) ([#2671](https://github.com/opensearch-project/OpenSearch/pull/2671))
-* Adding support for JDK17 and removing JDK8 ([#2025](https://github.com/opensearch-project/OpenSearch/pull/2025))
-* Add darwin-arm64-tar and no-jdk-darwin-arm64-tar archive distributions. ([#1668](https://github.com/opensearch-project/OpenSearch/pull/1668))
-* Better JDK-18 EA (and beyond) support of SecurityManager ([#1750](https://github.com/opensearch-project/OpenSearch/pull/1750))
-* Support JDK 18 EA builds ([#1710](https://github.com/opensearch-project/OpenSearch/pull/1710))
-* Adding 1.2.2 ([#1731](https://github.com/opensearch-project/OpenSearch/pull/1731))
-* Add version 1.2.1. ([#1701](https://github.com/opensearch-project/OpenSearch/pull/1701))
-* Add version 1.2.3. ([#1760](https://github.com/opensearch-project/OpenSearch/pull/1760))
-* Modernize and consolidate JDKs usage across all stages of the build. Use JDK-17 as bundled JDK distribution to run tests ([#1358](https://github.com/opensearch-project/OpenSearch/pull/1358))
-* Fix build-tools/reaper source/target compatibility to be JDK-11 (#2596) ([#2606](https://github.com/opensearch-project/OpenSearch/pull/2606))
-* Add darwin-arm64-tar and no-jdk-darwin-arm64-tar archive distributions. ([#1668](https://github.com/opensearch-project/OpenSearch/pull/1668))
-* Remove Github DCO action since DCO runs via Github App now ([#2317](https://github.com/opensearch-project/OpenSearch/pull/2317))
-* Adding Github action for auto backport PR creation ([#1600](https://github.com/opensearch-project/OpenSearch/pull/1600))
-* Add a whitesource unified agent file and update the config ([#1540](https://github.com/opensearch-project/OpenSearch/pull/1540))
-* Run link checker GitHub action on schedule. ([#1221](https://github.com/opensearch-project/OpenSearch/pull/1221))
-* Clarify opensearch.version to not include -SNAPSHOT. ([#1186](https://github.com/opensearch-project/OpenSearch/pull/1186))
-* Move pr template to .github as default since folder design required manually added to url ([#458](https://github.com/opensearch-project/OpenSearch/pull/458))
-* changed label from low hanging fruit to help wanted. added link to filter for that label. Added link to forum ([#435](https://github.com/opensearch-project/OpenSearch/pull/435))
-* adding in untriaged label to features ([#1419](https://github.com/opensearch-project/OpenSearch/pull/1419))
-* Run spotless and exclude checkstyle on plugins module ([#1417](https://github.com/opensearch-project/OpenSearch/pull/1417))
-* Adding spotless support for subprojects under :test ([#1464](https://github.com/opensearch-project/OpenSearch/pull/1464))
-* Run spotless and exclude checkstyle on rest-api-spec module ([#1462](https://github.com/opensearch-project/OpenSearch/pull/1462))
-* Run spotless and exclude checkstyle on modules module ([#1442](https://github.com/opensearch-project/OpenSearch/pull/1442))
-* Enabling spotless, disabling checkstyle check on plugins ([#1488](https://github.com/opensearch-project/OpenSearch/pull/1488))
-* Cleanup for Checkstyle ([#1370](https://github.com/opensearch-project/OpenSearch/pull/1370))
-* Run spotless and exclude checkstyle on libs module ([#1428](https://github.com/opensearch-project/OpenSearch/pull/1428))
-* Run spotless and exclude checkstyle on client module ([#1392](https://github.com/opensearch-project/OpenSearch/pull/1392))
-* Run spotless and exclude checkstyle on server module ([#1380](https://github.com/opensearch-project/OpenSearch/pull/1380))
-* Change whitesource integration to scan on 1.x branch ([#1786](https://github.com/opensearch-project/OpenSearch/pull/1786))
-* Add .whitesource configuration file ([#1525](https://github.com/opensearch-project/OpenSearch/pull/1525))
-* add codeowners file ([#1530](https://github.com/opensearch-project/OpenSearch/pull/1530))
-* Updated links for linkchecker ([#1539](https://github.com/opensearch-project/OpenSearch/pull/1539))
-* Updating dependabot open pr limits ([#1875](https://github.com/opensearch-project/OpenSearch/pull/1875))
-* Updating .gitattributes for additional file types ([#1727](https://github.com/opensearch-project/OpenSearch/pull/1727))
-* Updating the Ivy repository to point to real url for Releases ([#602](https://github.com/opensearch-project/OpenSearch/pull/602))
-* build: introduce support for reproducible builds ([#1995](https://github.com/opensearch-project/OpenSearch/pull/1995))
-* Add support to generate code coverage report with JaCoCo ([#971](https://github.com/opensearch-project/OpenSearch/pull/971))
-* Support running elasticsearch-oss distribution in test cluster for BWC ([#764](https://github.com/opensearch-project/OpenSearch/pull/764))
-* FreeBSD Java support ([#1014](https://github.com/opensearch-project/OpenSearch/pull/1014))
+### Build & Infrastructure
+* Gradle custom java zippublish plugin ([#2988](https://github.com/opensearch-project/OpenSearch/pull/2988))
+* Use G1GC on JDK11+ ([#2964](https://github.com/opensearch-project/OpenSearch/pull/2964))
+* Removed java11 source folders since JDK-11 is the baseline now ([#2898](https://github.com/opensearch-project/OpenSearch/pull/2898))
+* Changed JAVA_HOME to jdk-17 ([#2656](https://github.com/opensearch-project/OpenSearch/pull/2656))
+* Fix build-tools/reaper source/target compatibility to be JDK-11 ([#2596](https://github.com/opensearch-project/OpenSearch/pull/2596))
+* Adding workflow to create documentation related issues in documentation-website repo ([#2929](https://github.com/opensearch-project/OpenSearch/pull/2929))
+* Fix issue that deprecated setting 'cluster.initial_master_nodes' is not identified in node bootstrap check ([#2779](https://github.com/opensearch-project/OpenSearch/pull/2779))
+* Replace blacklist in Gradle build environment configuration ([#2752](https://github.com/opensearch-project/OpenSearch/pull/2752))
+* Update ThirdPartyAuditTask to check for and list pointless exclusions. ([#2760](https://github.com/opensearch-project/OpenSearch/pull/2760))
+* Add Shadow jar publication to lang-painless module. ([#2681](https://github.com/opensearch-project/OpenSearch/pull/2681))
+* Add 1.3.2 to main causing gradle check failures ([#2679](https://github.com/opensearch-project/OpenSearch/pull/2679))
+* Added jenkinsfile to run gradle check in OpenSearch ([#2166](https://github.com/opensearch-project/OpenSearch/pull/2166))
+* Gradle check retry ([#2638](https://github.com/opensearch-project/OpenSearch/pull/2638))
 * Override Default Distribution Download Url with Custom Distribution Url when it is passed from Plugin ([#2420](https://github.com/opensearch-project/OpenSearch/pull/2420))
-* Restore Java 8 compatibility for build tools. (#2300) ([#2321](https://github.com/opensearch-project/OpenSearch/pull/2321))
-* Revert "Override Default Distribution Download Url with Custom Distribution Url When User Passes a Url" ([#2256](https://github.com/opensearch-project/OpenSearch/pull/2256))
-* Override Default Distribution Download Url with Custom Distribution Url When User Passes a Url ([#2086](https://github.com/opensearch-project/OpenSearch/pull/2086))
-* added config file to git issue template directory to disable blank issue creation ([#2158](https://github.com/opensearch-project/OpenSearch/pull/2158))
-* Add JetBrains Gateway setup details ([#1944](https://github.com/opensearch-project/OpenSearch/pull/1944))
-* Adding workflow to auto delete backport merged branches from backport workflow ([#2050](https://github.com/opensearch-project/OpenSearch/pull/2050))
-* Add IssueNavigationLink ([#1964](https://github.com/opensearch-project/OpenSearch/pull/1964))
-* Using pull_request_target in place of pull_request ([#1952](https://github.com/opensearch-project/OpenSearch/pull/1952))
-* Using custom branch name for auto backporting PRs ([#1862](https://github.com/opensearch-project/OpenSearch/pull/1862))
-* Added help to build distributions in docs ([#1898](https://github.com/opensearch-project/OpenSearch/pull/1898))
-* Auto-increment next development iteration. ([#1816](https://github.com/opensearch-project/OpenSearch/pull/1816))
-* Catching Maintainers up for Q4 2021 new additions/removals ([#1841](https://github.com/opensearch-project/OpenSearch/pull/1841))
-* Added .gitattributes to manage end-of-line checks for Windows/*nix systems ([#1638](https://github.com/opensearch-project/OpenSearch/pull/1638))
-* Add staged version 1.1.1 ([#1506](https://github.com/opensearch-project/OpenSearch/pull/1506))
-* [BWC] Disable BWC tests until branch versions are synced ([#1508](https://github.com/opensearch-project/OpenSearch/pull/1508))
-* Moving DCO to workflows ([#1458](https://github.com/opensearch-project/OpenSearch/pull/1458))
-* changed work-in-progress language ([#1275](https://github.com/opensearch-project/OpenSearch/pull/1275))
-* Removed beta from new issues. ([#1071](https://github.com/opensearch-project/OpenSearch/pull/1071))
-* Include sources and javadoc artifacts while publishing to a Maven repository ([#1049](https://github.com/opensearch-project/OpenSearch/pull/1049))
-* Replaced custom built JNA by official JNA distribution. ([#1003](https://github.com/opensearch-project/OpenSearch/pull/1003))
-* [Version] Don't spoof major for 3.0+ clusters (#2722) ([#2749](https://github.com/opensearch-project/OpenSearch/pull/2749))
-* adds ToC ([#2546](https://github.com/opensearch-project/OpenSearch/pull/2546))
-* Add Version.V_1_2_5 constant
-* add 1.2.5 to bwcVersions
-* [Deprecate] Setting explicit version on analysis component ([#1978](https://github.com/opensearch-project/OpenSearch/pull/1978))
-* [Deprecate] index.merge.policy.max_merge_at_once_explicit ([#1981](https://github.com/opensearch-project/OpenSearch/pull/1981))
-* [plugin] repository-azure: add configuration settings for connect/write/response/read timeouts ([#1789](https://github.com/opensearch-project/OpenSearch/pull/1789))
-* [plugin] repository-azure is not working properly hangs on basic operations (#1740) ([#1749](https://github.com/opensearch-project/OpenSearch/pull/1749))
-* [main] Add staged version 1.3.0 for bwc ([#1510](https://github.com/opensearch-project/OpenSearch/pull/1510))
-* [repository-azure] plugin should use Azure Storage SDK v12 for Java ([#1302](https://github.com/opensearch-project/OpenSearch/pull/1302))
-* Allow building on FreeBSD ([#1091](https://github.com/opensearch-project/OpenSearch/pull/1091))
-* initial commit to add in a dependabot.yml file ([#1353](https://github.com/opensearch-project/OpenSearch/pull/1353))
-* Rename artifact produced by the build to include -min ([#1251](https://github.com/opensearch-project/OpenSearch/pull/1251))
-* [Version] Add 1.2 for BWC testing ([#1241](https://github.com/opensearch-project/OpenSearch/pull/1241))
-* Exclude failing links from plugins/modules ([#1223](https://github.com/opensearch-project/OpenSearch/pull/1223))
-* Kept the original constructor for PluginInfo to maintain bwc ([#1206](https://github.com/opensearch-project/OpenSearch/pull/1206))
-* [Version] Increment main to 2.0 ([#1192](https://github.com/opensearch-project/OpenSearch/pull/1192))
-* Added all icu-project.org websites to the link checker exclusions. ([#1201](https://github.com/opensearch-project/OpenSearch/pull/1201))
-* Add 1.0.1 revision ([#1152](https://github.com/opensearch-project/OpenSearch/pull/1152))
-* distribution/packages: Fix filename format for deb archives ([#621](https://github.com/opensearch-project/OpenSearch/pull/621))
-* [Versioning] Fix Version.fromString logic for legacy version ([#604](https://github.com/opensearch-project/OpenSearch/pull/604))
-* Rename the distribution used in test clusters. ([#603](https://github.com/opensearch-project/OpenSearch/pull/603))
-* clean up rpm artifact naming ([#590](https://github.com/opensearch-project/OpenSearch/pull/590))
-* changed to point to open issues rather than the project board
-* Update Plugin Signing Key ([#512](https://github.com/opensearch-project/OpenSearch/pull/512))
-* Use OpenSearch artifacts URL for official plugin installation. ([#490](https://github.com/opensearch-project/OpenSearch/pull/490))
-* Perform more renaming to OpenSearch. ([#470](https://github.com/opensearch-project/OpenSearch/pull/470))
-* Adding instructions on License and DCO practices to PR template ([#462](https://github.com/opensearch-project/OpenSearch/pull/462))
-* Remove lingering instances of Default distribution in favour of Oss ([#440](https://github.com/opensearch-project/OpenSearch/pull/440))
-* Validation for official plugins for upgrade tool ([#973](https://github.com/opensearch-project/OpenSearch/pull/973))
-* Lower build requirement from Java 14+ to Java 11+ ([#940](https://github.com/opensearch-project/OpenSearch/pull/940))
-* Add Snapshot maven repository ([#829](https://github.com/opensearch-project/OpenSearch/pull/829))
-* distribution/packages: Fix RPM architecture name for 64-bit x86 ([#620](https://github.com/opensearch-project/OpenSearch/pull/620))
-* Update issue template with multiple labels ([#668](https://github.com/opensearch-project/OpenSearch/pull/668))
-* Renaming CPU architecture to have consistent naming ([#612](https://github.com/opensearch-project/OpenSearch/pull/612))
-
-### Documentation
-
-* Adding workflow to create documentation related issues in documentation-website repo (#2929) ([#2976](https://github.com/opensearch-project/OpenSearch/pull/2976))
-* Updating auto backport documentation ([#1620](https://github.com/opensearch-project/OpenSearch/pull/1620))
-* Updating README and CONTRIBUTING guide to get ready for beta1 release. ([#672](https://github.com/opensearch-project/OpenSearch/pull/672))
-* Update instructions on debugging OpenSearch. ([#689](https://github.com/opensearch-project/OpenSearch/pull/689))
-* Fixing typo in TESTING.md ([#1849](https://github.com/opensearch-project/OpenSearch/pull/1849))
-* Fix JavaDoc typo in XContentBuilder ([#1739](https://github.com/opensearch-project/OpenSearch/pull/1739))
-* Update Readme ([#433](https://github.com/opensearch-project/OpenSearch/pull/433))
-* Fix DCO CLI example in CONTRIBUTING.md ([#576](https://github.com/opensearch-project/OpenSearch/pull/576))
-* Change comment to point to DEVELOPER_GUIDE.md ([#1415](https://github.com/opensearch-project/OpenSearch/pull/1415))
-* [typos] typos in DEVELOPER_GUIDE.md ([#1381](https://github.com/opensearch-project/OpenSearch/pull/1381))
-* Adding Security Reporting Instructions in README.md file Signed-off-by: Rishikesh Reddy Pasham rishireddy1159@gmail.com ([#1326](https://github.com/opensearch-project/OpenSearch/pull/1326))
-* Add guide for generating code coverage report in TESTING.md ([#1264](https://github.com/opensearch-project/OpenSearch/pull/1264))
-* Added Eclipse import instructions to DEVELOPER_GUIDE.md ([#1215](https://github.com/opensearch-project/OpenSearch/pull/1215))
-* Update/maintainers.md ([#723](https://github.com/opensearch-project/OpenSearch/pull/723))
-* Added a link to the maintainer file in contribution guides ([#589](https://github.com/opensearch-project/OpenSearch/pull/589))
-* Updated READMEs on releasing, maintaining, admins and security. ([#853](https://github.com/opensearch-project/OpenSearch/pull/853))
-* adding components to DEVELOPER_GUIDE ([#1200](https://github.com/opensearch-project/OpenSearch/pull/1200))
-* Update developer guide reference to download JDK 14 ([#1452](https://github.com/opensearch-project/OpenSearch/pull/1452))
-* [WIP] Developer guide updates ([#595](https://github.com/opensearch-project/OpenSearch/pull/595))
-* Update README with getting started ([#549](https://github.com/opensearch-project/OpenSearch/pull/549))
-* Update Developers Guide. ([#522](https://github.com/opensearch-project/OpenSearch/pull/522))
-* Update LICENSE.txt
-* [License] Add SPDX and OpenSearch Modification license header ([#509](https://github.com/opensearch-project/OpenSearch/pull/509))
-* [License] Update SPDX License Header ([#510](https://github.com/opensearch-project/OpenSearch/pull/510))
-* Cleanup TESTING and DEVELOPER_GUIDE markdowns ([#946](https://github.com/opensearch-project/OpenSearch/pull/946))
-* Add 1.3.0 release notes in main ([#2489](https://github.com/opensearch-project/OpenSearch/pull/2489))
-* Add release notes for 1.2.4 ([#1934](https://github.com/opensearch-project/OpenSearch/pull/1934))
-* Added release notes for 1.2.3. ([#1791](https://github.com/opensearch-project/OpenSearch/pull/1791))
-* Adding release notes for 1.2.2 ([#1730](https://github.com/opensearch-project/OpenSearch/pull/1730))
-* Adding release notes for 1.2.1 ([#1725](https://github.com/opensearch-project/OpenSearch/pull/1725))
-* Add 1.2 release notes and correct 1.1 release notes. ([#1581](https://github.com/opensearch-project/OpenSearch/pull/1581))
-* Generate release notes for 1.1 ([#1230](https://github.com/opensearch-project/OpenSearch/pull/1230))
-* Update release note for GA 1.0 with new commits and removes #547 ([#953](https://github.com/opensearch-project/OpenSearch/pull/953))
-* Adding release notes for 1.0.0 ([#885](https://github.com/opensearch-project/OpenSearch/pull/885))
-* Adding release notes for 1.0.0-rc1 ([#794](https://github.com/opensearch-project/OpenSearch/pull/794))
-* Modified TESTING instructions to clarify use of testing classes ([#1930](https://github.com/opensearch-project/OpenSearch/pull/1930))
-* Clarify JDK requirement in the developer guide ([#1153](https://github.com/opensearch-project/OpenSearch/pull/1153))
-* Add trademark notice ([#2473](https://github.com/opensearch-project/OpenSearch/pull/2473))
-* Expand SearchPlugin javadocs. ([#1909](https://github.com/opensearch-project/OpenSearch/pull/1909))
-* Linked the formatting setting file ([#1860](https://github.com/opensearch-project/OpenSearch/pull/1860))
-* Add more instructions how to install/configure git secrets ([#1202](https://github.com/opensearch-project/OpenSearch/pull/1202))
-* Add themed logo to README ([#988](https://github.com/opensearch-project/OpenSearch/pull/988))
-* Replace Elasticsearch docs links in scripts ([#994](https://github.com/opensearch-project/OpenSearch/pull/994))
-* Cleaned up developer guide, added TOC. ([#572](https://github.com/opensearch-project/OpenSearch/pull/572))
-* Document running individual tests. ([#741](https://github.com/opensearch-project/OpenSearch/pull/741))
-* [License] Add SPDX License Header to security policies ([#531](https://github.com/opensearch-project/OpenSearch/pull/531))
-* Added a maintainers file ([#523](https://github.com/opensearch-project/OpenSearch/pull/523))
-* Remove extra greater-thans from README ([#527](https://github.com/opensearch-project/OpenSearch/pull/527))
-* [Rename] Update Vagrantfile ([#515](https://github.com/opensearch-project/OpenSearch/pull/515))
-* [README] Remove stale information ([#513](https://github.com/opensearch-project/OpenSearch/pull/513))
-* [Rename] Change license header and copyright notice to SPDX ([#437](https://github.com/opensearch-project/OpenSearch/pull/437))
-
 ### Maintenance
-
-* Make discovered_master field optional on the client to support compatibility for opensearch client with odfe (#2641) ([#2653](https://github.com/opensearch-project/OpenSearch/pull/2653))
-* Update azure-storage-blob to 12.15.0: fix test flakiness (#2795) ([#2799](https://github.com/opensearch-project/OpenSearch/pull/2799))
-* Update azure-storage-blob to 12.15.0 (#2774) ([#2778](https://github.com/opensearch-project/OpenSearch/pull/2778))
-* Update the BWC versions (post 1.x backport) ([#2390](https://github.com/opensearch-project/OpenSearch/pull/2390))
-* Update bwc versions for (#2237) ([#2248](https://github.com/opensearch-project/OpenSearch/pull/2248))
-* Update #2103 BWC Versions ([#2173](https://github.com/opensearch-project/OpenSearch/pull/2173))
-* Update bundled JDK distribution to 17.0.2+8 ([#2007](https://github.com/opensearch-project/OpenSearch/pull/2007))
-* Update Mockito to 4.3.1 ([#1973](https://github.com/opensearch-project/OpenSearch/pull/1973))
-* Update protobuf-java to 3.19.3 ([#1945](https://github.com/opensearch-project/OpenSearch/pull/1945))
-* Update Netty to 4.1.73.Final ([#1936](https://github.com/opensearch-project/OpenSearch/pull/1936))
-* Update FIPS API libraries of Bouncy Castle ([#1853](https://github.com/opensearch-project/OpenSearch/pull/1853))
-* Update junit to 4.13.1 ([#1837](https://github.com/opensearch-project/OpenSearch/pull/1837))
-* Update Mockito to 4.2.x ([#1830](https://github.com/opensearch-project/OpenSearch/pull/1830))
-* Upgrading bouncycastle to 1.70 ([#1832](https://github.com/opensearch-project/OpenSearch/pull/1832))
-* Updating Netty to 4.1.72.Final ([#1831](https://github.com/opensearch-project/OpenSearch/pull/1831))
-* Update to log4j 2.17.1 ([#1820](https://github.com/opensearch-project/OpenSearch/pull/1820))
-* Update to log4j 2.17.0 ([#1771](https://github.com/opensearch-project/OpenSearch/pull/1771))
-* [repository-azure] Update to the latest Azure Storage SDK v12, remove privileged runnable wrapper in favor of access helper ([#1521](https://github.com/opensearch-project/OpenSearch/pull/1521))
-* Update bundled JDK distribution to 17.0.1+12 ([#1476](https://github.com/opensearch-project/OpenSearch/pull/1476))
-* Upgrading netty version to 4.1.69.Final ([#1363](https://github.com/opensearch-project/OpenSearch/pull/1363))
-* Modernize and consolidate JDKs usage across all stages of the build. Update JDK-14 requirement, switch to JDK-17 instead ([#1368](https://github.com/opensearch-project/OpenSearch/pull/1368))
-* Upgrade hadoop dependencies for hdfs plugin ([#1335](https://github.com/opensearch-project/OpenSearch/pull/1335))
-* Replace securemock with mock-maker (test support), update Mockito to 3.12.4 ([#1332](https://github.com/opensearch-project/OpenSearch/pull/1332))
-* Update Jackson to 2.12.5 ([#1247](https://github.com/opensearch-project/OpenSearch/pull/1247))
-* Update DistributionDownloader to support fetching arm64 bundles. ([#929](https://github.com/opensearch-project/OpenSearch/pull/929))
-* Update favicon for OpenSearch ([#932](https://github.com/opensearch-project/OpenSearch/pull/932))
-* Update DistributionDownloader to fetch snapshots and staging bundles. ([#904](https://github.com/opensearch-project/OpenSearch/pull/904))
-* Version bump for 1.1 release ([#772](https://github.com/opensearch-project/OpenSearch/pull/772))
-* update external library 'pdfbox' version to 2.0.24 to reduce vulnerability ([#883](https://github.com/opensearch-project/OpenSearch/pull/883))
-* Update dependencies for ingest-attachment plugin. ([#666](https://github.com/opensearch-project/OpenSearch/pull/666))
-* Update hadoop-minicluster version for test fixture. ([#645](https://github.com/opensearch-project/OpenSearch/pull/645))
-* Update remote repo for BWC checks. ([#482](https://github.com/opensearch-project/OpenSearch/pull/482))
-* Update year and developer info in generated POMs. ([#444](https://github.com/opensearch-project/OpenSearch/pull/444))
-* Refresh OpenSearch nodes version in cluster state after upgrade ([#865](https://github.com/opensearch-project/OpenSearch/pull/865))
-* [Upgrade] ICU4j from 68.2 to 70.1 ([#2504](https://github.com/opensearch-project/OpenSearch/pull/2504))
-* Upgrade to log4j 2.16.0 ([#1721](https://github.com/opensearch-project/OpenSearch/pull/1721))
-* Upgrade to log4j 2.15.0 ([#1698](https://github.com/opensearch-project/OpenSearch/pull/1698))
-* Updating Log4j to 2.11.2 ([#1696](https://github.com/opensearch-project/OpenSearch/pull/1696))
-* Upgrade dependency ([#1571](https://github.com/opensearch-project/OpenSearch/pull/1571))
-* Upgrade apache commons-compress to 1.21 ([#1197](https://github.com/opensearch-project/OpenSearch/pull/1197))
-* Removed java11 source folders since JDK-11 is the baseline now (#2898) ([#2953](https://github.com/opensearch-project/OpenSearch/pull/2953))
-* [Remove] MainResponse version override cluster setting (#3031) ([#3033](https://github.com/opensearch-project/OpenSearch/pull/3033))
-* [Remove] remaining AllFieldMapper references (#3007) ([#3010](https://github.com/opensearch-project/OpenSearch/pull/3010))
-* [2.x] Remove deprecation warning of using REST API request parameter 'master_timeout' (#2920) ([#2931](https://github.com/opensearch-project/OpenSearch/pull/2931))
-* [Rename] ESTestCase stragglers to OpenSearchTestCase (#3053) ([#3064](https://github.com/opensearch-project/OpenSearch/pull/3064))
-* Use G1GC on JDK11+ (#2964) ([#2970](https://github.com/opensearch-project/OpenSearch/pull/2970))
-* Remove endpoint_suffix dependency on account key (#2485) ([#2808](https://github.com/opensearch-project/OpenSearch/pull/2808))
-* Updating repository commons logging version ([#2541](https://github.com/opensearch-project/OpenSearch/pull/2541))
-* Upgrading Shadow plugin to 7.1.2 ([#2033](https://github.com/opensearch-project/OpenSearch/pull/2033))
-* Upgrading Jackson-Databind version ([#1982](https://github.com/opensearch-project/OpenSearch/pull/1982))
-* Upgrading commons-codec in hdfs-fixture and cleaning up dependencies in repository-hdfs ([#1603](https://github.com/opensearch-project/OpenSearch/pull/1603))
-* Upgrading gson to 2.8.9 ([#1541](https://github.com/opensearch-project/OpenSearch/pull/1541))
-* Upgrading dependencies ([#1491](https://github.com/opensearch-project/OpenSearch/pull/1491))
-* Upgrading dependencies in hdfs plugin ([#1466](https://github.com/opensearch-project/OpenSearch/pull/1466))
-* Upgrading mockito version to make it consistent across the repo ([#1410](https://github.com/opensearch-project/OpenSearch/pull/1410))
-* Change deprecation message for REST API parameter 'master_timeout' to specify the version of removal (#2863) ([#2865](https://github.com/opensearch-project/OpenSearch/pull/2865))
-* Update ThirdPartyAuditTask to check for and list pointless exclusions. (#2760) ([#2765](https://github.com/opensearch-project/OpenSearch/pull/2765))
-* Add Shadow jar publication to lang-painless module. (#2681) ([#2712](https://github.com/opensearch-project/OpenSearch/pull/2712))
-* Add mapping method back referenced in other repos (#2636) ([#2649](https://github.com/opensearch-project/OpenSearch/pull/2649))
+* Bump google-oauth-client from 1.33.1 to 1.33.2 in /plugins/discovery-gce ([#2828](https://github.com/opensearch-project/OpenSearch/pull/2828))
+* Bump protobuf-java-util from 3.19.3 to 3.20.0 in /plugins/repository-gcs ([#2834](https://github.com/opensearch-project/OpenSearch/pull/2834))
+* Bump cdi-api from 1.2 to 2.0 in /qa/wildfly ([#2835](https://github.com/opensearch-project/OpenSearch/pull/2835))
+* Bump azure-core from 1.26.0 to 1.27.0 in /plugins/repository-azure ([#2837](https://github.com/opensearch-project/OpenSearch/pull/2837))
+* Bump asm-analysis from 9.2 to 9.3 in /test/logger-usage ([#2829](https://github.com/opensearch-project/OpenSearch/pull/2829))
+* Bump protobuf-java from 3.19.3 to 3.20.0 in /plugins/repository-hdfs ([#2836](https://github.com/opensearch-project/OpenSearch/pull/2836))
+* Bump joni from 2.1.41 to 2.1.43 in /libs/grok ([#2832](https://github.com/opensearch-project/OpenSearch/pull/2832))
+* Bump geoip2 from 2.16.1 to 3.0.1 in /modules/ingest-geoip ([#2646](https://github.com/opensearch-project/OpenSearch/pull/2646))
+* Bump jettison from 1.1 to 1.4.1 in /plugins/discovery-azure-classic ([#2614](https://github.com/opensearch-project/OpenSearch/pull/2614))
+* Bump google-oauth-client from 1.31.0 to 1.33.1 in /plugins/repository-gcs ([#2616](https://github.com/opensearch-project/OpenSearch/pull/2616))
+* Bump jboss-annotations-api_1.2_spec in /qa/wildfly ([#2615](https://github.com/opensearch-project/OpenSearch/pull/2615))
+* Bump forbiddenapis in /buildSrc/src/testKit/thirdPartyAudit ([#2611](https://github.com/opensearch-project/OpenSearch/pull/2611))
+* Bump json-schema-validator from 1.0.67 to 1.0.68 in /buildSrc ([#2610](https://github.com/opensearch-project/OpenSearch/pull/2610))
+* Bump htrace-core4 from 4.1.0-incubating to 4.2.0-incubating in /plugins/repository-hdfs ([#2618](https://github.com/opensearch-project/OpenSearch/pull/2618))
+* Bump asm-tree from 7.2 to 9.2 in /modules/lang-painless ([#2617](https://github.com/opensearch-project/OpenSearch/pull/2617))
+* Bump antlr4 from 4.5.3 to 4.9.3 in /modules/lang-painless ([#2537](https://github.com/opensearch-project/OpenSearch/pull/2537))
+* Bump commons-lang3 from 3.7 to 3.12.0 in /plugins/repository-hdfs ([#2552](https://github.com/opensearch-project/OpenSearch/pull/2552))
+* Bump gson from 2.8.9 to 2.9.0 in /plugins/repository-gcs ([#2550](https://github.com/opensearch-project/OpenSearch/pull/2550))
+* Bump google-oauth-client from 1.31.0 to 1.33.1 in /plugins/discovery-gce ([#2524](https://github.com/opensearch-project/OpenSearch/pull/2524))
+* Bump google-cloud-core from 1.93.3 to 2.5.10 in /plugins/repository-gcs ([#2536](https://github.com/opensearch-project/OpenSearch/pull/2536))
+* Bump wiremock-jre8-standalone from 2.23.2 to 2.32.0 in /buildSrc ([#2525](https://github.com/opensearch-project/OpenSearch/pull/2525))
+* Bump com.gradle.enterprise from 3.8.1 to 3.9 ([#2523](https://github.com/opensearch-project/OpenSearch/pull/2523))
+* Bump commons-io from 2.7 to 2.11.0 in /plugins/discovery-azure-classic ([#2527](https://github.com/opensearch-project/OpenSearch/pull/2527))
+* Bump asm-analysis from 7.1 to 9.2 in /test/logger-usage ([#2273](https://github.com/opensearch-project/OpenSearch/pull/2273))
+* Bump asm-commons from 7.2 to 9.2 in /modules/lang-painless ([#2234](https://github.com/opensearch-project/OpenSearch/pull/2234))
+* Bump jna from 5.5.0 to 5.10.0 in /buildSrc ([#2512](https://github.com/opensearch-project/OpenSearch/pull/2512))
+* Bump jsr305 from 1.3.9 to 3.0.2 in /plugins/discovery-gce ([#2137](https://github.com/opensearch-project/OpenSearch/pull/2137))
+* Bump json-schema-validator from 1.0.36 to 1.0.67 in /buildSrc ([#2454](https://github.com/opensearch-project/OpenSearch/pull/2454))
+* Bump woodstox-core from 6.1.1 to 6.2.8 in /plugins/repository-azure ([#2456](https://github.com/opensearch-project/OpenSearch/pull/2456))
+* Bump commons-lang3 from 3.4 to 3.12.0 in /plugins/repository-azure ([#2455](https://github.com/opensearch-project/OpenSearch/pull/2455))
+* Update azure-storage-blob to 12.15.0 ([#2774](https://github.com/opensearch-project/OpenSearch/pull/2774))
 * Move Jackson-databind to 2.13.2 ([#2548](https://github.com/opensearch-project/OpenSearch/pull/2548))
-* [Unmute] NumberFieldTypeTests ([#2531](https://github.com/opensearch-project/OpenSearch/pull/2531))
-* [Unmute] IndexPrimaryRelocationIT ([#2488](https://github.com/opensearch-project/OpenSearch/pull/2488))
-* [Remove] TrimUnsafeCommit logic for legacy 6.x indexes ([#2225](https://github.com/opensearch-project/OpenSearch/pull/2225))
-* Adjust main version after backport to 1.x ([#2147](https://github.com/opensearch-project/OpenSearch/pull/2147))
-* [Remove] CircuitBreaker Accounting ([#2056](https://github.com/opensearch-project/OpenSearch/pull/2056))
-* [Remove] Segment memory estimation and tracking ([#2029](https://github.com/opensearch-project/OpenSearch/pull/2029))
-* [Remove] index.merge.policy.max_merge_at_once_explicit ([#1988](https://github.com/opensearch-project/OpenSearch/pull/1988))
-* [Remove] Setting explicit version on analysis component ([#1986](https://github.com/opensearch-project/OpenSearch/pull/1986))
-* Wildcard max_expansion version check update ([#1980](https://github.com/opensearch-project/OpenSearch/pull/1980))
-* Removing lingering transportclient ([#1955](https://github.com/opensearch-project/OpenSearch/pull/1955))
-* [BWC] Ensure 2.x compatibility with Legacy 7.10.x ([#1902](https://github.com/opensearch-project/OpenSearch/pull/1902))
-* File name correction to follow existing convention ([#1874](https://github.com/opensearch-project/OpenSearch/pull/1874))
-* [Remove] Old Translog Checkpoint Format ([#1884](https://github.com/opensearch-project/OpenSearch/pull/1884))
-* Remove unwanted unreleased versions ([#1877](https://github.com/opensearch-project/OpenSearch/pull/1877))
-* replace with opensearch-http-channel and opensearch-http-server-channel ([#1799](https://github.com/opensearch-project/OpenSearch/pull/1799))
-* Add bwc version 1.2.4 ([#1796](https://github.com/opensearch-project/OpenSearch/pull/1796))
-* [Remove] various builder and mapping deprecations ([#1752](https://github.com/opensearch-project/OpenSearch/pull/1752))
-* [Remove] Remaining Flavor Serialization ([#1751](https://github.com/opensearch-project/OpenSearch/pull/1751))
-* [Remove] DynamicTemplate deprecations ([#1742](https://github.com/opensearch-project/OpenSearch/pull/1742))
-* [Remove] Analyzer Deprecations ([#1741](https://github.com/opensearch-project/OpenSearch/pull/1741))
-* Drop mocksocket & securemock dependencies from sniffer and rest client (not needed) ([#1174](https://github.com/opensearch-project/OpenSearch/pull/1174))
-* [BWC] Temporarily disable bwc testing while bumping 1.0.1
-* [DEPRECATE] SimpleFS in favor of NIOFS ([#1073](https://github.com/opensearch-project/OpenSearch/pull/1073))
-* Replace JCenter with Maven Central. ([#1057](https://github.com/opensearch-project/OpenSearch/pull/1057))
-* Restoring alpha/beta/rc version semantics ([#1112](https://github.com/opensearch-project/OpenSearch/pull/1112))
-* Remove `client/sniffer` from Javadoc exemption list ([#818](https://github.com/opensearch-project/OpenSearch/pull/818))
-* Removed pre-alpha notes. ([#815](https://github.com/opensearch-project/OpenSearch/pull/815))
-* Remove checks for legacy .yaml and .json config files. ([#792](https://github.com/opensearch-project/OpenSearch/pull/792))
-* Remove reference to an EC2 instance type. ([#812](https://github.com/opensearch-project/OpenSearch/pull/812))
-* Remove all elastic.co references from javadocs ([#586](https://github.com/opensearch-project/OpenSearch/pull/586))
-* Remove the oss string from OpenSearch distributions ([#575](https://github.com/opensearch-project/OpenSearch/pull/575))
-* [Rename] Remove final references to legacy keystore ([#514](https://github.com/opensearch-project/OpenSearch/pull/514))
-* changed Apache to Apache 2.0. Numbered principles
-* fixed apache to apache 2.0
-* Replace nio and netty test endpoint ([#475](https://github.com/opensearch-project/OpenSearch/pull/475))
-* [Rename] org.elasticsearch.client.documentation.SearchDocumentationIT.testSearchRequestSuggestions ([#467](https://github.com/opensearch-project/OpenSearch/pull/467))
 ### Refactoring
-
-* [Rename] Refactoring Elastic references in docker and kerberos builds (#428) ([#438](https://github.com/opensearch-project/OpenSearch/pull/438))
-* [Refactor] LuceneChangesSnapshot to use accurate ops history ([#2452](https://github.com/opensearch-project/OpenSearch/pull/2452))
-* Refactoring gated and ref-counted interfaces and their implementations ([#2396](https://github.com/opensearch-project/OpenSearch/pull/2396))
-* [Refactor] MapperService to QueryShardContext in valueFetcher ([#2027](https://github.com/opensearch-project/OpenSearch/pull/2027))
-* [Refactor] Lucene DataInput and DataOutput to StreamInput and StreamOutput ([#2035](https://github.com/opensearch-project/OpenSearch/pull/2035))
-* [Refactor] InternalEngine to always use soft deletes ([#1933](https://github.com/opensearch-project/OpenSearch/pull/1933))
-* Refactor LegacyESVersion tests from Version tests ([#1662](https://github.com/opensearch-project/OpenSearch/pull/1662))
-* Remove the IndexCommitRef class ([#2421](https://github.com/opensearch-project/OpenSearch/pull/2421))
-* Decouple IndexSettings from IncludeExclude ([#2860](https://github.com/opensearch-project/OpenSearch/pull/2860))
+* [Remove] remaining AllFieldMapper references ([#3007](https://github.com/opensearch-project/OpenSearch/pull/3007))
 * Clear up some confusing code in IndexShardHotSpotTests ([#1534](https://github.com/opensearch-project/OpenSearch/pull/1534))
+* [Rename] ESTestCase stragglers to OpenSearchTestCase ([#3053](https://github.com/opensearch-project/OpenSearch/pull/3053))
+* [Remove] MainResponse version override cluster setting ([#3031](https://github.com/opensearch-project/OpenSearch/pull/3031))
+* [Version] Don't spoof major for 3.0+ clusters ([#2722](https://github.com/opensearch-project/OpenSearch/pull/2722))
+* Centralize codes related to 'master_timeout' deprecation for easier removal - in CAT Nodes API ([#2670](https://github.com/opensearch-project/OpenSearch/pull/2670))
 * Rename reference to project OpenSearch was forked from ([#2483](https://github.com/opensearch-project/OpenSearch/pull/2483))
-* Introduce RestHandler.Wrapper to help with delegate implementations ([#1004](https://github.com/opensearch-project/OpenSearch/pull/1004))
+* Remove the IndexCommitRef class ([#2421](https://github.com/opensearch-project/OpenSearch/pull/2421))
+* Refactoring gated and ref-counted interfaces and their implementations ([#2396](https://github.com/opensearch-project/OpenSearch/pull/2396))
+* [Refactor] LuceneChangesSnapshot to use accurate ops history ([#2452](https://github.com/opensearch-project/OpenSearch/pull/2452))
 ### Tests
-
 * Add type mapping removal bwc tests for indexing, searching, snapshots ([#2901](https://github.com/opensearch-project/OpenSearch/pull/2901))
-* Removing SLM check in tests for OpenSearch versions (#2604) ([#2620](https://github.com/opensearch-project/OpenSearch/pull/2620))
+* Removing SLM check in tests for OpenSearch versions ([#2604](https://github.com/opensearch-project/OpenSearch/pull/2604))
+* [Unmute] NumberFieldTypeTests ([#2531](https://github.com/opensearch-project/OpenSearch/pull/2531))
 * Use Hamcrest matchers and assertThat() in ReindexRenamedSettingTests ([#2503](https://github.com/opensearch-project/OpenSearch/pull/2503))
-* [Test-Failure] Mute TranslogPolicyIT ([#2342](https://github.com/opensearch-project/OpenSearch/pull/2342))
-* Added timeout to ensureGreen() for testClusterGreenAfterPartialRelocation ([#2074](https://github.com/opensearch-project/OpenSearch/pull/2074))
-* Stabilizing org.opensearch.cluster.routing.MovePrimaryFirstTests.test… ([#2048](https://github.com/opensearch-project/OpenSearch/pull/2048))
-* Added timeout to ensureGreen() for testClusterGreenAfterPartialRelocation ([#1983](https://github.com/opensearch-project/OpenSearch/pull/1983))
-* Add hook to execute logic before Integ test task starts ([#1969](https://github.com/opensearch-project/OpenSearch/pull/1969))
-* Remove transport client from tests. ([#1809](https://github.com/opensearch-project/OpenSearch/pull/1809))
-* [Tests] ClusterHealthIT:testHealthOnMasterFailover - Increase master node timeout ([#1812](https://github.com/opensearch-project/OpenSearch/pull/1812))
-* Ignore file order in test assertion ([#1755](https://github.com/opensearch-project/OpenSearch/pull/1755))
-* Integration test that checks for settings upgrade ([#1482](https://github.com/opensearch-project/OpenSearch/pull/1482))
-* [bwc] reenable bwc testing after syncing staged branches ([#1511](https://github.com/opensearch-project/OpenSearch/pull/1511))
-* [Tests] Translog Pruning tests to MetadataCreateIndexServiceTests ([#1295](https://github.com/opensearch-project/OpenSearch/pull/1295))
-* Reduce iterations to improve test run time ([#1168](https://github.com/opensearch-project/OpenSearch/pull/1168))
-* Tune datanode count and shards count to improve test run time ([#1170](https://github.com/opensearch-project/OpenSearch/pull/1170))
-* [BWC] Re-enable bwc testing after 1.0.1 version bump
-* Add unit test for RestActionListener. Validate that onFailure() sends response even when BytesRestResponse can not be constructed using passed exception. Follow up on #923. ([#1024](https://github.com/opensearch-project/OpenSearch/pull/1024))
-* [TEST] Fix failing distro tests for linux packages ([#569](https://github.com/opensearch-project/OpenSearch/pull/569))
-* [TEST] Fix failing packaging tests for OpenSearch distributions. ([#541](https://github.com/opensearch-project/OpenSearch/pull/541))
-* Remove the references to xpack and elastic in tests. ([#516](https://github.com/opensearch-project/OpenSearch/pull/516))
+* [Unmute] IndexPrimaryRelocationIT ([#2488](https://github.com/opensearch-project/OpenSearch/pull/2488))
+* Fixing PluginsServiceTests (post Lucene 9 update) ([#2484](https://github.com/opensearch-project/OpenSearch/pull/2484))
diff --git a/release-notes/opensearch.release-notes-2.0.0.md b/release-notes/opensearch.release-notes-2.0.0.md
index 8880d7a7bddf9..ab38069bbf67d 100644
--- a/release-notes/opensearch.release-notes-2.0.0.md
+++ b/release-notes/opensearch.release-notes-2.0.0.md
@@ -27,8 +27,29 @@
 * [Remove] Type from TermsLookUp ([#2459](https://github.com/opensearch-project/OpenSearch/pull/2459))
 * [Remove] types from Uid and remaining types/Uid from translog ([#2450](https://github.com/opensearch-project/OpenSearch/pull/2450))
 * [Remove] types from translog ([#2439](https://github.com/opensearch-project/OpenSearch/pull/2439))
-* [Remove] Type mapping end-points from RestMultiSearchTemplateAction ([#2433](https://github.com/opensearch-project/OpenSearch/pull/2433))
 * [Remove] Multiple Types from IndexTemplateMetadata ([#2400](https://github.com/opensearch-project/OpenSearch/pull/2400))
+* Remove type mapping from document index API ([#2026](https://github.com/opensearch-project/OpenSearch/pull/2026))
+* [Remove] Type mapping parameter from document update API ([#2204](https://github.com/opensearch-project/OpenSearch/pull/2204))
+* [Remove] Types from DocWrite Request and Response ([#2239](https://github.com/opensearch-project/OpenSearch/pull/2239))
+* [Remove] Types from GET/MGET ([#2168](https://github.com/opensearch-project/OpenSearch/pull/2168))
+* [Remove] types from SearchHit and Explain API ([#2205](https://github.com/opensearch-project/OpenSearch/pull/2205))
+* [Remove] type support from Bulk API ([#2215](https://github.com/opensearch-project/OpenSearch/pull/2215))
+* Remove type end-points from no-op bulk and search action ([#2261](https://github.com/opensearch-project/OpenSearch/pull/2261))
+* Remove type end-points from search and related APIs ([#2263](https://github.com/opensearch-project/OpenSearch/pull/2263))
+* [Remove] Type mapping end-points from RestMultiSearchTemplateAction ([#2433](https://github.com/opensearch-project/OpenSearch/pull/2433))
+* Removes type mappings from mapping APIs ([#2238](https://github.com/opensearch-project/OpenSearch/pull/2238))
+* Remove type end-points from count action ([#2379](https://github.com/opensearch-project/OpenSearch/pull/2379))
+* Remove type from validate query API ([#2255](https://github.com/opensearch-project/OpenSearch/pull/2255))
+* [Remove] Type parameter from TermVectors API ([#2104](https://github.com/opensearch-project/OpenSearch/pull/2104))
+* Remove include_type_name parameter from rest api spec ([#2410](https://github.com/opensearch-project/OpenSearch/pull/2410))
+* [Remove] include_type_name from HLRC ([#2397](https://github.com/opensearch-project/OpenSearch/pull/2397))
+* [Remove] Type mappings from GeoShapeQueryBuilder ([#2322](https://github.com/opensearch-project/OpenSearch/pull/2322))
+* [Remove] types from PutMappingRequest ([#2335](https://github.com/opensearch-project/OpenSearch/pull/2335))
+* [Remove] deprecated getMapping API from IndicesClient ([#2262](https://github.com/opensearch-project/OpenSearch/pull/2262))
+* [Remove] remaining type usage in Client and AbstractClient ([#2258](https://github.com/opensearch-project/OpenSearch/pull/2258))
+* [Remove] Type from Client.prepare(Index,Delete,Update) ([#2253](https://github.com/opensearch-project/OpenSearch/pull/2253))
+* [Remove] Type Specific Index Stats ([#2198](https://github.com/opensearch-project/OpenSearch/pull/2198))
+* [Remove] Type from Search Internals ([#2109](https://github.com/opensearch-project/OpenSearch/pull/2109))
 
 #### Upgrades
 * [Upgrade] Lucene 9.1 release ([#2560](https://github.com/opensearch-project/OpenSearch/pull/2560))

From cb6148eb0d10aa9deeb278d311f832d083814378 Mon Sep 17 00:00:00 2001
From: Nick Knize
Date: Mon, 23 May 2022 10:28:13 -0500
Subject: [PATCH 06/16] Bump version 2.1 to Lucene 9.2 after upgrade (#3424)

Bumps Version.V_2_1_0 lucene version to 9.2 after backporting the upgrade.

Signed-off-by: Nicholas Walter Knize
---
 server/src/main/java/org/opensearch/Version.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/server/src/main/java/org/opensearch/Version.java b/server/src/main/java/org/opensearch/Version.java
index 4f0bb55c0f666..e309af54eac6e 100644
--- a/server/src/main/java/org/opensearch/Version.java
+++ b/server/src/main/java/org/opensearch/Version.java
@@ -88,7 +88,7 @@ public class Version implements Comparable<Version>, ToXContentFragment {
     public static final Version V_1_3_2 = new Version(1030299, org.apache.lucene.util.Version.LUCENE_8_10_1);
     public static final Version V_1_3_3 = new Version(1030399, org.apache.lucene.util.Version.LUCENE_8_10_1);
     public static final Version V_2_0_0 = new Version(2000099, org.apache.lucene.util.Version.LUCENE_9_1_0);
-    public static final Version V_2_1_0 = new Version(2010099, org.apache.lucene.util.Version.LUCENE_9_1_0);
+    public static final Version V_2_1_0 = new Version(2010099, org.apache.lucene.util.Version.LUCENE_9_2_0);
     public static final Version V_3_0_0 = new Version(3000099, org.apache.lucene.util.Version.LUCENE_9_2_0);
     public static final Version CURRENT = V_3_0_0;

From e19ed093cd1a63642883037f707aca3e5e8fc921 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 23 May 2022 11:24:07 -0500
Subject: [PATCH 07/16] Bump com.gradle.enterprise from 3.10 to 3.10.1 (#3425)

Bumps com.gradle.enterprise from 3.10 to 3.10.1.

---
updated-dependencies:
- dependency-name: com.gradle.enterprise
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 settings.gradle | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/settings.gradle b/settings.gradle
index 52e1e16fc1c01..a24b063f9fa96 100644
--- a/settings.gradle
+++ b/settings.gradle
@@ -10,7 +10,7 @@
  */

 plugins {
-  id "com.gradle.enterprise" version "3.10"
+  id "com.gradle.enterprise" version "3.10.1"
 }

 rootProject.name = "OpenSearch"

From 53960cfc4a07b322030d84afa75b595a159e9b9d Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 23 May 2022 11:25:19 -0500
Subject: [PATCH 08/16] Bump reactor-core from 3.4.17 to 3.4.18 in
 /plugins/repository-azure (#3427)

Bumps [reactor-core](https://github.com/reactor/reactor-core) from 3.4.17 to 3.4.18.
- [Release notes](https://github.com/reactor/reactor-core/releases)
- [Commits](https://github.com/reactor/reactor-core/compare/v3.4.17...v3.4.18)

---
updated-dependencies:
- dependency-name: io.projectreactor:reactor-core
  dependency-type: direct:production
  update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: dependabot[bot]
---
 plugins/repository-azure/build.gradle | 2 +-
 plugins/repository-azure/licenses/reactor-core-3.4.17.jar.sha1 | 1 -
 plugins/repository-azure/licenses/reactor-core-3.4.18.jar.sha1 | 1 +
 3 files changed, 2 insertions(+), 2 deletions(-)
 delete mode 100644 plugins/repository-azure/licenses/reactor-core-3.4.17.jar.sha1
 create mode 100644 plugins/repository-azure/licenses/reactor-core-3.4.18.jar.sha1

diff --git a/plugins/repository-azure/build.gradle b/plugins/repository-azure/build.gradle
index 55b4fc638f07b..eb5fc1650a1b4 100644
--- a/plugins/repository-azure/build.gradle
+++ b/plugins/repository-azure/build.gradle
@@ -56,7 +56,7 @@ dependencies {
   implementation project(':modules:transport-netty4')
   api 'com.azure:azure-storage-blob:12.16.1'
   api 'org.reactivestreams:reactive-streams:1.0.3'
-  api 'io.projectreactor:reactor-core:3.4.17'
+  api 'io.projectreactor:reactor-core:3.4.18'
   api 'io.projectreactor.netty:reactor-netty:1.0.18'
   api 'io.projectreactor.netty:reactor-netty-core:1.0.19'
   api 'io.projectreactor.netty:reactor-netty-http:1.0.18'
diff --git a/plugins/repository-azure/licenses/reactor-core-3.4.17.jar.sha1 b/plugins/repository-azure/licenses/reactor-core-3.4.17.jar.sha1
deleted file mode 100644
index 3803458775631..0000000000000
--- a/plugins/repository-azure/licenses/reactor-core-3.4.17.jar.sha1
+++ /dev/null
@@ -1 +0,0 @@
-52176b50d2191bc32a8a235124e7aff7f291754b
\ No newline at end of file
diff --git a/plugins/repository-azure/licenses/reactor-core-3.4.18.jar.sha1 b/plugins/repository-azure/licenses/reactor-core-3.4.18.jar.sha1
new file mode 100644
index 0000000000000..749954f62c77b
--- /dev/null
+++ b/plugins/repository-azure/licenses/reactor-core-3.4.18.jar.sha1
@@ -0,0 +1 @@
+29f4f3a4876a65861deffc0f7f189029bcaf7946
\ No newline at end of file

From 55ca33103578527df65a58c6c9a0da2634cce037 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 23 May 2022 11:25:57 -0500
Subject: [PATCH 09/16] Bump gax-httpjson from 0.101.0 to 0.103.1 in
 /plugins/repository-gcs (#3426)

Bumps [gax-httpjson](https://github.com/googleapis/gax-java) from 0.101.0 to 0.103.1.
- [Release notes](https://github.com/googleapis/gax-java/releases)
- [Changelog](https://github.com/googleapis/gax-java/blob/main/CHANGELOG.md)
- [Commits](https://github.com/googleapis/gax-java/commits)

---
updated-dependencies:
- dependency-name: com.google.api:gax-httpjson
  dependency-type: direct:production
  update-type: version-update:semver-minor
...
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- plugins/repository-gcs/build.gradle | 2 +- plugins/repository-gcs/licenses/gax-httpjson-0.101.0.jar.sha1 | 1 - plugins/repository-gcs/licenses/gax-httpjson-0.103.1.jar.sha1 | 1 + 3 files changed, 2 insertions(+), 2 deletions(-) delete mode 100644 plugins/repository-gcs/licenses/gax-httpjson-0.101.0.jar.sha1 create mode 100644 plugins/repository-gcs/licenses/gax-httpjson-0.103.1.jar.sha1 diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index 67468639dc354..72964f9444026 100644 --- a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -74,7 +74,7 @@ dependencies { api 'com.google.http-client:google-http-client-appengine:1.41.8' api 'com.google.http-client:google-http-client-jackson2:1.35.0' api 'com.google.http-client:google-http-client-gson:1.41.4' - api 'com.google.api:gax-httpjson:0.101.0' + api 'com.google.api:gax-httpjson:0.103.1' api 'io.grpc:grpc-context:1.46.0' api 'io.opencensus:opencensus-api:0.18.0' api 'io.opencensus:opencensus-contrib-http-util:0.18.0' diff --git a/plugins/repository-gcs/licenses/gax-httpjson-0.101.0.jar.sha1 b/plugins/repository-gcs/licenses/gax-httpjson-0.101.0.jar.sha1 deleted file mode 100644 index f722ccbd86c54..0000000000000 --- a/plugins/repository-gcs/licenses/gax-httpjson-0.101.0.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e056920e5df4086270e6c3d2e3a16d8a7585fd13 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/gax-httpjson-0.103.1.jar.sha1 b/plugins/repository-gcs/licenses/gax-httpjson-0.103.1.jar.sha1 new file mode 100644 index 0000000000000..11315004e233d --- /dev/null +++ b/plugins/repository-gcs/licenses/gax-httpjson-0.103.1.jar.sha1 @@ -0,0 +1 @@ +041d99172fda933bc879bdfd8de9420c5c34107e \ No newline at end of file From a023ad9cbabdde9c2bf1afb18b73f94357bb478a Mon Sep 17 00:00:00 2001 From: Poojita Raj Date: Mon, 23 May 2022 12:19:54 -0700 Subject: [PATCH 10/16] [segment replication]Introducing common Replication interfaces for segment replication and recovery code paths (#3234) * RecoveryState inherits from ReplicationState + RecoveryTarget inherits from ReplicationTarget Signed-off-by: Poojita Raj * Refactoring: mixedClusterVersion error fix + move Stage to ReplicationState Signed-off-by: Poojita Raj * pull ReplicationListener into a top level class + add javadocs + address review comments Signed-off-by: Poojita Raj * fix javadoc Signed-off-by: Poojita Raj * review changes Signed-off-by: Poojita Raj * Refactoring the hierarchy relationship between repl and recovery Signed-off-by: Poojita Raj * style fix Signed-off-by: Poojita Raj * move package common under replication Signed-off-by: Poojita Raj * rename to replication Signed-off-by: Poojita Raj * rename and doc changes Signed-off-by: Poojita Raj --- .../indices/recovery/IndexRecoveryIT.java | 2 +- .../opensearch/index/shard/IndexShard.java | 11 +- .../opensearch/indices/IndicesService.java | 3 +- .../cluster/IndicesClusterStateService.java | 42 +-- .../recovery/PeerRecoveryTargetService.java | 80 ++--- .../recovery/RecoveriesCollection.java | 332 ------------------ .../indices/recovery/RecoveryListener.java | 55 +++ .../indices/recovery/RecoveryState.java | 3 +- .../indices/recovery/RecoveryTarget.java | 146 +++----- .../common/ReplicationCollection.java | 297 ++++++++++++++++ .../common/ReplicationListener.java | 23 ++ .../common/ReplicationRequestTracker.java} 
| 6 +- .../replication/common/ReplicationState.java | 18 + .../replication/common/ReplicationTarget.java | 175 +++++++++ .../RecoveryDuringReplicationTests.java | 4 +- ...actIndicesClusterStateServiceTestCase.java | 7 +- .../indices/recovery/RecoveryTests.java | 26 +- ...va => ReplicationRequestTrackerTests.java} | 5 +- ...s.java => ReplicationCollectionTests.java} | 88 +++-- .../index/shard/IndexShardTestCase.java | 9 +- 20 files changed, 750 insertions(+), 582 deletions(-) delete mode 100644 server/src/main/java/org/opensearch/indices/recovery/RecoveriesCollection.java create mode 100644 server/src/main/java/org/opensearch/indices/recovery/RecoveryListener.java create mode 100644 server/src/main/java/org/opensearch/indices/replication/common/ReplicationCollection.java create mode 100644 server/src/main/java/org/opensearch/indices/replication/common/ReplicationListener.java rename server/src/main/java/org/opensearch/indices/{recovery/RecoveryRequestTracker.java => replication/common/ReplicationRequestTracker.java} (96%) create mode 100644 server/src/main/java/org/opensearch/indices/replication/common/ReplicationState.java create mode 100644 server/src/main/java/org/opensearch/indices/replication/common/ReplicationTarget.java rename server/src/test/java/org/opensearch/indices/recovery/{RecoveryRequestTrackerTests.java => ReplicationRequestTrackerTests.java} (95%) rename server/src/test/java/org/opensearch/recovery/{RecoveriesCollectionTests.java => ReplicationCollectionTests.java} (65%) diff --git a/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexRecoveryIT.java b/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexRecoveryIT.java index 4650000f1e20a..0ab3be3d63091 100644 --- a/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexRecoveryIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/indices/recovery/IndexRecoveryIT.java @@ -101,8 +101,8 @@ import org.opensearch.indices.IndicesService; import org.opensearch.indices.NodeIndicesStats; import org.opensearch.indices.analysis.AnalysisModule; -import org.opensearch.indices.recovery.RecoveryState.Stage; import org.opensearch.indices.replication.common.ReplicationLuceneIndex; +import org.opensearch.indices.recovery.RecoveryState.Stage; import org.opensearch.node.NodeClosedException; import org.opensearch.node.RecoverySettingsChunkSizePlugin; import org.opensearch.plugins.AnalysisPlugin; diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index 0f088a13d5c5a..8002dfe688def 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -157,6 +157,7 @@ import org.opensearch.indices.cluster.IndicesClusterStateService; import org.opensearch.indices.recovery.PeerRecoveryTargetService; import org.opensearch.indices.recovery.RecoveryFailedException; +import org.opensearch.indices.recovery.RecoveryListener; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.indices.recovery.RecoveryTarget; import org.opensearch.repositories.RepositoriesService; @@ -2876,7 +2877,7 @@ protected Engine getEngineOrNull() { public void startRecovery( RecoveryState recoveryState, PeerRecoveryTargetService recoveryTargetService, - PeerRecoveryTargetService.RecoveryListener recoveryListener, + RecoveryListener recoveryListener, RepositoriesService repositoriesService, Consumer 
mappingUpdateConsumer, IndicesService indicesService @@ -2909,7 +2910,7 @@ public void startRecovery( recoveryTargetService.startRecovery(this, recoveryState.getSourceNode(), recoveryListener); } catch (Exception e) { failShard("corrupted preexisting index", e); - recoveryListener.onRecoveryFailure(recoveryState, new RecoveryFailedException(recoveryState, null, e), true); + recoveryListener.onFailure(recoveryState, new RecoveryFailedException(recoveryState, null, e), true); } break; case SNAPSHOT: @@ -2984,15 +2985,15 @@ public void startRecovery( private void executeRecovery( String reason, RecoveryState recoveryState, - PeerRecoveryTargetService.RecoveryListener recoveryListener, + RecoveryListener recoveryListener, CheckedConsumer, Exception> action ) { markAsRecovering(reason, recoveryState); // mark the shard as recovering on the cluster state thread threadPool.generic().execute(ActionRunnable.wrap(ActionListener.wrap(r -> { if (r) { - recoveryListener.onRecoveryDone(recoveryState); + recoveryListener.onDone(recoveryState); } - }, e -> recoveryListener.onRecoveryFailure(recoveryState, new RecoveryFailedException(recoveryState, null, e), true)), action)); + }, e -> recoveryListener.onFailure(recoveryState, new RecoveryFailedException(recoveryState, null, e), true)), action)); } /** diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index b5da0ae1f7688..1c7e45323813c 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -136,6 +136,7 @@ import org.opensearch.indices.fielddata.cache.IndicesFieldDataCache; import org.opensearch.indices.mapper.MapperRegistry; import org.opensearch.indices.recovery.PeerRecoveryTargetService; +import org.opensearch.indices.recovery.RecoveryListener; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.node.Node; import org.opensearch.plugins.IndexStorePlugin; @@ -839,7 +840,7 @@ public synchronized void verifyIndexMetadata(IndexMetadata metadata, IndexMetada public IndexShard createShard( final ShardRouting shardRouting, final PeerRecoveryTargetService recoveryTargetService, - final PeerRecoveryTargetService.RecoveryListener recoveryListener, + final RecoveryListener recoveryListener, final RepositoriesService repositoriesService, final Consumer onShardFailure, final Consumer globalCheckpointSyncer, diff --git a/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java b/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java index 29f74f8a86d85..d1623df156593 100644 --- a/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java +++ b/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java @@ -78,8 +78,9 @@ import org.opensearch.indices.IndicesService; import org.opensearch.indices.recovery.PeerRecoverySourceService; import org.opensearch.indices.recovery.PeerRecoveryTargetService; -import org.opensearch.indices.recovery.RecoveryFailedException; +import org.opensearch.indices.recovery.RecoveryListener; import org.opensearch.indices.recovery.RecoveryState; +import org.opensearch.indices.replication.common.ReplicationState; import org.opensearch.repositories.RepositoriesService; import org.opensearch.search.SearchService; import org.opensearch.snapshots.SnapshotShardsService; @@ -624,7 +625,7 @@ private void createShard(DiscoveryNodes nodes, 
RoutingTable routingTable, ShardR indicesService.createShard( shardRouting, recoveryTargetService, - new RecoveryListener(shardRouting, primaryTerm), + new RecoveryListener(shardRouting, primaryTerm, this), repositoriesService, failedShardHandler, globalCheckpointSyncer, @@ -739,39 +740,16 @@ private static DiscoveryNode findSourceNodeForPeerRecovery( return sourceNode; } - private class RecoveryListener implements PeerRecoveryTargetService.RecoveryListener { - - /** - * ShardRouting with which the shard was created - */ - private final ShardRouting shardRouting; - - /** - * Primary term with which the shard was created - */ - private final long primaryTerm; - - private RecoveryListener(final ShardRouting shardRouting, final long primaryTerm) { - this.shardRouting = shardRouting; - this.primaryTerm = primaryTerm; - } - - @Override - public void onRecoveryDone(final RecoveryState state) { - shardStateAction.shardStarted(shardRouting, primaryTerm, "after " + state.getRecoverySource(), SHARD_STATE_ACTION_LISTENER); - } - - @Override - public void onRecoveryFailure(RecoveryState state, RecoveryFailedException e, boolean sendShardFailure) { - handleRecoveryFailure(shardRouting, sendShardFailure, e); - } - } - // package-private for testing - synchronized void handleRecoveryFailure(ShardRouting shardRouting, boolean sendShardFailure, Exception failure) { + public synchronized void handleRecoveryFailure(ShardRouting shardRouting, boolean sendShardFailure, Exception failure) { failAndRemoveShard(shardRouting, sendShardFailure, "failed recovery", failure, clusterService.state()); } + public void handleRecoveryDone(ReplicationState state, ShardRouting shardRouting, long primaryTerm) { + RecoveryState RecState = (RecoveryState) state; + shardStateAction.shardStarted(shardRouting, primaryTerm, "after " + RecState.getRecoverySource(), SHARD_STATE_ACTION_LISTENER); + } + private void failAndRemoveShard( ShardRouting shardRouting, boolean sendShardFailure, @@ -1004,7 +982,7 @@ U createIndex(IndexMetadata indexMetadata, List builtInIndex T createShard( ShardRouting shardRouting, PeerRecoveryTargetService recoveryTargetService, - PeerRecoveryTargetService.RecoveryListener recoveryListener, + RecoveryListener recoveryListener, RepositoriesService repositoriesService, Consumer onShardFailure, Consumer globalCheckpointSyncer, diff --git a/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java b/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java index 4ae188abe5896..e13022afa81ba 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java +++ b/server/src/main/java/org/opensearch/indices/recovery/PeerRecoveryTargetService.java @@ -37,10 +37,10 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.store.AlreadyClosedException; import org.apache.lucene.store.RateLimiter; +import org.opensearch.ExceptionsHelper; import org.opensearch.LegacyESVersion; import org.opensearch.OpenSearchException; import org.opensearch.OpenSearchTimeoutException; -import org.opensearch.ExceptionsHelper; import org.opensearch.action.ActionListener; import org.opensearch.action.ActionRunnable; import org.opensearch.action.support.ChannelActionListener; @@ -69,7 +69,8 @@ import org.opensearch.index.store.Store; import org.opensearch.index.translog.Translog; import org.opensearch.index.translog.TranslogCorruptedException; -import org.opensearch.indices.recovery.RecoveriesCollection.RecoveryRef; +import 
org.opensearch.indices.replication.common.ReplicationCollection; +import org.opensearch.indices.replication.common.ReplicationCollection.ReplicationRef; import org.opensearch.indices.replication.common.ReplicationLuceneIndex; import org.opensearch.indices.replication.common.ReplicationTimer; import org.opensearch.tasks.Task; @@ -124,7 +125,7 @@ public static class Actions { private final RecoverySettings recoverySettings; private final ClusterService clusterService; - private final RecoveriesCollection onGoingRecoveries; + private final ReplicationCollection onGoingRecoveries; public PeerRecoveryTargetService( ThreadPool threadPool, @@ -136,7 +137,7 @@ public PeerRecoveryTargetService( this.transportService = transportService; this.recoverySettings = recoverySettings; this.clusterService = clusterService; - this.onGoingRecoveries = new RecoveriesCollection(logger, threadPool); + this.onGoingRecoveries = new ReplicationCollection<>(logger, threadPool); transportService.registerRequestHandler( Actions.FILES_INFO, @@ -185,13 +186,16 @@ public PeerRecoveryTargetService( @Override public void beforeIndexShardClosed(ShardId shardId, @Nullable IndexShard indexShard, Settings indexSettings) { if (indexShard != null) { - onGoingRecoveries.cancelRecoveriesForShard(shardId, "shard closed"); + onGoingRecoveries.cancelForShard(shardId, "shard closed"); } } public void startRecovery(final IndexShard indexShard, final DiscoveryNode sourceNode, final RecoveryListener listener) { // create a new recovery status, and process... - final long recoveryId = onGoingRecoveries.startRecovery(indexShard, sourceNode, listener, recoverySettings.activityTimeout()); + final long recoveryId = onGoingRecoveries.start( + new RecoveryTarget(indexShard, sourceNode, listener), + recoverySettings.activityTimeout() + ); // we fork off quickly here and go async but this is called from the cluster state applier thread too and that can cause // assertions to trip if we executed it on the same thread hence we fork off to the generic threadpool. threadPool.generic().execute(new RecoveryRunner(recoveryId)); @@ -208,9 +212,9 @@ protected void retryRecovery(final long recoveryId, final String reason, TimeVal } private void retryRecovery(final long recoveryId, final TimeValue retryAfter, final TimeValue activityTimeout) { - RecoveryTarget newTarget = onGoingRecoveries.resetRecovery(recoveryId, activityTimeout); + RecoveryTarget newTarget = onGoingRecoveries.reset(recoveryId, activityTimeout); if (newTarget != null) { - threadPool.scheduleUnlessShuttingDown(retryAfter, ThreadPool.Names.GENERIC, new RecoveryRunner(newTarget.recoveryId())); + threadPool.scheduleUnlessShuttingDown(retryAfter, ThreadPool.Names.GENERIC, new RecoveryRunner(newTarget.getId())); } } @@ -225,7 +229,7 @@ private void doRecovery(final long recoveryId, final StartRecoveryRequest preExi final TransportRequest requestToSend; final StartRecoveryRequest startRequest; final ReplicationTimer timer; - try (RecoveryRef recoveryRef = onGoingRecoveries.getRecovery(recoveryId)) { + try (ReplicationRef recoveryRef = onGoingRecoveries.get(recoveryId)) { if (recoveryRef == null) { logger.trace("not running recovery with id [{}] - can not find it (probably finished)", recoveryId); return; @@ -248,7 +252,7 @@ private void doRecovery(final long recoveryId, final StartRecoveryRequest preExi } catch (final Exception e) { // this will be logged as warning later on... 
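Worth noting on the hunk above: startRecovery now registers the target in the generic collection first and only then forks a runner that carries just the numeric id, re-resolving the target on the worker thread. A minimal sketch of that register-then-fork shape, using plain JDK types as hypothetical stand-ins for the collection and the runner:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicLong;

class StartThenForkSketch {
    private static final AtomicLong ID_GENERATOR = new AtomicLong();
    private final ConcurrentMap<Long, Runnable> onGoing = new ConcurrentHashMap<>();
    private final ExecutorService generic = Executors.newCachedThreadPool();

    // Register under a fresh id before forking; the async runner looks the id up
    // again and silently returns if the target was cancelled or completed meanwhile.
    long start(Runnable target) {
        final long id = ID_GENERATOR.incrementAndGet();
        onGoing.put(id, target);
        generic.execute(() -> {
            Runnable t = onGoing.get(id);
            if (t != null) {
                t.run();
            }
        });
        return id;
    }
}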
logger.trace("unexpected error while preparing shard for peer recovery, failing recovery", e); - onGoingRecoveries.failRecovery( + onGoingRecoveries.fail( recoveryId, new RecoveryFailedException(recoveryTarget.state(), "failed to prepare shard for recovery", e), true @@ -339,28 +343,17 @@ public static StartRecoveryRequest getStartRecoveryRequest( localNode, metadataSnapshot, recoveryTarget.state().getPrimary(), - recoveryTarget.recoveryId(), + recoveryTarget.getId(), startingSeqNo ); return request; } - /** - * The recovery listener - * - * @opensearch.internal - */ - public interface RecoveryListener { - void onRecoveryDone(RecoveryState state); - - void onRecoveryFailure(RecoveryState state, RecoveryFailedException e, boolean sendShardFailure); - } - class PrepareForTranslogOperationsRequestHandler implements TransportRequestHandler { @Override public void messageReceived(RecoveryPrepareForTranslogOperationsRequest request, TransportChannel channel, Task task) { - try (RecoveryRef recoveryRef = onGoingRecoveries.getRecoverySafe(request.recoveryId(), request.shardId())) { + try (ReplicationRef recoveryRef = onGoingRecoveries.getSafe(request.recoveryId(), request.shardId())) { final ActionListener listener = createOrFinishListener(recoveryRef, channel, Actions.PREPARE_TRANSLOG, request); if (listener == null) { return; @@ -375,7 +368,7 @@ class FinalizeRecoveryRequestHandler implements TransportRequestHandler recoveryRef = onGoingRecoveries.getSafe(request.recoveryId(), request.shardId())) { final ActionListener listener = createOrFinishListener(recoveryRef, channel, Actions.FINALIZE, request); if (listener == null) { return; @@ -391,7 +384,7 @@ class HandoffPrimaryContextRequestHandler implements TransportRequestHandler recoveryRef = onGoingRecoveries.getSafe(request.recoveryId(), request.shardId())) { recoveryRef.get().handoffPrimaryContext(request.primaryContext()); } channel.sendResponse(TransportResponse.Empty.INSTANCE); @@ -404,7 +397,7 @@ class TranslogOperationsRequestHandler implements TransportRequestHandler recoveryRef = onGoingRecoveries.getSafe(request.recoveryId(), request.shardId())) { final RecoveryTarget recoveryTarget = recoveryRef.get(); final ActionListener listener = createOrFinishListener( recoveryRef, @@ -424,7 +417,7 @@ public void messageReceived(final RecoveryTranslogOperationsRequest request, fin private void performTranslogOps( final RecoveryTranslogOperationsRequest request, final ActionListener listener, - final RecoveryRef recoveryRef + final ReplicationRef recoveryRef ) { final RecoveryTarget recoveryTarget = recoveryRef.get(); @@ -439,7 +432,12 @@ private void performTranslogOps( @Override public void onNewClusterState(ClusterState state) { threadPool.generic().execute(ActionRunnable.wrap(listener, l -> { - try (RecoveryRef recoveryRef = onGoingRecoveries.getRecoverySafe(request.recoveryId(), request.shardId())) { + try ( + ReplicationRef recoveryRef = onGoingRecoveries.getSafe( + request.recoveryId(), + request.shardId() + ) + ) { performTranslogOps(request, listener, recoveryRef); } })); @@ -485,7 +483,7 @@ class FilesInfoRequestHandler implements TransportRequestHandler recoveryRef = onGoingRecoveries.getSafe(request.recoveryId(), request.shardId())) { final ActionListener listener = createOrFinishListener(recoveryRef, channel, Actions.FILES_INFO, request); if (listener == null) { return; @@ -508,7 +506,7 @@ class CleanFilesRequestHandler implements TransportRequestHandler recoveryRef = onGoingRecoveries.getSafe(request.recoveryId(), request.shardId())) 
{ final ActionListener listener = createOrFinishListener(recoveryRef, channel, Actions.CLEAN_FILES, request); if (listener == null) { return; @@ -527,7 +525,7 @@ class FileChunkTransportRequestHandler implements TransportRequestHandler recoveryRef = onGoingRecoveries.getSafe(request.recoveryId(), request.shardId())) { final RecoveryTarget recoveryTarget = recoveryRef.get(); final ActionListener listener = createOrFinishListener(recoveryRef, channel, Actions.FILE_CHUNK, request); if (listener == null) { @@ -563,7 +561,7 @@ public void messageReceived(final RecoveryFileChunkRequest request, TransportCha } private ActionListener createOrFinishListener( - final RecoveryRef recoveryRef, + final ReplicationRef recoveryRef, final TransportChannel channel, final String action, final RecoveryTransportRequest request @@ -572,7 +570,7 @@ private ActionListener createOrFinishListener( } private ActionListener createOrFinishListener( - final RecoveryRef recoveryRef, + final ReplicationRef recoveryRef, final TransportChannel channel, final String action, final RecoveryTransportRequest request, @@ -609,10 +607,10 @@ class RecoveryRunner extends AbstractRunnable { @Override public void onFailure(Exception e) { - try (RecoveryRef recoveryRef = onGoingRecoveries.getRecovery(recoveryId)) { + try (ReplicationRef recoveryRef = onGoingRecoveries.get(recoveryId)) { if (recoveryRef != null) { logger.error(() -> new ParameterizedMessage("unexpected error during recovery [{}], failing shard", recoveryId), e); - onGoingRecoveries.failRecovery( + onGoingRecoveries.fail( recoveryId, new RecoveryFailedException(recoveryRef.get().state(), "unexpected error", e), true // be safe @@ -648,7 +646,7 @@ private RecoveryResponseHandler(final StartRecoveryRequest request, final Replic public void handleResponse(RecoveryResponse recoveryResponse) { final TimeValue recoveryTime = new TimeValue(timer.time()); // do this through ongoing recoveries to remove it from the collection - onGoingRecoveries.markRecoveryAsDone(recoveryId); + onGoingRecoveries.markAsDone(recoveryId); if (logger.isTraceEnabled()) { StringBuilder sb = new StringBuilder(); sb.append('[') @@ -709,11 +707,7 @@ private void onException(Exception e) { Throwable cause = ExceptionsHelper.unwrapCause(e); if (cause instanceof CancellableThreads.ExecutionCancelledException) { // this can also come from the source wrapped in a RemoteTransportException - onGoingRecoveries.failRecovery( - recoveryId, - new RecoveryFailedException(request, "source has canceled the recovery", cause), - false - ); + onGoingRecoveries.fail(recoveryId, new RecoveryFailedException(request, "source has canceled the recovery", cause), false); return; } if (cause instanceof RecoveryEngineException) { @@ -766,11 +760,11 @@ private void onException(Exception e) { } if (cause instanceof AlreadyClosedException) { - onGoingRecoveries.failRecovery(recoveryId, new RecoveryFailedException(request, "source shard is closed", cause), false); + onGoingRecoveries.fail(recoveryId, new RecoveryFailedException(request, "source shard is closed", cause), false); return; } - onGoingRecoveries.failRecovery(recoveryId, new RecoveryFailedException(request, e), true); + onGoingRecoveries.fail(recoveryId, new RecoveryFailedException(request, e), true); } @Override diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveriesCollection.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveriesCollection.java deleted file mode 100644 index 38b72dd0f7dee..0000000000000 --- 
a/server/src/main/java/org/opensearch/indices/recovery/RecoveriesCollection.java +++ /dev/null @@ -1,332 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - -package org.opensearch.indices.recovery; - -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.OpenSearchTimeoutException; -import org.opensearch.cluster.node.DiscoveryNode; -import org.opensearch.common.concurrent.AutoCloseableRefCounted; -import org.opensearch.common.unit.TimeValue; -import org.opensearch.common.util.concurrent.AbstractRunnable; -import org.opensearch.common.util.concurrent.ConcurrentCollections; -import org.opensearch.index.shard.IndexShard; -import org.opensearch.index.shard.IndexShardClosedException; -import org.opensearch.index.shard.ShardId; -import org.opensearch.threadpool.ThreadPool; - -import java.util.ArrayList; -import java.util.Iterator; -import java.util.List; -import java.util.concurrent.ConcurrentMap; - -/** - * This class holds a collection of all on going recoveries on the current node (i.e., the node is the target node - * of those recoveries). The class is used to guarantee concurrent semantics such that once a recoveries was done/cancelled/failed - * no other thread will be able to find it. Last, the {@link RecoveryRef} inner class verifies that recovery temporary files - * and store will only be cleared once on going usage is finished. - * - * @opensearch.internal - */ -public class RecoveriesCollection { - - /** This is the single source of truth for ongoing recoveries. If it's not here, it was canceled or done */ - private final ConcurrentMap onGoingRecoveries = ConcurrentCollections.newConcurrentMap(); - - private final Logger logger; - private final ThreadPool threadPool; - - public RecoveriesCollection(Logger logger, ThreadPool threadPool) { - this.logger = logger; - this.threadPool = threadPool; - } - - /** - * Starts are new recovery for the given shard, source node and state - * - * @return the id of the new recovery. 
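For orientation while reading this deletion: the recovery-specific entry point documented here is subsumed by the generic start(...) call shown earlier in PeerRecoveryTargetService. Roughly, the caller-side shapes compare as follows (abbreviated for illustration, not verbatim repo code):

// old: the collection built the RecoveryTarget itself
//   long id = recoveriesCollection.startRecovery(indexShard, sourceNode, listener, activityTimeout);
// new: the caller builds the target, the collection only tracks and times it
//   long id = onGoingRecoveries.start(new RecoveryTarget(indexShard, sourceNode, listener), activityTimeout);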
- */ - public long startRecovery( - IndexShard indexShard, - DiscoveryNode sourceNode, - PeerRecoveryTargetService.RecoveryListener listener, - TimeValue activityTimeout - ) { - RecoveryTarget recoveryTarget = new RecoveryTarget(indexShard, sourceNode, listener); - startRecoveryInternal(recoveryTarget, activityTimeout); - return recoveryTarget.recoveryId(); - } - - private void startRecoveryInternal(RecoveryTarget recoveryTarget, TimeValue activityTimeout) { - RecoveryTarget existingTarget = onGoingRecoveries.putIfAbsent(recoveryTarget.recoveryId(), recoveryTarget); - assert existingTarget == null : "found two RecoveryStatus instances with the same id"; - logger.trace( - "{} started recovery from {}, id [{}]", - recoveryTarget.shardId(), - recoveryTarget.sourceNode(), - recoveryTarget.recoveryId() - ); - threadPool.schedule( - new RecoveryMonitor(recoveryTarget.recoveryId(), recoveryTarget.lastAccessTime(), activityTimeout), - activityTimeout, - ThreadPool.Names.GENERIC - ); - } - - /** - * Resets the recovery and performs a recovery restart on the currently recovering index shard - * - * @see IndexShard#performRecoveryRestart() - * @return newly created RecoveryTarget - */ - public RecoveryTarget resetRecovery(final long recoveryId, final TimeValue activityTimeout) { - RecoveryTarget oldRecoveryTarget = null; - final RecoveryTarget newRecoveryTarget; - - try { - synchronized (onGoingRecoveries) { - // swap recovery targets in a synchronized block to ensure that the newly added recovery target is picked up by - // cancelRecoveriesForShard whenever the old recovery target is picked up - oldRecoveryTarget = onGoingRecoveries.remove(recoveryId); - if (oldRecoveryTarget == null) { - return null; - } - - newRecoveryTarget = oldRecoveryTarget.retryCopy(); - startRecoveryInternal(newRecoveryTarget, activityTimeout); - } - - // Closes the current recovery target - boolean successfulReset = oldRecoveryTarget.resetRecovery(newRecoveryTarget.cancellableThreads()); - if (successfulReset) { - logger.trace( - "{} restarted recovery from {}, id [{}], previous id [{}]", - newRecoveryTarget.shardId(), - newRecoveryTarget.sourceNode(), - newRecoveryTarget.recoveryId(), - oldRecoveryTarget.recoveryId() - ); - return newRecoveryTarget; - } else { - logger.trace( - "{} recovery could not be reset as it is already cancelled, recovery from {}, id [{}], previous id [{}]", - newRecoveryTarget.shardId(), - newRecoveryTarget.sourceNode(), - newRecoveryTarget.recoveryId(), - oldRecoveryTarget.recoveryId() - ); - cancelRecovery(newRecoveryTarget.recoveryId(), "recovery cancelled during reset"); - return null; - } - } catch (Exception e) { - // fail shard to be safe - oldRecoveryTarget.notifyListener(new RecoveryFailedException(oldRecoveryTarget.state(), "failed to retry recovery", e), true); - return null; - } - } - - public RecoveryTarget getRecoveryTarget(long id) { - return onGoingRecoveries.get(id); - } - - /** - * gets the {@link RecoveryTarget } for a given id. The RecoveryStatus returned has it's ref count already incremented - * to make sure it's safe to use. However, you must call {@link RecoveryTarget#decRef()} when you are done with it, typically - * by using this method in a try-with-resources clause. - *
- * Returns null if recovery is not found - */ - public RecoveryRef getRecovery(long id) { - RecoveryTarget status = onGoingRecoveries.get(id); - if (status != null && status.tryIncRef()) { - return new RecoveryRef(status); - } - return null; - } - - /** Similar to {@link #getRecovery(long)} but throws an exception if no recovery is found */ - public RecoveryRef getRecoverySafe(long id, ShardId shardId) { - RecoveryRef recoveryRef = getRecovery(id); - if (recoveryRef == null) { - throw new IndexShardClosedException(shardId); - } - assert recoveryRef.get().shardId().equals(shardId); - return recoveryRef; - } - - /** cancel the recovery with the given id (if found) and remove it from the recovery collection */ - public boolean cancelRecovery(long id, String reason) { - RecoveryTarget removed = onGoingRecoveries.remove(id); - boolean cancelled = false; - if (removed != null) { - logger.trace( - "{} canceled recovery from {}, id [{}] (reason [{}])", - removed.shardId(), - removed.sourceNode(), - removed.recoveryId(), - reason - ); - removed.cancel(reason); - cancelled = true; - } - return cancelled; - } - - /** - * fail the recovery with the given id (if found) and remove it from the recovery collection - * - * @param id id of the recovery to fail - * @param e exception with reason for the failure - * @param sendShardFailure true a shard failed message should be sent to the master - */ - public void failRecovery(long id, RecoveryFailedException e, boolean sendShardFailure) { - RecoveryTarget removed = onGoingRecoveries.remove(id); - if (removed != null) { - logger.trace( - "{} failing recovery from {}, id [{}]. Send shard failure: [{}]", - removed.shardId(), - removed.sourceNode(), - removed.recoveryId(), - sendShardFailure - ); - removed.fail(e, sendShardFailure); - } - } - - /** mark the recovery with the given id as done (if found) */ - public void markRecoveryAsDone(long id) { - RecoveryTarget removed = onGoingRecoveries.remove(id); - if (removed != null) { - logger.trace("{} marking recovery from {} as done, id [{}]", removed.shardId(), removed.sourceNode(), removed.recoveryId()); - removed.markAsDone(); - } - } - - /** the number of ongoing recoveries */ - public int size() { - return onGoingRecoveries.size(); - } - - /** - * cancel all ongoing recoveries for the given shard - * - * @param reason reason for cancellation - * @param shardId shardId for which to cancel recoveries - * @return true if a recovery was cancelled - */ - public boolean cancelRecoveriesForShard(ShardId shardId, String reason) { - boolean cancelled = false; - List matchedRecoveries = new ArrayList<>(); - synchronized (onGoingRecoveries) { - for (Iterator it = onGoingRecoveries.values().iterator(); it.hasNext();) { - RecoveryTarget status = it.next(); - if (status.shardId().equals(shardId)) { - matchedRecoveries.add(status); - it.remove(); - } - } - } - for (RecoveryTarget removed : matchedRecoveries) { - logger.trace( - "{} canceled recovery from {}, id [{}] (reason [{}])", - removed.shardId(), - removed.sourceNode(), - removed.recoveryId(), - reason - ); - removed.cancel(reason); - cancelled = true; - } - return cancelled; - } - - /** - * a reference to {@link RecoveryTarget}, which implements {@link AutoCloseable}. closing the reference - * causes {@link RecoveryTarget#decRef()} to be called. This makes sure that the underlying resources - * will not be freed until {@link RecoveryRef#close()} is called. 
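The try-with-resources contract described in this javadoc survives unchanged in ReplicationRef below; a self-contained sketch of the idiom, with an AtomicInteger standing in for the real ref-counted target:

import java.util.concurrent.atomic.AtomicInteger;

class RefCountSketch {
    static final class Ref implements AutoCloseable {
        private final AtomicInteger refCount;

        Ref(AtomicInteger refCount) {
            this.refCount = refCount;
            refCount.incrementAndGet(); // tryIncRef() has already succeeded in the real code
        }

        @Override
        public void close() {
            refCount.decrementAndGet(); // decRef(): resources freed only when the count hits zero
        }
    }

    public static void main(String[] args) {
        AtomicInteger targetRefs = new AtomicInteger(1); // initial reference held by the collection
        try (Ref ref = new Ref(targetRefs)) {
            // safe to use the target here; temp files and store cannot be cleared
        }
        // reference released; once the collection also releases its reference, cleanup runs
    }
}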
- * - * @opensearch.internal - */ - public static class RecoveryRef extends AutoCloseableRefCounted { - - /** - * Important: {@link RecoveryTarget#tryIncRef()} should - * be *successfully* called on status before - */ - public RecoveryRef(RecoveryTarget status) { - super(status); - status.setLastAccessTime(); - } - } - - private class RecoveryMonitor extends AbstractRunnable { - private final long recoveryId; - private final TimeValue checkInterval; - - private volatile long lastSeenAccessTime; - - private RecoveryMonitor(long recoveryId, long lastSeenAccessTime, TimeValue checkInterval) { - this.recoveryId = recoveryId; - this.checkInterval = checkInterval; - this.lastSeenAccessTime = lastSeenAccessTime; - } - - @Override - public void onFailure(Exception e) { - logger.error(() -> new ParameterizedMessage("unexpected error while monitoring recovery [{}]", recoveryId), e); - } - - @Override - protected void doRun() throws Exception { - RecoveryTarget status = onGoingRecoveries.get(recoveryId); - if (status == null) { - logger.trace("[monitor] no status found for [{}], shutting down", recoveryId); - return; - } - long accessTime = status.lastAccessTime(); - if (accessTime == lastSeenAccessTime) { - String message = "no activity after [" + checkInterval + "]"; - failRecovery( - recoveryId, - new RecoveryFailedException(status.state(), message, new OpenSearchTimeoutException(message)), - true // to be safe, we don't know what go stuck - ); - return; - } - lastSeenAccessTime = accessTime; - logger.trace("[monitor] rescheduling check for [{}]. last access time is [{}]", recoveryId, lastSeenAccessTime); - threadPool.schedule(this, checkInterval, ThreadPool.Names.GENERIC); - } - } - -} diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryListener.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryListener.java new file mode 100644 index 0000000000000..b93c054ffa4bf --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoveryListener.java @@ -0,0 +1,55 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.indices.recovery; + +import org.opensearch.OpenSearchException; +import org.opensearch.cluster.routing.ShardRouting; +import org.opensearch.indices.cluster.IndicesClusterStateService; +import org.opensearch.indices.replication.common.ReplicationListener; +import org.opensearch.indices.replication.common.ReplicationState; + +/** + * Listener that runs on changes in Recovery state + * + * @opensearch.internal + */ +public class RecoveryListener implements ReplicationListener { + + /** + * ShardRouting with which the shard was created + */ + private final ShardRouting shardRouting; + + /** + * Primary term with which the shard was created + */ + private final long primaryTerm; + + private final IndicesClusterStateService indicesClusterStateService; + + public RecoveryListener( + final ShardRouting shardRouting, + final long primaryTerm, + IndicesClusterStateService indicesClusterStateService + ) { + this.shardRouting = shardRouting; + this.primaryTerm = primaryTerm; + this.indicesClusterStateService = indicesClusterStateService; + } + + @Override + public void onDone(ReplicationState state) { + indicesClusterStateService.handleRecoveryDone(state, shardRouting, primaryTerm); + } + + @Override + public void onFailure(ReplicationState state, OpenSearchException e, boolean sendShardFailure) { + indicesClusterStateService.handleRecoveryFailure(shardRouting, sendShardFailure, e); + } +} diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryState.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryState.java index 35ac5cbc12bde..a3c7adb755145 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryState.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoveryState.java @@ -45,6 +45,7 @@ import org.opensearch.common.xcontent.XContentBuilder; import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.ShardId; +import org.opensearch.indices.replication.common.ReplicationState; import org.opensearch.indices.replication.common.ReplicationLuceneIndex; import org.opensearch.indices.replication.common.ReplicationTimer; @@ -56,7 +57,7 @@ * * @opensearch.internal */ -public class RecoveryState implements ToXContentFragment, Writeable { +public class RecoveryState implements ReplicationState, ToXContentFragment, Writeable { /** * The stage of the recovery state diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java b/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java index bb557cc6837ab..92897ab19ad64 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java +++ b/server/src/main/java/org/opensearch/indices/recovery/RecoveryTarget.java @@ -32,22 +32,18 @@ package org.opensearch.indices.recovery; -import org.apache.logging.log4j.Logger; import org.apache.lucene.index.CorruptIndexException; import org.apache.lucene.index.IndexFormatTooNewException; import org.apache.lucene.index.IndexFormatTooOldException; import org.opensearch.Assertions; -import org.opensearch.OpenSearchException; import org.opensearch.ExceptionsHelper; import org.opensearch.action.ActionListener; import org.opensearch.action.admin.indices.flush.FlushRequest; import org.opensearch.cluster.node.DiscoveryNode; import org.opensearch.common.UUIDs; import org.opensearch.common.bytes.BytesReference; -import org.opensearch.common.logging.Loggers; import org.opensearch.common.lucene.Lucene; import org.opensearch.common.util.CancellableThreads; -import 
org.opensearch.common.util.concurrent.AbstractRefCounted; import org.opensearch.index.engine.Engine; import org.opensearch.index.mapper.MapperException; import org.opensearch.index.seqno.ReplicationTracker; @@ -56,48 +52,33 @@ import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.IndexShardNotRecoveringException; import org.opensearch.index.shard.IndexShardState; -import org.opensearch.index.shard.ShardId; import org.opensearch.index.store.Store; import org.opensearch.index.store.StoreFileMetadata; import org.opensearch.index.translog.Translog; import org.opensearch.indices.replication.common.ReplicationLuceneIndex; +import org.opensearch.indices.replication.common.ReplicationTarget; +import org.opensearch.indices.replication.common.ReplicationListener; +import org.opensearch.indices.replication.common.ReplicationCollection; import java.io.IOException; import java.nio.file.Path; import java.util.List; import java.util.concurrent.CountDownLatch; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicLong; /** * Represents a recovery where the current node is the target node of the recovery. To track recoveries in a central place, instances of - * this class are created through {@link RecoveriesCollection}. + * this class are created through {@link ReplicationCollection}. * * @opensearch.internal */ -public class RecoveryTarget extends AbstractRefCounted implements RecoveryTargetHandler { - - private final Logger logger; - - private static final AtomicLong idGenerator = new AtomicLong(); +public class RecoveryTarget extends ReplicationTarget implements RecoveryTargetHandler { private static final String RECOVERY_PREFIX = "recovery."; - private final ShardId shardId; - private final long recoveryId; - private final IndexShard indexShard; private final DiscoveryNode sourceNode; - private final MultiFileWriter multiFileWriter; - private final RecoveryRequestTracker requestTracker = new RecoveryRequestTracker(); - private final Store store; - private final PeerRecoveryTargetService.RecoveryListener listener; - - private final AtomicBoolean finished = new AtomicBoolean(); - private final CancellableThreads cancellableThreads; - - // last time this status was accessed - private volatile long lastAccessTime = System.nanoTime(); + protected final MultiFileWriter multiFileWriter; + protected final Store store; // latch that can be used to blockingly wait for RecoveryTarget to be closed private final CountDownLatch closedLatch = new CountDownLatch(1); @@ -109,27 +90,15 @@ public class RecoveryTarget extends AbstractRefCounted implements RecoveryTarget * @param sourceNode source node of the recovery where we recover from * @param listener called when recovery is completed/failed */ - public RecoveryTarget(IndexShard indexShard, DiscoveryNode sourceNode, PeerRecoveryTargetService.RecoveryListener listener) { - super("recovery_status"); + public RecoveryTarget(IndexShard indexShard, DiscoveryNode sourceNode, ReplicationListener listener) { + super("recovery_status", indexShard, indexShard.recoveryState().getIndex(), listener); this.cancellableThreads = new CancellableThreads(); - this.recoveryId = idGenerator.incrementAndGet(); - this.listener = listener; - this.logger = Loggers.getLogger(getClass(), indexShard.shardId()); - this.indexShard = indexShard; this.sourceNode = sourceNode; - this.shardId = indexShard.shardId(); - final String tempFilePrefix = RECOVERY_PREFIX + UUIDs.randomBase64UUID() + "."; - this.multiFileWriter = new 
MultiFileWriter( - indexShard.store(), - indexShard.recoveryState().getIndex(), - tempFilePrefix, - logger, - this::ensureRefCount - ); + indexShard.recoveryStats().incCurrentAsTarget(); this.store = indexShard.store(); - // make sure the store is not released until we are done. + final String tempFilePrefix = getPrefix() + UUIDs.randomBase64UUID() + "."; + this.multiFileWriter = new MultiFileWriter(indexShard.store(), stateIndex, tempFilePrefix, logger, this::ensureRefCount); store.incRef(); - indexShard.recoveryStats().incCurrentAsTarget(); } /** @@ -141,23 +110,15 @@ public RecoveryTarget retryCopy() { return new RecoveryTarget(indexShard, sourceNode, listener); } - public ActionListener markRequestReceivedAndCreateListener(long requestSeqNo, ActionListener listener) { - return requestTracker.markReceivedAndCreateListener(requestSeqNo, listener); - } - - public long recoveryId() { - return recoveryId; - } - - public ShardId shardId() { - return shardId; - } - public IndexShard indexShard() { ensureRefCount(); return indexShard; } + public String source() { + return sourceNode.toString(); + } + public DiscoveryNode sourceNode() { return this.sourceNode; } @@ -170,29 +131,29 @@ public CancellableThreads cancellableThreads() { return cancellableThreads; } - /** return the last time this RecoveryStatus was used (based on System.nanoTime() */ - public long lastAccessTime() { - return lastAccessTime; + public Store store() { + ensureRefCount(); + return store; } - /** sets the lasAccessTime flag to now */ - public void setLastAccessTime() { - lastAccessTime = System.nanoTime(); + public String description() { + return "recovery from " + source(); } - public Store store() { - ensureRefCount(); - return store; + @Override + public void notifyListener(Exception e, boolean sendShardFailure) { + listener.onFailure(state(), new RecoveryFailedException(state(), e.getMessage(), e), sendShardFailure); } /** * Closes the current recovery target and waits up to a certain timeout for resources to be freed. * Returns true if resetting the recovery was successful, false if the recovery target is already cancelled / failed or marked as done. */ - boolean resetRecovery(CancellableThreads newTargetCancellableThreads) throws IOException { + public boolean reset(CancellableThreads newTargetCancellableThreads) throws IOException { + final long recoveryId = getId(); if (finished.compareAndSet(false, true)) { try { - logger.debug("reset of recovery with shard {} and id [{}]", shardId, recoveryId); + logger.debug("reset of recovery with shard {} and id [{}]", shardId(), recoveryId); } finally { // release the initial reference. recovery files will be cleaned as soon as ref count goes to zero, potentially now. 
decRef(); @@ -202,7 +163,7 @@ boolean resetRecovery(CancellableThreads newTargetCancellableThreads) throws IOE } catch (CancellableThreads.ExecutionCancelledException e) { logger.trace( "new recovery target cancelled for shard {} while waiting on old recovery target with id [{}] to close", - shardId, + shardId(), recoveryId ); return false; @@ -248,22 +209,7 @@ public void cancel(String reason) { * @param sendShardFailure indicates whether to notify the cluster-manager of the shard failure */ public void fail(RecoveryFailedException e, boolean sendShardFailure) { - if (finished.compareAndSet(false, true)) { - try { - notifyListener(e, sendShardFailure); - } finally { - try { - cancellableThreads.cancel("failed recovery [" + ExceptionsHelper.stackTrace(e) + "]"); - } finally { - // release the initial reference. recovery files will be cleaned as soon as ref count goes to zero, potentially now - decRef(); - } - } - } - } - - public void notifyListener(RecoveryFailedException e, boolean sendShardFailure) { - listener.onRecoveryFailure(state(), e, sendShardFailure); + super.fail(e, sendShardFailure); } /** mark the current recovery as done */ @@ -278,7 +224,7 @@ public void markAsDone() { // release the initial reference. recovery files will be cleaned as soon as ref count goes to zero, potentially now decRef(); } - listener.onRecoveryDone(state()); + listener.onDone(state()); } } @@ -287,7 +233,6 @@ protected void closeInternal() { try { multiFileWriter.close(); } finally { - // free store. increment happens in constructor store.decRef(); indexShard.recoveryStats().decCurrentAsTarget(); closedLatch.countDown(); @@ -296,15 +241,28 @@ protected void closeInternal() { @Override public String toString() { - return shardId + " [" + recoveryId + "]"; + return shardId() + " [" + getId() + "]"; } - private void ensureRefCount() { - if (refCount() <= 0) { - throw new OpenSearchException( - "RecoveryStatus is used but it's refcount is 0. Probably a mismatch between incRef/decRef " + "calls" - ); - } + @Override + protected String getPrefix() { + return RECOVERY_PREFIX; + } + + @Override + protected void onDone() { + assert multiFileWriter.tempFileNames.isEmpty() : "not all temporary files are renamed"; + // this might still throw an exception ie. if the shard is CLOSED due to some other event. + // it's safer to decrement the reference in a try finally here. + indexShard.postRecovery("peer recovery done"); + } + + /** + * if {@link #cancellableThreads()} was used, the threads will be interrupted. 
+ */ + @Override + protected void onCancel(String reason) { + cancellableThreads.cancel(reason); } /*** Implementation of {@link RecoveryTargetHandler } */ @@ -374,7 +332,7 @@ public void indexTranslogOperations( translog.totalOperations(totalTranslogOps); assert indexShard().recoveryState() == state(); if (indexShard().state() != IndexShardState.RECOVERING) { - throw new IndexShardNotRecoveringException(shardId, indexShard().state()); + throw new IndexShardNotRecoveringException(shardId(), indexShard().state()); } /* * The maxSeenAutoIdTimestampOnPrimary received from the primary is at least the highest auto_id_timestamp from any operation @@ -460,7 +418,7 @@ public void cleanFiles( final String translogUUID = Translog.createEmptyTranslog( indexShard.shardPath().resolveTranslog(), globalCheckpoint, - shardId, + shardId(), indexShard.getPendingPrimaryTerm() ); store.associateIndexWithNewTranslog(translogUUID); diff --git a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationCollection.java b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationCollection.java new file mode 100644 index 0000000000000..609825eb5227b --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationCollection.java @@ -0,0 +1,297 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package org.opensearch.indices.replication.common; + +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.OpenSearchException; +import org.opensearch.OpenSearchTimeoutException; +import org.opensearch.common.concurrent.AutoCloseableRefCounted; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.concurrent.AbstractRunnable; +import org.opensearch.common.util.concurrent.ConcurrentCollections; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.index.shard.IndexShardClosedException; +import org.opensearch.index.shard.ShardId; +import org.opensearch.threadpool.ThreadPool; + +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.concurrent.ConcurrentMap; + +/** + * This class holds a collection of all on going replication events on the current node (i.e., the node is the target node + * of those events). The class is used to guarantee concurrent semantics such that once an event was done/cancelled/failed + * no other thread will be able to find it. 
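The done/cancelled/failed guarantee described in this javadoc rests on ConcurrentMap.remove() being the single terminal transition; a compact sketch of that remove-wins pattern (hypothetical names, JDK types only):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

class RemoveWinsSketch {
    private final ConcurrentMap<Long, Object> onGoing = new ConcurrentHashMap<>();

    // markAsDone/cancel/fail all funnel through remove(): only the first caller
    // sees a non-null result, so a finished event cannot be found or failed twice.
    boolean complete(long id) {
        return onGoing.remove(id) != null;
    }
}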
Last, the {@link ReplicationRef} inner class verifies that temporary files + * and store will only be cleared once on going usage is finished. + * + * @opensearch.internal + */ +public class ReplicationCollection { + + /** This is the single source of truth for ongoing target events. If it's not here, it was canceled or done */ + private final ConcurrentMap onGoingTargetEvents = ConcurrentCollections.newConcurrentMap(); + + private final Logger logger; + private final ThreadPool threadPool; + + public ReplicationCollection(Logger logger, ThreadPool threadPool) { + this.logger = logger; + this.threadPool = threadPool; + } + + /** + * Starts a new target event for the given shard, source node and state + * + * @return the id of the new target event. + */ + public long start(T target, TimeValue activityTimeout) { + startInternal(target, activityTimeout); + return target.getId(); + } + + private void startInternal(T target, TimeValue activityTimeout) { + T existingTarget = onGoingTargetEvents.putIfAbsent(target.getId(), target); + assert existingTarget == null : "found two Target instances with the same id"; + logger.trace("started {}", target.description()); + threadPool.schedule( + new ReplicationMonitor(target.getId(), target.lastAccessTime(), activityTimeout), + activityTimeout, + ThreadPool.Names.GENERIC + ); + } + + /** + * Resets the target event and performs a restart on the current index shard + * + * @see IndexShard#performRecoveryRestart() + * @return newly created Target + */ + @SuppressWarnings(value = "unchecked") + public T reset(final long id, final TimeValue activityTimeout) { + T oldTarget = null; + final T newTarget; + + try { + synchronized (onGoingTargetEvents) { + // swap targets in a synchronized block to ensure that the newly added target is picked up by + // cancelForShard whenever the old target is picked up + oldTarget = onGoingTargetEvents.remove(id); + if (oldTarget == null) { + return null; + } + + newTarget = (T) oldTarget.retryCopy(); + startInternal(newTarget, activityTimeout); + } + + // Closes the current target + boolean successfulReset = oldTarget.reset(newTarget.cancellableThreads()); + if (successfulReset) { + logger.trace("restarted {}, previous id [{}]", newTarget.description(), oldTarget.getId()); + return newTarget; + } else { + logger.trace( + "{} could not be reset as it is already cancelled, previous id [{}]", + newTarget.description(), + oldTarget.getId() + ); + cancel(newTarget.getId(), "cancelled during reset"); + return null; + } + } catch (Exception e) { + // fail shard to be safe + assert oldTarget != null; + oldTarget.notifyListener(e, true); + return null; + } + } + + public T getTarget(long id) { + return onGoingTargetEvents.get(id); + } + + /** + * gets the {@link ReplicationTarget } for a given id. The ShardTarget returned has it's ref count already incremented + * to make sure it's safe to use. However, you must call {@link ReplicationTarget#decRef()} when you are done with it, typically + * by using this method in a try-with-resources clause. + *
+ * Returns null if target event is not found + */ + public ReplicationRef get(long id) { + T status = onGoingTargetEvents.get(id); + if (status != null && status.tryIncRef()) { + return new ReplicationRef(status); + } + return null; + } + + /** Similar to {@link #get(long)} but throws an exception if no target is found */ + public ReplicationRef getSafe(long id, ShardId shardId) { + ReplicationRef ref = get(id); + if (ref == null) { + throw new IndexShardClosedException(shardId); + } + assert ref.get().indexShard().shardId().equals(shardId); + return ref; + } + + /** cancel the target with the given id (if found) and remove it from the target collection */ + public boolean cancel(long id, String reason) { + T removed = onGoingTargetEvents.remove(id); + boolean cancelled = false; + if (removed != null) { + logger.trace("canceled {} (reason [{}])", removed.description(), reason); + removed.cancel(reason); + cancelled = true; + } + return cancelled; + } + + /** + * fail the target with the given id (if found) and remove it from the target collection + * + * @param id id of the target to fail + * @param e exception with reason for the failure + * @param sendShardFailure true a shard failed message should be sent to the master + */ + public void fail(long id, OpenSearchException e, boolean sendShardFailure) { + T removed = onGoingTargetEvents.remove(id); + if (removed != null) { + logger.trace("failing {}. Send shard failure: [{}]", removed.description(), sendShardFailure); + removed.fail(e, sendShardFailure); + } + } + + /** mark the target with the given id as done (if found) */ + public void markAsDone(long id) { + T removed = onGoingTargetEvents.remove(id); + if (removed != null) { + logger.trace("Marking {} as done", removed.description()); + removed.markAsDone(); + } + } + + /** the number of ongoing target events */ + public int size() { + return onGoingTargetEvents.size(); + } + + /** + * cancel all ongoing targets for the given shard + * + * @param reason reason for cancellation + * @param shardId shardId for which to cancel targets + * @return true if a target was cancelled + */ + public boolean cancelForShard(ShardId shardId, String reason) { + boolean cancelled = false; + List matchedTargets = new ArrayList<>(); + synchronized (onGoingTargetEvents) { + for (Iterator it = onGoingTargetEvents.values().iterator(); it.hasNext();) { + T status = it.next(); + if (status.indexShard().shardId().equals(shardId)) { + matchedTargets.add(status); + it.remove(); + } + } + } + for (T removed : matchedTargets) { + logger.trace("canceled {} (reason [{}])", removed.description(), reason); + removed.cancel(reason); + cancelled = true; + } + return cancelled; + } + + /** + * a reference to {@link ReplicationTarget}, which implements {@link AutoCloseable}. closing the reference + * causes {@link ReplicationTarget#decRef()} to be called. This makes sure that the underlying resources + * will not be freed until {@link ReplicationRef#close()} is called. 
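The get/getSafe split shown here is the generic form of the old getRecovery/getRecoverySafe pair; a self-contained sketch of the null-to-exception conversion (a generic exception stands in for IndexShardClosedException):

import java.util.Map;

class GetSafeSketch {
    // get() answers null for unknown ids; getSafe() turns that into an exception
    // tied to the shard (IndexShardClosedException in the patch).
    static <T> T getSafe(Map<Long, T> events, long id) {
        T ref = events.get(id);
        if (ref == null) {
            throw new IllegalStateException("shard closed, replication id [" + id + "] not found");
        }
        return ref;
    }
}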
+ * + * @opensearch.internal + */ + public static class ReplicationRef extends AutoCloseableRefCounted { + + /** + * Important: {@link ReplicationTarget#tryIncRef()} should + * be *successfully* called on status before + */ + public ReplicationRef(T status) { + super(status); + status.setLastAccessTime(); + } + } + + private class ReplicationMonitor extends AbstractRunnable { + private final long id; + private final TimeValue checkInterval; + + private volatile long lastSeenAccessTime; + + private ReplicationMonitor(long id, long lastSeenAccessTime, TimeValue checkInterval) { + this.id = id; + this.checkInterval = checkInterval; + this.lastSeenAccessTime = lastSeenAccessTime; + } + + @Override + public void onFailure(Exception e) { + logger.error(() -> new ParameterizedMessage("unexpected error while monitoring [{}]", id), e); + } + + @Override + protected void doRun() throws Exception { + T status = onGoingTargetEvents.get(id); + if (status == null) { + logger.trace("[monitor] no status found for [{}], shutting down", id); + return; + } + long accessTime = status.lastAccessTime(); + if (accessTime == lastSeenAccessTime) { + String message = "no activity after [" + checkInterval + "]"; + fail( + id, + new OpenSearchTimeoutException(message), + true // to be safe, we don't know what go stuck + ); + return; + } + lastSeenAccessTime = accessTime; + logger.trace("[monitor] rescheduling check for [{}]. last access time is [{}]", id, lastSeenAccessTime); + threadPool.schedule(this, checkInterval, ThreadPool.Names.GENERIC); + } + } + +} diff --git a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationListener.java b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationListener.java new file mode 100644 index 0000000000000..0666f475d496a --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationListener.java @@ -0,0 +1,23 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.indices.replication.common; + +import org.opensearch.OpenSearchException; + +/** + * Interface for listeners that run when there's a change in {@link ReplicationState} + * + * @opensearch.internal + */ +public interface ReplicationListener { + + void onDone(ReplicationState state); + + void onFailure(ReplicationState state, OpenSearchException e, boolean sendShardFailure); +} diff --git a/server/src/main/java/org/opensearch/indices/recovery/RecoveryRequestTracker.java b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationRequestTracker.java similarity index 96% rename from server/src/main/java/org/opensearch/indices/recovery/RecoveryRequestTracker.java rename to server/src/main/java/org/opensearch/indices/replication/common/ReplicationRequestTracker.java index 71a7f2776f324..0b0d20fc9f17e 100644 --- a/server/src/main/java/org/opensearch/indices/recovery/RecoveryRequestTracker.java +++ b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationRequestTracker.java @@ -30,7 +30,7 @@ * GitHub history for details. 
*/ -package org.opensearch.indices.recovery; +package org.opensearch.indices.replication.common; import org.opensearch.action.ActionListener; import org.opensearch.common.Nullable; @@ -45,11 +45,11 @@ import static org.opensearch.index.seqno.SequenceNumbers.NO_OPS_PERFORMED; /** - * Tracks recovery requests + * Tracks replication requests * * @opensearch.internal */ -public class RecoveryRequestTracker { +public class ReplicationRequestTracker { private final Map> ongoingRequests = Collections.synchronizedMap(new HashMap<>()); private final LocalCheckpointTracker checkpointTracker = new LocalCheckpointTracker(NO_OPS_PERFORMED, NO_OPS_PERFORMED); diff --git a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationState.java b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationState.java new file mode 100644 index 0000000000000..7942fa8938dd0 --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationState.java @@ -0,0 +1,18 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.indices.replication.common; + +/** + * Represents a state object used to track copying of segments from an external source + * + * @opensearch.internal + */ +public interface ReplicationState { + +} diff --git a/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTarget.java b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTarget.java new file mode 100644 index 0000000000000..0192270907fd2 --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/replication/common/ReplicationTarget.java @@ -0,0 +1,175 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.indices.replication.common; + +import org.apache.logging.log4j.Logger; +import org.opensearch.ExceptionsHelper; +import org.opensearch.OpenSearchException; +import org.opensearch.action.ActionListener; +import org.opensearch.common.logging.Loggers; +import org.opensearch.common.util.CancellableThreads; +import org.opensearch.common.util.concurrent.AbstractRefCounted; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.index.shard.ShardId; + +import java.io.IOException; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicLong; + +/** + * Represents the target of a replication operation performed on a shard + * + * @opensearch.internal + */ +public abstract class ReplicationTarget extends AbstractRefCounted { + + private static final AtomicLong ID_GENERATOR = new AtomicLong(); + + // last time the target/status was accessed + private volatile long lastAccessTime = System.nanoTime(); + private final ReplicationRequestTracker requestTracker = new ReplicationRequestTracker(); + private final long id; + + protected final AtomicBoolean finished = new AtomicBoolean(); + private final ShardId shardId; + protected final IndexShard indexShard; + protected final ReplicationListener listener; + protected final Logger logger; + protected final CancellableThreads cancellableThreads; + protected final ReplicationLuceneIndex stateIndex; + + protected abstract String getPrefix(); + + protected abstract void onDone(); + + protected abstract void onCancel(String reason); + + public abstract ReplicationState state(); + + public abstract ReplicationTarget retryCopy(); + + public abstract String description(); + + public ReplicationListener getListener() { + return listener; + } + + public CancellableThreads cancellableThreads() { + return cancellableThreads; + } + + public abstract void notifyListener(Exception e, boolean sendShardFailure); + + public ReplicationTarget(String name, IndexShard indexShard, ReplicationLuceneIndex stateIndex, ReplicationListener listener) { + super(name); + this.logger = Loggers.getLogger(getClass(), indexShard.shardId()); + this.listener = listener; + this.id = ID_GENERATOR.incrementAndGet(); + this.stateIndex = stateIndex; + this.indexShard = indexShard; + this.shardId = indexShard.shardId(); + // make sure the store is not released until we are done. + this.cancellableThreads = new CancellableThreads(); + } + + public long getId() { + return id; + } + + public abstract boolean reset(CancellableThreads newTargetCancellableThreads) throws IOException; + + /** + * return the last time this ReplicationStatus was used (based on System.nanoTime() + */ + public long lastAccessTime() { + return lastAccessTime; + } + + /** + * sets the lasAccessTime flag to now + */ + public void setLastAccessTime() { + lastAccessTime = System.nanoTime(); + } + + public ActionListener markRequestReceivedAndCreateListener(long requestSeqNo, ActionListener listener) { + return requestTracker.markReceivedAndCreateListener(requestSeqNo, listener); + } + + public IndexShard indexShard() { + ensureRefCount(); + return indexShard; + } + + public ShardId shardId() { + return shardId; + } + + /** + * mark the current replication as done + */ + public void markAsDone() { + if (finished.compareAndSet(false, true)) { + try { + onDone(); + } finally { + // release the initial reference. 
replication files will be cleaned as soon as ref count goes to zero, potentially now + decRef(); + } + listener.onDone(state()); + } + } + + /** + * cancel the replication. calling this method will clean temporary files and release the store + * unless this object is in use (in which case it will be cleaned once all ongoing users call + * {@link #decRef()} + */ + public void cancel(String reason) { + if (finished.compareAndSet(false, true)) { + try { + logger.debug("replication cancelled (reason: [{}])", reason); + onCancel(reason); + } finally { + // release the initial reference. replication files will be cleaned as soon as ref count goes to zero, potentially now + decRef(); + } + } + } + + /** + * fail the replication and call listener + * + * @param e exception that encapsulates the failure + * @param sendShardFailure indicates whether to notify the master of the shard failure + */ + public void fail(OpenSearchException e, boolean sendShardFailure) { + if (finished.compareAndSet(false, true)) { + try { + notifyListener(e, sendShardFailure); + } finally { + try { + cancellableThreads.cancel("failed" + description() + "[" + ExceptionsHelper.stackTrace(e) + "]"); + } finally { + // release the initial reference. replication files will be cleaned as soon as ref count goes to zero, potentially now + decRef(); + } + } + } + } + + protected void ensureRefCount() { + if (refCount() <= 0) { + throw new OpenSearchException( + "ReplicationTarget is used but it's refcount is 0. Probably a mismatch between incRef/decRef calls" + ); + } + } + +} diff --git a/server/src/test/java/org/opensearch/index/replication/RecoveryDuringReplicationTests.java b/server/src/test/java/org/opensearch/index/replication/RecoveryDuringReplicationTests.java index add2ecd34e3af..509d1f52daa0d 100644 --- a/server/src/test/java/org/opensearch/index/replication/RecoveryDuringReplicationTests.java +++ b/server/src/test/java/org/opensearch/index/replication/RecoveryDuringReplicationTests.java @@ -69,9 +69,9 @@ import org.opensearch.index.shard.PrimaryReplicaSyncer; import org.opensearch.index.store.Store; import org.opensearch.index.translog.Translog; -import org.opensearch.indices.recovery.PeerRecoveryTargetService; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.indices.recovery.RecoveryTarget; +import org.opensearch.indices.replication.common.ReplicationListener; import java.io.IOException; import java.util.ArrayList; @@ -809,7 +809,7 @@ public BlockingTarget( CountDownLatch releaseRecovery, IndexShard shard, DiscoveryNode sourceNode, - PeerRecoveryTargetService.RecoveryListener listener, + ReplicationListener listener, Logger logger ) { super(shard, sourceNode, listener); diff --git a/server/src/test/java/org/opensearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java b/server/src/test/java/org/opensearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java index 9d83071c177f5..97cb1dc341b13 100644 --- a/server/src/test/java/org/opensearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java +++ b/server/src/test/java/org/opensearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java @@ -32,6 +32,7 @@ package org.opensearch.indices.cluster; +import org.junit.Before; import org.opensearch.action.ActionListener; import org.opensearch.cluster.ClusterState; import org.opensearch.cluster.metadata.IndexMetadata; @@ -56,10 +57,10 @@ import org.opensearch.indices.cluster.IndicesClusterStateService.AllocatedIndices; import 
org.opensearch.indices.cluster.IndicesClusterStateService.Shard; import org.opensearch.indices.recovery.PeerRecoveryTargetService; +import org.opensearch.indices.recovery.RecoveryListener; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.repositories.RepositoriesService; import org.opensearch.test.OpenSearchTestCase; -import org.junit.Before; import java.io.IOException; import java.util.HashMap; @@ -73,9 +74,9 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.unmodifiableMap; -import static org.opensearch.common.collect.MapBuilder.newMapBuilder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.opensearch.common.collect.MapBuilder.newMapBuilder; /** * Abstract base class for tests against {@link IndicesClusterStateService} @@ -253,7 +254,7 @@ public MockIndexService indexService(Index index) { public MockIndexShard createShard( final ShardRouting shardRouting, final PeerRecoveryTargetService recoveryTargetService, - final PeerRecoveryTargetService.RecoveryListener recoveryListener, + final RecoveryListener recoveryListener, final RepositoriesService repositoriesService, final Consumer onShardFailure, final Consumer globalCheckpointSyncer, diff --git a/server/src/test/java/org/opensearch/indices/recovery/RecoveryTests.java b/server/src/test/java/org/opensearch/indices/recovery/RecoveryTests.java index 5e09e0f2253df..5224a54a35e96 100644 --- a/server/src/test/java/org/opensearch/indices/recovery/RecoveryTests.java +++ b/server/src/test/java/org/opensearch/indices/recovery/RecoveryTests.java @@ -41,6 +41,7 @@ import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.store.AlreadyClosedException; import org.opensearch.ExceptionsHelper; +import org.opensearch.OpenSearchException; import org.opensearch.action.ActionListener; import org.opensearch.action.admin.indices.flush.FlushRequest; import org.opensearch.action.bulk.BulkShardRequest; @@ -68,6 +69,8 @@ import org.opensearch.index.store.Store; import org.opensearch.index.translog.SnapshotMatchers; import org.opensearch.index.translog.Translog; +import org.opensearch.indices.replication.common.ReplicationListener; +import org.opensearch.indices.replication.common.ReplicationState; import java.io.IOException; import java.util.HashMap; @@ -448,20 +451,17 @@ public long addDocument(Iterable doc) throws IOExcepti IndexShard replica = group.addReplica(); expectThrows( Exception.class, - () -> group.recoverReplica( - replica, - (shard, sourceNode) -> new RecoveryTarget(shard, sourceNode, new PeerRecoveryTargetService.RecoveryListener() { - @Override - public void onRecoveryDone(RecoveryState state) { - throw new AssertionError("recovery must fail"); - } + () -> group.recoverReplica(replica, (shard, sourceNode) -> new RecoveryTarget(shard, sourceNode, new ReplicationListener() { + @Override + public void onDone(ReplicationState state) { + throw new AssertionError("recovery must fail"); + } - @Override - public void onRecoveryFailure(RecoveryState state, RecoveryFailedException e, boolean sendShardFailure) { - assertThat(ExceptionsHelper.unwrap(e, IOException.class).getMessage(), equalTo("simulated")); - } - }) - ) + @Override + public void onFailure(ReplicationState state, OpenSearchException e, boolean sendShardFailure) { + assertThat(ExceptionsHelper.unwrap(e, IOException.class).getMessage(), equalTo("simulated")); + } + })) ); expectThrows(AlreadyClosedException.class, () -> replica.refresh("test")); 
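[editor's note: illustrative sketch, not part of this patch. The anonymous
PeerRecoveryTargetService.RecoveryListener used by this test before the refactor is
replaced by the shared ReplicationListener interface, whose minimal shape is just two
callbacks:]

    ReplicationListener listener = new ReplicationListener() {
        @Override
        public void onDone(ReplicationState state) {
            // replication event (recovery or segment copy) finished
        }

        @Override
        public void onFailure(ReplicationState state, OpenSearchException e, boolean sendShardFailure) {
            // replication event failed; sendShardFailure says whether to fail the shard
        }
    };

[end editor's note]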
group.removeReplica(replica); diff --git a/server/src/test/java/org/opensearch/indices/recovery/RecoveryRequestTrackerTests.java b/server/src/test/java/org/opensearch/indices/recovery/ReplicationRequestTrackerTests.java similarity index 95% rename from server/src/test/java/org/opensearch/indices/recovery/RecoveryRequestTrackerTests.java rename to server/src/test/java/org/opensearch/indices/recovery/ReplicationRequestTrackerTests.java index 931d36f587db8..afad385deabe4 100644 --- a/server/src/test/java/org/opensearch/indices/recovery/RecoveryRequestTrackerTests.java +++ b/server/src/test/java/org/opensearch/indices/recovery/ReplicationRequestTrackerTests.java @@ -36,6 +36,7 @@ import org.opensearch.action.ActionListener; import org.opensearch.action.support.PlainActionFuture; import org.opensearch.common.util.concurrent.ConcurrentCollections; +import org.opensearch.indices.replication.common.ReplicationRequestTracker; import org.opensearch.test.OpenSearchTestCase; import org.opensearch.threadpool.TestThreadPool; @@ -44,7 +45,7 @@ import java.util.Set; import java.util.concurrent.ConcurrentMap; -public class RecoveryRequestTrackerTests extends OpenSearchTestCase { +public class ReplicationRequestTrackerTests extends OpenSearchTestCase { private TestThreadPool threadPool; @@ -64,7 +65,7 @@ public void testIdempotencyIsEnforced() { Set seqNosReturned = ConcurrentCollections.newConcurrentSet(); ConcurrentMap>> seqToResult = ConcurrentCollections.newConcurrentMap(); - RecoveryRequestTracker requestTracker = new RecoveryRequestTracker(); + ReplicationRequestTracker requestTracker = new ReplicationRequestTracker(); int numberOfRequests = randomIntBetween(100, 200); for (int j = 0; j < numberOfRequests; ++j) { diff --git a/server/src/test/java/org/opensearch/recovery/RecoveriesCollectionTests.java b/server/src/test/java/org/opensearch/recovery/ReplicationCollectionTests.java similarity index 65% rename from server/src/test/java/org/opensearch/recovery/RecoveriesCollectionTests.java rename to server/src/test/java/org/opensearch/recovery/ReplicationCollectionTests.java index 6a08f5115d1e2..7587f48503625 100644 --- a/server/src/test/java/org/opensearch/recovery/RecoveriesCollectionTests.java +++ b/server/src/test/java/org/opensearch/recovery/ReplicationCollectionTests.java @@ -38,10 +38,10 @@ import org.opensearch.index.shard.IndexShard; import org.opensearch.index.shard.ShardId; import org.opensearch.index.store.Store; -import org.opensearch.indices.recovery.RecoveriesCollection; -import org.opensearch.indices.recovery.RecoveryFailedException; +import org.opensearch.indices.replication.common.ReplicationCollection; +import org.opensearch.indices.replication.common.ReplicationListener; +import org.opensearch.indices.replication.common.ReplicationState; import org.opensearch.indices.recovery.RecoveryState; -import org.opensearch.indices.recovery.PeerRecoveryTargetService; import org.opensearch.indices.recovery.RecoveryTarget; import java.util.concurrent.CountDownLatch; @@ -51,64 +51,58 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.lessThan; -public class RecoveriesCollectionTests extends OpenSearchIndexLevelReplicationTestCase { - static final PeerRecoveryTargetService.RecoveryListener listener = new PeerRecoveryTargetService.RecoveryListener() { +public class ReplicationCollectionTests extends OpenSearchIndexLevelReplicationTestCase { + static final ReplicationListener listener = new ReplicationListener() { @Override - public void onRecoveryDone(RecoveryState 
state) { + public void onDone(ReplicationState state) { } @Override - public void onRecoveryFailure(RecoveryState state, RecoveryFailedException e, boolean sendShardFailure) { + public void onFailure(ReplicationState state, OpenSearchException e, boolean sendShardFailure) { } }; public void testLastAccessTimeUpdate() throws Exception { try (ReplicationGroup shards = createGroup(0)) { - final RecoveriesCollection collection = new RecoveriesCollection(logger, threadPool); + final ReplicationCollection collection = new ReplicationCollection<>(logger, threadPool); final long recoveryId = startRecovery(collection, shards.getPrimaryNode(), shards.addReplica()); - try (RecoveriesCollection.RecoveryRef status = collection.getRecovery(recoveryId)) { + try (ReplicationCollection.ReplicationRef status = collection.get(recoveryId)) { final long lastSeenTime = status.get().lastAccessTime(); assertBusy(() -> { - try (RecoveriesCollection.RecoveryRef currentStatus = collection.getRecovery(recoveryId)) { + try (ReplicationCollection.ReplicationRef currentStatus = collection.get(recoveryId)) { assertThat("access time failed to update", lastSeenTime, lessThan(currentStatus.get().lastAccessTime())); } }); } finally { - collection.cancelRecovery(recoveryId, "life"); + collection.cancel(recoveryId, "life"); } } } public void testRecoveryTimeout() throws Exception { try (ReplicationGroup shards = createGroup(0)) { - final RecoveriesCollection collection = new RecoveriesCollection(logger, threadPool); + final ReplicationCollection collection = new ReplicationCollection<>(logger, threadPool); final AtomicBoolean failed = new AtomicBoolean(); final CountDownLatch latch = new CountDownLatch(1); - final long recoveryId = startRecovery( - collection, - shards.getPrimaryNode(), - shards.addReplica(), - new PeerRecoveryTargetService.RecoveryListener() { - @Override - public void onRecoveryDone(RecoveryState state) { - latch.countDown(); - } - - @Override - public void onRecoveryFailure(RecoveryState state, RecoveryFailedException e, boolean sendShardFailure) { - failed.set(true); - latch.countDown(); - } - }, - TimeValue.timeValueMillis(100) - ); + final long recoveryId = startRecovery(collection, shards.getPrimaryNode(), shards.addReplica(), new ReplicationListener() { + @Override + public void onDone(ReplicationState state) { + latch.countDown(); + } + + @Override + public void onFailure(ReplicationState state, OpenSearchException e, boolean sendShardFailure) { + failed.set(true); + latch.countDown(); + } + }, TimeValue.timeValueMillis(100)); try { latch.await(30, TimeUnit.SECONDS); assertTrue("recovery failed to timeout", failed.get()); } finally { - collection.cancelRecovery(recoveryId, "meh"); + collection.cancel(recoveryId, "meh"); } } @@ -116,16 +110,16 @@ public void onRecoveryFailure(RecoveryState state, RecoveryFailedException e, bo public void testRecoveryCancellation() throws Exception { try (ReplicationGroup shards = createGroup(0)) { - final RecoveriesCollection collection = new RecoveriesCollection(logger, threadPool); + final ReplicationCollection collection = new ReplicationCollection<>(logger, threadPool); final long recoveryId = startRecovery(collection, shards.getPrimaryNode(), shards.addReplica()); final long recoveryId2 = startRecovery(collection, shards.getPrimaryNode(), shards.addReplica()); - try (RecoveriesCollection.RecoveryRef recoveryRef = collection.getRecovery(recoveryId)) { - ShardId shardId = recoveryRef.get().shardId(); - assertTrue("failed to cancel recoveries", 
collection.cancelRecoveriesForShard(shardId, "test")); + try (ReplicationCollection.ReplicationRef recoveryRef = collection.get(recoveryId)) { + ShardId shardId = recoveryRef.get().indexShard().shardId(); + assertTrue("failed to cancel recoveries", collection.cancelForShard(shardId, "test")); assertThat("all recoveries should be cancelled", collection.size(), equalTo(0)); } finally { - collection.cancelRecovery(recoveryId, "meh"); - collection.cancelRecovery(recoveryId2, "meh"); + collection.cancel(recoveryId, "meh"); + collection.cancel(recoveryId2, "meh"); } } } @@ -135,17 +129,17 @@ public void testResetRecovery() throws Exception { shards.startAll(); int numDocs = randomIntBetween(1, 15); shards.indexDocs(numDocs); - final RecoveriesCollection collection = new RecoveriesCollection(logger, threadPool); + final ReplicationCollection collection = new ReplicationCollection<>(logger, threadPool); IndexShard shard = shards.addReplica(); final long recoveryId = startRecovery(collection, shards.getPrimaryNode(), shard); - RecoveryTarget recoveryTarget = collection.getRecoveryTarget(recoveryId); + RecoveryTarget recoveryTarget = collection.getTarget(recoveryId); final int currentAsTarget = shard.recoveryStats().currentAsTarget(); final int referencesToStore = recoveryTarget.store().refCount(); IndexShard indexShard = recoveryTarget.indexShard(); Store store = recoveryTarget.store(); String tempFileName = recoveryTarget.getTempNameForFile("foobar"); - RecoveryTarget resetRecovery = collection.resetRecovery(recoveryId, TimeValue.timeValueMinutes(60)); - final long resetRecoveryId = resetRecovery.recoveryId(); + RecoveryTarget resetRecovery = collection.reset(recoveryId, TimeValue.timeValueMinutes(60)); + final long resetRecoveryId = resetRecovery.getId(); assertNotSame(recoveryTarget, resetRecovery); assertNotSame(recoveryTarget.cancellableThreads(), resetRecovery.cancellableThreads()); assertSame(indexShard, resetRecovery.indexShard()); @@ -158,31 +152,31 @@ public void testResetRecovery() throws Exception { String resetTempFileName = resetRecovery.getTempNameForFile("foobar"); assertNotEquals(tempFileName, resetTempFileName); assertEquals(currentAsTarget, shard.recoveryStats().currentAsTarget()); - try (RecoveriesCollection.RecoveryRef newRecoveryRef = collection.getRecovery(resetRecoveryId)) { + try (ReplicationCollection.ReplicationRef newRecoveryRef = collection.get(resetRecoveryId)) { shards.recoverReplica(shard, (s, n) -> { assertSame(s, newRecoveryRef.get().indexShard()); return newRecoveryRef.get(); }, false); } shards.assertAllEqual(numDocs); - assertNull("recovery is done", collection.getRecovery(recoveryId)); + assertNull("recovery is done", collection.get(recoveryId)); } } - long startRecovery(RecoveriesCollection collection, DiscoveryNode sourceNode, IndexShard shard) { + long startRecovery(ReplicationCollection collection, DiscoveryNode sourceNode, IndexShard shard) { return startRecovery(collection, sourceNode, shard, listener, TimeValue.timeValueMinutes(60)); } long startRecovery( - RecoveriesCollection collection, + ReplicationCollection collection, DiscoveryNode sourceNode, IndexShard indexShard, - PeerRecoveryTargetService.RecoveryListener listener, + ReplicationListener listener, TimeValue timeValue ) { final DiscoveryNode rNode = getDiscoveryNode(indexShard.routingEntry().currentNodeId()); indexShard.markAsRecovering("remote", new RecoveryState(indexShard.routingEntry(), sourceNode, rNode)); indexShard.prepareForIndexRecovery(); - return collection.startRecovery(indexShard, 
sourceNode, listener, timeValue); + return collection.start(new RecoveryTarget(indexShard, sourceNode, listener), timeValue); } } diff --git a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java index 509edfd1b9103..298fdcaea6465 100644 --- a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java @@ -34,6 +34,7 @@ import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexCommit; import org.apache.lucene.store.Directory; +import org.opensearch.OpenSearchException; import org.opensearch.Version; import org.opensearch.action.admin.indices.flush.FlushRequest; import org.opensearch.action.index.IndexRequest; @@ -93,6 +94,8 @@ import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.indices.recovery.RecoveryTarget; import org.opensearch.indices.recovery.StartRecoveryRequest; +import org.opensearch.indices.replication.common.ReplicationListener; +import org.opensearch.indices.replication.common.ReplicationState; import org.opensearch.repositories.IndexId; import org.opensearch.repositories.Repository; import org.opensearch.repositories.blobstore.OpenSearchBlobStoreRepositoryIntegTestCase; @@ -138,14 +141,14 @@ public abstract class IndexShardTestCase extends OpenSearchTestCase { } }; - protected static final PeerRecoveryTargetService.RecoveryListener recoveryListener = new PeerRecoveryTargetService.RecoveryListener() { + protected static final ReplicationListener recoveryListener = new ReplicationListener() { @Override - public void onRecoveryDone(RecoveryState state) { + public void onDone(ReplicationState state) { } @Override - public void onRecoveryFailure(RecoveryState state, RecoveryFailedException e, boolean sendShardFailure) { + public void onFailure(ReplicationState state, OpenSearchException e, boolean sendShardFailure) { throw new AssertionError(e); } }; From eb847aeeef785419bb6e4c26fc4253389dae54e6 Mon Sep 17 00:00:00 2001 From: Suraj Singh Date: Mon, 23 May 2022 14:01:22 -0700 Subject: [PATCH 11/16] [Type removal] Remove type from BulkRequestParser (#3423) * [Type removal] Remove type handling in bulk request parser Signed-off-by: Suraj Singh * [Type removal] Remove testTypesStillParsedForBulkMonitoring as it is no longer present in codebase Signed-off-by: Suraj Singh --- .../opensearch/action/bulk/BulkRequest.java | 4 +- .../action/bulk/BulkRequestParser.java | 34 ++------ .../action/bulk/BulkRequestParserTests.java | 83 +++++-------------- 3 files changed, 30 insertions(+), 91 deletions(-) diff --git a/server/src/main/java/org/opensearch/action/bulk/BulkRequest.java b/server/src/main/java/org/opensearch/action/bulk/BulkRequest.java index 25b335eae0bf1..3af4227bf46ca 100644 --- a/server/src/main/java/org/opensearch/action/bulk/BulkRequest.java +++ b/server/src/main/java/org/opensearch/action/bulk/BulkRequest.java @@ -287,7 +287,7 @@ public BulkRequest add( String routing = valueOrDefault(defaultRouting, globalRouting); String pipeline = valueOrDefault(defaultPipeline, globalPipeline); Boolean requireAlias = valueOrDefault(defaultRequireAlias, globalRequireAlias); - new BulkRequestParser(true).parse( + new BulkRequestParser().parse( data, defaultIndex, routing, @@ -296,7 +296,7 @@ public BulkRequest add( requireAlias, allowExplicitIndex, xContentType, - (indexRequest, type) -> internalAdd(indexRequest), + this::internalAdd, 
this::internalAdd, this::add ); diff --git a/server/src/main/java/org/opensearch/action/bulk/BulkRequestParser.java b/server/src/main/java/org/opensearch/action/bulk/BulkRequestParser.java index 675905cc60e75..212450515b57e 100644 --- a/server/src/main/java/org/opensearch/action/bulk/BulkRequestParser.java +++ b/server/src/main/java/org/opensearch/action/bulk/BulkRequestParser.java @@ -53,7 +53,6 @@ import java.io.IOException; import java.util.HashMap; import java.util.Map; -import java.util.function.BiConsumer; import java.util.function.Consumer; import java.util.function.Function; @@ -67,7 +66,6 @@ public final class BulkRequestParser { private static final ParseField INDEX = new ParseField("_index"); - private static final ParseField TYPE = new ParseField("_type"); private static final ParseField ID = new ParseField("_id"); private static final ParseField ROUTING = new ParseField("routing"); private static final ParseField OP_TYPE = new ParseField("op_type"); @@ -80,17 +78,6 @@ public final class BulkRequestParser { private static final ParseField IF_PRIMARY_TERM = new ParseField("if_primary_term"); private static final ParseField REQUIRE_ALIAS = new ParseField(DocWriteRequest.REQUIRE_ALIAS); - // TODO: Remove this parameter once the BulkMonitoring endpoint has been removed - private final boolean errorOnType; - - /** - * Create a new parser. - * @param errorOnType whether to allow _type information in the index line; used by BulkMonitoring - */ - public BulkRequestParser(boolean errorOnType) { - this.errorOnType = errorOnType; - } - private static int findNextMarker(byte marker, int from, BytesReference data) { final int res = data.indexOf(marker, from); if (res != -1) { @@ -136,7 +123,7 @@ public void parse( @Nullable Boolean defaultRequireAlias, boolean allowExplicitIndex, XContentType xContentType, - BiConsumer indexRequestConsumer, + Consumer indexRequestConsumer, Consumer updateRequestConsumer, Consumer deleteRequestConsumer ) throws IOException { @@ -192,7 +179,6 @@ public void parse( String action = parser.currentName(); String index = defaultIndex; - String type = null; String id = null; String routing = defaultRouting; FetchSourceContext fetchSourceContext = defaultFetchSourceContext; @@ -205,7 +191,7 @@ public void parse( String pipeline = defaultPipeline; boolean requireAlias = defaultRequireAlias != null && defaultRequireAlias; - // at this stage, next token can either be END_OBJECT (and use default index and type, with auto generated id) + // at this stage, next token can either be END_OBJECT (and use default index with auto generated id) // or START_OBJECT which will have another set of parameters token = parser.nextToken(); @@ -220,13 +206,6 @@ public void parse( throw new IllegalArgumentException("explicit index in bulk is not allowed"); } index = stringDeduplicator.computeIfAbsent(parser.text(), Function.identity()); - } else if (TYPE.match(currentFieldName, parser.getDeprecationHandler())) { - if (errorOnType) { - throw new IllegalArgumentException( - "Action/metadata line [" + line + "] contains an unknown parameter [" + currentFieldName + "]" - ); - } - type = stringDeduplicator.computeIfAbsent(parser.text(), Function.identity()); } else if (ID.match(currentFieldName, parser.getDeprecationHandler())) { id = parser.text(); } else if (ROUTING.match(currentFieldName, parser.getDeprecationHandler())) { @@ -322,8 +301,7 @@ public void parse( .setIfSeqNo(ifSeqNo) .setIfPrimaryTerm(ifPrimaryTerm) .source(sliceTrimmingCarriageReturn(data, from, nextMarker, xContentType), 
xContentType) - .setRequireAlias(requireAlias), - type + .setRequireAlias(requireAlias) ); } else { indexRequestConsumer.accept( @@ -336,8 +314,7 @@ public void parse( .setIfSeqNo(ifSeqNo) .setIfPrimaryTerm(ifPrimaryTerm) .source(sliceTrimmingCarriageReturn(data, from, nextMarker, xContentType), xContentType) - .setRequireAlias(requireAlias), - type + .setRequireAlias(requireAlias) ); } } else if ("create".equals(action)) { @@ -351,8 +328,7 @@ public void parse( .setIfSeqNo(ifSeqNo) .setIfPrimaryTerm(ifPrimaryTerm) .source(sliceTrimmingCarriageReturn(data, from, nextMarker, xContentType), xContentType) - .setRequireAlias(requireAlias), - type + .setRequireAlias(requireAlias) ); } else if ("update".equals(action)) { if (version != Versions.MATCH_ANY || versionType != VersionType.INTERNAL) { diff --git a/server/src/test/java/org/opensearch/action/bulk/BulkRequestParserTests.java b/server/src/test/java/org/opensearch/action/bulk/BulkRequestParserTests.java index 239bb19c5f6ad..d3da77112408b 100644 --- a/server/src/test/java/org/opensearch/action/bulk/BulkRequestParserTests.java +++ b/server/src/test/java/org/opensearch/action/bulk/BulkRequestParserTests.java @@ -47,9 +47,9 @@ public class BulkRequestParserTests extends OpenSearchTestCase { public void testIndexRequest() throws IOException { BytesArray request = new BytesArray("{ \"index\":{ \"_id\": \"bar\" } }\n{}\n"); - BulkRequestParser parser = new BulkRequestParser(randomBoolean()); + BulkRequestParser parser = new BulkRequestParser(); final AtomicBoolean parsed = new AtomicBoolean(); - parser.parse(request, "foo", null, null, null, null, false, XContentType.JSON, (indexRequest, type) -> { + parser.parse(request, "foo", null, null, null, null, false, XContentType.JSON, indexRequest -> { assertFalse(parsed.get()); assertEquals("foo", indexRequest.index()); assertEquals("bar", indexRequest.id()); @@ -67,7 +67,7 @@ public void testIndexRequest() throws IOException { true, false, XContentType.JSON, - (indexRequest, type) -> { assertTrue(indexRequest.isRequireAlias()); }, + indexRequest -> { assertTrue(indexRequest.isRequireAlias()); }, req -> fail(), req -> fail() ); @@ -82,7 +82,7 @@ public void testIndexRequest() throws IOException { null, false, XContentType.JSON, - (indexRequest, type) -> { assertTrue(indexRequest.isRequireAlias()); }, + indexRequest -> { assertTrue(indexRequest.isRequireAlias()); }, req -> fail(), req -> fail() ); @@ -97,7 +97,7 @@ public void testIndexRequest() throws IOException { true, false, XContentType.JSON, - (indexRequest, type) -> { assertFalse(indexRequest.isRequireAlias()); }, + indexRequest -> { assertFalse(indexRequest.isRequireAlias()); }, req -> fail(), req -> fail() ); @@ -105,34 +105,22 @@ public void testIndexRequest() throws IOException { public void testDeleteRequest() throws IOException { BytesArray request = new BytesArray("{ \"delete\":{ \"_id\": \"bar\" } }\n"); - BulkRequestParser parser = new BulkRequestParser(randomBoolean()); + BulkRequestParser parser = new BulkRequestParser(); final AtomicBoolean parsed = new AtomicBoolean(); - parser.parse( - request, - "foo", - null, - null, - null, - null, - false, - XContentType.JSON, - (req, type) -> fail(), - req -> fail(), - deleteRequest -> { - assertFalse(parsed.get()); - assertEquals("foo", deleteRequest.index()); - assertEquals("bar", deleteRequest.id()); - parsed.set(true); - } - ); + parser.parse(request, "foo", null, null, null, null, false, XContentType.JSON, req -> fail(), req -> fail(), deleteRequest -> { + assertFalse(parsed.get()); + 
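[editor's note: illustrative sketch, not part of this patch. With _type removed, an
action line in the bulk NDJSON carries only typeless metadata; the request under test
corresponds to the body below, and parse() dispatches each action line to exactly one
of the three consumers (index, update, delete) it receives:]

    { "delete": { "_id": "bar" } }

[end editor's note]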
assertEquals("foo", deleteRequest.index()); + assertEquals("bar", deleteRequest.id()); + parsed.set(true); + }); assertTrue(parsed.get()); } public void testUpdateRequest() throws IOException { BytesArray request = new BytesArray("{ \"update\":{ \"_id\": \"bar\" } }\n{}\n"); - BulkRequestParser parser = new BulkRequestParser(randomBoolean()); + BulkRequestParser parser = new BulkRequestParser(); final AtomicBoolean parsed = new AtomicBoolean(); - parser.parse(request, "foo", null, null, null, null, false, XContentType.JSON, (req, type) -> fail(), updateRequest -> { + parser.parse(request, "foo", null, null, null, null, false, XContentType.JSON, req -> fail(), updateRequest -> { assertFalse(parsed.get()); assertEquals("foo", updateRequest.index()); assertEquals("bar", updateRequest.id()); @@ -150,7 +138,7 @@ public void testUpdateRequest() throws IOException { true, false, XContentType.JSON, - (req, type) -> fail(), + req -> fail(), updateRequest -> { assertTrue(updateRequest.isRequireAlias()); }, req -> fail() ); @@ -165,7 +153,7 @@ public void testUpdateRequest() throws IOException { null, false, XContentType.JSON, - (req, type) -> fail(), + req -> fail(), updateRequest -> { assertTrue(updateRequest.isRequireAlias()); }, req -> fail() ); @@ -180,7 +168,7 @@ public void testUpdateRequest() throws IOException { true, false, XContentType.JSON, - (req, type) -> fail(), + req -> fail(), updateRequest -> { assertFalse(updateRequest.isRequireAlias()); }, req -> fail() ); @@ -188,7 +176,7 @@ public void testUpdateRequest() throws IOException { public void testBarfOnLackOfTrailingNewline() { BytesArray request = new BytesArray("{ \"index\":{ \"_id\": \"bar\" } }\n{}"); - BulkRequestParser parser = new BulkRequestParser(randomBoolean()); + BulkRequestParser parser = new BulkRequestParser(); IllegalArgumentException e = expectThrows( IllegalArgumentException.class, () -> parser.parse( @@ -200,7 +188,7 @@ public void testBarfOnLackOfTrailingNewline() { null, false, XContentType.JSON, - (indexRequest, type) -> fail(), + indexRequest -> fail(), req -> fail(), req -> fail() ) @@ -210,46 +198,21 @@ public void testBarfOnLackOfTrailingNewline() { public void testFailOnExplicitIndex() { BytesArray request = new BytesArray("{ \"index\":{ \"_index\": \"foo\", \"_id\": \"bar\" } }\n{}\n"); - BulkRequestParser parser = new BulkRequestParser(randomBoolean()); + BulkRequestParser parser = new BulkRequestParser(); IllegalArgumentException ex = expectThrows( IllegalArgumentException.class, - () -> parser.parse( - request, - null, - null, - null, - null, - null, - false, - XContentType.JSON, - (req, type) -> fail(), - req -> fail(), - req -> fail() - ) + () -> parser.parse(request, null, null, null, null, null, false, XContentType.JSON, req -> fail(), req -> fail(), req -> fail()) ); assertEquals("explicit index in bulk is not allowed", ex.getMessage()); } - public void testTypesStillParsedForBulkMonitoring() throws IOException { - BytesArray request = new BytesArray("{ \"index\":{ \"_type\": \"quux\", \"_id\": \"bar\" } }\n{}\n"); - BulkRequestParser parser = new BulkRequestParser(false); - final AtomicBoolean parsed = new AtomicBoolean(); - parser.parse(request, "foo", null, null, null, null, false, XContentType.JSON, (indexRequest, type) -> { - assertFalse(parsed.get()); - assertEquals("foo", indexRequest.index()); - assertEquals("bar", indexRequest.id()); - parsed.set(true); - }, req -> fail(), req -> fail()); - assertTrue(parsed.get()); - } - public void testParseDeduplicatesParameterStrings() throws 
IOException { BytesArray request = new BytesArray( "{ \"index\":{ \"_index\": \"bar\", \"pipeline\": \"foo\", \"routing\": \"blub\"} }\n{}\n" + "{ \"index\":{ \"_index\": \"bar\", \"pipeline\": \"foo\", \"routing\": \"blub\" } }\n{}\n" ); - BulkRequestParser parser = new BulkRequestParser(randomBoolean()); + BulkRequestParser parser = new BulkRequestParser(); final List indexRequests = new ArrayList<>(); parser.parse( request, @@ -260,7 +223,7 @@ public void testParseDeduplicatesParameterStrings() throws IOException { null, true, XContentType.JSON, - (indexRequest, type) -> indexRequests.add(indexRequest), + indexRequest -> indexRequests.add(indexRequest), req -> fail(), req -> fail() ); From fd5a38de12cbb0fe5fca40aa833cd77ad3efdb8e Mon Sep 17 00:00:00 2001 From: Rishikesh Pasham <62345295+Rishikesh1159@users.noreply.github.com> Date: Tue, 24 May 2022 14:08:14 +0000 Subject: [PATCH 12/16] Adding CheckpointRefreshListener to trigger when Segment replication is turned on and Primary shard refreshes (#3108) * Intial PR adding classes and tests related to checkpoint publishing Signed-off-by: Rishikesh1159 * Putting a Draft PR with all changes in classes. Testing is still not included in this commit. Signed-off-by: Rishikesh1159 * Wiring up index shard to new engine, spotless apply and removing unnecessary tests and logs Signed-off-by: Rishikesh1159 * Adding Unit test for checkpointRefreshListener Signed-off-by: Rishikesh1159 * Applying spotless check Signed-off-by: Rishikesh1159 * Fixing import statements * Signed-off-by: Rishikesh1159 * removing unused constructor in index shard Signed-off-by: Rishikesh1159 * Addressing comments from last commit Signed-off-by: Rishikesh1159 * Adding package-info.java files for two new packages Signed-off-by: Rishikesh1159 * Adding test for null checkpoint publisher and addreesing PR comments Signed-off-by: Rishikesh1159 * Add docs for indexshardtests and remove shard.refresh Signed-off-by: Rishikesh1159 --- .../opensearch/index/shard/IndexShardIT.java | 4 +- .../org/opensearch/index/IndexService.java | 7 +- .../shard/CheckpointRefreshListener.java | 47 +++++ .../opensearch/index/shard/IndexShard.java | 36 +++- .../org/opensearch/indices/IndicesModule.java | 5 + .../opensearch/indices/IndicesService.java | 4 +- .../cluster/IndicesClusterStateService.java | 11 +- .../checkpoint/PublishCheckpointAction.java | 173 ++++++++++++++++++ .../checkpoint/PublishCheckpointRequest.java | 53 ++++++ .../checkpoint/ReplicationCheckpoint.java | 136 ++++++++++++++ ...SegmentReplicationCheckpointPublisher.java | 49 +++++ .../replication/checkpoint/package-info.java | 10 + .../index/shard/IndexShardTests.java | 69 +++++++ ...dicesLifecycleListenerSingleNodeTests.java | 8 +- ...actIndicesClusterStateServiceTestCase.java | 2 + ...ClusterStateServiceRandomUpdatesTests.java | 2 + .../PublishCheckpointActionTests.java | 157 ++++++++++++++++ .../snapshots/SnapshotResiliencyTests.java | 4 +- .../index/shard/IndexShardTestCase.java | 47 ++++- 19 files changed, 814 insertions(+), 10 deletions(-) create mode 100644 server/src/main/java/org/opensearch/index/shard/CheckpointRefreshListener.java create mode 100644 server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java create mode 100644 server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointRequest.java create mode 100644 server/src/main/java/org/opensearch/indices/replication/checkpoint/ReplicationCheckpoint.java create mode 100644 
server/src/main/java/org/opensearch/indices/replication/checkpoint/SegmentReplicationCheckpointPublisher.java create mode 100644 server/src/main/java/org/opensearch/indices/replication/checkpoint/package-info.java create mode 100644 server/src/test/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointActionTests.java diff --git a/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java b/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java index 5f014e89e330e..888881d43eb11 100644 --- a/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java +++ b/server/src/internalClusterTest/java/org/opensearch/index/shard/IndexShardIT.java @@ -84,6 +84,7 @@ import org.opensearch.indices.IndicesService; import org.opensearch.indices.breaker.CircuitBreakerService; import org.opensearch.indices.recovery.RecoveryState; +import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.plugins.Plugin; import org.opensearch.search.builder.SearchSourceBuilder; import org.opensearch.test.DummyShardLock; @@ -673,7 +674,8 @@ public static final IndexShard newIndexShard( Arrays.asList(listeners), () -> {}, RetentionLeaseSyncer.EMPTY, - cbs + cbs, + SegmentReplicationCheckpointPublisher.EMPTY ); } diff --git a/server/src/main/java/org/opensearch/index/IndexService.java b/server/src/main/java/org/opensearch/index/IndexService.java index 0739e5afdffcd..0a6d1501f2bea 100644 --- a/server/src/main/java/org/opensearch/index/IndexService.java +++ b/server/src/main/java/org/opensearch/index/IndexService.java @@ -94,6 +94,7 @@ import org.opensearch.indices.fielddata.cache.IndicesFieldDataCache; import org.opensearch.indices.mapper.MapperRegistry; import org.opensearch.indices.recovery.RecoveryState; +import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.plugins.IndexStorePlugin; import org.opensearch.script.ScriptService; import org.opensearch.search.aggregations.support.ValuesSourceRegistry; @@ -428,7 +429,8 @@ private long getAvgShardSizeInBytes() throws IOException { public synchronized IndexShard createShard( final ShardRouting routing, final Consumer globalCheckpointSyncer, - final RetentionLeaseSyncer retentionLeaseSyncer + final RetentionLeaseSyncer retentionLeaseSyncer, + final SegmentReplicationCheckpointPublisher checkpointPublisher ) throws IOException { Objects.requireNonNull(retentionLeaseSyncer); /* @@ -530,7 +532,8 @@ public synchronized IndexShard createShard( indexingOperationListeners, () -> globalCheckpointSyncer.accept(shardId), retentionLeaseSyncer, - circuitBreakerService + circuitBreakerService, + this.indexSettings.isSegRepEnabled() && routing.primary() ? checkpointPublisher : null ); eventListener.indexShardStateChanged(indexShard, null, indexShard.state(), "shard created"); eventListener.afterIndexShardCreated(indexShard); diff --git a/server/src/main/java/org/opensearch/index/shard/CheckpointRefreshListener.java b/server/src/main/java/org/opensearch/index/shard/CheckpointRefreshListener.java new file mode 100644 index 0000000000000..ac6754bf6a74a --- /dev/null +++ b/server/src/main/java/org/opensearch/index/shard/CheckpointRefreshListener.java @@ -0,0 +1,47 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +package org.opensearch.index.shard; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.lucene.search.ReferenceManager; +import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; + +import java.io.IOException; + +/** + * A {@link ReferenceManager.RefreshListener} that publishes a checkpoint to be consumed by replicas. + * This class is only used with Segment Replication enabled. + * + * @opensearch.internal + */ +public class CheckpointRefreshListener implements ReferenceManager.RefreshListener { + + protected static Logger logger = LogManager.getLogger(CheckpointRefreshListener.class); + + private final IndexShard shard; + private final SegmentReplicationCheckpointPublisher publisher; + + public CheckpointRefreshListener(IndexShard shard, SegmentReplicationCheckpointPublisher publisher) { + this.shard = shard; + this.publisher = publisher; + } + + @Override + public void beforeRefresh() throws IOException { + // Do nothing + } + + @Override + public void afterRefresh(boolean didRefresh) throws IOException { + if (didRefresh) { + publisher.publish(shard); + } + } +} diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index 8002dfe688def..60a3305370c2a 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -160,6 +160,9 @@ import org.opensearch.indices.recovery.RecoveryListener; import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.indices.recovery.RecoveryTarget; +import org.opensearch.indices.replication.checkpoint.PublishCheckpointRequest; +import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; +import org.opensearch.indices.replication.checkpoint.ReplicationCheckpoint; import org.opensearch.repositories.RepositoriesService; import org.opensearch.repositories.Repository; import org.opensearch.rest.RestStatus; @@ -299,6 +302,7 @@ Runnable getGlobalCheckpointSyncer() { private final AtomicReference pendingRefreshLocation = new AtomicReference<>(); private final RefreshPendingLocationListener refreshPendingLocationListener; private volatile boolean useRetentionLeasesInPeerRecovery; + private final ReferenceManager.RefreshListener checkpointRefreshListener; public IndexShard( final ShardRouting shardRouting, @@ -320,7 +324,8 @@ public IndexShard( final List listeners, final Runnable globalCheckpointSyncer, final RetentionLeaseSyncer retentionLeaseSyncer, - final CircuitBreakerService circuitBreakerService + final CircuitBreakerService circuitBreakerService, + @Nullable final SegmentReplicationCheckpointPublisher checkpointPublisher ) throws IOException { super(shardRouting.shardId(), indexSettings); assert shardRouting.initializing(); @@ -403,6 +408,11 @@ public boolean shouldCache(Query query) { persistMetadata(path, indexSettings, shardRouting, null, logger); this.useRetentionLeasesInPeerRecovery = replicationTracker.hasAllPeerRecoveryRetentionLeases(); this.refreshPendingLocationListener = new RefreshPendingLocationListener(); + if (checkpointPublisher != null) { + this.checkpointRefreshListener = new CheckpointRefreshListener(this, checkpointPublisher); + } else { + this.checkpointRefreshListener = null; + } } public ThreadPool getThreadPool() { @@ -1363,6 +1373,21 @@ public GatedCloseable acquireSafeIndexCommit() throws EngineExcepti } } + /** + * 
Returns the latest Replication Checkpoint that the shard has received
+     */
+    public ReplicationCheckpoint getLatestReplicationCheckpoint() {
+        return new ReplicationCheckpoint(shardId, 0, 0, 0, 0);
+    }
+
+    /**
+     * Invoked when a new checkpoint is received from a primary shard. Starts the copy process.
+     */
+    public synchronized void onNewCheckpoint(final PublishCheckpointRequest request) {
+        assert shardRouting.primary() == false;
+        // TODO
+    }
+
     /**
      * gets a {@link Store.MetadataSnapshot} for the current directory. This method is safe to call in all lifecycle of the index shard,
      * without having to worry about the current state of the engine and concurrent flushes.
@@ -3106,6 +3131,13 @@ private EngineConfig newEngineConfig(LongSupplier globalCheckpointSupplier) {
             }
         };
+        final List<ReferenceManager.RefreshListener> internalRefreshListener;
+        if (this.checkpointRefreshListener != null) {
+            internalRefreshListener = Arrays.asList(new RefreshMetricUpdater(refreshMetric), checkpointRefreshListener);
+        } else {
+            internalRefreshListener = Collections.singletonList(new RefreshMetricUpdater(refreshMetric));
+        }
+
         return this.engineConfigFactory.newEngineConfig(
             shardId,
             threadPool,
@@ -3122,7 +3154,7 @@ private EngineConfig newEngineConfig(LongSupplier globalCheckpointSupplier) {
             translogConfig,
             IndexingMemoryController.SHARD_INACTIVE_TIME_SETTING.get(indexSettings.getSettings()),
             Arrays.asList(refreshListeners, refreshPendingLocationListener),
-            Collections.singletonList(new RefreshMetricUpdater(refreshMetric)),
+            internalRefreshListener,
             indexSort,
             circuitBreakerService,
             globalCheckpointSupplier,
diff --git a/server/src/main/java/org/opensearch/indices/IndicesModule.java b/server/src/main/java/org/opensearch/indices/IndicesModule.java
index f188c47e7a9de..0cb2ff958c787 100644
--- a/server/src/main/java/org/opensearch/indices/IndicesModule.java
+++ b/server/src/main/java/org/opensearch/indices/IndicesModule.java
@@ -41,6 +41,7 @@
 import org.opensearch.common.inject.AbstractModule;
 import org.opensearch.common.io.stream.NamedWriteableRegistry;
 import org.opensearch.common.io.stream.NamedWriteableRegistry.Entry;
+import org.opensearch.common.util.FeatureFlags;
 import org.opensearch.common.xcontent.NamedXContentRegistry;
 import org.opensearch.index.mapper.BinaryFieldMapper;
 import org.opensearch.index.mapper.BooleanFieldMapper;
@@ -73,6 +74,7 @@
 import org.opensearch.index.shard.PrimaryReplicaSyncer;
 import org.opensearch.indices.cluster.IndicesClusterStateService;
 import org.opensearch.indices.mapper.MapperRegistry;
+import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher;
 import org.opensearch.indices.store.IndicesStore;
 import org.opensearch.indices.store.TransportNodesListShardStoreMetadata;
 import org.opensearch.plugins.MapperPlugin;
@@ -278,6 +280,9 @@ protected void configure() {
         bind(RetentionLeaseSyncAction.class).asEagerSingleton();
         bind(RetentionLeaseBackgroundSyncAction.class).asEagerSingleton();
         bind(RetentionLeaseSyncer.class).asEagerSingleton();
+        if (FeatureFlags.isEnabled(FeatureFlags.REPLICATION_TYPE)) {
+            bind(SegmentReplicationCheckpointPublisher.class).asEagerSingleton();
+        }
     }
 
     /**
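[editor's note: illustrative sketch, not part of this patch. FeatureFlags.isEnabled reads
a JVM system property, so the binding above takes effect only on nodes started with the
flag. Assuming FeatureFlags.REPLICATION_TYPE resolves to the property named below (an
assumption, not confirmed by this patch text), a node would opt in via config/jvm.options:]

    -Dopensearch.experimental.feature.replication_type.enabled=true

[end editor's note]
diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java
index 1c7e45323813c..5ce10069aaa89 100644
--- a/server/src/main/java/org/opensearch/indices/IndicesService.java
+++ b/server/src/main/java/org/opensearch/indices/IndicesService.java
@@ -138,6 +138,7 @@ import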
org.opensearch.indices.recovery.PeerRecoveryTargetService; import org.opensearch.indices.recovery.RecoveryListener; import org.opensearch.indices.recovery.RecoveryState; +import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.node.Node; import org.opensearch.plugins.IndexStorePlugin; import org.opensearch.plugins.PluginsService; @@ -839,6 +840,7 @@ public synchronized void verifyIndexMetadata(IndexMetadata metadata, IndexMetada @Override public IndexShard createShard( final ShardRouting shardRouting, + final SegmentReplicationCheckpointPublisher checkpointPublisher, final PeerRecoveryTargetService recoveryTargetService, final RecoveryListener recoveryListener, final RepositoriesService repositoriesService, @@ -853,7 +855,7 @@ public IndexShard createShard( IndexService indexService = indexService(shardRouting.index()); assert indexService != null; RecoveryState recoveryState = indexService.createRecoveryState(shardRouting, targetNode, sourceNode); - IndexShard indexShard = indexService.createShard(shardRouting, globalCheckpointSyncer, retentionLeaseSyncer); + IndexShard indexShard = indexService.createShard(shardRouting, globalCheckpointSyncer, retentionLeaseSyncer, checkpointPublisher); indexShard.addShardFailureCallback(onShardFailure); indexShard.startRecovery(recoveryState, recoveryTargetService, recoveryListener, repositoriesService, mapping -> { assert recoveryState.getRecoverySource().getType() == RecoverySource.Type.LOCAL_SHARDS diff --git a/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java b/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java index d1623df156593..7233b6893b03e 100644 --- a/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java +++ b/server/src/main/java/org/opensearch/indices/cluster/IndicesClusterStateService.java @@ -80,6 +80,7 @@ import org.opensearch.indices.recovery.PeerRecoveryTargetService; import org.opensearch.indices.recovery.RecoveryListener; import org.opensearch.indices.recovery.RecoveryState; +import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.indices.replication.common.ReplicationState; import org.opensearch.repositories.RepositoriesService; import org.opensearch.search.SearchService; @@ -138,6 +139,8 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent imple private final Consumer globalCheckpointSyncer; private final RetentionLeaseSyncer retentionLeaseSyncer; + private final SegmentReplicationCheckpointPublisher checkpointPublisher; + @Inject public IndicesClusterStateService( final Settings settings, @@ -153,13 +156,15 @@ public IndicesClusterStateService( final SnapshotShardsService snapshotShardsService, final PrimaryReplicaSyncer primaryReplicaSyncer, final GlobalCheckpointSyncAction globalCheckpointSyncAction, - final RetentionLeaseSyncer retentionLeaseSyncer + final RetentionLeaseSyncer retentionLeaseSyncer, + final SegmentReplicationCheckpointPublisher checkpointPublisher ) { this( settings, indicesService, clusterService, threadPool, + checkpointPublisher, recoveryTargetService, shardStateAction, nodeMappingRefreshAction, @@ -179,6 +184,7 @@ public IndicesClusterStateService( final AllocatedIndices> indicesService, final ClusterService clusterService, final ThreadPool threadPool, + final SegmentReplicationCheckpointPublisher checkpointPublisher, final PeerRecoveryTargetService recoveryTargetService, 
final ShardStateAction shardStateAction, final NodeMappingRefreshAction nodeMappingRefreshAction, @@ -191,6 +197,7 @@ public IndicesClusterStateService( final RetentionLeaseSyncer retentionLeaseSyncer ) { this.settings = settings; + this.checkpointPublisher = checkpointPublisher; this.buildInIndexListener = Arrays.asList(peerRecoverySourceService, recoveryTargetService, searchService, snapshotShardsService); this.indicesService = indicesService; this.clusterService = clusterService; @@ -624,6 +631,7 @@ private void createShard(DiscoveryNodes nodes, RoutingTable routingTable, ShardR logger.debug("{} creating shard with primary term [{}]", shardRouting.shardId(), primaryTerm); indicesService.createShard( shardRouting, + checkpointPublisher, recoveryTargetService, new RecoveryListener(shardRouting, primaryTerm, this), repositoriesService, @@ -981,6 +989,7 @@ U createIndex(IndexMetadata indexMetadata, List builtInIndex */ T createShard( ShardRouting shardRouting, + SegmentReplicationCheckpointPublisher checkpointPublisher, PeerRecoveryTargetService recoveryTargetService, RecoveryListener recoveryListener, RepositoriesService repositoriesService, diff --git a/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java b/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java new file mode 100644 index 0000000000000..b74a69971ebd5 --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointAction.java @@ -0,0 +1,173 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.indices.replication.checkpoint; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.lucene.store.AlreadyClosedException; +import org.opensearch.ExceptionsHelper; +import org.opensearch.action.ActionListener; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.replication.ReplicationResponse; +import org.opensearch.action.support.replication.ReplicationTask; +import org.opensearch.action.support.replication.TransportReplicationAction; +import org.opensearch.cluster.action.shard.ShardStateAction; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.inject.Inject; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.index.IndexNotFoundException; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.index.shard.IndexShardClosedException; +import org.opensearch.indices.IndicesService; +import org.opensearch.node.NodeClosedException; +import org.opensearch.tasks.Task; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportException; +import org.opensearch.transport.TransportResponseHandler; +import org.opensearch.transport.TransportService; + +import java.io.IOException; +import java.util.Objects; + +/** + * Replication action responsible for publishing checkpoint to a replica shard. 
+ * + * @opensearch.internal + */ + +public class PublishCheckpointAction extends TransportReplicationAction< + PublishCheckpointRequest, + PublishCheckpointRequest, + ReplicationResponse> { + + public static final String ACTION_NAME = "indices:admin/publishCheckpoint"; + protected static Logger logger = LogManager.getLogger(PublishCheckpointAction.class); + + @Inject + public PublishCheckpointAction( + Settings settings, + TransportService transportService, + ClusterService clusterService, + IndicesService indicesService, + ThreadPool threadPool, + ShardStateAction shardStateAction, + ActionFilters actionFilters + ) { + super( + settings, + ACTION_NAME, + transportService, + clusterService, + indicesService, + threadPool, + shardStateAction, + actionFilters, + PublishCheckpointRequest::new, + PublishCheckpointRequest::new, + ThreadPool.Names.REFRESH + ); + } + + @Override + protected ReplicationResponse newResponseInstance(StreamInput in) throws IOException { + return new ReplicationResponse(in); + } + + @Override + protected void doExecute(Task task, PublishCheckpointRequest request, ActionListener listener) { + assert false : "use PublishCheckpointAction#publish"; + } + + /** + * Publish checkpoint request to shard + */ + final void publish(IndexShard indexShard) { + String primaryAllocationId = indexShard.routingEntry().allocationId().getId(); + long primaryTerm = indexShard.getPendingPrimaryTerm(); + final ThreadContext threadContext = threadPool.getThreadContext(); + try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { + // we have to execute under the system context so that if security is enabled the sync is authorized + threadContext.markAsSystemContext(); + PublishCheckpointRequest request = new PublishCheckpointRequest(indexShard.getLatestReplicationCheckpoint()); + final ReplicationTask task = (ReplicationTask) taskManager.register("transport", "segrep_publish_checkpoint", request); + transportService.sendChildRequest( + clusterService.localNode(), + transportPrimaryAction, + new ConcreteShardRequest<>(request, primaryAllocationId, primaryTerm), + task, + transportOptions, + new TransportResponseHandler() { + @Override + public ReplicationResponse read(StreamInput in) throws IOException { + return newResponseInstance(in); + } + + @Override + public String executor() { + return ThreadPool.Names.SAME; + } + + @Override + public void handleResponse(ReplicationResponse response) { + task.setPhase("finished"); + taskManager.unregister(task); + } + + @Override + public void handleException(TransportException e) { + task.setPhase("finished"); + taskManager.unregister(task); + if (ExceptionsHelper.unwrap(e, NodeClosedException.class) != null) { + // node shutting down + return; + } + if (ExceptionsHelper.unwrap( + e, + IndexNotFoundException.class, + AlreadyClosedException.class, + IndexShardClosedException.class + ) != null) { + // the index was deleted or the shard is closed + return; + } + logger.warn( + new ParameterizedMessage("{} segment replication checkpoint publishing failed", indexShard.shardId()), + e + ); + } + } + ); + } + } + + @Override + protected void shardOperationOnPrimary( + PublishCheckpointRequest request, + IndexShard primary, + ActionListener> listener + ) { + ActionListener.completeWith(listener, () -> new PrimaryResult<>(request, new ReplicationResponse())); + } + + @Override + protected void shardOperationOnReplica(PublishCheckpointRequest request, IndexShard replica, ActionListener listener) { + Objects.requireNonNull(request); + 
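[editor's note: for orientation, the trigger chain this patch assembles: a refresh on the
primary fires CheckpointRefreshListener#afterRefresh, which calls
SegmentReplicationCheckpointPublisher#publish(indexShard), which invokes publish() above;
the transport layer then runs shardOperationOnReplica below on each replica, forwarding a
checkpoint with a matching shard id to IndexShard#onNewCheckpoint, which is still a no-op
TODO in this patch.]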
Objects.requireNonNull(replica); + ActionListener.completeWith(listener, () -> { + logger.trace("Checkpoint received on replica {}", request); + if (request.getCheckpoint().getShardId().equals(replica.shardId())) { + replica.onNewCheckpoint(request); + } + return new ReplicaResult(); + }); + } +} diff --git a/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointRequest.java b/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointRequest.java new file mode 100644 index 0000000000000..740fd3bccb7c4 --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointRequest.java @@ -0,0 +1,53 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.indices.replication.checkpoint; + +import org.opensearch.action.support.replication.ReplicationRequest; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; + +import java.io.IOException; + +/** + * Replication request responsible for publishing checkpoint request to a replica shard. + * + * @opensearch.internal + */ +public class PublishCheckpointRequest extends ReplicationRequest { + + private final ReplicationCheckpoint checkpoint; + + public PublishCheckpointRequest(ReplicationCheckpoint checkpoint) { + super(checkpoint.getShardId()); + this.checkpoint = checkpoint; + } + + public PublishCheckpointRequest(StreamInput in) throws IOException { + super(in); + this.checkpoint = new ReplicationCheckpoint(in); + } + + /** + * Returns Replication Checkpoint + */ + public ReplicationCheckpoint getCheckpoint() { + return checkpoint; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + checkpoint.writeTo(out); + } + + @Override + public String toString() { + return "PublishCheckpointRequest{" + "checkpoint=" + checkpoint + '}'; + } +} diff --git a/server/src/main/java/org/opensearch/indices/replication/checkpoint/ReplicationCheckpoint.java b/server/src/main/java/org/opensearch/indices/replication/checkpoint/ReplicationCheckpoint.java new file mode 100644 index 0000000000000..98ab9cc4c1708 --- /dev/null +++ b/server/src/main/java/org/opensearch/indices/replication/checkpoint/ReplicationCheckpoint.java @@ -0,0 +1,136 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.indices.replication.checkpoint; + +import org.opensearch.common.Nullable; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.io.stream.Writeable; +import org.opensearch.index.shard.ShardId; + +import java.io.IOException; +import java.util.Objects; + +/** + * Represents a Replication Checkpoint which is sent to a replica shard. 
+ *
+ * @opensearch.internal
+ */
+public class ReplicationCheckpoint implements Writeable {
+
+    private final ShardId shardId;
+    private final long primaryTerm;
+    private final long segmentsGen;
+    private final long seqNo;
+    private final long segmentInfosVersion;
+
+    public ReplicationCheckpoint(ShardId shardId, long primaryTerm, long segmentsGen, long seqNo, long segmentInfosVersion) {
+        this.shardId = shardId;
+        this.primaryTerm = primaryTerm;
+        this.segmentsGen = segmentsGen;
+        this.seqNo = seqNo;
+        this.segmentInfosVersion = segmentInfosVersion;
+    }
+
+    public ReplicationCheckpoint(StreamInput in) throws IOException {
+        shardId = new ShardId(in);
+        primaryTerm = in.readLong();
+        segmentsGen = in.readLong();
+        seqNo = in.readLong();
+        segmentInfosVersion = in.readLong();
+    }
+
+    /**
+     * The primary term of this Replication Checkpoint.
+     *
+     * @return the primary term
+     */
+    public long getPrimaryTerm() {
+        return primaryTerm;
+    }
+
+    /**
+     * @return the Segments Gen number
+     */
+    public long getSegmentsGen() {
+        return segmentsGen;
+    }
+
+    /**
+     * @return the Segment Info version
+     */
+    public long getSegmentInfosVersion() {
+        return segmentInfosVersion;
+    }
+
+    /**
+     * @return the Seq number
+     */
+    public long getSeqNo() {
+        return seqNo;
+    }
+
+    /**
+     * Shard Id of primary shard.
+     *
+     * @return the Shard Id
+     */
+    public ShardId getShardId() {
+        return shardId;
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        shardId.writeTo(out);
+        out.writeLong(primaryTerm);
+        out.writeLong(segmentsGen);
+        out.writeLong(seqNo);
+        out.writeLong(segmentInfosVersion);
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) return true;
+        if (o == null || getClass() != o.getClass()) return false;
+        ReplicationCheckpoint that = (ReplicationCheckpoint) o;
+        return primaryTerm == that.primaryTerm
+            && segmentsGen == that.segmentsGen
+            && seqNo == that.seqNo
+            && segmentInfosVersion == that.segmentInfosVersion
+            && Objects.equals(shardId, that.shardId);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(shardId, primaryTerm, segmentsGen, seqNo);
+    }
+
+    /**
+     * Checks if this checkpoint is ahead of the other checkpoint by comparing segmentInfosVersion. Returns true if other is null.
+     */
+    public boolean isAheadOf(@Nullable ReplicationCheckpoint other) {
+        return other == null || segmentInfosVersion > other.getSegmentInfosVersion();
+    }
+
+    @Override
+    public String toString() {
+        return "ReplicationCheckpoint{"
+            + "shardId="
+            + shardId
+            + ", primaryTerm="
+            + primaryTerm
+            + ", segmentsGen="
+            + segmentsGen
+            + ", seqNo="
+            + seqNo
+            + ", version="
+            + segmentInfosVersion
+            + '}';
+    }
+}
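[editor's note: illustrative sketch, not part of this patch. isAheadOf compares only
segmentInfosVersion and treats null as "behind"; the shard id and numbers below are made
up for illustration.]

    ShardId shardId = new ShardId("index", "_na_", 0);
    ReplicationCheckpoint local = new ReplicationCheckpoint(shardId, 1, 5, 42, 7);
    ReplicationCheckpoint remote = new ReplicationCheckpoint(shardId, 1, 5, 43, 8);
    assert remote.isAheadOf(local);           // 8 > 7 on segmentInfosVersion
    assert remote.isAheadOf(null);            // null is always considered behind
    assert local.isAheadOf(remote) == false;  // 7 is not ahead of 8

[end editor's note]
diff --git a/server/src/main/java/org/opensearch/indices/replication/checkpoint/SegmentReplicationCheckpointPublisher.java b/server/src/main/java/org/opensearch/indices/replication/checkpoint/SegmentReplicationCheckpointPublisher.java
new file mode 100644
index 0000000000000..2b09901a947fe
--- /dev/null
+++ b/server/src/main/java/org/opensearch/indices/replication/checkpoint/SegmentReplicationCheckpointPublisher.java
@@ -0,0 +1,49 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+package org.opensearch.indices.replication.checkpoint;
+
+import org.opensearch.common.inject.Inject;
+import org.opensearch.index.shard.IndexShard;
+
+import java.util.Objects;
+
+/**
+ * Publish Segment Replication Checkpoint.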
+ *
+ * @opensearch.internal
+ */
+public class SegmentReplicationCheckpointPublisher {
+
+    private final PublishAction publishAction;
+
+    @Inject
+    public SegmentReplicationCheckpointPublisher(PublishCheckpointAction publishAction) {
+        this(publishAction::publish);
+    }
+
+    public SegmentReplicationCheckpointPublisher(PublishAction publishAction) {
+        this.publishAction = Objects.requireNonNull(publishAction);
+    }
+
+    public void publish(IndexShard indexShard) {
+        publishAction.publish(indexShard);
+    }
+
+    /**
+     * Represents an action that is invoked to publish segment replication checkpoint to replica shard
+     */
+    public interface PublishAction {
+        void publish(IndexShard indexShard);
+    }
+
+    /**
+     * NoOp Checkpoint publisher
+     */
+    public static final SegmentReplicationCheckpointPublisher EMPTY = new SegmentReplicationCheckpointPublisher(indexShard -> {});
+}
diff --git a/server/src/main/java/org/opensearch/indices/replication/checkpoint/package-info.java b/server/src/main/java/org/opensearch/indices/replication/checkpoint/package-info.java
new file mode 100644
index 0000000000000..a30154ea9206a
--- /dev/null
+++ b/server/src/main/java/org/opensearch/indices/replication/checkpoint/package-info.java
@@ -0,0 +1,10 @@
+/*
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * The OpenSearch Contributors require contributions made to
+ * this file be licensed under the Apache-2.0 license or a
+ * compatible open source license.
+ */
+
+/** Package containing classes to implement a replication checkpoint */
+package org.opensearch.indices.replication.checkpoint;
diff --git a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java
index e54d30c626812..bf9671964a210 100644
--- a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java
+++ b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java
@@ -37,6 +37,7 @@
 import org.apache.lucene.index.IndexCommit;
 import org.apache.lucene.index.IndexableField;
 import org.apache.lucene.index.Term;
+import org.apache.lucene.search.ReferenceManager;
 import org.apache.lucene.search.TermQuery;
 import org.apache.lucene.search.TopDocs;
 import org.apache.lucene.store.AlreadyClosedException;
@@ -133,6 +134,7 @@
 import org.opensearch.indices.fielddata.cache.IndicesFieldDataCache;
 import org.opensearch.indices.recovery.RecoveryState;
 import org.opensearch.indices.recovery.RecoveryTarget;
+import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher;
 import org.opensearch.indices.replication.common.ReplicationLuceneIndex;
 import org.opensearch.repositories.IndexId;
 import org.opensearch.snapshots.Snapshot;
@@ -198,6 +200,7 @@
 import static org.hamcrest.Matchers.nullValue;
 import static org.hamcrest.Matchers.oneOf;
 import static org.hamcrest.Matchers.sameInstance;
+import static org.mockito.Mockito.mock;
 import static org.opensearch.cluster.routing.TestShardRouting.newShardRouting;
 import static org.opensearch.common.lucene.Lucene.cleanLuceneIndex;
 import static org.opensearch.common.xcontent.ToXContent.EMPTY_PARAMS;
@@ -3425,6 +3428,72 @@ public void testReadSnapshotConcurrently() throws IOException, InterruptedExcept
         closeShards(newShard);
     }
 
+    /**
+     * Mocks a SegmentReplicationCheckpointPublisher and verifies that the shard adds a CheckpointRefreshListener to its internal refresh listeners.
+     */
+    public void testCheckpointRefreshListener() throws IOException {
+        final SegmentReplicationCheckpointPublisher mock =
            mock(SegmentReplicationCheckpointPublisher.class);
+        IndexShard shard = newStartedShard(p -> newShard(mock), true);
+        List<ReferenceManager.RefreshListener> refreshListeners = shard.getEngine().config().getInternalRefreshListener();
+        assertTrue(refreshListeners.stream().anyMatch(e -> e instanceof CheckpointRefreshListener));
+        closeShards(shard);
+    }
+
+    /**
+     * Passes null in place of a SegmentReplicationCheckpointPublisher and verifies that the shard does not add a CheckpointRefreshListener to its internal refresh listeners.
+     */
+    public void testCheckpointRefreshListenerWithNull() throws IOException {
+        IndexShard shard = newStartedShard(p -> newShard(null), true);
+        List<ReferenceManager.RefreshListener> refreshListeners = shard.getEngine().config().getInternalRefreshListener();
+        assertFalse(refreshListeners.stream().anyMatch(e -> e instanceof CheckpointRefreshListener));
+        closeShards(shard);
+    }
+
+    /**
+     * creates a new initializing shard. The shard will be put in its proper path under the
+     * current node id the shard is assigned to.
+     * @param checkpointPublisher Segment Replication Checkpoint Publisher to publish checkpoint
+     */
+    private IndexShard newShard(SegmentReplicationCheckpointPublisher checkpointPublisher) throws IOException {
+        final ShardId shardId = new ShardId("index", "_na_", 0);
+        final ShardRouting shardRouting = TestShardRouting.newShardRouting(
+            shardId,
+            randomAlphaOfLength(10),
+            true,
+            ShardRoutingState.INITIALIZING,
+            RecoverySource.EmptyStoreRecoverySource.INSTANCE
+        );
+        final NodeEnvironment.NodePath nodePath = new NodeEnvironment.NodePath(createTempDir());
+        ShardPath shardPath = new ShardPath(false, nodePath.resolve(shardId), nodePath.resolve(shardId), shardId);
+
+        Settings indexSettings = Settings.builder()
+            .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT)
+            .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
+            .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
+            .put(IndexMetadata.SETTING_REPLICATION_TYPE, "SEGMENT")
+            .put(IndexSettings.INDEX_SOFT_DELETES_RETENTION_OPERATIONS_SETTING.getKey(), between(0, 1000))
+            .put(Settings.EMPTY)
+            .build();
+        IndexMetadata metadata = IndexMetadata.builder(shardRouting.getIndexName())
+            .settings(indexSettings)
+            .primaryTerm(0, primaryTerm)
+            .putMapping("{ \"properties\": {} }")
+            .build();
+        return newShard(
+            shardRouting,
+            shardPath,
+            metadata,
+            null,
+            null,
+            new InternalEngineFactory(),
+            new EngineConfigFactory(new IndexSettings(metadata, metadata.getSettings())),
+            () -> {},
+            RetentionLeaseSyncer.EMPTY,
+            EMPTY_EVENT_LISTENER,
+            checkpointPublisher
+        );
+    }
+
     public void testIndexCheckOnStartup() throws Exception {
         final IndexShard indexShard = newStartedShard(true);
diff --git a/server/src/test/java/org/opensearch/indices/IndicesLifecycleListenerSingleNodeTests.java b/server/src/test/java/org/opensearch/indices/IndicesLifecycleListenerSingleNodeTests.java
index 5f3d03f85f324..0989bf869f18e 100644
--- a/server/src/test/java/org/opensearch/indices/IndicesLifecycleListenerSingleNodeTests.java
+++ b/server/src/test/java/org/opensearch/indices/IndicesLifecycleListenerSingleNodeTests.java
@@ -49,6 +49,7 @@
 import org.opensearch.index.shard.ShardId;
 import org.opensearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason;
 import org.opensearch.indices.recovery.RecoveryState;
+import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher;
 import org.opensearch.test.OpenSearchSingleNodeTestCase;
 
 import java.util.Arrays;
@@ -148,7 +149,12 @@ public void afterIndexRemoved(Index index,
IndexSettings indexSettings, IndexRem newRouting = newRouting.moveToUnassigned(unassignedInfo) .updateUnassigned(unassignedInfo, RecoverySource.EmptyStoreRecoverySource.INSTANCE); newRouting = ShardRoutingHelper.initialize(newRouting, nodeId); - IndexShard shard = index.createShard(newRouting, s -> {}, RetentionLeaseSyncer.EMPTY); + IndexShard shard = index.createShard( + newRouting, + s -> {}, + RetentionLeaseSyncer.EMPTY, + SegmentReplicationCheckpointPublisher.EMPTY + ); IndexShardTestCase.updateRoutingEntry(shard, newRouting); assertEquals(5, counter.get()); final DiscoveryNode localNode = new DiscoveryNode( diff --git a/server/src/test/java/org/opensearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java b/server/src/test/java/org/opensearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java index 97cb1dc341b13..0619e3e3f62a2 100644 --- a/server/src/test/java/org/opensearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java +++ b/server/src/test/java/org/opensearch/indices/cluster/AbstractIndicesClusterStateServiceTestCase.java @@ -59,6 +59,7 @@ import org.opensearch.indices.recovery.PeerRecoveryTargetService; import org.opensearch.indices.recovery.RecoveryListener; import org.opensearch.indices.recovery.RecoveryState; +import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.repositories.RepositoriesService; import org.opensearch.test.OpenSearchTestCase; @@ -253,6 +254,7 @@ public MockIndexService indexService(Index index) { @Override public MockIndexShard createShard( final ShardRouting shardRouting, + final SegmentReplicationCheckpointPublisher checkpointPublisher, final PeerRecoveryTargetService recoveryTargetService, final RecoveryListener recoveryListener, final RepositoriesService repositoriesService, diff --git a/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java b/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java index 7789054cfdc16..cd3fee60014a7 100644 --- a/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java +++ b/server/src/test/java/org/opensearch/indices/cluster/IndicesClusterStateServiceRandomUpdatesTests.java @@ -66,6 +66,7 @@ import org.opensearch.index.shard.PrimaryReplicaSyncer; import org.opensearch.index.shard.ShardId; import org.opensearch.indices.recovery.PeerRecoveryTargetService; +import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.repositories.RepositoriesService; import org.opensearch.threadpool.TestThreadPool; import org.opensearch.threadpool.ThreadPool; @@ -562,6 +563,7 @@ private IndicesClusterStateService createIndicesClusterStateService( indicesService, clusterService, threadPool, + SegmentReplicationCheckpointPublisher.EMPTY, recoveryTargetService, shardStateAction, null, diff --git a/server/src/test/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointActionTests.java b/server/src/test/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointActionTests.java new file mode 100644 index 0000000000000..074b5ff613b08 --- /dev/null +++ b/server/src/test/java/org/opensearch/indices/replication/checkpoint/PublishCheckpointActionTests.java @@ -0,0 +1,157 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + 
* compatible open source license. + */ + +package org.opensearch.indices.replication.checkpoint; + +import org.opensearch.action.ActionListener; +import org.opensearch.action.support.ActionFilters; +import org.opensearch.action.support.ActionTestUtils; +import org.opensearch.action.support.PlainActionFuture; +import org.opensearch.action.support.replication.TransportReplicationAction; +import org.opensearch.cluster.action.shard.ShardStateAction; +import org.opensearch.cluster.service.ClusterService; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.internal.io.IOUtils; +import org.opensearch.index.Index; +import org.opensearch.index.IndexService; +import org.opensearch.index.shard.IndexShard; +import org.opensearch.index.shard.ShardId; +import org.opensearch.indices.IndicesService; +import org.opensearch.indices.recovery.RecoverySettings; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.transport.CapturingTransport; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportService; + +import java.util.Collections; +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.hamcrest.Matchers.sameInstance; +import static org.mockito.Mockito.*; +import static org.opensearch.test.ClusterServiceUtils.createClusterService; + +public class PublishCheckpointActionTests extends OpenSearchTestCase { + + private ThreadPool threadPool; + private CapturingTransport transport; + private ClusterService clusterService; + private TransportService transportService; + private ShardStateAction shardStateAction; + + @Override + public void setUp() throws Exception { + super.setUp(); + threadPool = new TestThreadPool(getClass().getName()); + transport = new CapturingTransport(); + clusterService = createClusterService(threadPool); + transportService = transport.createTransportService( + clusterService.getSettings(), + threadPool, + TransportService.NOOP_TRANSPORT_INTERCEPTOR, + boundAddress -> clusterService.localNode(), + null, + Collections.emptySet() + ); + transportService.start(); + transportService.acceptIncomingRequests(); + shardStateAction = new ShardStateAction(clusterService, transportService, null, null, threadPool); + } + + @Override + public void tearDown() throws Exception { + try { + IOUtils.close(transportService, clusterService, transport); + } finally { + terminate(threadPool); + } + super.tearDown(); + } + + public void testPublishCheckpointActionOnPrimary() throws InterruptedException { + final IndicesService indicesService = mock(IndicesService.class); + + final Index index = new Index("index", "uuid"); + final IndexService indexService = mock(IndexService.class); + when(indicesService.indexServiceSafe(index)).thenReturn(indexService); + + final int id = randomIntBetween(0, 4); + final IndexShard indexShard = mock(IndexShard.class); + when(indexService.getShard(id)).thenReturn(indexShard); + + final ShardId shardId = new ShardId(index, id); + when(indexShard.shardId()).thenReturn(shardId); + + final RecoverySettings recoverySettings = new RecoverySettings(Settings.EMPTY, clusterService.getClusterSettings()); + + final PublishCheckpointAction action = new PublishCheckpointAction( + Settings.EMPTY, + transportService, + clusterService, + indicesService, + threadPool, + shardStateAction, + new ActionFilters(Collections.emptySet()) + ); + + final ReplicationCheckpoint checkpoint = new ReplicationCheckpoint(indexShard.shardId(), 1111, 111, 11, 1); + + 
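+        // argument order per the ReplicationCheckpoint constructor: primaryTerm=1111, segmentsGen=111, seqNo=11, segmentInfosVersion=1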
final PublishCheckpointRequest request = new PublishCheckpointRequest(checkpoint); + + action.shardOperationOnPrimary(request, indexShard, ActionTestUtils.assertNoFailureListener(result -> { + // we should forward the request containing the current publish checkpoint to the replica + assertThat(result.replicaRequest(), sameInstance(request)); + })); + + } + + public void testPublishCheckpointActionOnReplica() { + final IndicesService indicesService = mock(IndicesService.class); + + final Index index = new Index("index", "uuid"); + final IndexService indexService = mock(IndexService.class); + when(indicesService.indexServiceSafe(index)).thenReturn(indexService); + + final int id = randomIntBetween(0, 4); + final IndexShard indexShard = mock(IndexShard.class); + when(indexService.getShard(id)).thenReturn(indexShard); + + final ShardId shardId = new ShardId(index, id); + when(indexShard.shardId()).thenReturn(shardId); + + final RecoverySettings recoverySettings = new RecoverySettings(Settings.EMPTY, clusterService.getClusterSettings()); + + final PublishCheckpointAction action = new PublishCheckpointAction( + Settings.EMPTY, + transportService, + clusterService, + indicesService, + threadPool, + shardStateAction, + new ActionFilters(Collections.emptySet()) + ); + + final ReplicationCheckpoint checkpoint = new ReplicationCheckpoint(indexShard.shardId(), 1111, 111, 11, 1); + + final PublishCheckpointRequest request = new PublishCheckpointRequest(checkpoint); + + final PlainActionFuture listener = PlainActionFuture.newFuture(); + action.shardOperationOnReplica(request, indexShard, listener); + final TransportReplicationAction.ReplicaResult result = listener.actionGet(); + + // onNewCheckpoint should be called on shard with checkpoint request + verify(indexShard).onNewCheckpoint(request); + + // the result should indicate success + final AtomicBoolean success = new AtomicBoolean(); + result.runPostReplicaActions(ActionListener.wrap(r -> success.set(true), e -> fail(e.toString()))); + assertTrue(success.get()); + + } + +} diff --git a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java index a896aab0f70c9..ab9a455399366 100644 --- a/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/opensearch/snapshots/SnapshotResiliencyTests.java @@ -182,6 +182,7 @@ import org.opensearch.indices.recovery.PeerRecoverySourceService; import org.opensearch.indices.recovery.PeerRecoveryTargetService; import org.opensearch.indices.recovery.RecoverySettings; +import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.ingest.IngestService; import org.opensearch.monitor.StatusInfo; import org.opensearch.node.ResponseCollectorService; @@ -1860,7 +1861,8 @@ public void onFailure(final Exception e) { shardStateAction, actionFilters ), - RetentionLeaseSyncer.EMPTY + RetentionLeaseSyncer.EMPTY, + SegmentReplicationCheckpointPublisher.EMPTY ); Map actions = new HashMap<>(); final SystemIndices systemIndices = new SystemIndices(emptyMap()); diff --git a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java index 298fdcaea6465..371fa6d102304 100644 --- a/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/shard/IndexShardTestCase.java 
@@ -94,6 +94,7 @@ import org.opensearch.indices.recovery.RecoveryState; import org.opensearch.indices.recovery.RecoveryTarget; import org.opensearch.indices.recovery.StartRecoveryRequest; +import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.indices.replication.common.ReplicationListener; import org.opensearch.indices.replication.common.ReplicationState; import org.opensearch.repositories.IndexId; @@ -411,6 +412,47 @@ protected IndexShard newShard( ); } + /** + * creates a new initializing shard. The shard will will be put in its proper path under the + * current node id the shard is assigned to. + * @param routing shard routing to use + * @param shardPath path to use for shard data + * @param indexMetadata indexMetadata for the shard, including any mapping + * @param storeProvider an optional custom store provider to use. If null a default file based store will be created + * @param indexReaderWrapper an optional wrapper to be used during search + * @param globalCheckpointSyncer callback for syncing global checkpoints + * @param indexEventListener index event listener + * @param listeners an optional set of listeners to add to the shard + */ + protected IndexShard newShard( + ShardRouting routing, + ShardPath shardPath, + IndexMetadata indexMetadata, + @Nullable CheckedFunction storeProvider, + @Nullable CheckedFunction indexReaderWrapper, + @Nullable EngineFactory engineFactory, + @Nullable EngineConfigFactory engineConfigFactory, + Runnable globalCheckpointSyncer, + RetentionLeaseSyncer retentionLeaseSyncer, + IndexEventListener indexEventListener, + IndexingOperationListener... listeners + ) throws IOException { + return newShard( + routing, + shardPath, + indexMetadata, + storeProvider, + indexReaderWrapper, + engineFactory, + engineConfigFactory, + globalCheckpointSyncer, + retentionLeaseSyncer, + indexEventListener, + SegmentReplicationCheckpointPublisher.EMPTY, + listeners + ); + } + /** * creates a new initializing shard. * @param routing shard routing to use @@ -420,6 +462,7 @@ protected IndexShard newShard( * @param indexReaderWrapper an optional wrapper to be used during search * @param globalCheckpointSyncer callback for syncing global checkpoints * @param indexEventListener index event listener + * @param checkpointPublisher segment Replication Checkpoint Publisher to publish checkpoint * @param listeners an optional set of listeners to add to the shard */ protected IndexShard newShard( @@ -433,6 +476,7 @@ protected IndexShard newShard( Runnable globalCheckpointSyncer, RetentionLeaseSyncer retentionLeaseSyncer, IndexEventListener indexEventListener, + SegmentReplicationCheckpointPublisher checkpointPublisher, IndexingOperationListener... listeners ) throws IOException { final Settings nodeSettings = Settings.builder().put("node.name", routing.currentNodeId()).build(); @@ -480,7 +524,8 @@ protected IndexShard newShard( Arrays.asList(listeners), globalCheckpointSyncer, retentionLeaseSyncer, - breakerService + breakerService, + checkpointPublisher ); indexShard.addShardFailureCallback(DEFAULT_SHARD_FAILURE_HANDLER); success = true; From a0030dfb47b4312eb4087f17cc3cee0139c74b6a Mon Sep 17 00:00:00 2001 From: Marc Handalian Date: Tue, 24 May 2022 10:18:18 -0700 Subject: [PATCH 13/16] Add a new Engine implementation for replicas with segment replication enabled. (#3240) * Change fastForwardProcessedSeqNo method in LocalCheckpointTracker to persisted checkpoint. 
This change inverts fastForwardProcessedSeqNo to fastForwardPersistedSeqNo for use in Segment Replication. This is so that a Segrep Engine can match the logic of InternalEngine, where the seqNo is incremented with each operation but only persisted in the tracker on a flush. With Segment Replication we bump the processed number with each operation received (index/delete/noOp), and invoke this method when we receive a new set of segments to bump the persisted seqNo.

Signed-off-by: Marc Handalian

* Extract Translog specific engine methods into an abstract class.

This change extracts translog specific methods to an abstract engine class so that other engine implementations can reuse translog logic.

Signed-off-by: Marc Handalian

* Add a separate Engine implementation for replicas with segment replication enabled.

This change adds a new engine intended to be used on replicas with segment replication enabled. This engine does not wire up an IndexWriter, but still writes all operations to a translog. The engine uses a new ReaderManager that refreshes from an externally provided SegmentInfos.

Signed-off-by: Marc Handalian

* Fix spotless checks.

Signed-off-by: Marc Handalian

* Fix :server:compileInternalClusterTestJava compilation.

Signed-off-by: Marc Handalian

* Fix failing test naming convention check.

Signed-off-by: Marc Handalian

* PR feedback.

- Removed isReadOnlyReplica from overloaded constructor and added feature flag checks.
- Updated log msg in NRTReplicationReaderManager
- Cleaned up store ref counting in NRTReplicationEngine.

Signed-off-by: Marc Handalian

* Fix spotless check.

Signed-off-by: Marc Handalian

* Remove TranslogAwareEngine and build translog in NRTReplicationEngine.

Signed-off-by: Marc Handalian

* Fix formatting

Signed-off-by: Marc Handalian

* Add missing translog methods to NRTEngine.

Signed-off-by: Marc Handalian

* Remove persistent seqNo check from fastForwardProcessedSeqNo.

Signed-off-by: Marc Handalian

* PR feedback.

Signed-off-by: Marc Handalian

* Add test specific to translog trimming.

Signed-off-by: Marc Handalian

* Javadoc check.

Signed-off-by: Marc Handalian

* Add failEngine calls to translog methods in NRTReplicationEngine.
  Roll xlog generation on replica when a new commit point is received.
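In short: on a replica, a published checkpoint announces that new segments exist on the primary; once
those segments are available locally, the engine is handed the new SegmentInfos together with a
sequence number to fast-forward to. A rough sketch of that interaction (isAheadOf and updateSegments
are methods added in this change; the surrounding variable names are illustrative, not actual wiring):

    // replica side, after segments for a newer checkpoint have been obtained
    if (receivedCheckpoint.isAheadOf(currentCheckpoint)) {
        nrtReplicationEngine.updateSegments(receivedInfos, receivedCheckpoint.getSeqNo());
    }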
Signed-off-by: Marc Handalian --- .../org/opensearch/index/engine/Engine.java | 17 + .../opensearch/index/engine/EngineConfig.java | 72 +++ .../index/engine/EngineConfigFactory.java | 6 +- .../index/engine/InternalEngine.java | 29 +- .../index/engine/NRTReplicationEngine.java | 482 ++++++++++++++++++ .../engine/NRTReplicationEngineFactory.java | 25 + .../engine/NRTReplicationReaderManager.java | 92 ++++ .../index/engine/ReadOnlyEngine.java | 5 + .../index/seqno/LocalCheckpointTracker.java | 2 +- .../opensearch/index/shard/IndexShard.java | 3 +- .../opensearch/indices/IndicesService.java | 4 + .../engine/EngineConfigFactoryTests.java | 6 +- .../engine/NRTReplicationEngineTests.java | 239 +++++++++ .../seqno/LocalCheckpointTrackerTests.java | 45 +- .../index/shard/IndexShardTests.java | 28 +- .../index/engine/EngineTestCase.java | 22 +- 16 files changed, 1007 insertions(+), 70 deletions(-) create mode 100644 server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java create mode 100644 server/src/main/java/org/opensearch/index/engine/NRTReplicationEngineFactory.java create mode 100644 server/src/main/java/org/opensearch/index/engine/NRTReplicationReaderManager.java create mode 100644 server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java diff --git a/server/src/main/java/org/opensearch/index/engine/Engine.java b/server/src/main/java/org/opensearch/index/engine/Engine.java index 047d632c44392..c242d98b4b65c 100644 --- a/server/src/main/java/org/opensearch/index/engine/Engine.java +++ b/server/src/main/java/org/opensearch/index/engine/Engine.java @@ -169,6 +169,12 @@ public final EngineConfig config() { protected abstract SegmentInfos getLastCommittedSegmentInfos(); + /** + * Return the latest active SegmentInfos from the engine. + * @return {@link SegmentInfos} + */ + protected abstract SegmentInfos getLatestSegmentInfos(); + public MergeStats getMergeStats() { return new MergeStats(); } @@ -176,6 +182,17 @@ public MergeStats getMergeStats() { /** returns the history uuid for the engine */ public abstract String getHistoryUUID(); + /** + * Reads the current stored history ID from commit data. + */ + String loadHistoryUUID(Map commitData) { + final String uuid = commitData.get(HISTORY_UUID_KEY); + if (uuid == null) { + throw new IllegalStateException("commit doesn't contain history uuid"); + } + return uuid; + } + /** Returns how many bytes we are currently moving from heap to disk */ public abstract long getWritingBytes(); diff --git a/server/src/main/java/org/opensearch/index/engine/EngineConfig.java b/server/src/main/java/org/opensearch/index/engine/EngineConfig.java index 0ea4a96a72362..4ae6646ed14f0 100644 --- a/server/src/main/java/org/opensearch/index/engine/EngineConfig.java +++ b/server/src/main/java/org/opensearch/index/engine/EngineConfig.java @@ -97,6 +97,7 @@ public final class EngineConfig { private final CircuitBreakerService circuitBreakerService; private final LongSupplier globalCheckpointSupplier; private final Supplier retentionLeasesSupplier; + private final boolean isReadOnlyReplica; /** * A supplier of the outstanding retention leases. 
This is used during merged operations to determine which operations that have been @@ -228,6 +229,66 @@ public EngineConfig( LongSupplier primaryTermSupplier, TombstoneDocSupplier tombstoneDocSupplier ) { + this( + shardId, + threadPool, + indexSettings, + warmer, + store, + mergePolicy, + analyzer, + similarity, + codecService, + eventListener, + queryCache, + queryCachingPolicy, + translogConfig, + translogDeletionPolicyFactory, + flushMergesAfter, + externalRefreshListener, + internalRefreshListener, + indexSort, + circuitBreakerService, + globalCheckpointSupplier, + retentionLeasesSupplier, + primaryTermSupplier, + tombstoneDocSupplier, + false + ); + } + + /** + * Creates a new {@link org.opensearch.index.engine.EngineConfig} + */ + EngineConfig( + ShardId shardId, + ThreadPool threadPool, + IndexSettings indexSettings, + Engine.Warmer warmer, + Store store, + MergePolicy mergePolicy, + Analyzer analyzer, + Similarity similarity, + CodecService codecService, + Engine.EventListener eventListener, + QueryCache queryCache, + QueryCachingPolicy queryCachingPolicy, + TranslogConfig translogConfig, + TranslogDeletionPolicyFactory translogDeletionPolicyFactory, + TimeValue flushMergesAfter, + List externalRefreshListener, + List internalRefreshListener, + Sort indexSort, + CircuitBreakerService circuitBreakerService, + LongSupplier globalCheckpointSupplier, + Supplier retentionLeasesSupplier, + LongSupplier primaryTermSupplier, + TombstoneDocSupplier tombstoneDocSupplier, + boolean isReadOnlyReplica + ) { + if (isReadOnlyReplica && indexSettings.isSegRepEnabled() == false) { + throw new IllegalArgumentException("Shard can only be wired as a read only replica with Segment Replication enabled"); + } this.shardId = shardId; this.indexSettings = indexSettings; this.threadPool = threadPool; @@ -266,6 +327,7 @@ public EngineConfig( this.retentionLeasesSupplier = Objects.requireNonNull(retentionLeasesSupplier); this.primaryTermSupplier = primaryTermSupplier; this.tombstoneDocSupplier = tombstoneDocSupplier; + this.isReadOnlyReplica = isReadOnlyReplica; } /** @@ -460,6 +522,16 @@ public LongSupplier getPrimaryTermSupplier() { return primaryTermSupplier; } + /** + * Returns if this replica should be wired as a read only. + * This is used for Segment Replication where the engine implementation used is dependent on + * if the shard is a primary/replica. + * @return true if this engine should be wired as read only. + */ + public boolean isReadOnlyReplica() { + return indexSettings.isSegRepEnabled() && isReadOnlyReplica; + } + /** * A supplier supplies tombstone documents which will be used in soft-update methods. * The returned document consists only _uid, _seqno, _term and _version fields; other metadata fields are excluded. 
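A note on the new flag: whether an engine config is wired read-only is decided once, from the index
settings plus the shard's role. A condensed sketch of the expression used later in this patch (in
IndexShard#newEngineConfig, with the variable names as they appear there):

    // replicas of segment-replication-enabled indices get a read-only engine config
    boolean isReadOnlyReplica = indexSettings.isSegRepEnabled() && shardRouting.primary() == false;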
diff --git a/server/src/main/java/org/opensearch/index/engine/EngineConfigFactory.java b/server/src/main/java/org/opensearch/index/engine/EngineConfigFactory.java index afab57905a9a7..c8aec3570f8b5 100644 --- a/server/src/main/java/org/opensearch/index/engine/EngineConfigFactory.java +++ b/server/src/main/java/org/opensearch/index/engine/EngineConfigFactory.java @@ -146,7 +146,8 @@ public EngineConfig newEngineConfig( LongSupplier globalCheckpointSupplier, Supplier retentionLeasesSupplier, LongSupplier primaryTermSupplier, - EngineConfig.TombstoneDocSupplier tombstoneDocSupplier + EngineConfig.TombstoneDocSupplier tombstoneDocSupplier, + boolean isReadOnlyReplica ) { CodecService codecServiceToUse = codecService; if (codecService == null && this.codecServiceFactory != null) { @@ -176,7 +177,8 @@ public EngineConfig newEngineConfig( globalCheckpointSupplier, retentionLeasesSupplier, primaryTermSupplier, - tombstoneDocSupplier + tombstoneDocSupplier, + isReadOnlyReplica ); } diff --git a/server/src/main/java/org/opensearch/index/engine/InternalEngine.java b/server/src/main/java/org/opensearch/index/engine/InternalEngine.java index eb91478b97adc..e60e650372ec4 100644 --- a/server/src/main/java/org/opensearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/InternalEngine.java @@ -49,6 +49,7 @@ import org.apache.lucene.index.SegmentInfos; import org.apache.lucene.index.ShuffleForcedMergePolicy; import org.apache.lucene.index.SoftDeletesRetentionMergePolicy; +import org.apache.lucene.index.StandardDirectoryReader; import org.apache.lucene.index.Term; import org.apache.lucene.sandbox.index.MergeOnFlushMergePolicy; import org.apache.lucene.search.BooleanClause; @@ -648,17 +649,6 @@ public long getWritingBytes() { return indexWriter.getFlushingBytes() + versionMap.getRefreshingBytes(); } - /** - * Reads the current stored history ID from the IW commit data. 
- */ - private String loadHistoryUUID(Map commitData) { - final String uuid = commitData.get(HISTORY_UUID_KEY); - if (uuid == null) { - throw new IllegalStateException("commit doesn't contain history uuid"); - } - return uuid; - } - private ExternalReaderManager createReaderManager(RefreshWarmerListener externalRefreshListener) throws EngineException { boolean success = false; OpenSearchReaderManager internalReaderManager = null; @@ -2298,6 +2288,23 @@ protected SegmentInfos getLastCommittedSegmentInfos() { return lastCommittedSegmentInfos; } + @Override + public SegmentInfos getLatestSegmentInfos() { + OpenSearchDirectoryReader reader = null; + try { + reader = internalReaderManager.acquire(); + return ((StandardDirectoryReader) reader.getDelegate()).getSegmentInfos(); + } catch (IOException e) { + throw new EngineException(shardId, e.getMessage(), e); + } finally { + try { + internalReaderManager.release(reader); + } catch (IOException e) { + throw new EngineException(shardId, e.getMessage(), e); + } + } + } + @Override protected final void writerSegmentStats(SegmentsStats stats) { stats.addVersionMapMemoryInBytes(versionMap.ramBytesUsed()); diff --git a/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java b/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java new file mode 100644 index 0000000000000..106643198cc3b --- /dev/null +++ b/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngine.java @@ -0,0 +1,482 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.engine; + +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexCommit; +import org.apache.lucene.index.SegmentInfos; +import org.apache.lucene.index.SoftDeletesDirectoryReaderWrapper; +import org.apache.lucene.search.ReferenceManager; +import org.opensearch.common.concurrent.GatedCloseable; +import org.opensearch.common.lucene.Lucene; +import org.opensearch.common.lucene.index.OpenSearchDirectoryReader; +import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.concurrent.ReleasableLock; +import org.opensearch.core.internal.io.IOUtils; +import org.opensearch.index.seqno.LocalCheckpointTracker; +import org.opensearch.index.seqno.SeqNoStats; +import org.opensearch.index.seqno.SequenceNumbers; +import org.opensearch.index.translog.DefaultTranslogDeletionPolicy; +import org.opensearch.index.translog.Translog; +import org.opensearch.index.translog.TranslogConfig; +import org.opensearch.index.translog.TranslogDeletionPolicy; +import org.opensearch.index.translog.TranslogStats; +import org.opensearch.search.suggest.completion.CompletionStats; + +import java.io.Closeable; +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.CountDownLatch; +import java.util.function.BiFunction; +import java.util.function.LongConsumer; +import java.util.function.LongSupplier; +import java.util.stream.Stream; + +/** + * This is an {@link Engine} implementation intended for replica shards when Segment Replication + * is enabled. This Engine does not create an IndexWriter, rather it refreshes a {@link NRTReplicationReaderManager} + * with new Segments when received from an external source. 
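+ * <p>
+ * Writes (index, delete, noOp) are recorded in the translog only; refresh, flush and force-merge are
+ * no-ops here because segments always arrive pre-built from the primary.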
+ * + * @opensearch.internal + */ +public class NRTReplicationEngine extends Engine { + + private volatile SegmentInfos lastCommittedSegmentInfos; + private final NRTReplicationReaderManager readerManager; + private final CompletionStatsCache completionStatsCache; + private final LocalCheckpointTracker localCheckpointTracker; + private final Translog translog; + + public NRTReplicationEngine(EngineConfig engineConfig) { + super(engineConfig); + store.incRef(); + NRTReplicationReaderManager readerManager = null; + try { + lastCommittedSegmentInfos = store.readLastCommittedSegmentsInfo(); + readerManager = new NRTReplicationReaderManager(OpenSearchDirectoryReader.wrap(getDirectoryReader(), shardId)); + final SequenceNumbers.CommitInfo commitInfo = SequenceNumbers.loadSeqNoInfoFromLuceneCommit( + this.lastCommittedSegmentInfos.getUserData().entrySet() + ); + this.localCheckpointTracker = new LocalCheckpointTracker(commitInfo.maxSeqNo, commitInfo.localCheckpoint); + this.completionStatsCache = new CompletionStatsCache(() -> acquireSearcher("completion_stats")); + this.readerManager = readerManager; + this.readerManager.addListener(completionStatsCache); + this.translog = openTranslog( + engineConfig, + getTranslogDeletionPolicy(engineConfig), + engineConfig.getGlobalCheckpointSupplier(), + localCheckpointTracker::markSeqNoAsPersisted + ); + } catch (IOException e) { + IOUtils.closeWhileHandlingException(store::decRef, readerManager); + throw new EngineCreationFailureException(shardId, "failed to create engine", e); + } + } + + public synchronized void updateSegments(final SegmentInfos infos, long seqNo) throws IOException { + // Update the current infos reference on the Engine's reader. + readerManager.updateSegments(infos); + + // only update the persistedSeqNo and "lastCommitted" infos reference if the incoming segments have a higher + // generation. We can still refresh with incoming SegmentInfos that are not part of a commit point. + if (infos.getGeneration() > lastCommittedSegmentInfos.getGeneration()) { + this.lastCommittedSegmentInfos = infos; + rollTranslogGeneration(); + } + localCheckpointTracker.fastForwardProcessedSeqNo(seqNo); + } + + @Override + public String getHistoryUUID() { + return loadHistoryUUID(lastCommittedSegmentInfos.userData); + } + + @Override + public long getWritingBytes() { + return 0; + } + + @Override + public CompletionStats completionStats(String... 
fieldNamePatterns) { + return completionStatsCache.get(fieldNamePatterns); + } + + @Override + public long getIndexThrottleTimeInMillis() { + return 0; + } + + @Override + public boolean isThrottled() { + return false; + } + + @Override + public void trimOperationsFromTranslog(long belowTerm, long aboveSeqNo) throws EngineException { + try (ReleasableLock lock = readLock.acquire()) { + ensureOpen(); + translog.trimOperations(belowTerm, aboveSeqNo); + } catch (Exception e) { + try { + failEngine("translog operations trimming failed", e); + } catch (Exception inner) { + e.addSuppressed(inner); + } + throw new EngineException(shardId, "failed to trim translog operations", e); + } + } + + @Override + public IndexResult index(Index index) throws IOException { + ensureOpen(); + IndexResult indexResult = new IndexResult(index.version(), index.primaryTerm(), index.seqNo(), false); + final Translog.Location location = translog.add(new Translog.Index(index, indexResult)); + indexResult.setTranslogLocation(location); + indexResult.setTook(System.nanoTime() - index.startTime()); + indexResult.freeze(); + localCheckpointTracker.advanceMaxSeqNo(index.seqNo()); + return indexResult; + } + + @Override + public DeleteResult delete(Delete delete) throws IOException { + ensureOpen(); + DeleteResult deleteResult = new DeleteResult(delete.version(), delete.primaryTerm(), delete.seqNo(), true); + final Translog.Location location = translog.add(new Translog.Delete(delete, deleteResult)); + deleteResult.setTranslogLocation(location); + deleteResult.setTook(System.nanoTime() - delete.startTime()); + deleteResult.freeze(); + localCheckpointTracker.advanceMaxSeqNo(delete.seqNo()); + return deleteResult; + } + + @Override + public NoOpResult noOp(NoOp noOp) throws IOException { + ensureOpen(); + NoOpResult noOpResult = new NoOpResult(noOp.primaryTerm(), noOp.seqNo()); + final Translog.Location location = translog.add(new Translog.NoOp(noOp.seqNo(), noOp.primaryTerm(), noOp.reason())); + noOpResult.setTranslogLocation(location); + noOpResult.setTook(System.nanoTime() - noOp.startTime()); + noOpResult.freeze(); + localCheckpointTracker.advanceMaxSeqNo(noOp.seqNo()); + return noOpResult; + } + + @Override + public GetResult get(Get get, BiFunction searcherFactory) throws EngineException { + return getFromSearcher(get, searcherFactory, SearcherScope.EXTERNAL); + } + + @Override + protected ReferenceManager getReferenceManager(SearcherScope scope) { + return readerManager; + } + + @Override + public boolean isTranslogSyncNeeded() { + return translog.syncNeeded(); + } + + @Override + public boolean ensureTranslogSynced(Stream locations) throws IOException { + boolean synced = translog.ensureSynced(locations); + if (synced) { + translog.trimUnreferencedReaders(); + } + return synced; + } + + @Override + public void syncTranslog() throws IOException { + translog.sync(); + translog.trimUnreferencedReaders(); + } + + @Override + public Closeable acquireHistoryRetentionLock() { + throw new UnsupportedOperationException("Not implemented"); + } + + @Override + public Translog.Snapshot newChangesSnapshot( + String source, + long fromSeqNo, + long toSeqNo, + boolean requiredFullRange, + boolean accurateCount + ) throws IOException { + throw new UnsupportedOperationException("Not implemented"); + } + + @Override + public int countNumberOfHistoryOperations(String source, long fromSeqNo, long toSeqNumber) throws IOException { + return 0; + } + + @Override + public boolean hasCompleteOperationHistory(String reason, long startingSeqNo) 
{ + return false; + } + + @Override + public long getMinRetainedSeqNo() { + return localCheckpointTracker.getProcessedCheckpoint(); + } + + @Override + public TranslogStats getTranslogStats() { + return translog.stats(); + } + + @Override + public Translog.Location getTranslogLastWriteLocation() { + return translog.getLastWriteLocation(); + } + + @Override + public long getPersistedLocalCheckpoint() { + return localCheckpointTracker.getPersistedCheckpoint(); + } + + public long getProcessedLocalCheckpoint() { + return localCheckpointTracker.getProcessedCheckpoint(); + } + + @Override + public SeqNoStats getSeqNoStats(long globalCheckpoint) { + return localCheckpointTracker.getStats(globalCheckpoint); + } + + @Override + public long getLastSyncedGlobalCheckpoint() { + return translog.getLastSyncedGlobalCheckpoint(); + } + + @Override + public long getIndexBufferRAMBytesUsed() { + return 0; + } + + @Override + public List segments(boolean verbose) { + return Arrays.asList(getSegmentInfo(getLatestSegmentInfos(), verbose)); + } + + @Override + public void refresh(String source) throws EngineException {} + + @Override + public boolean maybeRefresh(String source) throws EngineException { + return false; + } + + @Override + public void writeIndexingBuffer() throws EngineException {} + + @Override + public boolean shouldPeriodicallyFlush() { + return false; + } + + @Override + public void flush(boolean force, boolean waitIfOngoing) throws EngineException {} + + @Override + public void trimUnreferencedTranslogFiles() throws EngineException { + try (ReleasableLock lock = readLock.acquire()) { + ensureOpen(); + translog.trimUnreferencedReaders(); + } catch (Exception e) { + try { + failEngine("translog trimming failed", e); + } catch (Exception inner) { + e.addSuppressed(inner); + } + throw new EngineException(shardId, "failed to trim translog", e); + } + } + + @Override + public boolean shouldRollTranslogGeneration() { + return translog.shouldRollGeneration(); + } + + @Override + public void rollTranslogGeneration() throws EngineException { + try (ReleasableLock ignored = readLock.acquire()) { + ensureOpen(); + translog.rollGeneration(); + translog.trimUnreferencedReaders(); + } catch (Exception e) { + try { + failEngine("translog trimming failed", e); + } catch (Exception inner) { + e.addSuppressed(inner); + } + throw new EngineException(shardId, "failed to roll translog", e); + } + } + + @Override + public void forceMerge( + boolean flush, + int maxNumSegments, + boolean onlyExpungeDeletes, + boolean upgrade, + boolean upgradeOnlyAncientSegments, + String forceMergeUUID + ) throws EngineException, IOException {} + + @Override + public GatedCloseable acquireLastIndexCommit(boolean flushFirst) throws EngineException { + try { + final IndexCommit indexCommit = Lucene.getIndexCommit(lastCommittedSegmentInfos, store.directory()); + return new GatedCloseable<>(indexCommit, () -> {}); + } catch (IOException e) { + throw new EngineException(shardId, "Unable to build latest IndexCommit", e); + } + } + + @Override + public GatedCloseable acquireSafeIndexCommit() throws EngineException { + return acquireLastIndexCommit(false); + } + + @Override + public SafeCommitInfo getSafeCommitInfo() { + return new SafeCommitInfo(localCheckpointTracker.getProcessedCheckpoint(), lastCommittedSegmentInfos.totalMaxDoc()); + } + + @Override + protected final void closeNoLock(String reason, CountDownLatch closedLatch) { + if (isClosed.compareAndSet(false, true)) { + assert rwl.isWriteLockedByCurrentThread() || 
failEngineLock.isHeldByCurrentThread() + : "Either the write lock must be held or the engine must be currently be failing itself"; + try { + IOUtils.close(readerManager, translog, store::decRef); + } catch (Exception e) { + logger.warn("failed to close engine", e); + } finally { + logger.debug("engine closed [{}]", reason); + closedLatch.countDown(); + } + } + } + + @Override + public void activateThrottling() {} + + @Override + public void deactivateThrottling() {} + + @Override + public int restoreLocalHistoryFromTranslog(TranslogRecoveryRunner translogRecoveryRunner) throws IOException { + return 0; + } + + @Override + public int fillSeqNoGaps(long primaryTerm) throws IOException { + return 0; + } + + @Override + public Engine recoverFromTranslog(TranslogRecoveryRunner translogRecoveryRunner, long recoverUpToSeqNo) throws IOException { + throw new UnsupportedOperationException("Read only replicas do not have an IndexWriter and cannot recover from a translog."); + } + + @Override + public void skipTranslogRecovery() { + // Do nothing. + } + + @Override + public void maybePruneDeletes() {} + + @Override + public void updateMaxUnsafeAutoIdTimestamp(long newTimestamp) {} + + @Override + public long getMaxSeqNoOfUpdatesOrDeletes() { + return localCheckpointTracker.getMaxSeqNo(); + } + + @Override + public void advanceMaxSeqNoOfUpdatesOrDeletes(long maxSeqNoOfUpdatesOnPrimary) {} + + public Translog getTranslog() { + return translog; + } + + @Override + public void onSettingsChanged(TimeValue translogRetentionAge, ByteSizeValue translogRetentionSize, long softDeletesRetentionOps) { + final TranslogDeletionPolicy translogDeletionPolicy = translog.getDeletionPolicy(); + translogDeletionPolicy.setRetentionAgeInMillis(translogRetentionAge.millis()); + translogDeletionPolicy.setRetentionSizeInBytes(translogRetentionSize.getBytes()); + } + + @Override + protected SegmentInfos getLastCommittedSegmentInfos() { + return lastCommittedSegmentInfos; + } + + @Override + protected SegmentInfos getLatestSegmentInfos() { + return readerManager.getSegmentInfos(); + } + + protected LocalCheckpointTracker getLocalCheckpointTracker() { + return localCheckpointTracker; + } + + private DirectoryReader getDirectoryReader() throws IOException { + // for segment replication: replicas should create the reader from store, we don't want an open IW on replicas. + return new SoftDeletesDirectoryReaderWrapper(DirectoryReader.open(store.directory()), Lucene.SOFT_DELETES_FIELD); + } + + private Translog openTranslog( + EngineConfig engineConfig, + TranslogDeletionPolicy translogDeletionPolicy, + LongSupplier globalCheckpointSupplier, + LongConsumer persistedSequenceNumberConsumer + ) throws IOException { + final TranslogConfig translogConfig = engineConfig.getTranslogConfig(); + final Map userData = lastCommittedSegmentInfos.getUserData(); + final String translogUUID = Objects.requireNonNull(userData.get(Translog.TRANSLOG_UUID_KEY)); + // We expect that this shard already exists, so it must already have an existing translog else something is badly wrong! 
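+        // (the translog UUID read from the last commit's user data above is what ties this engine to that existing translog)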
+ return new Translog( + translogConfig, + translogUUID, + translogDeletionPolicy, + globalCheckpointSupplier, + engineConfig.getPrimaryTermSupplier(), + persistedSequenceNumberConsumer + ); + } + + private TranslogDeletionPolicy getTranslogDeletionPolicy(EngineConfig engineConfig) { + TranslogDeletionPolicy customTranslogDeletionPolicy = null; + if (engineConfig.getCustomTranslogDeletionPolicyFactory() != null) { + customTranslogDeletionPolicy = engineConfig.getCustomTranslogDeletionPolicyFactory() + .create(engineConfig.getIndexSettings(), engineConfig.retentionLeasesSupplier()); + } + return Objects.requireNonNullElseGet( + customTranslogDeletionPolicy, + () -> new DefaultTranslogDeletionPolicy( + engineConfig.getIndexSettings().getTranslogRetentionSize().getBytes(), + engineConfig.getIndexSettings().getTranslogRetentionAge().getMillis(), + engineConfig.getIndexSettings().getTranslogRetentionTotalFiles() + ) + ); + } + +} diff --git a/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngineFactory.java b/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngineFactory.java new file mode 100644 index 0000000000000..45fe3086ac3f6 --- /dev/null +++ b/server/src/main/java/org/opensearch/index/engine/NRTReplicationEngineFactory.java @@ -0,0 +1,25 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.engine; + +/** + * Engine Factory implementation used with Segment Replication that wires up replica shards with an ${@link NRTReplicationEngine} + * and primary with an ${@link InternalEngine} + * + * @opensearch.internal + */ +public class NRTReplicationEngineFactory implements EngineFactory { + @Override + public Engine newReadWriteEngine(EngineConfig config) { + if (config.isReadOnlyReplica()) { + return new NRTReplicationEngine(config); + } + return new InternalEngine(config); + } +} diff --git a/server/src/main/java/org/opensearch/index/engine/NRTReplicationReaderManager.java b/server/src/main/java/org/opensearch/index/engine/NRTReplicationReaderManager.java new file mode 100644 index 0000000000000..16e615672a26f --- /dev/null +++ b/server/src/main/java/org/opensearch/index/engine/NRTReplicationReaderManager.java @@ -0,0 +1,92 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +package org.opensearch.index.engine; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.LeafReader; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.SegmentInfos; +import org.apache.lucene.index.SoftDeletesDirectoryReaderWrapper; +import org.apache.lucene.index.StandardDirectoryReader; +import org.opensearch.common.lucene.Lucene; +import org.opensearch.common.lucene.index.OpenSearchDirectoryReader; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Objects; + +/** + * This is an extension of {@link OpenSearchReaderManager} for use with {@link NRTReplicationEngine}. + * The manager holds a reference to the latest {@link SegmentInfos} object that is used to refresh a reader. 
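+ * <p>
+ * {@code updateSegments(SegmentInfos)} swaps the held reference and triggers a refresh; the refresh
+ * reopens a reader over the new infos while passing along the existing leaf readers so unchanged
+ * segments are not reloaded.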
+ * + * @opensearch.internal + */ +public class NRTReplicationReaderManager extends OpenSearchReaderManager { + + private final static Logger logger = LogManager.getLogger(NRTReplicationReaderManager.class); + private volatile SegmentInfos currentInfos; + + /** + * Creates and returns a new SegmentReplicationReaderManager from the given + * already-opened {@link OpenSearchDirectoryReader}, stealing + * the incoming reference. + * + * @param reader the SegmentReplicationReaderManager to use for future reopens + */ + NRTReplicationReaderManager(OpenSearchDirectoryReader reader) { + super(reader); + currentInfos = unwrapStandardReader(reader).getSegmentInfos(); + } + + @Override + protected OpenSearchDirectoryReader refreshIfNeeded(OpenSearchDirectoryReader referenceToRefresh) throws IOException { + Objects.requireNonNull(referenceToRefresh); + final List subs = new ArrayList<>(); + final StandardDirectoryReader standardDirectoryReader = unwrapStandardReader(referenceToRefresh); + for (LeafReaderContext ctx : standardDirectoryReader.leaves()) { + subs.add(ctx.reader()); + } + DirectoryReader innerReader = StandardDirectoryReader.open(referenceToRefresh.directory(), currentInfos, subs, null); + final DirectoryReader softDeletesDirectoryReaderWrapper = new SoftDeletesDirectoryReaderWrapper( + innerReader, + Lucene.SOFT_DELETES_FIELD + ); + logger.trace( + () -> new ParameterizedMessage("updated to SegmentInfosVersion=" + currentInfos.getVersion() + " reader=" + innerReader) + ); + return OpenSearchDirectoryReader.wrap(softDeletesDirectoryReaderWrapper, referenceToRefresh.shardId()); + } + + /** + * Update this reader's segments and refresh. + * + * @param infos {@link SegmentInfos} infos + * @throws IOException - When Refresh fails with an IOException. + */ + public synchronized void updateSegments(SegmentInfos infos) throws IOException { + currentInfos = infos; + maybeRefresh(); + } + + public SegmentInfos getSegmentInfos() { + return currentInfos; + } + + private StandardDirectoryReader unwrapStandardReader(OpenSearchDirectoryReader reader) { + final DirectoryReader delegate = reader.getDelegate(); + if (delegate instanceof SoftDeletesDirectoryReaderWrapper) { + return (StandardDirectoryReader) ((SoftDeletesDirectoryReaderWrapper) delegate).getDelegate(); + } + return (StandardDirectoryReader) delegate; + } +} diff --git a/server/src/main/java/org/opensearch/index/engine/ReadOnlyEngine.java b/server/src/main/java/org/opensearch/index/engine/ReadOnlyEngine.java index 2e3155a4d173e..23a86d8da5599 100644 --- a/server/src/main/java/org/opensearch/index/engine/ReadOnlyEngine.java +++ b/server/src/main/java/org/opensearch/index/engine/ReadOnlyEngine.java @@ -270,6 +270,11 @@ protected SegmentInfos getLastCommittedSegmentInfos() { return lastCommittedSegmentInfos; } + @Override + protected SegmentInfos getLatestSegmentInfos() { + return lastCommittedSegmentInfos; + } + @Override public String getHistoryUUID() { return lastCommittedSegmentInfos.userData.get(Engine.HISTORY_UUID_KEY); diff --git a/server/src/main/java/org/opensearch/index/seqno/LocalCheckpointTracker.java b/server/src/main/java/org/opensearch/index/seqno/LocalCheckpointTracker.java index dbcc5e2190006..d75893080c0d7 100644 --- a/server/src/main/java/org/opensearch/index/seqno/LocalCheckpointTracker.java +++ b/server/src/main/java/org/opensearch/index/seqno/LocalCheckpointTracker.java @@ -156,7 +156,7 @@ public synchronized void markSeqNoAsPersisted(final long seqNo) { public synchronized void fastForwardProcessedSeqNo(final long seqNo) 
{ advanceMaxSeqNo(seqNo); final long currentProcessedCheckpoint = processedCheckpoint.get(); - if (shouldUpdateSeqNo(seqNo, currentProcessedCheckpoint, persistedCheckpoint) == false) { + if (seqNo <= currentProcessedCheckpoint) { return; } processedCheckpoint.compareAndSet(currentProcessedCheckpoint, seqNo); diff --git a/server/src/main/java/org/opensearch/index/shard/IndexShard.java b/server/src/main/java/org/opensearch/index/shard/IndexShard.java index 60a3305370c2a..995a92e94aeb3 100644 --- a/server/src/main/java/org/opensearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/opensearch/index/shard/IndexShard.java @@ -3160,7 +3160,8 @@ private EngineConfig newEngineConfig(LongSupplier globalCheckpointSupplier) { globalCheckpointSupplier, replicationTracker::getRetentionLeases, () -> getOperationPrimaryTerm(), - tombstoneDocSupplier() + tombstoneDocSupplier(), + indexSettings.isSegRepEnabled() && shardRouting.primary() == false ); } diff --git a/server/src/main/java/org/opensearch/indices/IndicesService.java b/server/src/main/java/org/opensearch/indices/IndicesService.java index 5ce10069aaa89..79fd2893fb78c 100644 --- a/server/src/main/java/org/opensearch/indices/IndicesService.java +++ b/server/src/main/java/org/opensearch/indices/IndicesService.java @@ -109,6 +109,7 @@ import org.opensearch.index.engine.EngineConfigFactory; import org.opensearch.index.engine.EngineFactory; import org.opensearch.index.engine.InternalEngineFactory; +import org.opensearch.index.engine.NRTReplicationEngineFactory; import org.opensearch.index.engine.NoOpEngine; import org.opensearch.index.fielddata.IndexFieldDataCache; import org.opensearch.index.flush.FlushStats; @@ -764,6 +765,9 @@ private EngineFactory getEngineFactory(final IndexSettings idxSettings) { .filter(maybe -> Objects.requireNonNull(maybe).isPresent()) .collect(Collectors.toList()); if (engineFactories.isEmpty()) { + if (idxSettings.isSegRepEnabled()) { + return new NRTReplicationEngineFactory(); + } return new InternalEngineFactory(); } else if (engineFactories.size() == 1) { assert engineFactories.get(0).isPresent(); diff --git a/server/src/test/java/org/opensearch/index/engine/EngineConfigFactoryTests.java b/server/src/test/java/org/opensearch/index/engine/EngineConfigFactoryTests.java index 8030619500278..7ddd92ea7b36e 100644 --- a/server/src/test/java/org/opensearch/index/engine/EngineConfigFactoryTests.java +++ b/server/src/test/java/org/opensearch/index/engine/EngineConfigFactoryTests.java @@ -65,7 +65,8 @@ public void testCreateEngineConfigFromFactory() { null, () -> new RetentionLeases(0, 0, Collections.emptyList()), null, - null + null, + false ); assertNotNull(config.getCodec()); @@ -141,7 +142,8 @@ public void testCreateCodecServiceFromFactory() { null, () -> new RetentionLeases(0, 0, Collections.emptyList()), null, - null + null, + false ); assertNotNull(config.getCodec()); } diff --git a/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java b/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java new file mode 100644 index 0000000000000..6aa00bb9312dd --- /dev/null +++ b/server/src/test/java/org/opensearch/index/engine/NRTReplicationEngineTests.java @@ -0,0 +1,239 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */
+
+package org.opensearch.index.engine;
+
+import org.apache.lucene.index.IndexCommit;
+import org.apache.lucene.index.NoMergePolicy;
+import org.apache.lucene.index.SegmentInfos;
+import org.hamcrest.MatcherAssert;
+import org.opensearch.common.concurrent.GatedCloseable;
+import org.opensearch.common.lucene.Lucene;
+import org.opensearch.common.lucene.search.Queries;
+import org.opensearch.index.mapper.ParsedDocument;
+import org.opensearch.index.seqno.SequenceNumbers;
+import org.opensearch.index.store.Store;
+import org.opensearch.index.translog.TestTranslog;
+import org.opensearch.index.translog.Translog;
+
+import java.io.IOException;
+import java.nio.file.Path;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.stream.Collectors;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.notNullValue;
+import static org.opensearch.index.seqno.SequenceNumbers.NO_OPS_PERFORMED;
+
+public class NRTReplicationEngineTests extends EngineTestCase {
+
+    public void testCreateEngine() throws IOException {
+        final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED);
+        try (
+            final Store nrtEngineStore = createStore();
+            final NRTReplicationEngine nrtEngine = buildNrtReplicaEngine(globalCheckpoint, nrtEngineStore);
+        ) {
+            final SegmentInfos latestSegmentInfos = nrtEngine.getLatestSegmentInfos();
+            final SegmentInfos lastCommittedSegmentInfos = nrtEngine.getLastCommittedSegmentInfos();
+            assertEquals(latestSegmentInfos.version, lastCommittedSegmentInfos.version);
+            assertEquals(latestSegmentInfos.getGeneration(), lastCommittedSegmentInfos.getGeneration());
+            assertEquals(latestSegmentInfos.getUserData(), lastCommittedSegmentInfos.getUserData());
+            assertEquals(latestSegmentInfos.files(true), lastCommittedSegmentInfos.files(true));
+
+            assertTrue(nrtEngine.segments(true).isEmpty());
+
+            try (final GatedCloseable<IndexCommit> indexCommitGatedCloseable = nrtEngine.acquireLastIndexCommit(false)) {
+                final IndexCommit indexCommit = indexCommitGatedCloseable.get();
+                assertEquals(indexCommit.getUserData(), lastCommittedSegmentInfos.getUserData());
+                assertTrue(indexCommit.getFileNames().containsAll(lastCommittedSegmentInfos.files(true)));
+            }
+        }
+    }
+
+    public void testEngineWritesOpsToTranslog() throws Exception {
+        final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED);
+
+        try (
+            final Store nrtEngineStore = createStore();
+            final NRTReplicationEngine nrtEngine = buildNrtReplicaEngine(globalCheckpoint, nrtEngineStore);
+        ) {
+            List<Engine.Operation> operations = generateHistoryOnReplica(
+                between(1, 500),
+                randomBoolean(),
+                randomBoolean(),
+                randomBoolean()
+            );
+            for (Engine.Operation op : operations) {
+                applyOperation(engine, op);
+                applyOperation(nrtEngine, op);
+            }
+
+            assertEquals(nrtEngine.getTranslogLastWriteLocation(), engine.getTranslogLastWriteLocation());
+            assertEquals(nrtEngine.getLastSyncedGlobalCheckpoint(), engine.getLastSyncedGlobalCheckpoint());
+
+            // we don't index into nrtEngine, so get the doc ids from the regular engine.
+            final List<DocIdSeqNoAndSource> docs = getDocIds(engine, true);
+
+            // recover a new engine from the nrtEngine's translog.
+            nrtEngine.syncTranslog();
+            try (InternalEngine engine = new InternalEngine(nrtEngine.config())) {
+                engine.recoverFromTranslog(translogHandler, Long.MAX_VALUE);
+                assertEquals(getDocIds(engine, true), docs);
+            }
+            assertEngineCleanedUp(nrtEngine, nrtEngine.getTranslog());
+        }
+    }
+
+    public void testUpdateSegments() throws Exception {
+        final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED);
+
+        try (
+            final Store nrtEngineStore = createStore();
+            final NRTReplicationEngine nrtEngine = buildNrtReplicaEngine(globalCheckpoint, nrtEngineStore);
+        ) {
+            // add docs to the primary engine.
+            List<Engine.Operation> operations = generateHistoryOnReplica(between(1, 500), randomBoolean(), randomBoolean(), randomBoolean())
+                .stream()
+                .filter(op -> op.operationType().equals(Engine.Operation.TYPE.INDEX))
+                .collect(Collectors.toList());
+            for (Engine.Operation op : operations) {
+                applyOperation(engine, op);
+                applyOperation(nrtEngine, op);
+            }
+
+            engine.refresh("test");
+
+            nrtEngine.updateSegments(engine.getLatestSegmentInfos(), engine.getProcessedLocalCheckpoint());
+            assertMatchingSegmentsAndCheckpoints(nrtEngine);
+
+            // assert a doc from the operations exists.
+            final ParsedDocument parsedDoc = createParsedDoc(operations.stream().findFirst().get().id(), null);
+            try (Engine.GetResult getResult = engine.get(newGet(true, parsedDoc), engine::acquireSearcher)) {
+                assertThat(getResult.exists(), equalTo(true));
+                assertThat(getResult.docIdAndVersion(), notNullValue());
+            }
+
+            try (Engine.GetResult getResult = nrtEngine.get(newGet(true, parsedDoc), nrtEngine::acquireSearcher)) {
+                assertThat(getResult.exists(), equalTo(true));
+                assertThat(getResult.docIdAndVersion(), notNullValue());
+            }
+
+            // Flush the primary and update the NRTEngine with the latest committed infos.
+            engine.flush();
+            nrtEngine.syncTranslog(); // to advance persisted checkpoint
+
+            Set<Long> seqNos = operations.stream().map(Engine.Operation::seqNo).collect(Collectors.toSet());
+
+            try (Translog.Snapshot snapshot = nrtEngine.getTranslog().newSnapshot()) {
+                assertThat(snapshot.totalOperations(), equalTo(operations.size()));
+                assertThat(
+                    TestTranslog.drainSnapshot(snapshot, false).stream().map(Translog.Operation::seqNo).collect(Collectors.toSet()),
+                    equalTo(seqNos)
+                );
+            }
+
+            nrtEngine.updateSegments(engine.getLastCommittedSegmentInfos(), engine.getProcessedLocalCheckpoint());
+            assertMatchingSegmentsAndCheckpoints(nrtEngine);
+
+            assertEquals(
+                nrtEngine.getTranslog().getGeneration().translogFileGeneration,
+                engine.getTranslog().getGeneration().translogFileGeneration
+            );
+
+            try (Translog.Snapshot snapshot = nrtEngine.getTranslog().newSnapshot()) {
+                assertThat(snapshot.totalOperations(), equalTo(operations.size()));
+                assertThat(
+                    TestTranslog.drainSnapshot(snapshot, false).stream().map(Translog.Operation::seqNo).collect(Collectors.toSet()),
+                    equalTo(seqNos)
+                );
+            }
+
+            // Ensure the same hit count between engines.
+            int expectedDocCount;
+            try (final Engine.Searcher test = engine.acquireSearcher("test")) {
+                expectedDocCount = test.count(Queries.newMatchAllQuery());
+                assertSearcherHits(nrtEngine, expectedDocCount);
+            }
+            assertEngineCleanedUp(nrtEngine, nrtEngine.getTranslog());
+        }
+    }
+
+    public void testTrimTranslogOps() throws Exception {
+        final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED);
+
+        try (
+            final Store nrtEngineStore = createStore();
+            final NRTReplicationEngine nrtEngine = buildNrtReplicaEngine(globalCheckpoint, nrtEngineStore);
+        ) {
+            List<Engine.Operation> operations = generateHistoryOnReplica(
+                between(1, 100),
+                randomBoolean(),
+                randomBoolean(),
+                randomBoolean()
+            );
+            applyOperations(nrtEngine, operations);
+            Set<Long> seqNos = operations.stream().map(Engine.Operation::seqNo).collect(Collectors.toSet());
+            try (Translog.Snapshot snapshot = nrtEngine.getTranslog().newSnapshot()) {
+                assertThat(snapshot.totalOperations(), equalTo(operations.size()));
+                assertThat(
+                    TestTranslog.drainSnapshot(snapshot, false).stream().map(Translog.Operation::seqNo).collect(Collectors.toSet()),
+                    equalTo(seqNos)
+                );
+            }
+            nrtEngine.rollTranslogGeneration();
+            nrtEngine.trimOperationsFromTranslog(primaryTerm.get(), NO_OPS_PERFORMED);
+            try (Translog.Snapshot snapshot = getTranslog(engine).newSnapshot()) {
+                assertThat(snapshot.totalOperations(), equalTo(0));
+                assertNull(snapshot.next());
+            }
+        }
+    }
+
+    private void assertMatchingSegmentsAndCheckpoints(NRTReplicationEngine nrtEngine) throws IOException {
+        assertEquals(engine.getPersistedLocalCheckpoint(), nrtEngine.getPersistedLocalCheckpoint());
+        assertEquals(engine.getProcessedLocalCheckpoint(), nrtEngine.getProcessedLocalCheckpoint());
+        assertEquals(engine.getLocalCheckpointTracker().getMaxSeqNo(), nrtEngine.getLocalCheckpointTracker().getMaxSeqNo());
+        assertEquals(engine.getLatestSegmentInfos().files(true), nrtEngine.getLatestSegmentInfos().files(true));
+        assertEquals(engine.getLatestSegmentInfos().getUserData(), nrtEngine.getLatestSegmentInfos().getUserData());
+        assertEquals(engine.getLatestSegmentInfos().getVersion(), nrtEngine.getLatestSegmentInfos().getVersion());
+        assertEquals(engine.segments(true), nrtEngine.segments(true));
+    }
+
+    private void assertSearcherHits(Engine engine, int hits) {
+        try (final Engine.Searcher test = engine.acquireSearcher("test")) {
+            MatcherAssert.assertThat(test, EngineSearcherTotalHitsMatcher.engineSearcherTotalHits(hits));
+        }
+    }
+
+    private NRTReplicationEngine buildNrtReplicaEngine(AtomicLong globalCheckpoint, Store store) throws IOException {
+        Lucene.cleanLuceneIndex(store.directory());
+        final Path translogDir = createTempDir();
+        final EngineConfig replicaConfig = config(
+            defaultSettings,
+            store,
+            translogDir,
+            NoMergePolicy.INSTANCE,
+            null,
+            null,
+            globalCheckpoint::get
+        );
+        if (Lucene.indexExists(store.directory()) == false) {
+            store.createEmpty(replicaConfig.getIndexSettings().getIndexVersionCreated().luceneVersion);
+            final String translogUuid = Translog.createEmptyTranslog(
+                replicaConfig.getTranslogConfig().getTranslogPath(),
+                SequenceNumbers.NO_OPS_PERFORMED,
+                shardId,
+                primaryTerm.get()
+            );
+            store.associateIndexWithNewTranslog(translogUuid);
+        }
+        return new NRTReplicationEngine(replicaConfig);
+    }
+}
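The tests above all rely on one Lucene capability: a reader can be opened directly over a SegmentInfos object instead of being refreshed from a local IndexWriter, which is what lets a replica serve segments it merely received from the primary. The following standalone sketch (the demo class name is illustrative and not part of this patch) shows that reader-swap pattern with plain Lucene APIs; in the engine itself the SegmentInfos would arrive over the wire from the primary's checkpoint rather than from a local commit.

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.SegmentInfos;
import org.apache.lucene.index.StandardDirectoryReader;
import org.apache.lucene.store.ByteBuffersDirectory;
import org.apache.lucene.store.Directory;

import java.io.IOException;
import java.util.Collections;

public class ReaderSwapDemo {
    public static void main(String[] args) throws IOException {
        try (Directory dir = new ByteBuffersDirectory()) {
            // "Primary" side: write a document and commit so a segments_N file exists.
            try (IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()))) {
                Document doc = new Document();
                doc.add(new StringField("id", "1", Field.Store.YES));
                writer.addDocument(doc);
                writer.commit();
            }
            // "Replica" side: load the committed SegmentInfos and open a reader over it
            // directly, with no IndexWriter involved. Passing an empty list of old
            // readers skips leaf reuse; NRTReplicationReaderManager passes the current
            // leaves so unchanged segments are shared across refreshes.
            SegmentInfos infos = SegmentInfos.readLatestCommit(dir);
            try (DirectoryReader reader = StandardDirectoryReader.open(dir, infos, Collections.emptyList(), null)) {
                System.out.println("docs visible: " + reader.numDocs()); // prints: docs visible: 1
            }
        }
    }
}

diff --git a/server/src/test/java/org/opensearch/index/seqno/LocalCheckpointTrackerTests.java b/server/src/test/java/org/opensearch/index/seqno/LocalCheckpointTrackerTests.java
index 237066e549b09..3a450e1f72a8d 100644
---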
a/server/src/test/java/org/opensearch/index/seqno/LocalCheckpointTrackerTests.java +++ b/server/src/test/java/org/opensearch/index/seqno/LocalCheckpointTrackerTests.java @@ -332,59 +332,22 @@ public void testContains() { assertThat(tracker.hasProcessed(seqNo), equalTo(seqNo <= localCheckpoint || seqNos.contains(seqNo))); } - public void testFastForwardProcessedNoPersistentUpdate() { + public void testFastForwardProcessedSeqNo() { // base case with no persistent checkpoint update long seqNo1; assertThat(tracker.getProcessedCheckpoint(), equalTo(SequenceNumbers.NO_OPS_PERFORMED)); seqNo1 = tracker.generateSeqNo(); assertThat(seqNo1, equalTo(0L)); tracker.fastForwardProcessedSeqNo(seqNo1); - assertThat(tracker.getProcessedCheckpoint(), equalTo(-1L)); - } + assertThat(tracker.getProcessedCheckpoint(), equalTo(seqNo1)); - public void testFastForwardProcessedPersistentUpdate() { - // base case with persistent checkpoint update - long seqNo1; - assertThat(tracker.getProcessedCheckpoint(), equalTo(SequenceNumbers.NO_OPS_PERFORMED)); - seqNo1 = tracker.generateSeqNo(); - assertThat(seqNo1, equalTo(0L)); - - tracker.markSeqNoAsPersisted(seqNo1); - assertThat(tracker.getPersistedCheckpoint(), equalTo(0L)); + // idempotent case tracker.fastForwardProcessedSeqNo(seqNo1); assertThat(tracker.getProcessedCheckpoint(), equalTo(0L)); assertThat(tracker.hasProcessed(0L), equalTo(true)); - assertThat(tracker.hasProcessed(atLeast(1)), equalTo(false)); - // idempotent case - tracker.fastForwardProcessedSeqNo(seqNo1); + tracker.fastForwardProcessedSeqNo(-1); assertThat(tracker.getProcessedCheckpoint(), equalTo(0L)); assertThat(tracker.hasProcessed(0L), equalTo(true)); - assertThat(tracker.hasProcessed(atLeast(1)), equalTo(false)); - - } - - public void testFastForwardProcessedPersistentUpdate2() { - long seqNo1, seqNo2; - assertThat(tracker.getProcessedCheckpoint(), equalTo(SequenceNumbers.NO_OPS_PERFORMED)); - seqNo1 = tracker.generateSeqNo(); - seqNo2 = tracker.generateSeqNo(); - assertThat(seqNo1, equalTo(0L)); - assertThat(seqNo2, equalTo(1L)); - tracker.markSeqNoAsPersisted(seqNo1); - tracker.markSeqNoAsPersisted(seqNo2); - assertThat(tracker.getProcessedCheckpoint(), equalTo(-1L)); - assertThat(tracker.getPersistedCheckpoint(), equalTo(1L)); - - tracker.fastForwardProcessedSeqNo(seqNo2); - assertThat(tracker.getProcessedCheckpoint(), equalTo(1L)); - assertThat(tracker.hasProcessed(seqNo1), equalTo(true)); - assertThat(tracker.hasProcessed(seqNo2), equalTo(true)); - - tracker.fastForwardProcessedSeqNo(seqNo1); - assertThat(tracker.getProcessedCheckpoint(), equalTo(1L)); - assertThat(tracker.hasProcessed(between(0, 1)), equalTo(true)); - assertThat(tracker.hasProcessed(atLeast(2)), equalTo(false)); - assertThat(tracker.getMaxSeqNo(), equalTo(1L)); } } diff --git a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java index bf9671964a210..49d0c089f072b 100644 --- a/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/opensearch/index/shard/IndexShardTests.java @@ -101,6 +101,8 @@ import org.opensearch.index.engine.EngineTestCase; import org.opensearch.index.engine.InternalEngine; import org.opensearch.index.engine.InternalEngineFactory; +import org.opensearch.index.engine.NRTReplicationEngineFactory; +import org.opensearch.index.engine.NRTReplicationEngine; import org.opensearch.index.engine.ReadOnlyEngine; import org.opensearch.index.fielddata.FieldDataStats; import 
org.opensearch.index.fielddata.IndexFieldData; @@ -136,6 +138,7 @@ import org.opensearch.indices.recovery.RecoveryTarget; import org.opensearch.indices.replication.checkpoint.SegmentReplicationCheckpointPublisher; import org.opensearch.indices.replication.common.ReplicationLuceneIndex; +import org.opensearch.indices.replication.common.ReplicationType; import org.opensearch.repositories.IndexId; import org.opensearch.snapshots.Snapshot; import org.opensearch.snapshots.SnapshotId; @@ -4167,14 +4170,14 @@ public void testSnapshotWhileResettingEngine() throws Exception { @Override public InternalEngine recoverFromTranslog(TranslogRecoveryRunner translogRecoveryRunner, long recoverUpToSeqNo) throws IOException { - InternalEngine internalEngine = super.recoverFromTranslog(translogRecoveryRunner, recoverUpToSeqNo); + InternalEngine engine = super.recoverFromTranslog(translogRecoveryRunner, recoverUpToSeqNo); readyToSnapshotLatch.countDown(); try { snapshotDoneLatch.await(); } catch (InterruptedException e) { throw new AssertionError(e); } - return internalEngine; + return engine; } }); @@ -4447,6 +4450,27 @@ protected void ensureMaxSeqNoEqualsToGlobalCheckpoint(SeqNoStats seqNoStats) { closeShards(readonlyShard); } + public void testReadOnlyReplicaEngineConfig() throws IOException { + Settings primarySettings = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .build(); + final IndexShard primaryShard = newStartedShard(false, primarySettings, new NRTReplicationEngineFactory()); + assertFalse(primaryShard.getEngine().config().isReadOnlyReplica()); + assertEquals(primaryShard.getEngine().getClass(), InternalEngine.class); + + Settings replicaSettings = Settings.builder() + .put(primarySettings) + .put(IndexMetadata.SETTING_REPLICATION_TYPE, ReplicationType.SEGMENT) + .build(); + final IndexShard replicaShard = newStartedShard(false, replicaSettings, new NRTReplicationEngineFactory()); + assertTrue(replicaShard.getEngine().config().isReadOnlyReplica()); + assertEquals(replicaShard.getEngine().getClass(), NRTReplicationEngine.class); + + closeShards(primaryShard, replicaShard); + } + public void testCloseShardWhileEngineIsWarming() throws Exception { CountDownLatch warmerStarted = new CountDownLatch(1); CountDownLatch warmerBlocking = new CountDownLatch(1); diff --git a/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java b/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java index 2bce5a7c81794..66c697d83510b 100644 --- a/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java +++ b/test/framework/src/main/java/org/opensearch/index/engine/EngineTestCase.java @@ -328,24 +328,26 @@ public void tearDown() throws Exception { super.tearDown(); try { if (engine != null && engine.isClosed.get() == false) { - engine.getTranslog().getDeletionPolicy().assertNoOpenTranslogRefs(); - assertConsistentHistoryBetweenTranslogAndLuceneIndex(engine); - assertNoInFlightDocuments(engine); - assertMaxSeqNoInCommitUserData(engine); - assertAtMostOneLuceneDocumentPerSequenceNumber(engine); + assertEngineCleanedUp(engine, engine.getTranslog()); } if (replicaEngine != null && replicaEngine.isClosed.get() == false) { - replicaEngine.getTranslog().getDeletionPolicy().assertNoOpenTranslogRefs(); - assertConsistentHistoryBetweenTranslogAndLuceneIndex(replicaEngine); - assertNoInFlightDocuments(replicaEngine); - 
assertMaxSeqNoInCommitUserData(replicaEngine); - assertAtMostOneLuceneDocumentPerSequenceNumber(replicaEngine); + assertEngineCleanedUp(replicaEngine, replicaEngine.getTranslog()); } } finally { IOUtils.close(replicaEngine, storeReplica, engine, store, () -> terminate(threadPool)); } } + protected void assertEngineCleanedUp(Engine engine, Translog translog) throws Exception { + if (engine.isClosed.get() == false) { + translog.getDeletionPolicy().assertNoOpenTranslogRefs(); + assertConsistentHistoryBetweenTranslogAndLuceneIndex(engine); + assertNoInFlightDocuments(engine); + assertMaxSeqNoInCommitUserData(engine); + assertAtMostOneLuceneDocumentPerSequenceNumber(engine); + } + } + protected static ParseContext.Document testDocumentWithTextField() { return testDocumentWithTextField("test"); } From 0b77055037e5b2ad0b435d26a5b29565c74fdb66 Mon Sep 17 00:00:00 2001 From: Tianli Feng Date: Tue, 24 May 2022 23:00:18 -0700 Subject: [PATCH 14/16] Rename master to cluster_manager in the XContent Parser of ClusterHealthResponse (#3432) Signed-off-by: Tianli Feng --- .../cluster/health/ClusterHealthResponse.java | 5 ++- .../cluster/health/ClusterStateHealth.java | 24 +++++----- .../health/ClusterHealthResponsesTests.java | 44 +++++++++++++++++-- 3 files changed, 57 insertions(+), 16 deletions(-) diff --git a/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponse.java b/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponse.java index e4ec75fb7045a..a67ef721879ce 100644 --- a/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponse.java +++ b/server/src/main/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponse.java @@ -71,6 +71,7 @@ public class ClusterHealthResponse extends ActionResponse implements StatusToXCo private static final String TIMED_OUT = "timed_out"; private static final String NUMBER_OF_NODES = "number_of_nodes"; private static final String NUMBER_OF_DATA_NODES = "number_of_data_nodes"; + @Deprecated private static final String DISCOVERED_MASTER = "discovered_master"; private static final String DISCOVERED_CLUSTER_MANAGER = "discovered_cluster_manager"; private static final String NUMBER_OF_PENDING_TASKS = "number_of_pending_tasks"; @@ -95,6 +96,7 @@ public class ClusterHealthResponse extends ActionResponse implements StatusToXCo // ClusterStateHealth fields int numberOfNodes = (int) parsedObjects[i++]; int numberOfDataNodes = (int) parsedObjects[i++]; + boolean hasDiscoveredMaster = Boolean.TRUE.equals(parsedObjects[i++]); boolean hasDiscoveredClusterManager = Boolean.TRUE.equals(parsedObjects[i++]); int activeShards = (int) parsedObjects[i++]; int relocatingShards = (int) parsedObjects[i++]; @@ -123,7 +125,7 @@ public class ClusterHealthResponse extends ActionResponse implements StatusToXCo unassignedShards, numberOfNodes, numberOfDataNodes, - hasDiscoveredClusterManager, + hasDiscoveredClusterManager || hasDiscoveredMaster, activeShardsPercent, status, indices @@ -157,6 +159,7 @@ public class ClusterHealthResponse extends ActionResponse implements StatusToXCo PARSER.declareInt(constructorArg(), new ParseField(NUMBER_OF_NODES)); PARSER.declareInt(constructorArg(), new ParseField(NUMBER_OF_DATA_NODES)); PARSER.declareBoolean(optionalConstructorArg(), new ParseField(DISCOVERED_MASTER)); + PARSER.declareBoolean(optionalConstructorArg(), new ParseField(DISCOVERED_CLUSTER_MANAGER)); PARSER.declareInt(constructorArg(), new ParseField(ACTIVE_SHARDS)); PARSER.declareInt(constructorArg(), 
new ParseField(RELOCATING_SHARDS));
         PARSER.declareInt(constructorArg(), new ParseField(ACTIVE_PRIMARY_SHARDS));
diff --git a/server/src/main/java/org/opensearch/cluster/health/ClusterStateHealth.java b/server/src/main/java/org/opensearch/cluster/health/ClusterStateHealth.java
index 4c8be0f2d73f0..f1fe680f80769 100644
--- a/server/src/main/java/org/opensearch/cluster/health/ClusterStateHealth.java
+++ b/server/src/main/java/org/opensearch/cluster/health/ClusterStateHealth.java
@@ -58,7 +58,7 @@ public final class ClusterStateHealth implements Iterable<ClusterIndexHealth>, W
     private final int numberOfNodes;
     private final int numberOfDataNodes;
-    private final boolean hasDiscoveredMaster;
+    private final boolean hasDiscoveredClusterManager;
     private final int activeShards;
     private final int relocatingShards;
     private final int activePrimaryShards;
@@ -86,7 +86,7 @@ public ClusterStateHealth(final ClusterState clusterState) {
     public ClusterStateHealth(final ClusterState clusterState, final String[] concreteIndices) {
         numberOfNodes = clusterState.nodes().getSize();
         numberOfDataNodes = clusterState.nodes().getDataNodes().size();
-        hasDiscoveredMaster = clusterState.nodes().getMasterNodeId() != null;
+        hasDiscoveredClusterManager = clusterState.nodes().getMasterNodeId() != null;
         indices = new HashMap<>();
         for (String index : concreteIndices) {
             IndexRoutingTable indexRoutingTable = clusterState.routingTable().index(index);
@@ -155,9 +155,9 @@ public ClusterStateHealth(final StreamInput in) throws IOException {
         numberOfNodes = in.readVInt();
         numberOfDataNodes = in.readVInt();
         if (in.getVersion().onOrAfter(Version.V_1_0_0)) {
-            hasDiscoveredMaster = in.readBoolean();
+            hasDiscoveredClusterManager = in.readBoolean();
         } else {
-            hasDiscoveredMaster = true;
+            hasDiscoveredClusterManager = true;
         }
         status = ClusterHealthStatus.fromValue(in.readByte());
         int size = in.readVInt();
@@ -180,7 +180,7 @@ public ClusterStateHealth(
         int unassignedShards,
         int numberOfNodes,
         int numberOfDataNodes,
-        boolean hasDiscoveredMaster,
+        boolean hasDiscoveredClusterManager,
         double activeShardsPercent,
         ClusterHealthStatus status,
         Map<String, ClusterIndexHealth> indices
@@ -192,7 +192,7 @@ public ClusterStateHealth(
         this.unassignedShards = unassignedShards;
         this.numberOfNodes = numberOfNodes;
         this.numberOfDataNodes = numberOfDataNodes;
-        this.hasDiscoveredMaster = hasDiscoveredMaster;
+        this.hasDiscoveredClusterManager = hasDiscoveredClusterManager;
         this.activeShardsPercent = activeShardsPercent;
         this.status = status;
         this.indices = indices;
@@ -239,7 +239,7 @@ public double getActiveShardsPercent() {
     }
 
     public boolean hasDiscoveredMaster() {
-        return hasDiscoveredMaster;
+        return hasDiscoveredClusterManager;
     }
 
     @Override
@@ -257,7 +257,7 @@ public void writeTo(final StreamOutput out) throws IOException {
         out.writeVInt(numberOfNodes);
         out.writeVInt(numberOfDataNodes);
         if (out.getVersion().onOrAfter(Version.V_1_0_0)) {
-            out.writeBoolean(hasDiscoveredMaster);
+            out.writeBoolean(hasDiscoveredClusterManager);
         }
         out.writeByte(status.value());
         out.writeVInt(indices.size());
@@ -274,8 +274,8 @@ public String toString() {
             + numberOfNodes
             + ", numberOfDataNodes="
             + numberOfDataNodes
-            + ", hasDiscoveredMaster="
-            + hasDiscoveredMaster
+            + ", hasDiscoveredClusterManager="
+            + hasDiscoveredClusterManager
             + ", activeShards="
             + activeShards
             + ", relocatingShards="
@@ -302,7 +302,7 @@ public boolean equals(Object o) {
         ClusterStateHealth that = (ClusterStateHealth) o;
         return numberOfNodes == that.numberOfNodes
             && numberOfDataNodes == that.numberOfDataNodes
-            && hasDiscoveredMaster == that.hasDiscoveredMaster
+            && hasDiscoveredClusterManager == that.hasDiscoveredClusterManager
             && activeShards == that.activeShards
             && relocatingShards == that.relocatingShards
             && activePrimaryShards == that.activePrimaryShards
@@ -318,7 +318,7 @@ public int hashCode() {
         return Objects.hash(
             numberOfNodes,
             numberOfDataNodes,
-            hasDiscoveredMaster,
+            hasDiscoveredClusterManager,
             activeShards,
             relocatingShards,
             activePrimaryShards,
diff --git a/server/src/test/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponsesTests.java b/server/src/test/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponsesTests.java
index 3db20fd3404a7..844dfe9c6c00f 100644
--- a/server/src/test/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponsesTests.java
+++ b/server/src/test/java/org/opensearch/action/admin/cluster/health/ClusterHealthResponsesTests.java
@@ -157,13 +157,13 @@ ClusterHealthResponse maybeSerialize(ClusterHealthResponse clusterHealth) throws
         return clusterHealth;
     }
 
-    public void testParseFromXContentWithDiscoveredMasterField() throws IOException {
+    public void testParseFromXContentWithDiscoveredClusterManagerField() throws IOException {
         try (
             XContentParser parser = JsonXContent.jsonXContent.createParser(
                 NamedXContentRegistry.EMPTY,
                 DeprecationHandler.THROW_UNSUPPORTED_OPERATION,
                 "{\"cluster_name\":\"535799904437:7-1-3-node\",\"status\":\"green\","
-                    + "\"timed_out\":false,\"number_of_nodes\":6,\"number_of_data_nodes\":3,\"discovered_master\":true,"
+                    + "\"timed_out\":false,\"number_of_nodes\":6,\"number_of_data_nodes\":3,\"discovered_cluster_manager\":true,"
                     + "\"active_primary_shards\":4,\"active_shards\":5,\"relocating_shards\":0,\"initializing_shards\":0,"
                     + "\"unassigned_shards\":0,\"delayed_unassigned_shards\":0,\"number_of_pending_tasks\":0,"
                     + "\"number_of_in_flight_fetch\":0,\"task_max_waiting_in_queue_millis\":0,"
@@ -179,7 +179,7 @@ public void testParseFromXContentWithDiscoveredMasterField() throws IOException
         }
     }
 
-    public void testParseFromXContentWithoutDiscoveredMasterField() throws IOException {
+    public void testParseFromXContentWithoutDiscoveredClusterManagerField() throws IOException {
         try (
             XContentParser parser = JsonXContent.jsonXContent.createParser(
                 NamedXContentRegistry.EMPTY,
@@ -200,6 +200,44 @@ public void testParseFromXContentWithoutDiscoveredMasterField() throws IOExcepti
         }
     }
 
+    /**
+     * Validate that a ClusterHealthResponse can be parsed from JsonXContent that contains the deprecated "discovered_master" field.
+     * As of 2.0, to support inclusive language, the "discovered_master" field is replaced by "discovered_cluster_manager".
+ */ + public void testParseFromXContentWithDeprecatedDiscoveredMasterField() throws IOException { + try ( + XContentParser parser = JsonXContent.jsonXContent.createParser( + NamedXContentRegistry.EMPTY, + DeprecationHandler.THROW_UNSUPPORTED_OPERATION, + "{\"cluster_name\":\"opensearch-cluster\",\"status\":\"green\",\"timed_out\":false," + + "\"number_of_nodes\":6,\"number_of_data_nodes\":3,\"discovered_cluster_manager\":true,\"discovered_master\":true," + + "\"active_primary_shards\":4,\"active_shards\":5,\"relocating_shards\":0,\"initializing_shards\":0," + + "\"unassigned_shards\":0,\"delayed_unassigned_shards\":0,\"number_of_pending_tasks\":0," + + "\"number_of_in_flight_fetch\":0,\"task_max_waiting_in_queue_millis\":0," + + "\"active_shards_percent_as_number\":100}" + ) + ) { + ClusterHealthResponse clusterHealth = ClusterHealthResponse.fromXContent(parser); + assertThat(clusterHealth.hasDiscoveredMaster(), Matchers.equalTo(true)); + } + + try ( + XContentParser parser = JsonXContent.jsonXContent.createParser( + NamedXContentRegistry.EMPTY, + DeprecationHandler.THROW_UNSUPPORTED_OPERATION, + "{\"cluster_name\":\"opensearch-cluster\",\"status\":\"green\"," + + "\"timed_out\":false,\"number_of_nodes\":6,\"number_of_data_nodes\":3,\"discovered_master\":true," + + "\"active_primary_shards\":4,\"active_shards\":5,\"relocating_shards\":0,\"initializing_shards\":0," + + "\"unassigned_shards\":0,\"delayed_unassigned_shards\":0,\"number_of_pending_tasks\":0," + + "\"number_of_in_flight_fetch\":0,\"task_max_waiting_in_queue_millis\":0," + + "\"active_shards_percent_as_number\":100}" + ) + ) { + ClusterHealthResponse clusterHealth = ClusterHealthResponse.fromXContent(parser); + assertThat(clusterHealth.hasDiscoveredMaster(), Matchers.equalTo(true)); + } + } + @Override protected ClusterHealthResponse doParseInstance(XContentParser parser) { return ClusterHealthResponse.fromXContent(parser); From ff75307952eebee51a964f59321a5933283ac0eb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 25 May 2022 10:20:52 -0700 Subject: [PATCH 15/16] Bump hadoop-minicluster in /test/fixtures/hdfs-fixture (#3359) Bumps hadoop-minicluster from 3.3.2 to 3.3.3. --- updated-dependencies: - dependency-name: org.apache.hadoop:hadoop-minicluster dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- test/fixtures/hdfs-fixture/build.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/fixtures/hdfs-fixture/build.gradle b/test/fixtures/hdfs-fixture/build.gradle index 2ff444c03b123..0795cecaa36cc 100644 --- a/test/fixtures/hdfs-fixture/build.gradle +++ b/test/fixtures/hdfs-fixture/build.gradle @@ -33,7 +33,7 @@ apply plugin: 'opensearch.java' group = 'hdfs' dependencies { - api "org.apache.hadoop:hadoop-minicluster:3.3.2" + api "org.apache.hadoop:hadoop-minicluster:3.3.3" api "org.apache.commons:commons-compress:1.21" api "commons-codec:commons-codec:${versions.commonscodec}" api "org.apache.logging.log4j:log4j-core:${versions.log4j}" From 5320b680e79b104492372fdf79eeff8567159519 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 25 May 2022 10:21:20 -0700 Subject: [PATCH 16/16] Bump avro from 1.10.2 to 1.11.0 in /plugins/repository-hdfs (#3358) * Bump avro from 1.10.2 to 1.11.0 in /plugins/repository-hdfs Bumps avro from 1.10.2 to 1.11.0. --- updated-dependencies: - dependency-name: org.apache.avro:avro dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Updating SHAs Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: dependabot[bot] --- plugins/repository-hdfs/build.gradle | 2 +- plugins/repository-hdfs/licenses/avro-1.10.2.jar.sha1 | 1 - plugins/repository-hdfs/licenses/avro-1.11.0.jar.sha1 | 1 + 3 files changed, 2 insertions(+), 2 deletions(-) delete mode 100644 plugins/repository-hdfs/licenses/avro-1.10.2.jar.sha1 create mode 100644 plugins/repository-hdfs/licenses/avro-1.11.0.jar.sha1 diff --git a/plugins/repository-hdfs/build.gradle b/plugins/repository-hdfs/build.gradle index cf51daec2b740..41c38b0b4e558 100644 --- a/plugins/repository-hdfs/build.gradle +++ b/plugins/repository-hdfs/build.gradle @@ -63,7 +63,7 @@ dependencies { api "org.apache.hadoop:hadoop-hdfs:${versions.hadoop3}" api 'org.apache.htrace:htrace-core4:4.2.0-incubating' api "org.apache.logging.log4j:log4j-core:${versions.log4j}" - api 'org.apache.avro:avro:1.10.2' + api 'org.apache.avro:avro:1.11.0' api "com.fasterxml.jackson.core:jackson-databind:${versions.jackson_databind}" api 'com.google.code.gson:gson:2.9.0' runtimeOnly 'com.google.guava:guava:31.1-jre' diff --git a/plugins/repository-hdfs/licenses/avro-1.10.2.jar.sha1 b/plugins/repository-hdfs/licenses/avro-1.10.2.jar.sha1 deleted file mode 100644 index eae1c5116ff0f..0000000000000 --- a/plugins/repository-hdfs/licenses/avro-1.10.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -a65aaa91c1aeceb3dd4859dbb9765d1c2063f5a2 \ No newline at end of file diff --git a/plugins/repository-hdfs/licenses/avro-1.11.0.jar.sha1 b/plugins/repository-hdfs/licenses/avro-1.11.0.jar.sha1 new file mode 100644 index 0000000000000..9a0601879a1fc --- /dev/null +++ b/plugins/repository-hdfs/licenses/avro-1.11.0.jar.sha1 @@ -0,0 +1 @@ +2b0c58e5b450d4f4931456952ad9520cae9c896c \ No newline at end of file
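Both dependabot bumps above also swap the per-artifact license checksum file: each licenses/*.jar.sha1 file holds the lowercase hex SHA-1 digest of the published jar, which the build's dependency license check compares against the downloaded artifact. For reviewers verifying such a bump by hand, here is a short, hypothetical helper (the jar path argument is illustrative) that reproduces the digest with standard Java:

import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.security.MessageDigest;

public class Sha1OfJar {
    public static void main(String[] args) throws Exception {
        // Point this at the downloaded artifact, e.g. avro-1.11.0.jar (path is illustrative).
        Path jar = Path.of(args.length > 0 ? args[0] : "avro-1.11.0.jar");
        MessageDigest sha1 = MessageDigest.getInstance("SHA-1");
        try (InputStream in = Files.newInputStream(jar)) {
            byte[] buf = new byte[8192];
            for (int n; (n = in.read(buf)) != -1;) {
                sha1.update(buf, 0, n); // digest the jar incrementally
            }
        }
        StringBuilder hex = new StringBuilder();
        for (byte b : sha1.digest()) {
            hex.append(String.format("%02x", b));
        }
        // Compare the output against the single line in the matching licenses/*.jar.sha1 file.
        System.out.println(hex);
    }
}

Run against the new avro jar, the output should match the 2b0c58e5b450d4f4931456952ad9520cae9c896c recorded in avro-1.11.0.jar.sha1 above.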