diff --git a/.gitignore b/.gitignore
index 6314a160bb40f..4e3b5d8fb81eb 100644
--- a/.gitignore
+++ b/.gitignore
@@ -12,6 +12,7 @@ target/
docs/html/
docs/build.log
/tmp/
+backwards/
## eclipse ignores (use 'mvn eclipse:eclipse' to build eclipse projects)
## The only configuration files which are not ignored are certain files in
diff --git a/TESTING.asciidoc b/TESTING.asciidoc
index ed7b86d406454..62dab6169368a 100644
--- a/TESTING.asciidoc
+++ b/TESTING.asciidoc
@@ -176,6 +176,24 @@ even if tests are passing.
mvn test -Dtests.output=always
------------------------------
+== Backwards Compatibility Tests
+
+Running backwards compatibility tests is disabled by default since it
+requires a release version of elasticsearch to be present on the test system.
+To run backwards compatibility tests untar or unzip a release and run the tests
+with the following command:
+
+---------------------------------------------------------------------------
+mvn test -Dtests.bwc=true -Dtests.bwc.version=x.y.z -Dtests.bwc.path=/path/to/elasticsearch
+---------------------------------------------------------------------------
+
+If the elasticsearch release is placed under `./backwards/elasticsearch-x.y.z` the path
+can be omitted:
+
+---------------------------------------------------------------------------
+mvn test -Dtests.bwc=true -Dtests.bwc.version=x.y.z
+---------------------------------------------------------------------------
+
== Testing the REST layer
The available integration tests make use of the java API to communicate with
@@ -213,3 +231,5 @@ cluster by specifying the `tests.cluster` property, which if present needs to co
comma separated list of nodes to connect to (e.g. localhost:9300). A transport client will
be created based on that and used for all the before|after test operations, and to extract
the http addresses of the nodes so that REST requests can be sent to them.
+
+
diff --git a/pom.xml b/pom.xml
index 266a32bd99ecf..e422be92fc5bd 100644
--- a/pom.xml
+++ b/pom.xml
@@ -36,6 +36,7 @@
trueonerror
+ ${project.basedir}/backwardsINFO512m5
@@ -468,6 +469,9 @@
.
+ ${tests.bwc}
+ ${tests.bwc.path}
+ ${tests.bwc.version}${tests.jvm.argline}${tests.processors}${tests.appendseed}
diff --git a/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java b/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java
index 98b5b45dbd805..9e8e776430c37 100644
--- a/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java
+++ b/src/main/java/org/elasticsearch/discovery/DiscoveryModule.java
@@ -36,6 +36,8 @@ public class DiscoveryModule extends AbstractModule implements SpawnModules {
private final Settings settings;
+ public static final String DISCOVERY_TYPE_KEY = "discovery.type";
+
public DiscoveryModule(Settings settings) {
this.settings = settings;
}
@@ -48,7 +50,7 @@ public Iterable extends Module> spawnModules() {
} else {
defaultDiscoveryModule = ZenDiscoveryModule.class;
}
- return ImmutableList.of(Modules.createModule(settings.getAsClass("discovery.type", defaultDiscoveryModule, "org.elasticsearch.discovery.", "DiscoveryModule"), settings));
+ return ImmutableList.of(Modules.createModule(settings.getAsClass(DISCOVERY_TYPE_KEY, defaultDiscoveryModule, "org.elasticsearch.discovery.", "DiscoveryModule"), settings));
}
@Override
diff --git a/src/test/java/org/apache/lucene/util/AbstractRandomizedTest.java b/src/test/java/org/apache/lucene/util/AbstractRandomizedTest.java
index ad9addbfd8512..5b50169415528 100644
--- a/src/test/java/org/apache/lucene/util/AbstractRandomizedTest.java
+++ b/src/test/java/org/apache/lucene/util/AbstractRandomizedTest.java
@@ -67,6 +67,32 @@
// NOTE: this class is in o.a.lucene.util since it uses some classes that are related
// to the test framework that didn't make sense to copy but are package private access
public abstract class AbstractRandomizedTest extends RandomizedTest {
+
+
+ /**
+ * Annotation for backwards compat tests
+ */
+ @Inherited
+ @Retention(RetentionPolicy.RUNTIME)
+ @Target(ElementType.TYPE)
+ @TestGroup(enabled = false, sysProperty = TESTS_BACKWARDS_COMPATIBILITY)
+ public @interface BackwardsCompatibilityTest {
+ }
+
+ /**
+ * Key used to enable backwards compatibility tests
+ * via the commandline -D{@value #TESTS_BACKWARDS_COMPATIBILITY}
+ */
+ public static final String TESTS_BACKWARDS_COMPATIBILITY = "tests.bwc";
+
+ public static final String TESTS_BACKWARDS_COMPATIBILITY_VERSION = "tests.bwc.version";
+
+ /**
+ * Key used to set the path of the elasticsearch release used to run backwards compatibility tests,
+ * passed via the commandline -D{@value #TESTS_BACKWARDS_COMPATIBILITY_PATH}
+ */
+ public static final String TESTS_BACKWARDS_COMPATIBILITY_PATH = "tests.bwc.path";
+
/**
* Annotation for integration tests
*/
diff --git a/src/test/java/org/elasticsearch/bwcompat/BasicAnalysisBackwardCompatibilityTests.java b/src/test/java/org/elasticsearch/bwcompat/BasicAnalysisBackwardCompatibilityTests.java
new file mode 100644
index 0000000000000..8e9ecd8fef851
--- /dev/null
+++ b/src/test/java/org/elasticsearch/bwcompat/BasicAnalysisBackwardCompatibilityTests.java
@@ -0,0 +1,105 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.bwcompat;
+
+import com.carrotsearch.randomizedtesting.generators.RandomPicks;
+import org.elasticsearch.action.admin.indices.analyze.AnalyzeResponse;
+import org.elasticsearch.indices.analysis.PreBuiltAnalyzers;
+import org.elasticsearch.test.ElasticsearchBackwardsCompatIntegrationTest;
+import org.elasticsearch.test.ElasticsearchIntegrationTest;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Locale;
+import java.util.concurrent.ExecutionException;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ */
+@ElasticsearchIntegrationTest.ClusterScope(numDataNodes = 0, scope = ElasticsearchIntegrationTest.Scope.SUITE, numClientNodes = 0, transportClientRatio = 0.0)
+public class BasicAnalysisBackwardCompatibilityTests extends ElasticsearchBackwardsCompatIntegrationTest {
+
+ /**
+ * Simple upgrade test for analyzers to make sure they analyze to the same tokens after upgrade
+ * TODO we need this for random tokenizers / tokenfilters as well
+ */
+ @Test
+ public void testAnalyzerTokensAfterUpgrade() throws IOException, ExecutionException, InterruptedException {
+ int numFields = randomIntBetween(PreBuiltAnalyzers.values().length, PreBuiltAnalyzers.values().length * 10);
+ StringBuilder builder = new StringBuilder();
+ String[] fields = new String[numFields * 2];
+ int fieldId = 0;
+ for (int i = 0; i < fields.length; i++) {
+ fields[i++] = "field_" + fieldId++;
+ String analyzer = RandomPicks.randomFrom(getRandom(), PreBuiltAnalyzers.values()).name().toLowerCase(Locale.ROOT);
+ fields[i] = "type=string,analyzer=" + analyzer;
+ }
+ assertAcked(prepareCreate("test")
+ .addMapping("type", fields)
+ .setSettings(indexSettings()));
+ ensureYellow();
+ InputOutput[] inout = new InputOutput[numFields];
+ for (int i = 0; i < numFields; i++) {
+ String input = randomRealisticUnicodeOfCodepointLengthBetween(1, 100);
+ AnalyzeResponse test = client().admin().indices().prepareAnalyze("test", input).setField("field_" + i).get();
+ inout[i] = new InputOutput(test, input, "field_" + i);
+ }
+
+ logClusterState();
+ boolean upgraded;
+ do {
+ logClusterState();
+ upgraded = backwardsCluster().upgradeOneNode();
+ ensureYellow();
+ } while (upgraded);
+
+ for (int i = 0; i < inout.length; i++) {
+ InputOutput inputOutput = inout[i];
+ AnalyzeResponse test = client().admin().indices().prepareAnalyze("test", inputOutput.input).setField(inputOutput.field).get();
+ List tokens = test.getTokens();
+ List expectedTokens = inputOutput.response.getTokens();
+ assertThat("size mismatch field: " + fields[i*2] + " analyzer: " + fields[i*2 + 1] + " input: " + inputOutput.input, expectedTokens.size(), equalTo(tokens.size()));
+ for (int j = 0; j < tokens.size(); j++) {
+ String msg = "failed for term: " + expectedTokens.get(j).getTerm() + " field: " + fields[i*2] + " analyzer: " + fields[i*2 + 1] + " input: " + inputOutput.input;
+ assertThat(msg, expectedTokens.get(j).getTerm(), equalTo(tokens.get(j).getTerm()));
+ assertThat(msg, expectedTokens.get(j).getPosition(), equalTo(tokens.get(j).getPosition()));
+ assertThat(msg, expectedTokens.get(j).getStartOffset(), equalTo(tokens.get(j).getStartOffset()));
+ assertThat(msg, expectedTokens.get(j).getEndOffset(), equalTo(tokens.get(j).getEndOffset()));
+ assertThat(msg, expectedTokens.get(j).getType(), equalTo(tokens.get(j).getType()));
+ }
+ }
+ }
+
+ private static final class InputOutput {
+ final AnalyzeResponse response;
+ final String input;
+ final String field;
+
+ public InputOutput(AnalyzeResponse response, String input, String field) {
+ this.response = response;
+ this.input = input;
+ this.field = field;
+ }
+
+
+ }
+}
diff --git a/src/test/java/org/elasticsearch/bwcompat/BasicBackwardsCompatibilityTest.java b/src/test/java/org/elasticsearch/bwcompat/BasicBackwardsCompatibilityTest.java
new file mode 100644
index 0000000000000..6385995ae41e4
--- /dev/null
+++ b/src/test/java/org/elasticsearch/bwcompat/BasicBackwardsCompatibilityTest.java
@@ -0,0 +1,266 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.bwcompat;
+
+import com.carrotsearch.randomizedtesting.generators.RandomPicks;
+import org.apache.lucene.util.English;
+import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.action.count.CountResponse;
+import org.elasticsearch.action.get.GetResponse;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.routing.IndexRoutingTable;
+import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
+import org.elasticsearch.common.regex.Regex;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.index.VersionType;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.test.ElasticsearchBackwardsCompatIntegrationTest;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.ExecutionException;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.is;
+
+/**
+ */
+public class BasicBackwardsCompatibilityTest extends ElasticsearchBackwardsCompatIntegrationTest {
+
+ /**
+ * Basic test using Index & Realtime Get with external versioning. This test ensures routing works correctly across versions.
+ */
+ @Test
+ public void testExternalVersion() throws Exception {
+ createIndex("test");
+ final boolean routing = randomBoolean();
+ int numDocs = randomIntBetween(10, 20);
+ for (int i = 0; i < numDocs; i++) {
+ String id = Integer.toString(i);
+ String routingKey = routing ? randomRealisticUnicodeOfLength(10) : null;
+ client().prepareIndex("test", "type1", id).setRouting(routingKey).setVersion(1).setVersionType(VersionType.EXTERNAL).setSource("field1", English.intToEnglish(i)).get();
+ GetResponse get = client().prepareGet("test", "type1", id).setRouting(routingKey).get();
+ assertThat("Document with ID " +id + " should exist but doesn't", get.isExists(), is(true));
+ assertThat(get.getVersion(), equalTo(1l));
+ client().prepareIndex("test", "type1", id).setRouting(routingKey).setVersion(2).setVersionType(VersionType.EXTERNAL).setSource("field1", English.intToEnglish(i)).get();
+ get = client().prepareGet("test", "type1", id).setRouting(routingKey).get();
+ assertThat("Document with ID " +id + " should exist but doesn't", get.isExists(), is(true));
+ assertThat(get.getVersion(), equalTo(2l));
+ }
+ }
+
+ /**
+ * Basic test using Index & Realtime Get with internal versioning. This test ensures routing works correctly across versions.
+ */
+ @Test
+ public void testInternalVersion() throws Exception {
+ createIndex("test");
+ final boolean routing = randomBoolean();
+ int numDocs = randomIntBetween(10, 20);
+ for (int i = 0; i < numDocs; i++) {
+ String routingKey = routing ? randomRealisticUnicodeOfLength(10) : null;
+ String id = Integer.toString(i);
+ assertThat(id, client().prepareIndex("test", "type1", id).setRouting(routingKey).setSource("field1", English.intToEnglish(i)).get().isCreated(), is(true));
+ GetResponse get = client().prepareGet("test", "type1", id).setRouting(routingKey).get();
+ assertThat("Document with ID " +id + " should exist but doesn't", get.isExists(), is(true));
+ assertThat(get.getVersion(), equalTo(1l));
+ client().prepareIndex("test", "type1", id).setRouting(routingKey).setSource("field1", English.intToEnglish(i)).execute().actionGet();
+ get = client().prepareGet("test", "type1", id).setRouting(routingKey).get();
+ assertThat("Document with ID " +id + " should exist but doesn't", get.isExists(), is(true));
+ assertThat(get.getVersion(), equalTo(2l));
+ }
+ }
+
+ /**
+ * Very basic bw compat test with a mixed version cluster random indexing and lookup by ID via term query
+ */
+ @Test
+ public void testIndexAndSearch() throws Exception {
+ createIndex("test");
+ int numDocs = randomIntBetween(10, 20);
+ List builder = new ArrayList<>();
+ for (int i = 0; i < numDocs; i++) {
+ String id = Integer.toString(i);
+ builder.add(client().prepareIndex("test", "type1", id).setSource("field1", English.intToEnglish(i), "the_id", id));
+ }
+ indexRandom(true, builder);
+ for (int i = 0; i < numDocs; i++) {
+ String id = Integer.toString(i);
+ assertHitCount(client().prepareSearch().setQuery(QueryBuilders.termQuery("the_id", id)).get(), 1);
+ }
+ }
+
+ @Test
+ public void testRecoverFromPreviousVersion() throws ExecutionException, InterruptedException {
+ assertAcked(prepareCreate("test").setSettings(ImmutableSettings.builder().put("index.routing.allocation.exclude._name", backwardsCluster().newNodePattern()).put(indexSettings())));
+ ensureYellow();
+ assertAllShardsOnNodes("test", backwardsCluster().backwardsNodePattern());
+ int numDocs = randomIntBetween(100, 150);
+ IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs];
+ for (int i = 0; i < numDocs; i++) {
+ docs[i] = client().prepareIndex("test", "type1", randomRealisticUnicodeOfLength(10) + String.valueOf(i)).setSource("field1", English.intToEnglish(i));
+ }
+ indexRandom(true, docs);
+ CountResponse countResponse = client().prepareCount().get();
+ assertHitCount(countResponse, numDocs);
+ backwardsCluster().allowOnlyNewNodes("test");
+ ensureYellow("test");// move all shards to the new node
+ final int numIters = randomIntBetween(10, 20);
+ for (int i = 0; i < numIters; i++) {
+ countResponse = client().prepareCount().get();
+ assertHitCount(countResponse, numDocs);
+ }
+ }
+
+ /**
+ * Test that ensures that we will never recover from a newer to an older version (we are not forward compatible)
+ */
+ @Test
+ public void testNoRecoveryFromNewNodes() throws ExecutionException, InterruptedException {
+ assertAcked(prepareCreate("test").setSettings(ImmutableSettings.builder().put("index.routing.allocation.exclude._name", backwardsCluster().backwardsNodePattern()).put(indexSettings())));
+ if (backwardsCluster().numNewDataNodes() == 0) {
+ backwardsCluster().startNewNode();
+ }
+ ensureYellow();
+ assertAllShardsOnNodes("test", backwardsCluster().newNodePattern());
+ if (randomBoolean()) {
+ backwardsCluster().allowOnAllNodes("test");
+ }
+ int numDocs = randomIntBetween(100, 150);
+ IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs];
+ for (int i = 0; i < numDocs; i++) {
+ docs[i] = client().prepareIndex("test", "type1", randomRealisticUnicodeOfLength(10) + String.valueOf(i)).setSource("field1", English.intToEnglish(i));
+ }
+ indexRandom(true, docs);
+ backwardsCluster().allowOnAllNodes("test");
+ while(ensureYellow() != ClusterHealthStatus.GREEN) {
+ backwardsCluster().startNewNode();
+ }
+ assertAllShardsOnNodes("test", backwardsCluster().newNodePattern());
+ CountResponse countResponse = client().prepareCount().get();
+ assertHitCount(countResponse, numDocs);
+ final int numIters = randomIntBetween(10, 20);
+ for (int i = 0; i < numIters; i++) {
+ countResponse = client().prepareCount().get();
+ assertHitCount(countResponse, numDocs);
+ }
+ }
+
+ public void assertAllShardsOnNodes(String index, String pattern) {
+ ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
+ for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {
+ for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
+ for (ShardRouting shardRouting : indexShardRoutingTable) {
+ if (shardRouting.currentNodeId() != null && index.equals(shardRouting.getIndex())) {
+ String name = clusterState.nodes().get(shardRouting.currentNodeId()).name();
+ assertThat("Allocated on new node: " + name, Regex.simpleMatch(pattern, name), is(true));
+ }
+ }
+ }
+ }
+ }
+
+ /**
+ * Upgrades a single node to the current version
+ */
+ @Test
+ public void testIndexUpgradeSingleNode() throws Exception {
+ assertAcked(prepareCreate("test").setSettings(ImmutableSettings.builder().put("index.routing.allocation.exclude._name", backwardsCluster().newNodePattern()).put(indexSettings())));
+ int numDocs = randomIntBetween(100, 150);
+ IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs];
+ for (int i = 0; i < numDocs; i++) {
+ docs[i] = client().prepareIndex("test", "type1", String.valueOf(i)).setSource("field1", English.intToEnglish(i));
+ }
+
+ indexRandom(true, docs);
+ assertAllShardsOnNodes("test", backwardsCluster().backwardsNodePattern());
+ client().admin().indices().prepareUpdateSettings("test").setSettings(ImmutableSettings.builder().put(EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE, "none")).get();
+ backwardsCluster().allowOnAllNodes("test");
+ CountResponse countResponse = client().prepareCount().get();
+ assertHitCount(countResponse, numDocs);
+ backwardsCluster().upgradeOneNode();
+ ensureYellow("test");
+ if (randomBoolean()) {
+ for (int i = 0; i < numDocs; i++) {
+ docs[i] = client().prepareIndex("test", "type1", String.valueOf(i)).setSource("field1", English.intToEnglish(i));
+ }
+ indexRandom(true, docs);
+ }
+ client().admin().indices().prepareUpdateSettings("test").setSettings(ImmutableSettings.builder().put(EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE, "all")).get();
+ final int numIters = randomIntBetween(10, 20);
+ for (int i = 0; i < numIters; i++) {
+ countResponse = client().prepareCount().get();
+ assertHitCount(countResponse, numDocs);
+ }
+ }
+
+ /**
+ * Test that allocates an index on one or more old nodes and then do a rolling upgrade
+ * one node after another is shut down and restarted from a newer version and we verify
+ * that all documents are still around after each nodes upgrade.
+ */
+ @Test
+ public void testIndexRollingUpgrade() throws Exception {
+ String[] indices = new String[randomIntBetween(1,3)];
+ for (int i = 0; i < indices.length; i++) {
+ indices[i] = "test" + i;
+ assertAcked(prepareCreate(indices[i]).setSettings(ImmutableSettings.builder().put("index.routing.allocation.exclude._name", backwardsCluster().newNodePattern()).put(indexSettings())));
+ }
+
+
+ int numDocs = randomIntBetween(100, 150);
+ IndexRequestBuilder[] docs = new IndexRequestBuilder[numDocs];
+ String[] indexForDoc = new String[docs.length];
+ for (int i = 0; i < numDocs; i++) {
+ docs[i] = client().prepareIndex(indexForDoc[i] = RandomPicks.randomFrom(getRandom(), indices), "type1", String.valueOf(i)).setSource("field1", English.intToEnglish(i));
+ }
+ indexRandom(true, docs);
+ for (int i = 0; i < indices.length; i++) {
+ assertAllShardsOnNodes(indices[i], backwardsCluster().backwardsNodePattern());
+ }
+ client().admin().indices().prepareUpdateSettings(indices).setSettings(ImmutableSettings.builder().put(EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE, "none")).get();
+ backwardsCluster().allowOnAllNodes(indices);
+ logClusterState();
+ boolean upgraded;
+ do {
+ logClusterState();
+ CountResponse countResponse = client().prepareCount().get();
+ assertHitCount(countResponse, numDocs);
+ upgraded = backwardsCluster().upgradeOneNode();
+ ensureYellow();
+ countResponse = client().prepareCount().get();
+ assertHitCount(countResponse, numDocs);
+ for (int i = 0; i < numDocs; i++) {
+ docs[i] = client().prepareIndex(indexForDoc[i], "type1", String.valueOf(i)).setSource("field1", English.intToEnglish(i));
+ }
+ indexRandom(true, docs);
+ } while (upgraded);
+ client().admin().indices().prepareUpdateSettings(indices).setSettings(ImmutableSettings.builder().put(EnableAllocationDecider.INDEX_ROUTING_ALLOCATION_ENABLE, "all")).get();
+ CountResponse countResponse = client().prepareCount().get();
+ assertHitCount(countResponse, numDocs);
+ }
+
+
+}
diff --git a/src/test/java/org/elasticsearch/get/GetActionTests.java b/src/test/java/org/elasticsearch/get/GetActionTests.java
index 16f1e84214c91..c27d2722987e8 100644
--- a/src/test/java/org/elasticsearch/get/GetActionTests.java
+++ b/src/test/java/org/elasticsearch/get/GetActionTests.java
@@ -19,6 +19,7 @@
package org.elasticsearch.get;
+import org.apache.lucene.util.English;
import org.elasticsearch.ElasticsearchIllegalArgumentException;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthStatus;
@@ -33,6 +34,7 @@
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.settings.ImmutableSettings;
import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.engine.VersionConflictEngineException;
import org.elasticsearch.test.ElasticsearchIntegrationTest;
import org.junit.Test;
@@ -156,11 +158,6 @@ public void simpleGetTests() {
@Test
public void simpleMultiGetTests() throws Exception {
- try {
- client().admin().indices().prepareDelete("test").execute().actionGet();
- } catch (Exception e) {
- // fine
- }
client().admin().indices().prepareCreate("test").setSettings(ImmutableSettings.settingsBuilder().put("index.refresh_interval", -1)).execute().actionGet();
ensureGreen();
diff --git a/src/test/java/org/elasticsearch/test/CompositeTestCluster.java b/src/test/java/org/elasticsearch/test/CompositeTestCluster.java
new file mode 100644
index 0000000000000..8f2bd0aceb985
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/CompositeTestCluster.java
@@ -0,0 +1,235 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test;
+
+import com.carrotsearch.ant.tasks.junit4.dependencies.com.google.common.collect.Iterators;
+import com.carrotsearch.randomizedtesting.generators.RandomPicks;
+import com.google.common.base.Predicate;
+import com.google.common.collect.Collections2;
+import org.apache.lucene.util.IOUtils;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.internal.InternalClient;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.test.client.FilterClient;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.Random;
+
+/**
+ * A test cluster implementation that holds a fixed set of external nodes as well as a TestCluster
+ * which is used to run mixed version clusters in tests like backwards compatibility tests.
+ * Note: this is an experimental API
+ */
+public class CompositeTestCluster extends ImmutableTestCluster {
+ private final TestCluster cluster;
+ private final ExternalNode[] externalNodes;
+ private final ExternalClient client = new ExternalClient();
+ private static final String NODE_PREFIX = "external_";
+
+ public CompositeTestCluster(TestCluster cluster, int numExternalNodes, ExternalNode externalNode) throws IOException {
+ this.cluster = cluster;
+ this.externalNodes = new ExternalNode[numExternalNodes];
+ for (int i = 0; i < externalNodes.length; i++) {
+ externalNodes[i] = externalNode;
+ }
+ }
+
+ @Override
+ public synchronized void afterTest() {
+ cluster.afterTest();
+ }
+
+ @Override
+ public synchronized void beforeTest(Random random, double transportClientRatio) throws IOException {
+ super.beforeTest(random, transportClientRatio);
+ cluster.beforeTest(random, transportClientRatio);
+ Settings defaultSettings = cluster.getDefaultSettings();
+ final Client client = cluster.size() > 0 ? cluster.client() : cluster.clientNodeClient();
+ for (int i = 0; i < externalNodes.length; i++) {
+ if (!externalNodes[i].running()) {
+ try {
+ externalNodes[i] = externalNodes[i].start(client, defaultSettings, NODE_PREFIX + i, cluster.getClusterName());
+ } catch (InterruptedException e) {
+ Thread.interrupted();
+ return;
+ }
+ }
+ externalNodes[i].reset(random.nextLong());
+ }
+ }
+
+ private Collection runningNodes() {
+ return Collections2.filter(Arrays.asList(externalNodes), new Predicate() {
+ @Override
+ public boolean apply(ExternalNode input) {
+ return input.running();
+ }
+ });
+ }
+
+ /**
+ * Upgrades one external running node to a node from the version running the tests. Commonly this is used
+ * to move from a node with version N-1 to a node running version N. This works seamlessly since they will
+ * share the same data directory. This method will return true iff a node got upgraded; otherwise, if no
+ * external node is running, it returns false
+ */
+ public synchronized boolean upgradeOneNode() throws InterruptedException, IOException {
+ return upgradeOneNode(ImmutableSettings.EMPTY);
+ }
+
+ /**
+ * Upgrades one external running node to a node from the version running the tests. Commonly this is used
+ * to move from a node with version N-1 to a node running version N. This works seamlessly since they will
+ * share the same data directory. This method will return true iff a node got upgraded; otherwise, if no
+ * external node is running, it returns false
+ */
+ public synchronized boolean upgradeOneNode(Settings nodeSettings) throws InterruptedException, IOException {
+ Collection runningNodes = runningNodes();
+ if (!runningNodes.isEmpty()) {
+ ExternalNode externalNode = RandomPicks.randomFrom(random, runningNodes);
+ externalNode.stop();
+ String s = cluster.startNode(nodeSettings);
+ ExternalNode.waitForNode(cluster.client(), s);
+ return true;
+ }
+ return false;
+ }
+
+ /**
+ * Returns a simple pattern that matches all "new" nodes in the cluster.
+ */
+ public String newNodePattern() {
+ return cluster.nodePrefix() + "*";
+ }
+
+ /**
+ * Returns a simple pattern that matches all "old" / "backwards" nodes in the cluster.
+ */
+ public String backwardsNodePattern() {
+ return NODE_PREFIX + "*";
+ }
+
+ /**
+ * Allows allocation of shards of the given indices on all nodes in the cluster.
+ */
+ public void allowOnAllNodes(String... index) {
+ Settings build = ImmutableSettings.builder().put("index.routing.allocation.exclude._name", "").build();
+ client().admin().indices().prepareUpdateSettings(index).setSettings(build).execute().actionGet();
+ }
+
+ /**
+ * Allows allocation of shards of the given indices only on "new" nodes in the cluster.
+ * Note: if a shard is allocated on an "old" node and can't be allocated on a "new" node it will only be removed if it can
+ * be allocated on some other "new" node.
+ */
+ public void allowOnlyNewNodes(String... index) {
+ Settings build = ImmutableSettings.builder().put("index.routing.allocation.exclude._name", backwardsNodePattern()).build();
+ client().admin().indices().prepareUpdateSettings(index).setSettings(build).execute().actionGet();
+ }
+
+ /**
+ * Starts a current version data node
+ */
+ public void startNewNode() {
+ cluster.startNode();
+ }
+
+
+ @Override
+ public synchronized Client client() {
+ return client;
+ }
+
+ @Override
+ public synchronized int size() {
+ return runningNodes().size() + cluster.size();
+ }
+
+ @Override
+ public int numDataNodes() {
+ return runningNodes().size() + cluster.numDataNodes();
+ }
+
+ @Override
+ public int numBenchNodes() {
+ return cluster.numBenchNodes();
+ }
+
+ @Override
+ public InetSocketAddress[] httpAddresses() {
+ return cluster.httpAddresses();
+ }
+
+ @Override
+ public void close() throws IOException {
+ try {
+ IOUtils.close(externalNodes);
+ } finally {
+ IOUtils.close(cluster);
+ }
+ }
+
+ @Override
+ public boolean hasFilterCache() {
+ return true;
+ }
+
+ @Override
+ public synchronized Iterator iterator() {
+ return Iterators.singletonIterator(client());
+ }
+
+ /**
+ * Returns the number of current version data nodes in the cluster
+ */
+ public int numNewDataNodes() {
+ return cluster.numDataNodes();
+ }
+
+ /**
+ * Returns the number of former version data nodes in the cluster
+ */
+ public int numBackwardsDataNodes() {
+ return runningNodes().size();
+ }
+
+ private synchronized InternalClient internalClient() {
+ Collection externalNodes = runningNodes();
+ return random.nextBoolean() && !externalNodes.isEmpty() ? (InternalClient) RandomPicks.randomFrom(random, externalNodes).getClient() : (InternalClient) cluster.client();
+ }
+
+ private final class ExternalClient extends FilterClient {
+
+ public ExternalClient() {
+ super(null);
+ }
+
+ @Override
+ protected InternalClient delegate() {
+ return internalClient();
+ }
+
+ }
+
+}
diff --git a/src/test/java/org/elasticsearch/test/ElasticsearchBackwardsCompatIntegrationTest.java b/src/test/java/org/elasticsearch/test/ElasticsearchBackwardsCompatIntegrationTest.java
new file mode 100644
index 0000000000000..0ac4d35256a5e
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/ElasticsearchBackwardsCompatIntegrationTest.java
@@ -0,0 +1,118 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test;
+
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.discovery.DiscoveryModule;
+import org.elasticsearch.discovery.zen.ZenDiscoveryModule;
+import org.elasticsearch.transport.TransportModule;
+import org.elasticsearch.transport.TransportService;
+import org.elasticsearch.transport.netty.NettyTransportModule;
+import org.junit.Ignore;
+
+import java.io.File;
+import java.io.IOException;
+
+/**
+ * Abstract base class for backwards compatibility tests. Subclasses of this class
+ * can run tests against a mixed version cluster. A subset of the nodes in the cluster
+ * are started in a dedicated process running off a full-fledged elasticsearch release.
+ * Nodes can be "upgraded" from a "backwards" node to a "new" node, where a "new" node's
+ * version corresponds to the current version.
+ * The purpose of this test class is to run tests in scenarios where clusters are in an
+ * intermediate state during a rolling upgrade as well as upgrade situations. The clients
+ * accessed via #client() are random clients to the nodes in the cluster which might
+ * execute requests on the "new" as well as the "old" nodes.
+ *
+ * Note: this base class is still experimental and might have bugs or leave external processes behind.
+ *
+ * Backwards compatibility tests are disabled by default via the {@link BackwardsCompatibilityTest} annotation.
+ * The following system variables control the test execution:
+ *
+ * {@value #TESTS_BACKWARDS_COMPATIBILITY_VERSION}
+ * sets the version to run the external nodes from, formatted as X.Y.Z.
+ * The tests class will try to locate a release folder elasticsearch-X.Y.Z
+ * within path passed via {@value #TESTS_BACKWARDS_COMPATIBILITY_PATH}
+ * depending on this system variable.
+ *
+ *
+ * {@value #TESTS_BACKWARDS_COMPATIBILITY_PATH} the path to the
+ * elasticsearch releases to run backwards compatibility tests against.
+ *
+ *
+ *
+ */
+// the transportClientRatio is tricky here since we don't fully control the cluster nodes
+@ElasticsearchBackwardsCompatIntegrationTest.BackwardsCompatibilityTest
+@ElasticsearchIntegrationTest.ClusterScope(minNumDataNodes = 0, maxNumDataNodes = 2, scope = ElasticsearchIntegrationTest.Scope.SUITE, numClientNodes = 0, transportClientRatio = 0.0)
+@Ignore
+public abstract class ElasticsearchBackwardsCompatIntegrationTest extends ElasticsearchIntegrationTest {
+
+ private static File backwardsCompatibilityPath() {
+ String path = System.getProperty(TESTS_BACKWARDS_COMPATIBILITY_PATH);
+ String version = System.getProperty(TESTS_BACKWARDS_COMPATIBILITY_VERSION);
+ if (path == null || path.isEmpty() || version == null || version.isEmpty()) {
+ throw new IllegalArgumentException("Invalid Backwards tests location path:" + path + " version: " + version);
+ }
+ File file = new File(path, "elasticsearch-" + version);
+ if (!file.isDirectory()) {
+ throw new IllegalArgumentException("Backwards tests location is not a directory: " + file.getAbsolutePath());
+ }
+ return file;
+ }
+
+ public CompositeTestCluster backwardsCluster() {
+ return (CompositeTestCluster) immutableCluster();
+ }
+
+ protected ImmutableTestCluster buildTestCluster(Scope scope) throws IOException {
+ ImmutableTestCluster cluster = super.buildTestCluster(scope);
+ return new CompositeTestCluster((TestCluster) cluster, between(minExternalNodes(), maxExternalNodes()), new ExternalNode(backwardsCompatibilityPath(), randomLong()));
+ }
+
+ protected int minExternalNodes() {
+ return 1;
+ }
+
+ protected int maxExternalNodes() {
+ return 2;
+ }
+
+ @Override
+ protected int maximumNumberOfReplicas() {
+ return 1;
+ }
+
+ protected Settings nodeSettings(int nodeOrdinal) {
+ return ImmutableSettings.builder()
+ .put(TransportModule.TRANSPORT_TYPE_KEY, NettyTransportModule.class) // run same transport / disco as external
+ .put(DiscoveryModule.DISCOVERY_TYPE_KEY, ZenDiscoveryModule.class)
+ .put("gateway.type", "local") // we require local gateway to mimic upgrades of nodes
+ .put("discovery.type", "zen") // zen is needed since we start external nodes
+ .put(TransportModule.TRANSPORT_SERVICE_TYPE_KEY, TransportService.class.getName())
+ .build();
+ }
+}
diff --git a/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java b/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java
index 5e8ebfe870b5a..873355ff377e6 100644
--- a/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java
+++ b/src/test/java/org/elasticsearch/test/ElasticsearchIntegrationTest.java
@@ -27,6 +27,7 @@
import com.google.common.collect.Lists;
import org.apache.lucene.store.StoreRateLimiting;
import org.apache.lucene.util.AbstractRandomizedTest;
+import org.apache.lucene.util.IOUtils;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
@@ -100,9 +101,7 @@
import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
import static org.elasticsearch.test.TestCluster.clusterName;
-import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
-import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
-import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoTimeout;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.*;
import static org.hamcrest.Matchers.emptyIterable;
import static org.hamcrest.Matchers.equalTo;
@@ -489,11 +488,9 @@ public ImmutableTestCluster buildAndPutCluster(Scope currentClusterScope, boolea
return testCluster;
}
- private void clearClusters() throws IOException {
+ private static void clearClusters() throws IOException {
if (!clusters.isEmpty()) {
- for (ImmutableTestCluster cluster : clusters.values()) {
- cluster.close();
- }
+ IOUtils.close(clusters.values());
clusters.clear();
}
}
@@ -595,7 +592,7 @@ protected int minimumNumberOfReplicas() {
}
protected int maximumNumberOfReplicas() {
- return immutableCluster().numDataNodes() - 1;
+ return Math.max(0, immutableCluster().numDataNodes() - 1);
}
protected int numberOfReplicas() {
@@ -845,7 +842,15 @@ public ClusterHealthStatus ensureYellow(String... indices) {
return actionGet.getStatus();
}
- private void ensureClusterSizeConsistency() {
+ /**
+ * Prints the current cluster state as info logging.
+ */
+ public void logClusterState() {
+ logger.debug("cluster state:\n{}\n{}", client().admin().cluster().prepareState().get().getState().prettyPrint(), client().admin().cluster().preparePendingClusterTasks().get().prettyPrint());
+ }
+
+ void ensureClusterSizeConsistency() {
+ logger.trace("Check consistency for [{}] nodes", immutableCluster().size());
assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(immutableCluster().size())).get());
}
@@ -1307,7 +1312,7 @@ protected Settings nodeSettings(int nodeOrdinal) {
return ImmutableSettings.EMPTY;
}
- private TestCluster buildTestCluster(Scope scope) {
+ protected ImmutableTestCluster buildTestCluster(Scope scope) throws IOException {
long currentClusterSeed = randomLong();
NodeSettingsSource nodeSettingsSource = new NodeSettingsSource() {
@@ -1426,6 +1431,8 @@ public static void afterClass() throws IOException {
} finally {
INSTANCE = null;
}
+ } else {
+ clearClusters();
}
}
diff --git a/src/test/java/org/elasticsearch/test/ExternalNode.java b/src/test/java/org/elasticsearch/test/ExternalNode.java
new file mode 100644
index 0000000000000..8930b36bf9764
--- /dev/null
+++ b/src/test/java/org/elasticsearch/test/ExternalNode.java
@@ -0,0 +1,224 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.test;
+
+import com.google.common.base.Predicate;
+import org.apache.lucene.util.Constants;
+import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
+import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.client.transport.TransportClient;
+import org.elasticsearch.cluster.ClusterName;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.transport.TransportAddress;
+
+import java.io.Closeable;
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+
+import static org.elasticsearch.common.settings.ImmutableSettings.settingsBuilder;
+
+/**
+ * Simple helper class to start external nodes to be used within a test cluster
+ */
+final class ExternalNode implements Closeable {
+
+ private final File path;
+ private final Random random;
+ private Process process;
+ private NodeInfo nodeInfo;
+ private final String clusterName;
+ private TransportClient client;
+
+ ExternalNode(File path, long seed) {
+ this(path, null, seed);
+ }
+
+ ExternalNode(File path, String clusterName, long seed) {
+ if (!path.isDirectory()) {
+ throw new IllegalArgumentException("path must be a directory");
+ }
+ this.path = path;
+ this.clusterName = clusterName;
+ this.random = new Random(seed);
+ }
+
+
+ synchronized ExternalNode start(Client localNode, Settings settings, String nodeName, String clusterName) throws IOException, InterruptedException {
+ ExternalNode externalNode = new ExternalNode(path, clusterName, random.nextLong());
+ externalNode.startInternal(localNode, settings, nodeName, clusterName);
+ return externalNode;
+ }
+
+ synchronized void startInternal(Client client, Settings settings, String nodeName, String clusterName) throws IOException, InterruptedException {
+ if (process != null) {
+ throw new IllegalStateException("Already started");
+ }
+ List params = new ArrayList<>();
+
+ if (!Constants.WINDOWS) {
+ params.add("bin/elasticsearch");
+ } else {
+ params.add("bin/elasticsearch.bat");
+ }
+ params.add("-Des.cluster.name=" + clusterName);
+ params.add("-Des.node.name=" + nodeName);
+ for (Map.Entry entry : settings.getAsMap().entrySet()) {
+ switch (entry.getKey()) {
+ case "cluster.name":
+ case "node.name":
+ case "path.home":
+ case "node.mode":
+ case "gateway.type":
+ continue;
+ default:
+ params.add("-Des." + entry.getKey() + "=" + entry.getValue());
+
+ }
+ }
+
+ params.add("-Des.gateway.type=local");
+ params.add("-Des.path.home=" + new File("").getAbsolutePath());
+ ProcessBuilder builder = new ProcessBuilder(params);
+ builder.directory(path);
+ builder.inheritIO();
+ boolean success = false;
+ try {
+ process = builder.start();
+ this.nodeInfo = null;
+ if (waitForNode(client, nodeName)) {
+ nodeInfo = nodeInfo(client, nodeName);
+ assert nodeInfo != null;
+ } else {
+ throw new IllegalStateException("Node [" + nodeName + "] didn't join the cluster");
+ }
+ success = true;
+ } finally {
+ if (!success) {
+ stop();
+ }
+ }
+ }
+
+ static boolean waitForNode(final Client client, final String name) throws InterruptedException {
+ return ElasticsearchTestCase.awaitBusy(new Predicate