Fix trimUnsafeCommits for indices created before 6.2 #57187


Merged: 3 commits, May 27, 2020
@@ -348,6 +348,11 @@ public void goToNextVersion() {
writeUnicastHostsFiles();
}

public void fullRestart() {
stop(false);
start();
}

public void nextNodeToNextVersion() {
if (nodeIndex + 1 > nodes.size()) {
throw new TestClustersException("Ran out of nodes to take to the next version");
@@ -653,24 +653,6 @@ static String toStr(Response response) throws IOException {
return EntityUtils.toString(response.getEntity());
}

- static void assertNoFailures(Map<?, ?> response) {
- int failed = (int) XContentMapValues.extractValue("_shards.failed", response);
- assertEquals(0, failed);
- }
-
- void assertTotalHits(int expectedTotalHits, Map<?, ?> response) {
- int actualTotalHits = extractTotalHits(response);
- assertEquals(response.toString(), expectedTotalHits, actualTotalHits);
- }
-
- int extractTotalHits(Map<?, ?> response) {
- if (isRunningAgainstOldCluster() && getOldClusterVersion().before(Version.V_7_0_0)) {
- return (Integer) XContentMapValues.extractValue("hits.total", response);
- } else {
- return (Integer) XContentMapValues.extractValue("hits.total.value", response);
- }
- }
-
/**
* Tests that a single document survives. Super basic smoke test.
*/
@@ -708,6 +690,12 @@ public void testEmptyShard() throws IOException {
// before timing out
.put(INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "100ms")
.put(SETTING_ALLOCATION_MAX_RETRY.getKey(), "0"); // fail faster
if (getOldClusterVersion().onOrAfter(Version.V_6_5_0)) {
settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean());
}
if (randomBoolean()) {
settings.put(IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.getKey(), "-1");
}
createIndex(index, settings.build());
}
ensureGreen(index);
@@ -1429,4 +1417,43 @@ public void testTurnOffTranslogRetentionAfterUpgraded() throws Exception {
ensurePeerRecoveryRetentionLeasesRenewedAndSynced(index);
}
}

public void testRecoveryWithTranslogRetentionDisabled() throws Exception {
if (isRunningAgainstOldCluster()) {
final Settings.Builder settings = Settings.builder()
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1);
if (getOldClusterVersion().onOrAfter(Version.V_6_5_0)) {
settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean());
}
if (randomBoolean()) {
settings.put(IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.getKey(), "-1");
}
if (randomBoolean()) {
settings.put(IndexSettings.INDEX_TRANSLOG_GENERATION_THRESHOLD_SIZE_SETTING.getKey(), "1kb");
}
createIndex(index, settings.build());
ensureGreen(index);
int numDocs = randomIntBetween(0, 100);
for (int i = 0; i < numDocs; i++) {
indexDocument(Integer.toString(i));
if (rarely()) {
flush(index, randomBoolean());
}
}
client().performRequest(new Request("POST", "/" + index + "/_refresh"));
if (randomBoolean()) {
ensurePeerRecoveryRetentionLeasesRenewedAndSynced(index);
}
if (randomBoolean()) {
flush(index, randomBoolean());
} else if (randomBoolean()) {
performSyncedFlush(index);
}
saveInfoDocument("doc_count", Integer.toString(numDocs));
}
ensureGreen(index);
final int numDocs = Integer.parseInt(loadInfoDocument("doc_count"));
assertTotalHits(numDocs, entityAsMap(client().performRequest(new Request("GET", "/" + index + "/_search"))));
}
}
qa/translog-policy/build.gradle (new file, 101 additions)
@@ -0,0 +1,101 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/


import org.elasticsearch.gradle.Version
import org.elasticsearch.gradle.info.BuildParams
import org.elasticsearch.gradle.testclusters.RestTestRunnerTask
import org.elasticsearch.gradle.testclusters.TestDistribution

apply plugin: 'elasticsearch.testclusters'
apply plugin: 'elasticsearch.standalone-test'
apply from : "$rootDir/gradle/bwc-test.gradle"

for (Version bwcVersion : BuildParams.bwcVersions.indexCompatible) {
String baseName = "v${bwcVersion}"

testClusters {
"${baseName}" {
versions = [bwcVersion.toString(), project.version]
numberOfNodes = 2
setting 'http.content_type.required', 'true'
}
}

tasks.register("${baseName}#Step1OldClusterTest", RestTestRunnerTask) {
useCluster testClusters."${baseName}"
mustRunAfter(precommit)
systemProperty 'tests.test_step', 'step1'
systemProperty 'tests.is_old_cluster', 'true'
}

tasks.register("${baseName}#Step2OldClusterTest", RestTestRunnerTask) {
useCluster testClusters."${baseName}"
dependsOn "${baseName}#Step1OldClusterTest"
doFirst {
testClusters."${baseName}".fullRestart()
}
systemProperty 'tests.test_step', 'step2'
systemProperty 'tests.is_old_cluster', 'true'
}

tasks.register("${baseName}#Step3NewClusterTest", RestTestRunnerTask) {
useCluster testClusters."${baseName}"
dependsOn "${baseName}#Step2OldClusterTest"
doFirst {
testClusters."${baseName}".goToNextVersion()
}
systemProperty 'tests.test_step', 'step3'
systemProperty 'tests.is_old_cluster', 'false'
}

tasks.register("${baseName}#Step4NewClusterTest", RestTestRunnerTask) {
useCluster testClusters."${baseName}"
dependsOn "${baseName}#Step3NewClusterTest"
doFirst {
testClusters."${baseName}".fullRestart()
}
systemProperty 'tests.test_step', 'step4'
systemProperty 'tests.is_old_cluster', 'false'
}

String oldVersion = bwcVersion.toString().minus("-SNAPSHOT")
tasks.matching { it.name.startsWith(baseName) && it.name.endsWith("ClusterTest") }.configureEach {
it.systemProperty 'tests.old_cluster_version', oldVersion
it.nonInputProperties.systemProperty('tests.rest.cluster', "${-> testClusters."${baseName}".allHttpSocketURI.join(",")}")
it.nonInputProperties.systemProperty('tests.clustername', "${-> testClusters."${baseName}".getName()}")
}

tasks.register(bwcTaskName(bwcVersion)) {
dependsOn tasks.named("${baseName}#Step4NewClusterTest")
}
}

configurations {
testArtifacts.extendsFrom testRuntime
}

task testJar(type: Jar) {
appendix 'test'
from sourceSets.test.output
}

artifacts {
testArtifacts testJar
}
@@ -0,0 +1,147 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.upgrades;

import org.elasticsearch.Version;
import org.elasticsearch.client.Request;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.index.IndexSettings;
import org.junit.Before;

import java.io.IOException;
import java.util.Locale;

/**
* Ensures that we correctly trim unsafe commits when migrating from the translog-generation-based policy to the sequence-number-based policy.
* See https://github.com/elastic/elasticsearch/issues/57091
*/
public class TranslogPolicyIT extends AbstractFullClusterRestartTestCase {

private enum TestStep {
STEP1_OLD_CLUSTER("step1"),
STEP2_OLD_CLUSTER("step2"),
STEP3_NEW_CLUSTER("step3"),
STEP4_NEW_CLUSTER("step4");

private final String name;

TestStep(String name) {
this.name = name;
}

@Override
public String toString() {
return name;
}

public static TestStep parse(String value) {
switch (value) {
case "step1":
return STEP1_OLD_CLUSTER;
case "step2":
return STEP2_OLD_CLUSTER;
case "step3":
return STEP3_NEW_CLUSTER;
case "step4":
return STEP4_NEW_CLUSTER;
default:
throw new AssertionError("unknown test step: " + value);
}
}
}

protected static final TestStep TEST_STEP = TestStep.parse(System.getProperty("tests.test_step"));

private String index;
private String type;

@Before
public void setIndex() {
index = getTestName().toLowerCase(Locale.ROOT);
}

@Before
public void setType() {
type = getOldClusterVersion().before(Version.V_6_7_0) ? "doc" : "_doc";
}

public void testEmptyIndex() throws Exception {
if (TEST_STEP == TestStep.STEP1_OLD_CLUSTER) {
final Settings.Builder settings = Settings.builder()
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, between(0, 1));
if (getOldClusterVersion().onOrAfter(Version.V_6_5_0)) {
settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean());
}
if (randomBoolean()) {
settings.put(IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.getKey(), "-1");
}
createIndex(index, settings.build());
}
ensureGreen(index);
assertTotalHits(0, entityAsMap(client().performRequest(new Request("GET", "/" + index + "/_search"))));
}

public void testRecoverReplica() throws Exception {
int numDocs = 100;
if (TEST_STEP == TestStep.STEP1_OLD_CLUSTER) {
final Settings.Builder settings = Settings.builder()
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1);
if (getOldClusterVersion().onOrAfter(Version.V_6_5_0)) {
settings.put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), randomBoolean());
}
if (randomBoolean()) {
settings.put(IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.getKey(), "-1");
}
if (randomBoolean()) {
settings.put(IndexSettings.INDEX_TRANSLOG_GENERATION_THRESHOLD_SIZE_SETTING.getKey(), "1kb");
}
createIndex(index, settings.build());
ensureGreen(index);
for (int i = 0; i < numDocs; i++) {
indexDocument(Integer.toString(i));
if (rarely()) {
flush(index, randomBoolean());
}
}
client().performRequest(new Request("POST", "/" + index + "/_refresh"));
if (randomBoolean()) {
ensurePeerRecoveryRetentionLeasesRenewedAndSynced(index);
}
if (randomBoolean()) {
flush(index, randomBoolean());
} else if (randomBoolean()) {
performSyncedFlush(index);
}
}
ensureGreen(index);
assertTotalHits(100, entityAsMap(client().performRequest(new Request("GET", "/" + index + "/_search"))));
}

private void indexDocument(String id) throws IOException {
final Request indexRequest = new Request("POST", "/" + index + "/" + type + "/" + id);
indexRequest.setJsonEntity(Strings.toString(JsonXContent.contentBuilder().startObject().field("f", "v").endObject()));
assertOK(client().performRequest(indexRequest));
}
}
@@ -179,7 +179,7 @@ synchronized boolean releaseCommit(final IndexCommit snapshotCommit) {
*/
public static IndexCommit findSafeCommitPoint(List<IndexCommit> commits, long globalCheckpoint) throws IOException {
if (commits.isEmpty()) {
throw new IllegalArgumentException("Commit list must not empty");
throw new IllegalArgumentException("Commit list must not be empty");
}
final int keptPosition = indexOfKeptCommits(commits, globalCheckpoint);
return commits.get(keptPosition);
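As background for findSafeCommitPoint: the safe commit is the newest commit whose operations are all covered by the global checkpoint. A minimal sketch of that selection rule (my own simplification, with each commit reduced to its max sequence number; not the actual implementation):

import java.util.List;

final class SafeCommitSketch {
    /**
     * Picks the newest commit whose max sequence number is at most the global
     * checkpoint, falling back to the oldest commit when none qualifies
     * (for example, an index created before sequence numbers existed).
     */
    static int indexOfSafeCommit(List<Long> maxSeqNoPerCommit, long globalCheckpoint) {
        for (int i = maxSeqNoPerCommit.size() - 1; i >= 0; i--) {
            if (maxSeqNoPerCommit.get(i) <= globalCheckpoint) {
                return i; // every operation in this commit is globally durable
            }
        }
        return 0; // no safe commit: keep the oldest so history is preserved
    }

    public static void main(String[] args) {
        // Commits with max seq nos 10, 25, 40 and a global checkpoint of 30:
        // the commit at index 1 (max seq no 25) is the safe commit.
        System.out.println(indexOfSafeCommit(List.of(10L, 25L, 40L), 30L)); // prints 1
    }
}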
@@ -1411,14 +1411,16 @@ public long recoverLocallyUpToGlobalCheckpoint() {
logger.debug("skip local recovery as failed to find the safe commit", e);
return UNASSIGNED_SEQ_NO;
}
- if (safeCommit.isPresent() == false) {
- logger.trace("skip local recovery as no safe commit found");
- return UNASSIGNED_SEQ_NO;
- }
- assert safeCommit.get().localCheckpoint <= globalCheckpoint : safeCommit.get().localCheckpoint + " > " + globalCheckpoint;
try {
maybeCheckIndex(); // check index here and won't do it again if ops-based recovery occurs
recoveryState.setStage(RecoveryState.Stage.TRANSLOG);
+ if (safeCommit.isPresent() == false) {
+ assert globalCheckpoint == UNASSIGNED_SEQ_NO || indexSettings.getIndexVersionCreated().before(Version.V_6_2_0) :
+ "global checkpoint [" + globalCheckpoint + "] [ created version [" + indexSettings.getIndexVersionCreated() + "]";
+ logger.trace("skip local recovery as no safe commit found");
+ return UNASSIGNED_SEQ_NO;
+ }
+ assert safeCommit.get().localCheckpoint <= globalCheckpoint : safeCommit.get().localCheckpoint + " > " + globalCheckpoint;
if (safeCommit.get().localCheckpoint == globalCheckpoint) {
logger.trace("skip local recovery as the safe commit is up to date; safe commit {} global checkpoint {}",
safeCommit.get(), globalCheckpoint);

Review discussion on this change:

Member Author: The new test found this issue, where the index has a synced flush but the global checkpoint is still unassigned.

Contributor: I'm not sure I understand what issue is being addressed here. How is moving this condition further down (after maybeCheckIndex) helping? Is the issue that we have not properly moved to the translog stage?

Contributor: Also, which part of the new tests shows this issue, and is it something that can also be triggered with a single restart?

Member Author: Sorry, I should have explained better.

> Is the issue that we have not properly moved to the translog stage?

That's correct. Previously, we did not move the recovery stage from INDEX to TRANSLOG if we did not have a safe commit, which can be the case if the index was created before 6.2 or the global checkpoint is still unassigned. Here we expect a file-based recovery to happen, and we move the recovery stage to TRANSLOG in the clean-files step. However, if the shard has a synced flush, we won't execute the clean-files step and will trip the assertion.

> Also, which part of the new tests shows this issue, and is it something that can also be triggered with a single restart?

Yes, I will add it to the full cluster restart suite.
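To make the ordering concrete, here is a minimal, self-contained sketch (my own illustration, not the actual IndexShard code; the class names and the -2 constant are placeholders) of why the stage transition has to precede the no-safe-commit early return:

enum Stage { INDEX, TRANSLOG }

final class LocalRecoverySketch {
    static final long UNASSIGNED_SEQ_NO = -2; // placeholder for SequenceNumbers.UNASSIGNED_SEQ_NO
    private Stage stage = Stage.INDEX;

    // Sketch of the fixed flow: the stage reaches TRANSLOG before any early
    // return, so a later "stage must be TRANSLOG" check holds even when a
    // synced flush causes the clean-files step (which used to advance the
    // stage during file-based recovery) to be skipped.
    long recoverLocally(boolean safeCommitPresent, long globalCheckpoint) {
        stage = Stage.TRANSLOG; // this PR moves the early return below this line
        if (safeCommitPresent == false) {
            return UNASSIGNED_SEQ_NO; // no local recovery possible; recover another way
        }
        return globalCheckpoint;
    }

    Stage currentStage() {
        return stage;
    }
}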
server/src/main/java/org/elasticsearch/index/store/Store.java (6 additions, 2 deletions)
@@ -1519,8 +1519,12 @@ public void trimUnsafeCommits(final long lastSyncedGlobalCheckpoint, final long
recoverableCommits.add(commit);
}
}
- assert recoverableCommits.isEmpty() == false : "No commit point with translog found; " +
- "commits [" + existingCommits + "], minRetainedTranslogGen [" + minRetainedTranslogGen + "]";
+ // We could reach here if the node is restarted multiple times after being upgraded without flushing a new index commit.
+ // In this case, we can safely consider all commits as the starting commit because we have trimmed the unsafe
+ // commits in the first restart.
+ if (recoverableCommits.isEmpty()) {
+ recoverableCommits.addAll(existingCommits);
+ }
startingIndexCommit = CombinedDeletionPolicy.findSafeCommitPoint(recoverableCommits, lastSyncedGlobalCheckpoint);
} else {
// TODO: Asserts the starting commit is a safe commit once peer-recovery sets global checkpoint.
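To illustrate the scenario the new comment describes: after a first restart has trimmed unsafe commits, a second restart without any new flush may find no commit whose translog generation is still retained, and every remaining commit is then a safe starting point. A simplified sketch under those assumptions (commits reduced to their minimum translog generations; not the actual Store code):

import java.util.ArrayList;
import java.util.List;

final class TrimUnsafeCommitsSketch {
    /**
     * Returns the candidate commits to recover from: those whose translog is
     * still retained, or all existing commits when none qualifies (which can
     * happen on the second restart after an upgrade, once unsafe commits were
     * already trimmed and no new commit has been flushed since).
     */
    static List<Long> recoverableCommits(List<Long> minTranslogGenPerCommit, long minRetainedTranslogGen) {
        List<Long> recoverable = new ArrayList<>();
        for (long gen : minTranslogGenPerCommit) {
            if (gen >= minRetainedTranslogGen) {
                recoverable.add(gen); // this commit's translog is fully retained
            }
        }
        if (recoverable.isEmpty()) {
            // All commits predate the retained translog; they were already
            // trimmed on a previous restart, so all of them are safe starts.
            recoverable.addAll(minTranslogGenPerCommit);
        }
        return recoverable;
    }
}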