-
Notifications
You must be signed in to change notification settings - Fork 25.3k
Remove per-index metadata without assigned shards #49234
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -56,6 +56,7 @@ | |
import org.elasticsearch.test.InternalTestCluster.RestartCallback; | ||
|
||
import java.io.IOException; | ||
import java.nio.file.Files; | ||
import java.nio.file.Path; | ||
import java.util.List; | ||
import java.util.Map; | ||
|
@@ -533,7 +534,7 @@ public void testHalfDeletedIndexImport() throws Exception { | |
// // term in the coordination metadata | ||
// .coordinationMetaData(CoordinationMetaData.builder(metaData.coordinationMetaData()).term(0L).build()) | ||
// // add a tombstone but do not delete the index metadata from disk | ||
// .putCustom(IndexGraveyard.TYPE, IndexGraveyard.builder().addTombstone(metaData.index("test").getIndex()).build()).build()); | ||
// .putCustom(IndexGraveyard.TYPE, IndexGraveyard.builder().addTombstone(metaData.index("test").getIndex()).build()).build()); | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Reviewer comment: line length, oops :( |
||
// for (final Path path : paths) { | ||
// try (Stream<Path> stateFiles = Files.list(path.resolve(MetaDataStateFormat.STATE_DIR_NAME))) { | ||
// for (final Path manifestPath : stateFiles | ||
|
@@ -549,6 +550,90 @@ public void testHalfDeletedIndexImport() throws Exception { | |
assertBusy(() -> assertThat(internalCluster().getInstance(NodeEnvironment.class).availableIndexFolders(), empty())); | ||
} | ||
|
||
// Verifies that index folders (under NodeEnvironment.INDICES_FOLDER) are only written on nodes
// that actually hold shards of the index, and are cleaned up when shards move away or the index
// is deleted. A dedicated master node must never get an indices folder at any point in this test.
// NOTE(review): lines below retain diff-table markup ("| ||") from the scraped PR page; the
// underlying Java statements are unchanged.
public void testOnlyWritesIndexMetaDataFilesOnDataNodes() throws Exception { | ||
// Three node roles: master-only, data-only, and a default (mixed master+data) node.
final String masterNode = internalCluster().startMasterOnlyNode(); | ||
final String dataNode = internalCluster().startDataOnlyNode(); | ||
final String mixedNode = internalCluster().startNode(); | ||
|
||
// One replica so that both the data node and the mixed node receive shard copies.
createIndex("test", Settings.builder() | ||
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, between(1, 3)) | ||
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) | ||
.build()); | ||
ensureGreen("test"); | ||
|
||
// The on-disk index folder is named after the index UUID, not the index name.
final String indexUUID = client().admin().indices().prepareStats("test").get().getIndex("test").getUuid(); | ||
|
||
||
final Path[] masterPaths = internalCluster().getInstance(NodeEnvironment.class, masterNode).nodeDataPaths(); | ||
final Path[] dataPaths = internalCluster().getInstance(NodeEnvironment.class, dataNode).nodeDataPaths(); | ||
final Path[] mixedPaths = internalCluster().getInstance(NodeEnvironment.class, mixedNode).nodeDataPaths(); | ||
|
||
// Initial state: shard-holding nodes have the index folder, the master-only node does not.
for (final Path path : masterPaths) { | ||
assertFalse("master: " + path, Files.exists(path.resolve(NodeEnvironment.INDICES_FOLDER))); | ||
} | ||
for (final Path path : dataPaths) { | ||
assertTrue("data: " + path, Files.exists(path.resolve(NodeEnvironment.INDICES_FOLDER).resolve(indexUUID))); | ||
} | ||
for (final Path path : mixedPaths) { | ||
assertTrue("mixed: " + path, Files.exists(path.resolve(NodeEnvironment.INDICES_FOLDER).resolve(indexUUID))); | ||
} | ||
|
||
// Phase 1: drop replicas and exclude the data node via allocation filtering, then wait for
// a stable green cluster (LANGUID ensures pending cluster-state events have drained).
logger.info("--> remove shards from data node, to check the index folder is cleaned up"); | ||
assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder() | ||
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) | ||
.put(IndexMetaData.INDEX_ROUTING_EXCLUDE_GROUP_PREFIX + "._name", dataNode))); | ||
assertFalse(client().admin().cluster().prepareHealth("test").setWaitForGreenStatus() | ||
.setWaitForNoInitializingShards(true).setWaitForEvents(Priority.LANGUID).get().isTimedOut()); | ||
|
||
// Folder removal on the vacated data node is asynchronous, hence assertBusy.
for (final Path path : masterPaths) { | ||
assertFalse("master: " + path, Files.exists(path.resolve(NodeEnvironment.INDICES_FOLDER))); | ||
} | ||
for (final Path path : mixedPaths) { | ||
assertTrue("mixed: " + path, Files.exists(path.resolve(NodeEnvironment.INDICES_FOLDER).resolve(indexUUID))); | ||
} | ||
assertBusy(() -> { | ||
for (final Path path : dataPaths) { | ||
assertFalse("data: " + path, Files.exists(path.resolve(NodeEnvironment.INDICES_FOLDER).resolve(indexUUID))); | ||
} | ||
}); | ||
|
||
// Phase 2: switch the exclusion to the mixed node, pushing shards back to the data node;
// the mixed node's folder should then be cleaned up the same way.
logger.info("--> remove shards from mixed master/data node, to check the index folder is cleaned up"); | ||
assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder() | ||
.put(IndexMetaData.INDEX_ROUTING_EXCLUDE_GROUP_PREFIX + "._name", mixedNode))); | ||
assertFalse(client().admin().cluster().prepareHealth("test").setWaitForGreenStatus() | ||
.setWaitForNoInitializingShards(true).setWaitForEvents(Priority.LANGUID).get().isTimedOut()); | ||
|
||
||
for (final Path path : masterPaths) { | ||
assertFalse("master: " + path, Files.exists(path.resolve(NodeEnvironment.INDICES_FOLDER))); | ||
} | ||
for (final Path path : dataPaths) { | ||
assertTrue("data: " + path, Files.exists(path.resolve(NodeEnvironment.INDICES_FOLDER).resolve(indexUUID))); | ||
} | ||
assertBusy(() -> { | ||
for (final Path path : mixedPaths) { | ||
assertFalse("mixed: " + path, Files.exists(path.resolve(NodeEnvironment.INDICES_FOLDER).resolve(indexUUID))); | ||
} | ||
}); | ||
|
||
// Phase 3: restore the replica and clear the allocation filter so shards return to both
// shard-capable nodes, then delete the index — all index folders must eventually disappear.
logger.info("--> delete index and check the index folder is cleaned up on all nodes"); | ||
assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder() | ||
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1) | ||
.putNull(IndexMetaData.INDEX_ROUTING_EXCLUDE_GROUP_PREFIX + "._name"))); | ||
ensureGreen("test"); | ||
assertAcked(client().admin().indices().prepareDelete("test")); | ||
|
||
||
for (final Path path : masterPaths) { | ||
assertFalse("master: " + path, Files.exists(path.resolve(NodeEnvironment.INDICES_FOLDER))); | ||
} | ||
assertBusy(() -> { | ||
for (final Path path : dataPaths) { | ||
assertFalse("data: " + path, Files.exists(path.resolve(NodeEnvironment.INDICES_FOLDER).resolve(indexUUID))); | ||
} | ||
for (final Path path : mixedPaths) { | ||
assertFalse("mixed: " + path, Files.exists(path.resolve(NodeEnvironment.INDICES_FOLDER).resolve(indexUUID))); | ||
} | ||
}); | ||
} | ||
|
||
private void restartNodesOnBrokenClusterState(ClusterState.Builder clusterStateBuilder) throws Exception { | ||
Map<String, LucenePersistedStateFactory> lucenePersistedStateFactories = Stream.of(internalCluster().getNodeNames()) | ||
.collect(Collectors.toMap(Function.identity(), | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -223,50 +223,35 @@ public void testDeleteIndexStore() throws Exception { | |
ClusterService clusterService = getInstanceFromNode(ClusterService.class); | ||
IndexMetaData firstMetaData = clusterService.state().metaData().index("test"); | ||
assertTrue(test.hasShard(0)); | ||
ShardPath firstPath = ShardPath.loadShardPath(logger, getNodeEnvironment(), new ShardId(test.index(), 0), test.getIndexSettings()); | ||
|
||
try { | ||
indicesService.deleteIndexStore("boom", firstMetaData, clusterService.state()); | ||
fail(); | ||
} catch (IllegalStateException ex) { | ||
// all good | ||
} | ||
expectThrows(IllegalStateException.class, () -> indicesService.deleteIndexStore("boom", firstMetaData)); | ||
assertTrue(firstPath.exists()); | ||
|
||
GatewayMetaState gwMetaState = getInstanceFromNode(GatewayMetaState.class); | ||
MetaData meta = gwMetaState.getMetaData(); | ||
assertNotNull(meta); | ||
assertNotNull(meta.index("test")); | ||
assertAcked(client().admin().indices().prepareDelete("test")); | ||
|
||
assertFalse(firstPath.exists()); | ||
|
||
meta = gwMetaState.getMetaData(); | ||
assertNotNull(meta); | ||
assertNull(meta.index("test")); | ||
|
||
|
||
test = createIndex("test"); | ||
client().prepareIndex("test").setId("1").setSource("field", "value").setRefreshPolicy(IMMEDIATE).get(); | ||
client().admin().indices().prepareFlush("test").get(); | ||
assertHitCount(client().prepareSearch("test").get(), 1); | ||
IndexMetaData secondMetaData = clusterService.state().metaData().index("test"); | ||
assertAcked(client().admin().indices().prepareClose("test")); | ||
ShardPath path = ShardPath.loadShardPath(logger, getNodeEnvironment(), new ShardId(test.index(), 0), test.getIndexSettings()); | ||
assertTrue(path.exists()); | ||
ShardPath secondPath = ShardPath.loadShardPath(logger, getNodeEnvironment(), new ShardId(test.index(), 0), test.getIndexSettings()); | ||
assertTrue(secondPath.exists()); | ||
|
||
try { | ||
indicesService.deleteIndexStore("boom", secondMetaData, clusterService.state()); | ||
fail(); | ||
} catch (IllegalStateException ex) { | ||
// all good | ||
} | ||
|
||
assertTrue(path.exists()); | ||
expectThrows(IllegalStateException.class, () -> indicesService.deleteIndexStore("boom", secondMetaData)); | ||
assertTrue(secondPath.exists()); | ||
|
||
// now delete the old one and make sure we resolve against the name | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Reviewer comment: This part of this test relates to the old-and-somewhat-bogus code mentioned above -- it was checking that we didn't delete the index metadata for a different index with the same name as one in the cluster state. |
||
try { | ||
indicesService.deleteIndexStore("boom", firstMetaData, clusterService.state()); | ||
fail(); | ||
} catch (IllegalStateException ex) { | ||
// all good | ||
} | ||
assertAcked(client().admin().indices().prepareOpen("test")); | ||
ensureGreen("test"); | ||
} | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
This code looks old and somewhat bogus -- it seems that it would avoid deleting index metadata for indices that still exist in the cluster state by name (not by UUID) on master nodes.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Yup — it seems there was no deeper reasoning behind this back then in #9985 (comment)
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
It's great this code can finally go and the code that operates on the data directories is separate from the one that writes out the cluster state.