Switch the rest of SimpleFS usage to NIOFS
Signed-off-by: Nicholas Walter Knize <nknize@apache.org>
nknize committed Aug 19, 2021
1 parent ea5190e commit 846f78a
Showing 12 changed files with 39 additions and 40 deletions.
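
The change is mechanical: each remaining SimpleFSDirectory construction becomes a NIOFSDirectory construction with the same arguments (upstream Lucene has folded SimpleFSDirectory into NIOFSDirectory, which uses positional FileChannel reads instead of a synchronized stream, so the old class goes away in Lucene 9). The sketch below shows the shared call pattern in isolation, assuming only the standard Lucene store API; the class name, temp directory, file name, and codec header string are illustrative and not taken from the commit.

```java
import java.nio.file.Files;
import java.nio.file.Path;

import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.NIOFSDirectory;

public class NIOFSDirectoryDemo {
    public static void main(String[] args) throws Exception {
        // Illustrative temp directory; the real call sites pass config, shard, or translog paths.
        Path dir = Files.createTempDirectory("niofs-demo");

        // Before this commit: Directory directory = new SimpleFSDirectory(dir);
        // After:              the same sites construct NIOFSDirectory with identical arguments.
        try (Directory directory = new NIOFSDirectory(dir)) {
            try (IndexOutput out = directory.createOutput("example.bin", IOContext.DEFAULT)) {
                CodecUtil.writeHeader(out, "example", 1); // header with codec name and version
                out.writeByte((byte) 0);
                CodecUtil.writeFooter(out);               // footer with checksum
            }
            try (IndexInput in = directory.openInput("example.bin", IOContext.READONCE)) {
                // Checksum the whole file before trusting its contents, as the keystore loader does.
                CodecUtil.checksumEntireFile(in);
            }
        }
    }
}
```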
@@ -35,7 +35,7 @@
import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexOutput;
-import org.apache.lucene.store.SimpleFSDirectory;
+import org.apache.lucene.store.NIOFSDirectory;
import org.opensearch.common.Randomness;
import org.opensearch.core.internal.io.IOUtils;
import org.opensearch.env.Environment;
@@ -192,7 +192,7 @@ public void testUpgradeNoop() throws Exception {

public void testFailWhenCannotConsumeSecretStream() throws Exception {
Path configDir = env.configFile();
-SimpleFSDirectory directory = new SimpleFSDirectory(configDir);
+NIOFSDirectory directory = new NIOFSDirectory(configDir);
try (IndexOutput indexOutput = directory.createOutput("opensearch.keystore", IOContext.DEFAULT)) {
CodecUtil.writeHeader(indexOutput, "opensearch.keystore", 3);
indexOutput.writeByte((byte) 0); // No password
@@ -220,7 +220,7 @@ public void testFailWhenCannotConsumeSecretStream() throws Exception {

public void testFailWhenCannotConsumeEncryptedBytesStream() throws Exception {
Path configDir = env.configFile();
-SimpleFSDirectory directory = new SimpleFSDirectory(configDir);
+NIOFSDirectory directory = new NIOFSDirectory(configDir);
try (IndexOutput indexOutput = directory.createOutput("opensearch.keystore", IOContext.DEFAULT)) {
CodecUtil.writeHeader(indexOutput, "opensearch.keystore", 3);
indexOutput.writeByte((byte) 0); // No password
@@ -249,7 +249,7 @@ public void testFailWhenCannotConsumeEncryptedBytesStream() throws Exception {

public void testFailWhenSecretStreamNotConsumed() throws Exception {
Path configDir = env.configFile();
-SimpleFSDirectory directory = new SimpleFSDirectory(configDir);
+NIOFSDirectory directory = new NIOFSDirectory(configDir);
try (IndexOutput indexOutput = directory.createOutput("opensearch.keystore", IOContext.DEFAULT)) {
CodecUtil.writeHeader(indexOutput, "opensearch.keystore", 3);
indexOutput.writeByte((byte) 0); // No password
@@ -276,7 +276,7 @@ public void testFailWhenSecretStreamNotConsumed() throws Exception {

public void testFailWhenEncryptedBytesStreamIsNotConsumed() throws Exception {
Path configDir = env.configFile();
-SimpleFSDirectory directory = new SimpleFSDirectory(configDir);
+NIOFSDirectory directory = new NIOFSDirectory(configDir);
try (IndexOutput indexOutput = directory.createOutput("opensearch.keystore", IOContext.DEFAULT)) {
CodecUtil.writeHeader(indexOutput, "opensearch.keystore", 3);
indexOutput.writeByte((byte) 0); // No password
@@ -362,7 +362,7 @@ public void testIllegalSettingName() throws Exception {
public void testBackcompatV1() throws Exception {
assumeFalse("Can't run in a FIPS JVM as PBE is not available", inFipsJvm());
Path configDir = env.configFile();
-SimpleFSDirectory directory = new SimpleFSDirectory(configDir);
+NIOFSDirectory directory = new NIOFSDirectory(configDir);
try (IndexOutput output = directory.createOutput("opensearch.keystore", IOContext.DEFAULT)) {
CodecUtil.writeHeader(output, "opensearch.keystore", 1);
output.writeByte((byte) 0); // hasPassword = false
@@ -393,7 +393,7 @@ public void testBackcompatV2() throws Exception {
public void testBackcompatV2() throws Exception {
assumeFalse("Can't run in a FIPS JVM as PBE is not available", inFipsJvm());
Path configDir = env.configFile();
-SimpleFSDirectory directory = new SimpleFSDirectory(configDir);
+NIOFSDirectory directory = new NIOFSDirectory(configDir);
byte[] fileBytes = new byte[20];
random().nextBytes(fileBytes);
try (IndexOutput output = directory.createOutput("opensearch.keystore", IOContext.DEFAULT)) {

@@ -32,7 +32,7 @@

package org.opensearch.cluster.routing;

-import org.apache.lucene.store.SimpleFSDirectory;
+import org.apache.lucene.store.NIOFSDirectory;

import org.opensearch.action.admin.cluster.allocation.ClusterAllocationExplanation;
import org.opensearch.action.admin.indices.stats.ShardStats;
@@ -151,7 +151,7 @@ public void testFailedRecoveryOnAllocateStalePrimaryRequiresAnotherAllocateStale
});

internalCluster().stopRandomNode(InternalTestCluster.nameFilter(node1));
-try(Store store = new Store(shardId, indexSettings, new SimpleFSDirectory(indexPath), new DummyShardLock(shardId))) {
+try(Store store = new Store(shardId, indexSettings, new NIOFSDirectory(indexPath), new DummyShardLock(shardId))) {
store.removeCorruptionMarker();
}
node1 = internalCluster().startNode(node1DataPathSettings);
@@ -229,7 +229,7 @@ private String historyUUID(String node, String indexName) {
}

private void putFakeCorruptionMarker(IndexSettings indexSettings, ShardId shardId, Path indexPath) throws IOException {
-try(Store store = new Store(shardId, indexSettings, new SimpleFSDirectory(indexPath), new DummyShardLock(shardId))) {
+try(Store store = new Store(shardId, indexSettings, new NIOFSDirectory(indexPath), new DummyShardLock(shardId))) {
store.markStoreCorrupted(new IOException("fake ioexception"));
}
}

@@ -40,7 +40,7 @@
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
-import org.apache.lucene.store.SimpleFSDirectory;
+import org.apache.lucene.store.NIOFSDirectory;
import org.apache.lucene.util.SetOnce;
import org.opensearch.cli.ExitCodes;
import org.opensearch.cli.UserException;
@@ -236,7 +236,7 @@ public static KeyStoreWrapper load(Path configDir, String keystoreFileName) thro
return null;
}

-SimpleFSDirectory directory = new SimpleFSDirectory(configDir);
+NIOFSDirectory directory = new NIOFSDirectory(configDir);
try (IndexInput indexInput = directory.openInput(keystoreFileName, IOContext.READONCE)) {
ChecksumIndexInput input = new BufferedChecksumIndexInput(indexInput);
final int formatVersion;
@@ -502,7 +502,7 @@ private void decryptLegacyEntries() throws GeneralSecurityException, IOException
public synchronized void save(Path configDir, char[] password) throws Exception {
ensureOpen();

-SimpleFSDirectory directory = new SimpleFSDirectory(configDir);
+NIOFSDirectory directory = new NIOFSDirectory(configDir);
// write to tmp file first, then overwrite
String tmpFile = KEYSTORE_FILENAME + ".tmp";
try (IndexOutput output = directory.createOutput(tmpFile, IOContext.DEFAULT)) {

server/src/main/java/org/opensearch/env/NodeEnvironment.java (4 changes: 2 additions & 2 deletions)
@@ -42,8 +42,8 @@
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.Lock;
import org.apache.lucene.store.LockObtainFailedException;
+import org.apache.lucene.store.NIOFSDirectory;
import org.apache.lucene.store.NativeFSLockFactory;
-import org.apache.lucene.store.SimpleFSDirectory;
import org.opensearch.OpenSearchException;
import org.opensearch.Version;
import org.opensearch.cluster.metadata.IndexMetadata;
@@ -501,7 +501,7 @@ public static void acquireFSLockForPaths(IndexSettings indexSettings, Path... sh
// resolve the directory the shard actually lives in
Path p = shardPaths[i].resolve("index");
// open a directory (will be immediately closed) on the shard's location
-dirs[i] = new SimpleFSDirectory(p, indexSettings.getValue(FsDirectoryFactory.INDEX_LOCK_FACTOR_SETTING));
+dirs[i] = new NIOFSDirectory(p, indexSettings.getValue(FsDirectoryFactory.INDEX_LOCK_FACTOR_SETTING));
// create a lock for the "write.lock" file
try {
locks[i] = dirs[i].obtainLock(IndexWriter.WRITE_LOCK_NAME);

@@ -42,7 +42,7 @@
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
-import org.apache.lucene.store.SimpleFSDirectory;
+import org.apache.lucene.store.NIOFSDirectory;
import org.opensearch.ExceptionsHelper;
import org.opensearch.common.collect.Tuple;
import org.opensearch.common.lucene.store.IndexOutputOutputStream;
@@ -323,7 +323,7 @@ public final T read(NamedXContentRegistry namedXContentRegistry, Path file) thro
}

protected Directory newDirectory(Path dir) throws IOException {
-return new SimpleFSDirectory(dir);
+return new NIOFSDirectory(dir);
}



@@ -57,7 +57,6 @@
import org.apache.lucene.store.AlreadyClosedException;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.NIOFSDirectory;
-import org.apache.lucene.store.SimpleFSDirectory;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefIterator;
@@ -229,7 +228,7 @@ private static IndexWriter createIndexWriter(Directory directory, boolean openEx
*/
public static void deleteAll(Path[] dataPaths) throws IOException {
for (Path dataPath : dataPaths) {
-Lucene.cleanLuceneIndex(new SimpleFSDirectory(dataPath.resolve(METADATA_DIRECTORY_NAME)));
+Lucene.cleanLuceneIndex(new NIOFSDirectory(dataPath.resolve(METADATA_DIRECTORY_NAME)));
}
}

@@ -278,7 +277,7 @@ public static NodeMetadata nodeMetadata(Path... dataPaths) throws IOException {
for (final Path dataPath : dataPaths) {
final Path indexPath = dataPath.resolve(METADATA_DIRECTORY_NAME);
if (Files.exists(indexPath)) {
-try (DirectoryReader reader = DirectoryReader.open(new SimpleFSDirectory(dataPath.resolve(METADATA_DIRECTORY_NAME)))) {
+try (DirectoryReader reader = DirectoryReader.open(new NIOFSDirectory(dataPath.resolve(METADATA_DIRECTORY_NAME)))) {
final Map<String, String> userData = reader.getIndexCommit().getUserData();
assert userData.get(NODE_VERSION_KEY) != null;

@@ -309,12 +308,12 @@ public static void overrideVersion(Version newVersion, Path... dataPaths) throws
for (final Path dataPath : dataPaths) {
final Path indexPath = dataPath.resolve(METADATA_DIRECTORY_NAME);
if (Files.exists(indexPath)) {
-try (DirectoryReader reader = DirectoryReader.open(new SimpleFSDirectory(dataPath.resolve(METADATA_DIRECTORY_NAME)))) {
+try (DirectoryReader reader = DirectoryReader.open(new NIOFSDirectory(dataPath.resolve(METADATA_DIRECTORY_NAME)))) {
final Map<String, String> userData = reader.getIndexCommit().getUserData();
assert userData.get(NODE_VERSION_KEY) != null;

try (IndexWriter indexWriter =
-createIndexWriter(new SimpleFSDirectory(dataPath.resolve(METADATA_DIRECTORY_NAME)), true)) {
+createIndexWriter(new NIOFSDirectory(dataPath.resolve(METADATA_DIRECTORY_NAME)), true)) {
final Map<String, String> commitData = new HashMap<>(userData);
commitData.put(NODE_VERSION_KEY, Integer.toString(newVersion.id));
indexWriter.setLiveCommitData(commitData.entrySet());

server/src/main/java/org/opensearch/index/store/Store.java (6 changes: 3 additions & 3 deletions)
@@ -58,7 +58,7 @@
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.Lock;
-import org.apache.lucene.store.SimpleFSDirectory;
+import org.apache.lucene.store.NIOFSDirectory;
import org.apache.lucene.util.ArrayUtil;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
@@ -463,7 +463,7 @@ private void closeInternal() {
public static MetadataSnapshot readMetadataSnapshot(Path indexLocation, ShardId shardId, NodeEnvironment.ShardLocker shardLocker,
Logger logger) throws IOException {
try (ShardLock lock = shardLocker.lock(shardId, "read metadata snapshot", TimeUnit.SECONDS.toMillis(5));
-Directory dir = new SimpleFSDirectory(indexLocation)) {
+Directory dir = new NIOFSDirectory(indexLocation)) {
failIfCorrupted(dir);
return new MetadataSnapshot(null, dir, logger);
} catch (IndexNotFoundException ex) {
@@ -484,7 +484,7 @@ public static MetadataSnapshot readMetadataSnapshot(Path indexLocation, ShardId
public static void tryOpenIndex(Path indexLocation, ShardId shardId, NodeEnvironment.ShardLocker shardLocker,
Logger logger) throws IOException, ShardLockObtainFailedException {
try (ShardLock lock = shardLocker.lock(shardId, "open index", TimeUnit.SECONDS.toMillis(5));
-Directory dir = new SimpleFSDirectory(indexLocation)) {
+Directory dir = new NIOFSDirectory(indexLocation)) {
failIfCorrupted(dir);
SegmentInfos segInfo = Lucene.readSegmentInfos(dir);
logger.trace("{} loaded segment info [{}]", shardId, segInfo);

@@ -41,8 +41,8 @@
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.store.NIOFSDirectory;
import org.apache.lucene.store.OutputStreamIndexOutput;
-import org.apache.lucene.store.SimpleFSDirectory;
import org.opensearch.common.io.Channels;
import org.opensearch.index.seqno.SequenceNumbers;

@@ -191,7 +191,7 @@ public String toString() {
}

public static Checkpoint read(Path path) throws IOException {
-try (Directory dir = new SimpleFSDirectory(path.getParent())) {
+try (Directory dir = new NIOFSDirectory(path.getParent())) {
try (IndexInput indexInput = dir.openInput(path.getFileName().toString(), IOContext.DEFAULT)) {
// We checksum the entire file before we even go and parse it. If it's corrupted we barf right here.
CodecUtil.checksumEntireFile(indexInput);

@@ -36,7 +36,7 @@
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.lucene.analysis.hunspell.Dictionary;
import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.SimpleFSDirectory;
+import org.apache.lucene.store.NIOFSDirectory;
import org.opensearch.OpenSearchException;
import org.opensearch.core.internal.io.IOUtils;
import org.opensearch.common.io.FileSystemUtils;
@@ -208,7 +208,7 @@ private Dictionary loadDictionary(String locale, Settings nodeSettings, Environm

affixStream = Files.newInputStream(affixFiles[0]);

-try (Directory tmp = new SimpleFSDirectory(env.tmpFile())) {
+try (Directory tmp = new NIOFSDirectory(env.tmpFile())) {
return new Dictionary(tmp, "hunspell", affixStream, dicStreams, ignoreCase);
}


@@ -38,7 +38,7 @@
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.MockDirectoryWrapper;
-import org.apache.lucene.store.SimpleFSDirectory;
+import org.apache.lucene.store.NIOFSDirectory;
import org.apache.lucene.util.LuceneTestCase;
import org.opensearch.cluster.ClusterModule;
import org.opensearch.cluster.metadata.Metadata;
@@ -193,7 +193,7 @@ public void testCorruption() throws IOException {
}

public static void corruptFile(Path fileToCorrupt, Logger logger) throws IOException {
-try (SimpleFSDirectory dir = new SimpleFSDirectory(fileToCorrupt.getParent())) {
+try (NIOFSDirectory dir = new NIOFSDirectory(fileToCorrupt.getParent())) {
long checksumBeforeCorruption;
try (IndexInput input = dir.openInput(fileToCorrupt.getFileName().toString(), IOContext.DEFAULT)) {
checksumBeforeCorruption = CodecUtil.retrieveChecksum(input);
@@ -245,7 +245,7 @@ private DummyState writeAndReadStateSuccessfully(Format format, Path... paths) t

private static void ensureOnlyOneStateFile(Path[] paths) throws IOException {
for (Path path : paths) {
-try (Directory dir = new SimpleFSDirectory(path.resolve(MetadataStateFormat.STATE_DIR_NAME))) {
+try (Directory dir = new NIOFSDirectory(path.resolve(MetadataStateFormat.STATE_DIR_NAME))) {
assertThat(dir.listAll().length, equalTo(1));
}
}

@@ -41,7 +41,7 @@
import org.apache.lucene.store.FilterDirectory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexOutput;
-import org.apache.lucene.store.SimpleFSDirectory;
+import org.apache.lucene.store.NIOFSDirectory;
import org.opensearch.Version;
import org.opensearch.cluster.ClusterName;
import org.opensearch.cluster.ClusterState;
@@ -507,7 +507,7 @@ public void testFailsIfGlobalMetadataIsMissing() throws IOException {
}

final Path brokenPath = randomFrom(nodeEnvironment.nodeDataPaths());
-try (Directory directory = new SimpleFSDirectory(brokenPath.resolve(PersistedClusterStateService.METADATA_DIRECTORY_NAME))) {
+try (Directory directory = new NIOFSDirectory(brokenPath.resolve(PersistedClusterStateService.METADATA_DIRECTORY_NAME))) {
final IndexWriterConfig indexWriterConfig = new IndexWriterConfig();
indexWriterConfig.setOpenMode(IndexWriterConfig.OpenMode.CREATE);
try (IndexWriter indexWriter = new IndexWriter(directory, indexWriterConfig)) {
@@ -538,8 +538,8 @@ public void testFailsIfGlobalMetadataIsDuplicated() throws IOException {

final Path brokenPath = randomFrom(nodeEnvironment.nodeDataPaths());
final Path dupPath = randomValueOtherThan(brokenPath, () -> randomFrom(nodeEnvironment.nodeDataPaths()));
-try (Directory directory = new SimpleFSDirectory(brokenPath.resolve(PersistedClusterStateService.METADATA_DIRECTORY_NAME));
-Directory dupDirectory = new SimpleFSDirectory(dupPath.resolve(PersistedClusterStateService.METADATA_DIRECTORY_NAME))) {
+try (Directory directory = new NIOFSDirectory(brokenPath.resolve(PersistedClusterStateService.METADATA_DIRECTORY_NAME));
+Directory dupDirectory = new NIOFSDirectory(dupPath.resolve(PersistedClusterStateService.METADATA_DIRECTORY_NAME))) {
try (IndexWriter indexWriter = new IndexWriter(directory, new IndexWriterConfig())) {
indexWriter.addIndexes(dupDirectory);
indexWriter.commit();
@@ -583,8 +583,8 @@ public void testFailsIfIndexMetadataIsDuplicated() throws IOException {

final Path brokenPath = randomFrom(nodeEnvironment.nodeDataPaths());
final Path dupPath = randomValueOtherThan(brokenPath, () -> randomFrom(nodeEnvironment.nodeDataPaths()));
-try (Directory directory = new SimpleFSDirectory(brokenPath.resolve(PersistedClusterStateService.METADATA_DIRECTORY_NAME));
-Directory dupDirectory = new SimpleFSDirectory(dupPath.resolve(PersistedClusterStateService.METADATA_DIRECTORY_NAME))) {
+try (Directory directory = new NIOFSDirectory(brokenPath.resolve(PersistedClusterStateService.METADATA_DIRECTORY_NAME));
+Directory dupDirectory = new NIOFSDirectory(dupPath.resolve(PersistedClusterStateService.METADATA_DIRECTORY_NAME))) {
try (IndexWriter indexWriter = new IndexWriter(directory, new IndexWriterConfig())) {
indexWriter.deleteDocuments(new Term("type", "global")); // do not duplicate global metadata
indexWriter.addIndexes(dupDirectory);