Default max local storage nodes to one #19964
Merged: 3 commits, Aug 12, 2016

@@ -261,6 +261,7 @@ class ClusterFormationTasks {
'node.attr.testattr' : 'test',
'repositories.url.allowed_urls': 'http://snapshot.test*'
]
esConfig['node.max_local_storage_nodes'] = node.config.numNodes
Member comment on this line: Cute.

esConfig['http.port'] = node.config.httpPort
esConfig['transport.tcp.port'] = node.config.transportPort
esConfig.putAll(node.config.settings)

14 changes: 11 additions & 3 deletions core/src/main/java/org/elasticsearch/env/NodeEnvironment.java
@@ -68,6 +68,7 @@
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Random;
import java.util.Set;
@@ -151,7 +152,7 @@ public String toString() {
/**
* Maximum number of data nodes that should run in an environment.
*/
public static final Setting<Integer> MAX_LOCAL_STORAGE_NODES_SETTING = Setting.intSetting("node.max_local_storage_nodes", 50, 1,
public static final Setting<Integer> MAX_LOCAL_STORAGE_NODES_SETTING = Setting.intSetting("node.max_local_storage_nodes", 1, 1,
Property.NodeScope);

/**
@@ -244,8 +245,15 @@ public NodeEnvironment(Settings settings, Environment environment) throws IOExce
}

if (locks[0] == null) {
throw new IllegalStateException("Failed to obtain node lock, is the following location writable?: "
+ Arrays.toString(environment.dataWithClusterFiles()), lastException);
final String message = String.format(
Locale.ROOT,
"failed to obtain node locks, tried [%s] with lock id%s;" +
" maybe these locations are not writable or multiple nodes were started without increasing [%s] (was [%d])?",
Arrays.toString(environment.dataWithClusterFiles()),
maxLocalStorageNodes == 1 ? " [0]" : "s [0--" + (maxLocalStorageNodes - 1) + "]",
MAX_LOCAL_STORAGE_NODES_SETTING.getKey(),
maxLocalStorageNodes);
throw new IllegalStateException(message, lastException);
}
this.nodeMetaData = loadOrCreateNodeMetaData(settings, startupTraceLogger, nodePaths);
this.logger = Loggers.getLogger(getClass(), Node.addNodeNameIfNeeded(settings, this.nodeMetaData.nodeId()));
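
For reference, a standalone sketch of how the new failure message renders with the default of one; the class name and data path below are made up for illustration and only the format string comes from this change:

```java
import java.util.Locale;

public class NodeLockMessageDemo {
    public static void main(String[] args) {
        // Hypothetical values for illustration only.
        final String dataPaths = "[/var/lib/elasticsearch/nodes]";
        final int maxLocalStorageNodes = 1; // the new default
        final String message = String.format(
            Locale.ROOT,
            "failed to obtain node locks, tried [%s] with lock id%s;"
                + " maybe these locations are not writable or multiple nodes were started"
                + " without increasing [%s] (was [%d])?",
            dataPaths,
            maxLocalStorageNodes == 1 ? " [0]" : "s [0--" + (maxLocalStorageNodes - 1) + "]",
            "node.max_local_storage_nodes",
            maxLocalStorageNodes);
        // Prints (wrapped here for readability):
        // failed to obtain node locks, tried [[/var/lib/elasticsearch/nodes]] with lock id [0];
        // maybe these locations are not writable or multiple nodes were started without
        // increasing [node.max_local_storage_nodes] (was [1])?
        System.out.println(message);
    }
}
```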

3 changes: 3 additions & 0 deletions core/src/main/java/org/elasticsearch/tribe/TribeService.java
@@ -117,6 +117,9 @@ public static Settings processSettings(Settings settings) {
sb.put(Node.NODE_MASTER_SETTING.getKey(), false);
sb.put(Node.NODE_DATA_SETTING.getKey(), false);
sb.put(Node.NODE_INGEST_SETTING.getKey(), false);
if (!NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.exists(settings)) {
sb.put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), nodesSettings.size());
}
sb.put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "local"); // a tribe node should not use zen discovery
// nothing is going to be discovered, since no master will be elected
sb.put(DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.getKey(), 0);
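
Worth noting about the `exists` guard above: the per-tribe default is only applied when the user has not set `node.max_local_storage_nodes` explicitly. A small sketch of the explicit-override case; the class name, cluster names, and the value 4 are hypothetical:

```java
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.NodeEnvironment;

class TribeMaxLocalStorageNodesExample {
    static Settings explicitOverride() {
        return Settings.builder()
            .put("tribe.t1.cluster.name", "cluster_one")
            .put("tribe.t2.cluster.name", "cluster_two")
            // Explicit value: the exists() guard in processSettings leaves it
            // alone instead of defaulting it to the number of configured tribes.
            .put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), 4)
            .build();
    }
}
```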

@@ -38,6 +38,7 @@
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.store.Store;
@@ -118,6 +119,7 @@ public void blockActions(String... actions) {
protected Settings nodeSettings(int nodeOrdinal) {
return Settings.builder()
// manual collection or upon cluster forming.
.put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), 2)
.put(InternalClusterInfoService.INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING.getKey(), "1s")
.build();
}

@@ -55,6 +55,7 @@
import org.elasticsearch.discovery.zen.ping.ZenPingService;
import org.elasticsearch.discovery.zen.ping.unicast.UnicastZenPing;
import org.elasticsearch.discovery.zen.publish.PublishClusterStateAction;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.indices.store.IndicesStoreIntegrationIT;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESIntegTestCase;
@@ -207,6 +208,7 @@ private void configureUnicastCluster(
// TODO: Rarely use default settings form some of these
Settings nodeSettings = Settings.builder()
.put(settings)
.put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), 4)
.put(ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), minimumMasterNode)
.build();


16 changes: 6 additions & 10 deletions core/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java
@@ -74,18 +74,14 @@ public void testNodeLockSillySettings() {
}

public void testNodeLockSingleEnvironment() throws IOException {
final Settings settings = buildEnvSettings(Settings.builder()
.put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), 1).build());
final Settings settings = buildEnvSettings(Settings.builder().put("node.max_local_storage_nodes", 1).build());
NodeEnvironment env = newNodeEnvironment(settings);
List<String> dataPaths = Environment.PATH_DATA_SETTING.get(settings);

try {
// Reuse the same location and attempt to lock again
new NodeEnvironment(settings, new Environment(settings));
fail("env has already locked all the data directories it is allowed");
} catch (IllegalStateException ex) {
assertThat(ex.getMessage(), containsString("Failed to obtain node lock"));
}
// Reuse the same location and attempt to lock again
IllegalStateException ex =
expectThrows(IllegalStateException.class, () -> new NodeEnvironment(settings, new Environment(settings)));
assertThat(ex.getMessage(), containsString("failed to obtain node lock"));

// Close the environment that holds the lock and make sure we can get the lock after release
env.close();
@@ -121,7 +117,7 @@ public void testSegmentInfosTracing() {
}

public void testNodeLockMultipleEnvironment() throws IOException {
final Settings settings = buildEnvSettings(Settings.EMPTY);
final Settings settings = buildEnvSettings(Settings.builder().put("node.max_local_storage_nodes", 2).build());
final NodeEnvironment first = newNodeEnvironment(settings);
List<String> dataPaths = Environment.PATH_DATA_SETTING.get(settings);
NodeEnvironment second = new NodeEnvironment(settings, new Environment(settings));

@@ -22,6 +22,7 @@
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService;
import org.elasticsearch.search.sort.SortOrder;
import org.elasticsearch.test.ESIntegTestCase;
@@ -40,6 +41,7 @@ public class CircuitBreakerNoopIT extends ESIntegTestCase {
@Override
protected Settings nodeSettings(int nodeOrdinal) {
return Settings.builder()
.put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), 2)
.put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING.getKey(), "noop")
// This is set low, because if the "noop" is not a noop, it will break
.put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "10b")

3 changes: 2 additions & 1 deletion core/src/test/java/org/elasticsearch/tribe/TribeIT.java
@@ -39,6 +39,7 @@
import org.elasticsearch.discovery.DiscoveryModule;
import org.elasticsearch.discovery.MasterNotDiscoveredException;
import org.elasticsearch.discovery.zen.ping.unicast.UnicastZenPing;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.node.Node;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.InternalTestCluster;
@@ -128,6 +129,7 @@ private void setupTribeNode(Settings settings) {
tribe1Defaults.putArray("tribe.t2." + UnicastZenPing.DISCOVERY_ZEN_PING_UNICAST_HOSTS_SETTING.getKey(), getUnicastHosts(cluster2.client()));

Settings merged = Settings.builder()
.put(internalCluster().getDefaultSettings())
.put("tribe.t1.cluster.name", internalCluster().getClusterName())
.put("tribe.t2.cluster.name", cluster2.getClusterName())
.put("tribe.t1.transport.type", "local")
@@ -142,7 +144,6 @@ private void setupTribeNode(Settings settings) {

.put(tribe1Defaults.build())
.put(tribe2Defaults.build())
.put(internalCluster().getDefaultSettings())
.put("node.name", "tribe_node") // make sure we can identify threads from this node
.build();


11 changes: 11 additions & 0 deletions docs/reference/migration/migrate_5_0/settings.asciidoc
@@ -310,3 +310,14 @@ The unit 'w' representing weeks is no longer supported.

Fractional time values (e.g., 0.5s) are no longer supported. For example, this means when setting
timeouts "0.5s" will be rejected and should instead be input as "500ms".

==== Node max local storage nodes

Previous versions of Elasticsearch defaulted to allowing multiple nodes to share the same data
directory (up to 50). This could be confusing: a user who accidentally started a second node on the
same host would see it come up with an empty data directory and conclude that data had been lost.
While allowing multiple nodes per data directory makes it easy to play with forming a small cluster
on a laptop, and end-users do sometimes run multiple nodes on the same host, this tends to be the
exception. In keeping with Elasticsearch's continual movement towards safer out-of-the-box defaults,
and optimizing for the norm instead of the exception, the default for
`node.max_local_storage_nodes` is now one.
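
The sketch below illustrates the change from code; the class name is invented, and the value 2 stands in for however many nodes you intentionally co-locate. On a standalone deployment the equivalent is a `node.max_local_storage_nodes` entry in `elasticsearch.yml`:

```java
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.NodeEnvironment;

class MaxLocalStorageNodesDefaultCheck {
    public static void main(String[] args) {
        // With no explicit value, the setting now resolves to 1.
        int byDefault = NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.get(Settings.EMPTY);

        // Opting back in to sharing a data directory between two nodes
        // must now be done explicitly (the value 2 is illustrative).
        Settings twoNodes = Settings.builder()
            .put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), 2)
            .build();
        int raised = NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.get(twoNodes);

        System.out.println("default = " + byDefault + ", raised = " + raised); // default = 1, raised = 2
    }
}
```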

@@ -60,7 +60,9 @@ public static void createTribes() {
.put(NetworkModule.HTTP_ENABLED.getKey(), false)
.put("transport.type", "local")
.put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "local")
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()).build();
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir())
.put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), 2)
.build();

tribe1 = new TribeClientNode(
Settings.builder()

@@ -31,6 +31,7 @@
import org.elasticsearch.action.DocWriteResponse;
import org.elasticsearch.discovery.DiscoveryModule;
import org.elasticsearch.client.RestClientBuilder;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.transport.MockTcpTransportPlugin;
import org.elasticsearch.action.ActionListener;
@@ -1630,6 +1631,7 @@ private boolean randomDynamicTemplates() {
*/
protected Settings nodeSettings(int nodeOrdinal) {
Settings.Builder builder = Settings.builder()
.put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), Integer.MAX_VALUE)
// Default the watermarks to absurdly low to prevent the tests
// from failing on nodes without enough disk space
.put(DiskThresholdDecider.CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING.getKey(), "1b")

@@ -175,8 +175,8 @@ public final class InternalTestCluster extends TestCluster {
public final int HTTP_BASE_PORT = GLOBAL_HTTP_BASE_PORT + CLUSTER_BASE_PORT_OFFSET;


static final int DEFAULT_LOW_NUM_MASTER_NODES = 1;
static final int DEFAULT_HIGH_NUM_MASTER_NODES = 3;
public static final int DEFAULT_LOW_NUM_MASTER_NODES = 1;
public static final int DEFAULT_HIGH_NUM_MASTER_NODES = 3;

static final int DEFAULT_MIN_NUM_DATA_NODES = 1;
static final int DEFAULT_MAX_NUM_DATA_NODES = TEST_NIGHTLY ? 6 : 3;
@@ -300,6 +300,7 @@ public InternalTestCluster(long clusterSeed, Path baseDir,
builder.put(Environment.PATH_DATA_SETTING.getKey(), dataPath.toString());
}
}
builder.put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), Integer.MAX_VALUE);
builder.put(Environment.PATH_SHARED_DATA_SETTING.getKey(), baseDir.resolve("custom"));
builder.put(Environment.PATH_HOME_SETTING.getKey(), baseDir);
builder.put(Environment.PATH_REPO_SETTING.getKey(), baseDir.resolve("repos"));

@@ -26,6 +26,7 @@
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.CollectionUtils;
import org.elasticsearch.discovery.DiscoveryModule;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.test.InternalTestCluster;
import org.elasticsearch.test.NodeConfigurationSource;
import org.elasticsearch.transport.TransportSettings;
@@ -108,7 +109,7 @@ private static int calcBasePort() {

@Override
public Settings nodeSettings(int nodeOrdinal) {
Settings.Builder builder = Settings.builder();
Settings.Builder builder = Settings.builder().put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), numOfNodes);

String[] unicastHosts = new String[unicastHostOrdinals.length];
if (nodeOrdinal >= unicastHostPorts.length) {

@@ -128,12 +128,16 @@ public void testBeforeTest() throws Exception {
boolean masterNodes = randomBoolean();
int minNumDataNodes = randomIntBetween(0, 3);
int maxNumDataNodes = randomIntBetween(minNumDataNodes, 4);
int numClientNodes = randomIntBetween(0, 2);
final String clusterName1 = "shared1";
final String clusterName2 = "shared2";
NodeConfigurationSource nodeConfigurationSource = new NodeConfigurationSource() {
@Override
public Settings nodeSettings(int nodeOrdinal) {
return Settings.builder()
.put(
NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(),
2 * ((masterNodes ? InternalTestCluster.DEFAULT_HIGH_NUM_MASTER_NODES : 0) + maxNumDataNodes + numClientNodes))
.put(NetworkModule.HTTP_ENABLED.getKey(), false)
.put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "local")
.put(NetworkModule.TRANSPORT_TYPE_KEY, "local").build();
Expand All @@ -145,7 +149,7 @@ public Settings transportClientSettings() {
.put(NetworkModule.TRANSPORT_TYPE_KEY, "local").build();
}
};
int numClientNodes = randomIntBetween(0, 2);

boolean enableHttpPipelining = randomBoolean();
String nodePrefix = "foobar";

@@ -187,13 +191,17 @@ public void testDataFolderAssignmentAndCleaning() throws IOException, Interrupte
long clusterSeed = randomLong();
boolean masterNodes = randomBoolean();
// we need one stable node
int minNumDataNodes = 2;
int maxNumDataNodes = 2;
final int minNumDataNodes = 2;
final int maxNumDataNodes = 2;
final int numClientNodes = randomIntBetween(0, 2);
final String clusterName1 = "shared1";
NodeConfigurationSource nodeConfigurationSource = new NodeConfigurationSource() {
@Override
public Settings nodeSettings(int nodeOrdinal) {
return Settings.builder().put(NetworkModule.HTTP_ENABLED.getKey(), false)
.put(
NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(),
2 + (masterNodes ? InternalTestCluster.DEFAULT_HIGH_NUM_MASTER_NODES : 0) + maxNumDataNodes + numClientNodes)
.put(NetworkModule.TRANSPORT_TYPE_KEY, "local")
.put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "local")
.build();
Expand All @@ -203,7 +211,7 @@ public Settings transportClientSettings() {
return Settings.builder()
.put(NetworkModule.TRANSPORT_TYPE_KEY, "local").build();
}
}; int numClientNodes = randomIntBetween(0, 2);
};
boolean enableHttpPipelining = randomBoolean();
String nodePrefix = "test";
Path baseDir = createTempDir();
@@ -269,11 +277,13 @@ private Path[] getNodePaths(InternalTestCluster cluster, String name) {

public void testDifferentRolesMaintainPathOnRestart() throws Exception {
final Path baseDir = createTempDir();
final int numNodes = 5;
InternalTestCluster cluster = new InternalTestCluster(randomLong(), baseDir, true, 0, 0, "test",
new NodeConfigurationSource() {
@Override
public Settings nodeSettings(int nodeOrdinal) {
return Settings.builder()
.put(NodeEnvironment.MAX_LOCAL_STORAGE_NODES_SETTING.getKey(), numNodes)
.put(NetworkModule.HTTP_ENABLED.getKey(), false)
.put(NetworkModule.TRANSPORT_TYPE_KEY, "local")
.put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "local")
Expand All @@ -289,7 +299,7 @@ public Settings transportClientSettings() {
cluster.beforeTest(random(), 0.0);
try {
Map<DiscoveryNode.Role, Set<String>> pathsPerRole = new HashMap<>();
for (int i = 0; i < 5; i++) {
for (int i = 0; i < numNodes; i++) {
final DiscoveryNode.Role role = randomFrom(MASTER, DiscoveryNode.Role.DATA, DiscoveryNode.Role.INGEST);
final String node;
switch (role) {