
Commit 4feabd1

Merge branch 'apache:trunk' into YARN-11350-V2
2 parents: 8e1cbf0 + a71aaef

File tree: 58 files changed, +806 -378 lines

LICENSE-binary

Lines changed: 1 addition & 1 deletion
@@ -324,7 +324,7 @@ org.apache.htrace:htrace-core:3.1.0-incubating
 org.apache.htrace:htrace-core4:4.1.0-incubating
 org.apache.httpcomponents:httpclient:4.5.6
 org.apache.httpcomponents:httpcore:4.4.10
-org.apache.kafka:kafka-clients:2.8.1
+org.apache.kafka:kafka-clients:2.8.2
 org.apache.kerby:kerb-admin:2.0.2
 org.apache.kerby:kerb-client:2.0.2
 org.apache.kerby:kerb-common:2.0.2

hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/util/PlatformName.java

Lines changed: 60 additions & 6 deletions
@@ -18,6 +18,10 @@
 
 package org.apache.hadoop.util;
 
+import java.security.AccessController;
+import java.security.PrivilegedAction;
+import java.util.Arrays;
+
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 
@@ -33,21 +37,71 @@ public class PlatformName {
    * per the java-vm.
    */
   public static final String PLATFORM_NAME =
-      (System.getProperty("os.name").startsWith("Windows")
-      ? System.getenv("os") : System.getProperty("os.name"))
-      + "-" + System.getProperty("os.arch")
-      + "-" + System.getProperty("sun.arch.data.model");
+      (System.getProperty("os.name").startsWith("Windows") ?
+      System.getenv("os") : System.getProperty("os.name"))
+      + "-" + System.getProperty("os.arch") + "-"
+      + System.getProperty("sun.arch.data.model");
 
   /**
    * The java vendor name used in this platform.
    */
   public static final String JAVA_VENDOR_NAME = System.getProperty("java.vendor");
 
+  /**
+   * Define a system class accessor that is open to changes in underlying implementations
+   * of the system class loader modules.
+   */
+  private static final class SystemClassAccessor extends ClassLoader {
+    public Class<?> getSystemClass(String className) throws ClassNotFoundException {
+      return findSystemClass(className);
+    }
+  }
+
   /**
    * A public static variable to indicate the current java vendor is
-   * IBM java or not.
+   * IBM and the type is Java Technology Edition which provides its
+   * own implementations of many security packages and Cipher suites.
+   * Note that these are not provided in Semeru runtimes:
+   * See https://developer.ibm.com/languages/java/semeru-runtimes for details.
    */
-  public static final boolean IBM_JAVA = JAVA_VENDOR_NAME.contains("IBM");
+  public static final boolean IBM_JAVA = JAVA_VENDOR_NAME.contains("IBM") &&
+      hasIbmTechnologyEditionModules();
+
+  private static boolean hasIbmTechnologyEditionModules() {
+    return Arrays.asList(
+        "com.ibm.security.auth.module.JAASLoginModule",
+        "com.ibm.security.auth.module.Win64LoginModule",
+        "com.ibm.security.auth.module.NTLoginModule",
+        "com.ibm.security.auth.module.AIX64LoginModule",
+        "com.ibm.security.auth.module.LinuxLoginModule",
+        "com.ibm.security.auth.module.Krb5LoginModule"
+    ).stream().anyMatch((module) -> isSystemClassAvailable(module));
+  }
+
+  /**
+   * In rare cases where different behaviour is performed based on the JVM vendor
+   * this method should be used to test for a unique JVM class provided by the
+   * vendor rather than using the vendor method. For example if one JVM provides a
+   * different Kerberos login module, testing for that login module being loadable
+   * before configuring to use it is preferable to using the vendor data.
+   *
+   * @param className the name of a class in the JVM to test for
+   * @return true if the class is available, false otherwise.
+   */
+  private static boolean isSystemClassAvailable(String className) {
+    return AccessController.doPrivileged((PrivilegedAction<Boolean>) () -> {
+      try {
+        // Using ClassLoader.findSystemClass() instead of
+        // Class.forName(className, false, null) because Class.forName with a null
+        // ClassLoader only looks at the boot ClassLoader with Java 9 and above
+        // which doesn't look at all the modules available to the findSystemClass.
+        new SystemClassAccessor().getSystemClass(className);
+        return true;
+      } catch (Exception ignored) {
+        return false;
+      }
+    });
+  }
 
   public static void main(String[] args) {
     System.out.println(PLATFORM_NAME);
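
Reviewer note: the new javadoc recommends probing for a vendor-specific class rather than branching on the vendor string. A minimal sketch of how a caller might follow that advice; the `KerberosModuleSelector` helper is hypothetical, and only `PlatformName.IBM_JAVA` and the module names come from this patch:

```java
import org.apache.hadoop.util.PlatformName;

public class KerberosModuleSelector {
  /**
   * Prefer the IBM Technology Edition Kerberos module only when
   * PlatformName has confirmed its classes are actually loadable;
   * Semeru and other vendors fall through to the standard module.
   */
  public static String krb5LoginModuleName() {
    return PlatformName.IBM_JAVA
        ? "com.ibm.security.auth.module.Krb5LoginModule"
        : "com.sun.security.auth.module.Krb5LoginModule";
  }
}
```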

hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AvroFSInput.java

Lines changed: 0 additions & 1 deletion
@@ -60,7 +60,6 @@ public AvroFSInput(final FileContext fc, final Path p) throws IOException {
         FS_OPTION_OPENFILE_READ_POLICY_SEQUENTIAL)
         .withFileStatus(status)
         .build());
-    fc.open(p);
   }
 
   @Override

hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SSLFactory.java

Lines changed: 3 additions & 3 deletions
@@ -25,7 +25,7 @@
 import org.apache.hadoop.util.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import static org.apache.hadoop.util.PlatformName.JAVA_VENDOR_NAME;
+import static org.apache.hadoop.util.PlatformName.IBM_JAVA;
 
 import javax.net.ssl.HostnameVerifier;
 import javax.net.ssl.HttpsURLConnection;
@@ -102,11 +102,11 @@ public enum Mode { CLIENT, SERVER }
       "ssl.server.exclude.cipher.list";
 
   public static final String KEY_MANAGER_SSLCERTIFICATE =
-      JAVA_VENDOR_NAME.contains("IBM") ? "ibmX509" :
+      IBM_JAVA ? "ibmX509" :
           KeyManagerFactory.getDefaultAlgorithm();
 
   public static final String TRUST_MANAGER_SSLCERTIFICATE =
-      JAVA_VENDOR_NAME.contains("IBM") ? "ibmX509" :
+      IBM_JAVA ? "ibmX509" :
           TrustManagerFactory.getDefaultAlgorithm();
 
   public static final String KEYSTORES_FACTORY_CLASS_KEY =
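
A quick way to see what these constants now resolve to on a given JVM. This standalone sketch uses only the public constants from this diff plus the standard JSSE factory APIs; the class name is hypothetical:

```java
import javax.net.ssl.KeyManagerFactory;
import javax.net.ssl.TrustManagerFactory;
import org.apache.hadoop.security.ssl.SSLFactory;

public class SslAlgorithmProbe {
  public static void main(String[] args) throws Exception {
    // "ibmX509" only on IBM Java Technology Edition; the JVM default
    // (e.g. "SunX509" or "PKIX") everywhere else, including Semeru.
    KeyManagerFactory kmf =
        KeyManagerFactory.getInstance(SSLFactory.KEY_MANAGER_SSLCERTIFICATE);
    TrustManagerFactory tmf =
        TrustManagerFactory.getInstance(SSLFactory.TRUST_MANAGER_SSLCERTIFICATE);
    System.out.println(kmf.getAlgorithm() + " / " + tmf.getAlgorithm());
  }
}
```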

hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ApplicationClassLoader.java

Lines changed: 1 addition & 1 deletion
@@ -108,7 +108,7 @@ static URL[] constructUrlsFromClasspath(String classpath)
       throws MalformedURLException {
     List<URL> urls = new ArrayList<URL>();
     for (String element : classpath.split(File.pathSeparator)) {
-      if (element.endsWith("/*")) {
+      if (element.endsWith(File.separator + "*")) {
         List<Path> jars = FileUtil.getJarsInDirectory(element);
         if (!jars.isEmpty()) {
           for (Path jar: jars) {
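
The fix matters because classpath wildcard entries are written with the platform separator, so the old hard-coded "/*" check never matched on Windows. A hypothetical demo class illustrating the difference:

```java
import java.io.File;

public class WildcardCheckDemo {
  public static void main(String[] args) {
    // A platform-native wildcard entry: "lib/*" on Unix, "lib\*" on Windows.
    String element = "lib" + File.separator + "*";
    System.out.println(element.endsWith("/*"));                 // old check: false on Windows
    System.out.println(element.endsWith(File.separator + "*")); // new check: true on both
  }
}
```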

hadoop-common-project/hadoop-common/src/main/resources/core-default.xml

Lines changed: 2 additions & 3 deletions
@@ -2168,9 +2168,8 @@ The switch to turn S3A auditing on or off.
 
 <property>
   <name>fs.azure.enable.readahead</name>
-  <value>false</value>
-  <description>Disable readahead/prefetching in AbfsInputStream.
-  See HADOOP-18521</description>
+  <value>true</value>
+  <description>Enable readahead/prefetching in AbfsInputStream.</description>
 </property>
 
 <property>
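
With readahead now on by default, a deployment that still needs the HADOOP-18521 mitigation has to opt out explicitly. A minimal sketch using the standard `Configuration` API; the class name is hypothetical:

```java
import org.apache.hadoop.conf.Configuration;

public class DisableAbfsReadahead {
  /** Build a Configuration that restores the old (disabled) behaviour. */
  public static Configuration withoutReadahead() {
    Configuration conf = new Configuration();
    // Overrides the new default of true from core-default.xml.
    conf.setBoolean("fs.azure.enable.readahead", false);
    return conf;
  }
}
```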

hadoop-common-project/hadoop-common/src/site/markdown/ClusterSetup.md

Lines changed: 11 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,17 @@ Purpose
2222

2323
This document describes how to install and configure Hadoop clusters ranging from a few nodes to extremely large clusters with thousands of nodes. To play with Hadoop, you may first want to install it on a single machine (see [Single Node Setup](./SingleCluster.html)).
2424

25-
This document does not cover advanced topics such as [Security](./SecureMode.html) or High Availability.
25+
This document does not cover advanced topics such as High Availability.
26+
27+
*Important*: all production Hadoop clusters use Kerberos to authenticate callers
28+
and secure access to HDFS data as well as restriction access to computation
29+
services (YARN etc.).
30+
31+
These instructions do not cover integration with any Kerberos services,
32+
-everyone bringing up a production cluster should include connecting to their
33+
organisation's Kerberos infrastructure as a key part of the deployment.
34+
35+
See [Security](./SecureMode.html) for details on how to secure a cluster.
2636

2737
Prerequisites
2838
-------------

hadoop-common-project/hadoop-common/src/site/markdown/SingleCluster.md.vm

Lines changed: 9 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -26,15 +26,22 @@ Purpose
2626

2727
This document describes how to set up and configure a single-node Hadoop installation so that you can quickly perform simple operations using Hadoop MapReduce and the Hadoop Distributed File System (HDFS).
2828

29+
30+
*Important*: all production Hadoop clusters use Kerberos to authenticate callers
31+
and secure access to HDFS data as well as restriction access to computation
32+
services (YARN etc.).
33+
34+
These instructions do not cover integration with any Kerberos services,
35+
-everyone bringing up a production cluster should include connecting to their
36+
organisation's Kerberos infrastructure as a key part of the deployment.
37+
2938
Prerequisites
3039
-------------
3140

3241
$H3 Supported Platforms
3342

3443
* GNU/Linux is supported as a development and production platform. Hadoop has been demonstrated on GNU/Linux clusters with 2000 nodes.
3544

36-
* Windows is also a supported platform but the followings steps are for Linux only. To set up Hadoop on Windows, see [wiki page](http://wiki.apache.org/hadoop/Hadoop2OnWindows).
37-
3845
$H3 Required Software
3946

4047
Required software for Linux include:

hadoop-common-project/hadoop-minikdc/src/test/java/org/apache/hadoop/minikdc/TestMiniKdc.java

Lines changed: 32 additions & 5 deletions
@@ -38,8 +38,35 @@
 import java.util.Arrays;
 
 public class TestMiniKdc extends KerberosSecurityTestcase {
-  private static final boolean IBM_JAVA = System.getProperty("java.vendor")
-      .contains("IBM");
+  private static final boolean IBM_JAVA = shouldUseIbmPackages();
+  // duplicated to avoid cycles in the build
+  private static boolean shouldUseIbmPackages() {
+    final List<String> ibmTechnologyEditionSecurityModules = Arrays.asList(
+        "com.ibm.security.auth.module.JAASLoginModule",
+        "com.ibm.security.auth.module.Win64LoginModule",
+        "com.ibm.security.auth.module.NTLoginModule",
+        "com.ibm.security.auth.module.AIX64LoginModule",
+        "com.ibm.security.auth.module.LinuxLoginModule",
+        "com.ibm.security.auth.module.Krb5LoginModule"
+    );
+
+    if (System.getProperty("java.vendor").contains("IBM")) {
+      return ibmTechnologyEditionSecurityModules
+          .stream().anyMatch((module) -> isSystemClassAvailable(module));
+    }
+
+    return false;
+  }
+
+  private static boolean isSystemClassAvailable(String className) {
+    try {
+      Class.forName(className);
+      return true;
+    } catch (Exception ignored) {
+      return false;
+    }
+  }
+
   @Test
   public void testMiniKdcStart() {
     MiniKdc kdc = getKdc();
@@ -117,9 +144,9 @@ public AppConfigurationEntry[] getAppConfigurationEntry(String name) {
       options.put("debug", "true");
 
       return new AppConfigurationEntry[]{
-          new AppConfigurationEntry(getKrb5LoginModuleName(),
-              AppConfigurationEntry.LoginModuleControlFlag.REQUIRED,
-              options)};
+        new AppConfigurationEntry(getKrb5LoginModuleName(),
+          AppConfigurationEntry.LoginModuleControlFlag.REQUIRED,
+          options)};
     }
   }

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -72,6 +72,8 @@
7272
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_OUTLIERS_REPORT_INTERVAL_KEY;
7373
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SLOWDISK_LOW_THRESHOLD_MS_DEFAULT;
7474
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SLOWDISK_LOW_THRESHOLD_MS_KEY;
75+
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_MAX_SLOWDISKS_TO_EXCLUDE_DEFAULT;
76+
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_MAX_SLOWDISKS_TO_EXCLUDE_KEY;
7577
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_STARTUP_KEY;
7678
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY;
7779
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT;
@@ -353,6 +355,7 @@ public class DataNode extends ReconfigurableBase
353355
DFS_DATANODE_OUTLIERS_REPORT_INTERVAL_KEY,
354356
DFS_DATANODE_MIN_OUTLIER_DETECTION_DISKS_KEY,
355357
DFS_DATANODE_SLOWDISK_LOW_THRESHOLD_MS_KEY,
358+
DFS_DATANODE_MAX_SLOWDISKS_TO_EXCLUDE_KEY,
356359
FS_DU_INTERVAL_KEY,
357360
FS_GETSPACEUSED_JITTER_KEY,
358361
FS_GETSPACEUSED_CLASSNAME));
@@ -699,6 +702,7 @@ public String reconfigurePropertyImpl(String property, String newVal)
699702
case DFS_DATANODE_OUTLIERS_REPORT_INTERVAL_KEY:
700703
case DFS_DATANODE_MIN_OUTLIER_DETECTION_DISKS_KEY:
701704
case DFS_DATANODE_SLOWDISK_LOW_THRESHOLD_MS_KEY:
705+
case DFS_DATANODE_MAX_SLOWDISKS_TO_EXCLUDE_KEY:
702706
return reconfSlowDiskParameters(property, newVal);
703707
case FS_DU_INTERVAL_KEY:
704708
case FS_GETSPACEUSED_JITTER_KEY:
@@ -877,6 +881,12 @@ private String reconfSlowDiskParameters(String property, String newVal)
877881
Long.parseLong(newVal));
878882
result = Long.toString(threshold);
879883
diskMetrics.setLowThresholdMs(threshold);
884+
} else if (property.equals(DFS_DATANODE_MAX_SLOWDISKS_TO_EXCLUDE_KEY)) {
885+
checkNotNull(diskMetrics, "DataNode disk stats may be disabled.");
886+
int maxSlowDisksToExclude = (newVal == null ?
887+
DFS_DATANODE_MAX_SLOWDISKS_TO_EXCLUDE_DEFAULT : Integer.parseInt(newVal));
888+
result = Integer.toString(maxSlowDisksToExclude);
889+
diskMetrics.setMaxSlowDisksToExclude(maxSlowDisksToExclude);
880890
}
881891
LOG.info("RECONFIGURE* changed {} to {}", property, newVal);
882892
return result;
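
Together with the getter/setter added to DataNodeDiskMetrics below, this makes the exclusion cap tunable on a live DataNode. A minimal sketch, assuming a started `DataNode` handle `dn`; the helper class is hypothetical and the calls mirror TestDataNodeReconfiguration further down:

```java
import org.apache.hadoop.conf.ReconfigurationException;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_MAX_SLOWDISKS_TO_EXCLUDE_KEY;

public class SlowDiskReconfigSketch {
  /** Raise or lower the slow-disk exclusion cap without restarting the DataNode. */
  static void capSlowDiskExclusion(DataNode dn, int max) throws ReconfigurationException {
    dn.reconfigureProperty(DFS_DATANODE_MAX_SLOWDISKS_TO_EXCLUDE_KEY, Integer.toString(max));
    // The new value is visible immediately via the volatile field.
    assert dn.getDiskMetrics().getMaxSlowDisksToExclude() == max;
  }
}
```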

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeDiskMetrics.java

Lines changed: 9 additions & 1 deletion
@@ -80,7 +80,7 @@ public class DataNodeDiskMetrics {
   /**
    * The number of slow disks that needs to be excluded.
    */
-  private int maxSlowDisksToExclude;
+  private volatile int maxSlowDisksToExclude;
   /**
    * List of slow disks that need to be excluded.
    */
@@ -274,6 +274,14 @@ public List<String> getSlowDisksToExclude() {
     return slowDisksToExclude;
   }
 
+  public int getMaxSlowDisksToExclude() {
+    return maxSlowDisksToExclude;
+  }
+
+  public void setMaxSlowDisksToExclude(int maxSlowDisksToExclude) {
+    this.maxSlowDisksToExclude = maxSlowDisksToExclude;
+  }
+
   public void setLowThresholdMs(long thresholdMs) {
     Preconditions.checkArgument(thresholdMs > 0,
         DFS_DATANODE_SLOWDISK_LOW_THRESHOLD_MS_KEY + " should be larger than 0");

hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/moment.min.js

Lines changed: 2 additions & 2 deletions
Some generated files are not rendered by default.

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeReconfiguration.java

Lines changed: 8 additions & 1 deletion
@@ -45,6 +45,7 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_MIN_OUTLIER_DETECTION_DISKS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_OUTLIERS_REPORT_INTERVAL_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SLOWDISK_LOW_THRESHOLD_MS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_MAX_SLOWDISKS_TO_EXCLUDE_KEY;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNull;
@@ -636,13 +637,15 @@ public void testSlowDiskParameters() throws ReconfigurationException, IOException {
     String[] slowDisksParameters2 = {
         DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_KEY,
         DFS_DATANODE_MIN_OUTLIER_DETECTION_DISKS_KEY,
-        DFS_DATANODE_SLOWDISK_LOW_THRESHOLD_MS_KEY};
+        DFS_DATANODE_SLOWDISK_LOW_THRESHOLD_MS_KEY,
+        DFS_DATANODE_MAX_SLOWDISKS_TO_EXCLUDE_KEY};
     for (String parameter : slowDisksParameters2) {
       dn.reconfigureProperty(parameter, "99");
     }
     // Assert diskMetrics.
     assertEquals(99, dn.getDiskMetrics().getMinOutlierDetectionDisks());
     assertEquals(99, dn.getDiskMetrics().getLowThresholdMs());
+    assertEquals(99, dn.getDiskMetrics().getMaxSlowDisksToExclude());
     // Assert dnConf.
     assertTrue(dn.getDnConf().diskStatsEnabled);
     // Assert profilingEventHook.
@@ -673,12 +676,16 @@ public void testSlowDiskParameters() throws ReconfigurationException, IOException {
     dn.reconfigureProperty(DFS_DATANODE_FILEIO_PROFILING_SAMPLING_PERCENTAGE_KEY, "1");
     dn.reconfigureProperty(DFS_DATANODE_MIN_OUTLIER_DETECTION_DISKS_KEY, null);
     dn.reconfigureProperty(DFS_DATANODE_SLOWDISK_LOW_THRESHOLD_MS_KEY, null);
+    dn.reconfigureProperty(DFS_DATANODE_MAX_SLOWDISKS_TO_EXCLUDE_KEY, null);
     assertEquals(String.format("expect %s is not configured",
         DFS_DATANODE_MIN_OUTLIER_DETECTION_DISKS_KEY), null,
         dn.getConf().get(DFS_DATANODE_MIN_OUTLIER_DETECTION_DISKS_KEY));
     assertEquals(String.format("expect %s is not configured",
         DFS_DATANODE_SLOWDISK_LOW_THRESHOLD_MS_KEY), null,
         dn.getConf().get(DFS_DATANODE_SLOWDISK_LOW_THRESHOLD_MS_KEY));
+    assertEquals(String.format("expect %s is not configured",
+        DFS_DATANODE_MAX_SLOWDISKS_TO_EXCLUDE_KEY), null,
+        dn.getConf().get(DFS_DATANODE_MAX_SLOWDISKS_TO_EXCLUDE_KEY));
     assertEquals(DFS_DATANODE_MIN_OUTLIER_DETECTION_DISKS_DEFAULT,
         dn.getDiskMetrics().getSlowDiskDetector().getMinOutlierDetectionNodes());
     assertEquals(DFS_DATANODE_SLOWDISK_LOW_THRESHOLD_MS_DEFAULT,

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java

Lines changed: 1 addition & 1 deletion
@@ -345,7 +345,7 @@ public void testDataNodeGetReconfigurableProperties() throws IOException, InterruptedException {
     final List<String> outs = Lists.newArrayList();
     final List<String> errs = Lists.newArrayList();
     getReconfigurableProperties("datanode", address, outs, errs);
-    assertEquals(19, outs.size());
+    assertEquals(20, outs.size());
     assertEquals(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, outs.get(1));
   }

hadoop-project/pom.xml

Lines changed: 1 addition & 1 deletion
@@ -50,7 +50,7 @@
     <!-- Version number for xerces used by JDiff -->
     <xerces.jdiff.version>2.12.2</xerces.jdiff.version>
 
-    <kafka.version>2.8.1</kafka.version>
+    <kafka.version>2.8.2</kafka.version>
 
     <commons-daemon.version>1.0.13</commons-daemon.version>
 
