Commit 5a20358

Merge branch 'trunk' into testFixes
2 parents: 0dc4ce0 + 7a7b346

10 files changed: +83 −18 lines changed

hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfs/CMakeLists.txt

Lines changed: 11 additions & 1 deletion
@@ -32,13 +32,23 @@ include_directories(
     ../libhdfspp/lib
 )
 
-hadoop_add_dual_library(hdfs
+set(HDFS_SOURCES
     exception.c
     jni_helper.c
     hdfs.c
     jclasses.c
     ${OS_DIR}/mutexes.c
     ${OS_DIR}/thread_local_storage.c
+)
+# We want to create an object library for hdfs
+# so that we can reuse it for the targets
+# (like get_jni_test), where we don't wish to
+# link to hdfs's publicly linked libraries
+# (like jvm)
+add_library(hdfs_obj OBJECT ${HDFS_SOURCES})
+set_target_properties(hdfs_obj PROPERTIES POSITION_INDEPENDENT_CODE ON)
+hadoop_add_dual_library(hdfs
+    $<TARGET_OBJECTS:hdfs_obj>
     $<TARGET_OBJECTS:x_platform_obj>
     $<TARGET_OBJECTS:x_platform_obj_c_api>
 )

hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/CMakeLists.txt

Lines changed: 12 additions & 1 deletion
@@ -74,8 +74,19 @@ add_executable(uri_test uri_test.cc)
 target_link_libraries(uri_test common gmock_main ${CMAKE_THREAD_LIBS_INIT})
 add_memcheck_test(uri uri_test)
 
+# We want to link to all the libraries of hdfs_static library,
+# except jvm.lib since we want to override some of the functions
+# provided by jvm.lib.
+get_target_property(HDFS_STATIC_LIBS_NO_JVM hdfs_static LINK_LIBRARIES)
+list(REMOVE_ITEM HDFS_STATIC_LIBS_NO_JVM ${JAVA_JVM_LIBRARY})
 add_executable(get_jni_test libhdfs_getjni_test.cc)
-target_link_libraries(get_jni_test gmock_main hdfs_static ${CMAKE_THREAD_LIBS_INIT})
+target_link_libraries(get_jni_test
+    gmock_main
+    $<TARGET_OBJECTS:hdfs_obj>
+    $<TARGET_OBJECTS:x_platform_obj>
+    $<TARGET_OBJECTS:x_platform_obj_c_api>
+    ${HDFS_STATIC_LIBS_NO_JVM}
+    ${CMAKE_THREAD_LIBS_INIT})
 add_memcheck_test(get_jni get_jni_test)
 
 add_executable(remote_block_reader_test remote_block_reader_test.cc)

hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/libhdfspp/tests/libhdfs_getjni_test.cc

Lines changed: 15 additions & 2 deletions
@@ -20,13 +20,26 @@
 #include <hdfs/hdfs.h>
 #include <jni.h>
 
+#ifdef WIN32
+#define DECLSPEC
+#else
+// Windows cribs when this is declared in the function definition,
+// However, Linux needs it.
+#define DECLSPEC _JNI_IMPORT_OR_EXPORT_
+#endif
+
+// hook the jvm runtime function. expect always failure
+DECLSPEC jint JNICALL JNI_GetDefaultJavaVMInitArgs(void*) {
+  return 1;
+}
+
 // hook the jvm runtime function. expect always failure
-_JNI_IMPORT_OR_EXPORT_ jint JNICALL JNI_GetDefaultJavaVMInitArgs(void*) {
+DECLSPEC jint JNICALL JNI_CreateJavaVM(JavaVM**, void**, void*) {
   return 1;
 }
 
 // hook the jvm runtime function. expect always failure
-_JNI_IMPORT_OR_EXPORT_ jint JNICALL JNI_CreateJavaVM(JavaVM**, void**, void*) {
+DECLSPEC jint JNICALL JNI_GetCreatedJavaVMs(JavaVM**, jsize, jsize*) {
   return 1;
 }
 

hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java

Lines changed: 7 additions & 0 deletions
@@ -25,6 +25,7 @@
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.Optional;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeUnit;
 import java.util.function.Function;
@@ -476,6 +477,7 @@ private String getNodesImpl(final DatanodeReportType type) {
       innerinfo.put("infoSecureAddr", node.getInfoSecureAddr());
       innerinfo.put("xferaddr", node.getXferAddr());
       innerinfo.put("location", node.getNetworkLocation());
+      innerinfo.put("uuid", Optional.ofNullable(node.getDatanodeUuid()).orElse(""));
       innerinfo.put("lastContact", getLastContact(node));
       innerinfo.put("usedSpace", node.getDfsUsed());
       innerinfo.put("adminState", node.getAdminState().toString());
@@ -492,6 +494,7 @@ private String getNodesImpl(final DatanodeReportType type) {
       innerinfo.put("volfails", -1); // node.getVolumeFailures()
       innerinfo.put("blockPoolUsedPercentStdDev",
           Util.getBlockPoolUsedPercentStdDev(storageReports));
+      innerinfo.put("lastBlockReport", getLastBlockReport(node));
       info.put(node.getXferAddrWithHostname(),
           Collections.unmodifiableMap(innerinfo));
     }
@@ -795,6 +798,10 @@ private long getLastContact(DatanodeInfo node) {
     return (now() - node.getLastUpdate()) / 1000;
   }
 
+  private long getLastBlockReport(DatanodeInfo node) {
+    return (now() - node.getLastBlockReportTime()) / 60000;
+  }
+
   /////////////////////////////////////////////////////////
   // NameNodeStatusMXBean
   /////////////////////////////////////////////////////////
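Note the differing units in the two helpers: getLastContact divides the millisecond delta by 1000 to report seconds, while the new getLastBlockReport divides by 60000 to report minutes, so the router's live-node report lines up with the NameNode's own (the test in the next file asserts the two agree key by key). A minimal standalone sketch of the arithmetic, with hypothetical timestamps standing in for the DatanodeInfo getters:

// Minimal sketch of the unit conversions above; the timestamps are
// hypothetical stand-ins for node.getLastUpdate() and
// node.getLastBlockReportTime(), both in milliseconds.
public class DatanodeReportAgeSketch {
  public static void main(String[] args) {
    long now = System.currentTimeMillis();
    long lastUpdate = now - 12_000;            // heartbeat 12 seconds ago
    long lastBlockReportTime = now - 180_000;  // block report 3 minutes ago

    long lastContact = (now - lastUpdate) / 1000;               // seconds -> 12
    long lastBlockReport = (now - lastBlockReportTime) / 60000; // minutes -> 3

    System.out.println("lastContact=" + lastContact
        + "s lastBlockReport=" + lastBlockReport + "m");
  }
}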

hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java

Lines changed: 18 additions & 0 deletions
@@ -135,6 +135,8 @@
 import org.apache.hadoop.service.Service.STATE;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.LambdaTestUtils;
+
+import org.codehaus.jettison.json.JSONArray;
 import org.codehaus.jettison.json.JSONException;
 import org.codehaus.jettison.json.JSONObject;
 import org.junit.After;
@@ -1880,6 +1882,22 @@ public void testNamenodeMetrics() throws Exception {
     JSONObject jsonObject = new JSONObject(jsonString0);
     assertEquals(NUM_SUBCLUSTERS * NUM_DNS, jsonObject.names().length());
 
+    JSONObject jsonObjectNn =
+        new JSONObject(cluster.getRandomNamenode().getNamenode().getNamesystem().getLiveNodes());
+    // DN report by NN and router should be the same
+    String randomDn = (String) jsonObjectNn.names().get(0);
+    JSONObject randomReportNn = jsonObjectNn.getJSONObject(randomDn);
+    JSONObject randomReportRouter = jsonObject.getJSONObject(randomDn);
+    JSONArray keys = randomReportNn.names();
+    for (int i = 0; i < keys.length(); i++) {
+      String key = keys.getString(i);
+      // Skip the 2 keys that always return -1
+      if (key.equals("blockScheduled") || key.equals("volfails")) {
+        continue;
+      }
+      assertEquals(randomReportRouter.get(key), randomReportNn.get(key));
+    }
+
     // We should be caching this information
     String jsonString1 = metrics.getLiveNodes();
     assertEquals(jsonString0, jsonString1);

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java

Lines changed: 4 additions & 3 deletions
@@ -80,15 +80,16 @@ protected StringBuilder initialValue() {
   private static final BlockPlacementStatus ONE_RACK_PLACEMENT =
       new BlockPlacementStatusDefault(1, 1, 1);
 
-  private enum NodeNotChosenReason {
+  protected enum NodeNotChosenReason {
     NOT_IN_SERVICE("the node is not in service"),
     NODE_STALE("the node is stale"),
     NODE_TOO_BUSY("the node is too busy"),
     NODE_TOO_BUSY_BY_VOLUME("the node is too busy based on volume load"),
     TOO_MANY_NODES_ON_RACK("the rack has too many chosen nodes"),
     NOT_ENOUGH_STORAGE_SPACE("not enough storage space to place the block"),
     NO_REQUIRED_STORAGE_TYPE("required storage types are unavailable"),
-    NODE_SLOW("the node is too slow");
+    NODE_SLOW("the node is too slow"),
+    NODE_NOT_CONFORM_TO_UD("the node doesn't conform to upgrade domain policy");
 
     private final String text;
 
@@ -980,7 +981,7 @@ private static void logNodeIsNotChosen(DatanodeDescriptor node,
     logNodeIsNotChosen(node, reason, null);
   }
 
-  private static void logNodeIsNotChosen(DatanodeDescriptor node,
+  protected static void logNodeIsNotChosen(DatanodeDescriptor node,
       NodeNotChosenReason reason, String reasonDetails) {
     assert reason != null;
     if (LOG.isDebugEnabled()) {

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithUpgradeDomain.java

Lines changed: 3 additions & 0 deletions
@@ -74,6 +74,9 @@ protected boolean isGoodDatanode(DatanodeDescriptor node,
       Set<String> upgradeDomains = getUpgradeDomains(results);
       if (upgradeDomains.contains(node.getUpgradeDomain())) {
         isGoodTarget = false;
+        logNodeIsNotChosen(node, NodeNotChosenReason.NODE_NOT_CONFORM_TO_UD,
+            "(The node's upgrade domain: " + node.getUpgradeDomain() +
+            " is already chosen)");
       }
     }
   }

hadoop-project/pom.xml

Lines changed: 0 additions & 10 deletions
@@ -2754,16 +2754,6 @@
         </dependencies>
       </dependencyManagement>
     </profile>
-    <!-- We added this profile to support compilation for JDK 9 and above. -->
-    <profile>
-      <id>java9</id>
-      <activation>
-        <jdk>[9,)</jdk>
-      </activation>
-      <properties>
-        <maven.compiler.release>${javac.version}</maven.compiler.release>
-      </properties>
-    </profile>
   </profiles>
 
   <repositories>

hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/CopyFromLocalOperation.java

Lines changed: 1 addition & 1 deletion
@@ -130,7 +130,7 @@ public CopyFromLocalOperation(
     this.callbacks = callbacks;
     this.deleteSource = deleteSource;
     this.overwrite = overwrite;
-    this.source = source;
+    this.source = source.toUri().getScheme() == null ? new Path("file://", source) : source;
     this.destination = destination;
 
     // Capacity of 1 is a safe default for now since transfer manager can also
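The one-line fix qualifies a schemeless source Path as file:// before the copy starts, so bare local paths no longer trip up the operation; the test in the next file exercises exactly this case. A minimal standalone sketch of the same qualification (assumes only hadoop-common on the classpath; the path literal is illustrative):

import org.apache.hadoop.fs.Path;

// Minimal sketch of the scheme-defaulting above, runnable with just
// hadoop-common on the classpath.
public class SchemeDefaultSketch {
  public static void main(String[] args) {
    Path source = new Path("/tmp/data.txt");
    // A Path built from a bare local path carries no scheme.
    System.out.println(source.toUri().getScheme()); // null
    // Same expression as the fix: default schemeless paths to file://.
    Path qualified = source.toUri().getScheme() == null
        ? new Path("file://", source)
        : source;
    System.out.println(qualified.toUri().getScheme()); // file
  }
}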

hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/ITestS3ACopyFromLocalFile.java

Lines changed: 12 additions & 0 deletions
@@ -19,6 +19,7 @@
 package org.apache.hadoop.fs.s3a;
 
 import java.io.File;
+import java.io.IOException;
 import java.util.Arrays;
 import java.util.Collection;
 
@@ -107,4 +108,15 @@ public void testOnlyFromLocal() throws Throwable {
     intercept(IllegalArgumentException.class,
         () -> getFileSystem().copyFromLocalFile(true, true, dest, dest));
   }
+
+  @Test
+  public void testCopyFromLocalWithNoFileScheme() throws IOException {
+    describe("Copying from local file with no file scheme to remote s3 destination");
+    File source = createTempFile("tempData");
+    Path dest = path(getMethodName());
+
+    Path sourcePathWithOutScheme = new Path(source.toURI().getPath());
+    assertNull(sourcePathWithOutScheme.toUri().getScheme());
+    getFileSystem().copyFromLocalFile(true, true, sourcePathWithOutScheme, dest);
+  }
 }
