Skip to content

Commit c74ecd9

Browse files
committed
fix
1 parent 1a8e790 commit c74ecd9

File tree

3 files changed

+12
-4
lines changed

3 files changed

+12
-4
lines changed

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2310,7 +2310,8 @@ BlockReconstructionWork scheduleReconstruction(BlockInfo block,
23102310
byte[] newIndices = new byte[liveBlockIndices.size()];
23112311
adjustSrcNodesAndIndices((BlockInfoStriped)block,
23122312
srcNodes, liveBlockIndices, newSrcNodes, newIndices);
2313-
byte[] liveAndDecommissioningBusyIndices = new byte[liveAndDecommissioningBusyBlockIndices.size()];
2313+
byte[] liveAndDecommissioningBusyIndices =
2314+
new byte[liveAndDecommissioningBusyBlockIndices.size()];
23142315
for (int i = 0; i < liveAndDecommissioningBusyBlockIndices.size(); i++) {
23152316
liveAndDecommissioningBusyIndices[i] = liveAndDecommissioningBusyBlockIndices.get(i);
23162317
}

hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1233,6 +1233,14 @@
12331233
</description>
12341234
</property>
12351235

1236+
<property>
1237+
<name>dfs.namenode.decommission.ec.reconstruction.enable</name>
1238+
<value>false</value>
1239+
<description>
1240+
Whether to use EC reconstruction to copy an EC block when the related node is busy.
1241+
</description>
1242+
</property>
1243+
12361244
<property>
12371245
<name>dfs.namenode.redundancy.interval.seconds</name>
12381246
<value>3</value>

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommissionWithStriped.java

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -63,7 +63,6 @@
6363
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
6464
import org.apache.hadoop.security.token.Token;
6565
import org.apache.hadoop.test.GenericTestUtils;
66-
import org.apache.hadoop.util.Lists;
6766
import org.junit.After;
6867
import org.junit.Assert;
6968
import org.junit.Before;
@@ -1215,7 +1214,7 @@ public void testDecommissionBusyNodeWithECReconstruction1() throws Exception {
12151214
assertTrue(newStorageInfos.size() >= 2);
12161215
DatanodeStorageInfo decommissionedNode = null;
12171216
int alive = 0;
1218-
for (int i = 0; i < newStorageInfos.size();i ++) {
1217+
for (int i = 0; i < newStorageInfos.size(); i++) {
12191218
DatanodeStorageInfo datanodeStorageInfo = newStorageInfos.get(i);
12201219
if (datanodeStorageInfo.getDatanodeDescriptor().isDecommissioned()) {
12211220
decommissionedNode = datanodeStorageInfo;
@@ -1286,7 +1285,7 @@ public void testDecommissionBusyNodeWithECReconstruction2() throws Exception {
12861285
assertTrue(newStorageInfos.size() >= 4);
12871286
int alive = 0;
12881287
int decommissioned = 0;
1289-
for (int i = 0; i < newStorageInfos.size();i ++) {
1288+
for (int i = 0; i < newStorageInfos.size(); i++) {
12901289
DatanodeStorageInfo newDatanodeStorageInfo = newStorageInfos.get(i);
12911290
if (newDatanodeStorageInfo.getDatanodeDescriptor().isDecommissioned()) {
12921291
assertTrue(newDatanodeStorageInfo.equals(storageInfos.get(0)) ||

0 commit comments

Comments
 (0)