Skip to content

Commit 719b53a

Browse files
author
Inigo Goiri
committed
HDFS-15351. Blocks scheduled count was wrong on truncate. Contributed by hemanthboyina.
1 parent 785b1de commit 719b53a

File tree

2 files changed

+64
-1
lines changed

2 files changed

+64
-1
lines changed

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1287,7 +1287,14 @@ public LocatedBlock convertLastBlockToUnderConstruction(
12871287
neededReconstruction.remove(lastBlock, replicas.liveReplicas(),
12881288
replicas.readOnlyReplicas(),
12891289
replicas.outOfServiceReplicas(), getExpectedRedundancyNum(lastBlock));
1290-
pendingReconstruction.remove(lastBlock);
1290+
PendingBlockInfo remove = pendingReconstruction.remove(lastBlock);
1291+
if (remove != null) {
1292+
List<DatanodeStorageInfo> locations = remove.getTargets();
1293+
DatanodeStorageInfo[] removedBlockTargets =
1294+
new DatanodeStorageInfo[locations.size()];
1295+
locations.toArray(removedBlockTargets);
1296+
DatanodeStorageInfo.decrementBlocksScheduled(removedBlockTargets);
1297+
}
12911298

12921299
// remove this block from the list of pending blocks to be deleted.
12931300
for (DatanodeStorageInfo storage : targets) {

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java

Lines changed: 56 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -202,4 +202,60 @@ public void testScheduledBlocksCounterDecrementOnDeletedBlock()
202202
}
203203
}
204204

205+
  /**
   * Test the blocks-scheduled counter when truncating a file that has a
   * block in pending reconstruction (HDFS-15351).
   *
   * Scenario: a datanode is stopped so a new file is under-replicated; the
   * namenode schedules reconstruction work for the missing replica; the file
   * is then truncated, which removes the block from pending reconstruction.
   * The counter of blocks scheduled on the target datanodes must drop back
   * to zero — before the fix in this commit it stayed incremented.
   *
   * @throws Exception if cluster setup or any filesystem operation fails
   */
  @Test
  public void testBlocksScheduledCounterOnTruncate() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    // Short redundancy-monitor interval so reconstruction work is computed
    // promptly during the test.
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    cluster.waitActive();
    BlockManager bm = cluster.getNamesystem().getBlockManager();
    try {
      DistributedFileSystem dfs = cluster.getFileSystem();
      // 1. stop a datanode, so the file created next is under-replicated
      cluster.stopDataNode(0);

      // 2. create a file with replication 3 while only 2 datanodes are live
      Path filePath = new Path("/tmp");
      DFSTestUtil.createFile(dfs, filePath, 1024, (short) 3, 0L);

      DatanodeManager datanodeManager =
          cluster.getNamesystem().getBlockManager().getDatanodeManager();
      ArrayList<DatanodeDescriptor> dnList =
          new ArrayList<DatanodeDescriptor>();
      // NOTE(review): the same list is passed for both the live and dead
      // out-params, collecting all datanodes into dnList.
      datanodeManager.fetchDatanodes(dnList, dnList, false);

      // 3. restart the stopped datanode so it is a valid reconstruction
      // target
      cluster.restartDataNode(0);

      // 4. disable the heartbeats so the scheduled-blocks counters are not
      // reset behind the test's back before the assertion
      for (DataNode dn : cluster.getDataNodes()) {
        DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
      }

      // Force reconstruction work to be computed under the namesystem write
      // lock, then verify exactly one block is pending reconstruction.
      cluster.getNamesystem().writeLock();
      try {
        BlockManagerTestUtil.computeAllPendingWork(bm);
        BlockManagerTestUtil.updateState(bm);
        assertEquals(1L, bm.getPendingReconstructionBlocksCount());
      } finally {
        cluster.getNamesystem().writeUnlock();
      }

      // 5. truncate the file whose block exists in pending reconstruction;
      // this must also decrement the blocks-scheduled count on the targets
      dfs.truncate(filePath, 10);
      int blocksScheduled = 0;
      for (DatanodeDescriptor descriptor : dnList) {
        if (descriptor.getBlocksScheduled() != 0) {
          blocksScheduled += descriptor.getBlocksScheduled();
        }
      }
      // No datanode should still count a scheduled block after the truncate.
      assertEquals(0, blocksScheduled);
    } finally {
      cluster.shutdown();
    }
  }
205261
}

0 commit comments

Comments
 (0)