Commit 5c6be25

HDFS-529. Use BlockInfo instead of Block to avoid redundant block searches in BlockManager. Contributed by Konstantin Shvachko.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/hdfs/trunk@801500 13f79535-47bb-0310-9956-ffa450edef68
1 parent a51727d commit 5c6be25

4 files changed, 30 insertions(+), 12 deletions(-)
CHANGES.txt

Lines changed: 4 additions & 0 deletions

@@ -83,7 +83,11 @@ Trunk (unreleased changes)
 
     HDFS-527. Remove/deprecate unnecessary DFSClient constructors. (szetszwo)
 
+    HDFS-529. Use BlockInfo instead of Block to avoid redundant block searches
+    in BlockManager. (shv)
+
   BUG FIXES
+
     HDFS-76. Better error message to users when commands fail because of
     lack of quota. Allow quota to be set even if the limit is lower than
     current consumption. (Boris Shkolnik via rangadi)

src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java

Lines changed: 5 additions & 5 deletions

@@ -893,11 +893,11 @@ private Block addStoredBlock(final Block block,
       } else {
         // new replica is larger in size than existing block.
         // Mark pre-existing replicas as corrupt.
-        int numNodes = blocksMap.numNodes(block);
+        int numNodes = storedBlock.numNodes();
         int count = 0;
         DatanodeDescriptor nodes[] = new DatanodeDescriptor[numNodes];
-        Iterator<DatanodeDescriptor> it = blocksMap.nodeIterator(block);
-        for (; it != null && it.hasNext(); ) {
+        Iterator<DatanodeDescriptor> it = blocksMap.nodeIterator(storedBlock);
+        while (it.hasNext()) {
           DatanodeDescriptor dd = it.next();
           if (!dd.equals(node)) {
             nodes[count++] = dd;

@@ -1262,9 +1262,9 @@ int getActiveBlockCount() {
     return blocksMap.size() - (int)pendingDeletionBlocksCount;
   }
 
-  DatanodeDescriptor[] getNodes(Block block) {
+  DatanodeDescriptor[] getNodes(BlockInfo block) {
     DatanodeDescriptor[] nodes =
-      new DatanodeDescriptor[blocksMap.numNodes(block)];
+      new DatanodeDescriptor[block.numNodes()];
     Iterator<DatanodeDescriptor> it = blocksMap.nodeIterator(block);
     for (int i = 0; it != null && it.hasNext(); i++) {
       nodes[i] = it.next();
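
The addStoredBlock loop collects every replica location except the node that just reported the block; once it operates on the already-retrieved storedBlock, the iterator is known to be non-null, so the defensive for-loop becomes a plain while. A hedged sketch of that collection pattern, with strings standing in for DatanodeDescriptor:

    import java.util.Arrays;
    import java.util.Iterator;
    import java.util.List;

    class ReplicaScan {
      // Collect every location except the one that just reported the block.
      static String[] othersThan(List<String> locations, String reportingNode) {
        String[] nodes = new String[locations.size()];
        int count = 0;
        Iterator<String> it = locations.iterator();
        while (it.hasNext()) {          // iterator is never null: no guard needed
          String dd = it.next();
          if (!dd.equals(reportingNode)) {
            nodes[count++] = dd;
          }
        }
        return Arrays.copyOf(nodes, count);
      }

      public static void main(String[] args) {
        List<String> replicas = Arrays.asList("dn1", "dn2", "dn3");
        System.out.println(Arrays.toString(othersThan(replicas, "dn2")));
        // -> [dn1, dn3]
      }
    }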

src/java/org/apache/hadoop/hdfs/server/namenode/BlocksMap.java

Lines changed: 13 additions & 2 deletions

@@ -108,9 +108,20 @@ BlockInfo getStoredBlock(Block b) {
     return map.get(b);
   }
 
-  /** Returned Iterator does not support. */
+  /**
+   * Searches for the block in the BlocksMap and
+   * returns Iterator that iterates through the nodes the block belongs to.
+   */
   Iterator<DatanodeDescriptor> nodeIterator(Block b) {
-    return new NodeIterator(map.get(b));
+    return nodeIterator(map.get(b));
+  }
+
+  /**
+   * For a block that has already been retrieved from the BlocksMap
+   * returns Iterator that iterates through the nodes the block belongs to.
+   */
+  Iterator<DatanodeDescriptor> nodeIterator(BlockInfo storedBlock) {
+    return new NodeIterator(storedBlock);
   }
 
   /** counts number of containing nodes. Better than using iterator. */
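
The two nodeIterator overloads form a small delegation pattern: the Block variant performs the one unavoidable map search and forwards to the BlockInfo variant, which constructs the iterator; callers that already hold a BlockInfo use the second overload and skip the search entirely. A sketch of the same shape under simplified assumptions (String keys in place of Block, a String[] in place of BlockInfo):

    import java.util.Arrays;
    import java.util.Collections;
    import java.util.HashMap;
    import java.util.Iterator;
    import java.util.Map;

    class OverloadDelegation {
      private final Map<String, String[]> map = new HashMap<>();

      // Searches the map, then delegates to the no-search overload.
      Iterator<String> nodeIterator(String blockId) {
        return nodeIterator(map.get(blockId));
      }

      // For a value already retrieved from the map: no search at all.
      Iterator<String> nodeIterator(String[] storedNodes) {
        return storedNodes == null
            ? Collections.<String>emptyIterator()
            : Arrays.asList(storedNodes).iterator();
      }

      public static void main(String[] args) {
        OverloadDelegation d = new OverloadDelegation();
        d.map.put("blk_42", new String[] {"dn1", "dn2"});
        String[] inHand = d.map.get("blk_42");        // one search...
        Iterator<String> it = d.nodeIterator(inHand); // ...then none
        while (it.hasNext()) System.out.println(it.next());
      }
    }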

src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

Lines changed: 8 additions & 5 deletions

@@ -1030,13 +1030,16 @@ LocatedBlock appendFile(String src, String holder, String clientMachine
     synchronized (this) {
       INodeFileUnderConstruction file = (INodeFileUnderConstruction)dir.getFileINode(src);
 
-      Block[] blocks = file.getBlocks();
+      BlockInfo[] blocks = file.getBlocks();
       if (blocks != null && blocks.length > 0) {
-        Block last = blocks[blocks.length-1];
+        BlockInfo last = blocks[blocks.length-1];
+        // this is a redundant search in blocksMap
+        // should be resolved by the new implementation of append
        BlockInfo storedBlock = blockManager.getStoredBlock(last);
+        assert last == storedBlock : "last block should be in the blocksMap";
         if (file.getPreferredBlockSize() > storedBlock.getNumBytes()) {
           long fileLength = file.computeContentSummary().getLength();
-          DatanodeDescriptor[] targets = blockManager.getNodes(last);
+          DatanodeDescriptor[] targets = blockManager.getNodes(storedBlock);
           // remove the replica locations of this block from the node
           for (int i = 0; i < targets.length; i++) {
             targets[i].removeBlock(storedBlock);

@@ -1578,8 +1581,8 @@ void internalReleaseLease(Lease lease, String src) throws IOException {
     }
     // setup the Inode.targets for the last block from the blockManager
     //
-    Block[] blocks = pendingFile.getBlocks();
-    Block last = blocks[blocks.length-1];
+    BlockInfo[] blocks = pendingFile.getBlocks();
+    BlockInfo last = blocks[blocks.length-1];
     DatanodeDescriptor[] targets = blockManager.getNodes(last);
     pendingFile.setTargets(targets);
   }
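
The new assert checks reference identity (==), not value equality: because getBlocks() now returns BlockInfo, the last block the file holds should be the very object stored in the blocksMap, which is exactly what makes the getStoredBlock search redundant, as the added comment notes. A small illustrative sketch of that identity invariant (generic map and values, not the HDFS types):

    import java.util.HashMap;
    import java.util.Map;

    class IdentityCheck {
      public static void main(String[] args) {
        Map<Long, StringBuilder> blocksMap = new HashMap<>();
        StringBuilder stored = new StringBuilder("blk_42");
        blocksMap.put(42L, stored);

        StringBuilder last = stored;                  // what the file holds
        StringBuilder lookedUp = blocksMap.get(42L);  // the redundant search
        // Reference identity, not just value equality: same object either way.
        assert last == lookedUp : "last block should be in the blocksMap";
        System.out.println(last == lookedUp);         // true
      }
    }

As with the assert in the patch, the check only executes when the JVM runs with assertions enabled (java -ea).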
