Skip to content

Commit a16f922

Browse files
committed
HDFS-510. Rename DatanodeBlockInfo to be ReplicaInfo. Contributed by Jakob Homan & Hairong Kuang.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/hdfs/trunk@799146 13f79535-47bb-0310-9956-ffa450edef68
1 parent 4c3cb04 commit a16f922

File tree

4 files changed

+33
-30
lines changed

4 files changed

+33
-30
lines changed

CHANGES.txt

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -54,6 +54,9 @@ Trunk (unreleased changes)
5454

5555
HDFS-508. Factor out BlockInfo from BlocksMap. (shv)
5656

57+
HDFS-510. Rename DatanodeBlockInfo to be ReplicaInfo.
58+
(Jakob Homan & Hairong Kuang via shv)
59+
5760
BUG FIXES
5861
HDFS-76. Better error message to users when commands fail because of
5962
lack of quota. Allow quota to be set even if the limit is lower than

src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java

Lines changed: 22 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -171,7 +171,7 @@ long getGenerationStampFromFile(File[] listdir, File blockFile) {
171171
return Block.GRANDFATHER_GENERATION_STAMP;
172172
}
173173

174-
void getVolumeMap(HashMap<Block, DatanodeBlockInfo> volumeMap, FSVolume volume) {
174+
void getVolumeMap(HashMap<Block, ReplicaInfo> volumeMap, FSVolume volume) {
175175
if (children != null) {
176176
for (int i = 0; i < children.length; i++) {
177177
children[i].getVolumeMap(volumeMap, volume);
@@ -183,7 +183,7 @@ void getVolumeMap(HashMap<Block, DatanodeBlockInfo> volumeMap, FSVolume volume)
183183
if (Block.isBlockFilename(blockFiles[i])) {
184184
long genStamp = getGenerationStampFromFile(blockFiles, blockFiles[i]);
185185
volumeMap.put(new Block(blockFiles[i], blockFiles[i].length(), genStamp),
186-
new DatanodeBlockInfo(volume, blockFiles[i]));
186+
new ReplicaInfo(volume, blockFiles[i]));
187187
}
188188
}
189189
}
@@ -403,7 +403,7 @@ void checkDirs() throws DiskErrorException {
403403
DiskChecker.checkDir(tmpDir);
404404
}
405405

406-
void getVolumeMap(HashMap<Block, DatanodeBlockInfo> volumeMap) {
406+
void getVolumeMap(HashMap<Block, ReplicaInfo> volumeMap) {
407407
dataDir.getVolumeMap(volumeMap, this);
408408
}
409409

@@ -496,7 +496,7 @@ long getRemaining() throws IOException {
496496
return remaining;
497497
}
498498

499-
synchronized void getVolumeMap(HashMap<Block, DatanodeBlockInfo> volumeMap) {
499+
synchronized void getVolumeMap(HashMap<Block, ReplicaInfo> volumeMap) {
500500
for (int idx = 0; idx < volumes.length; idx++) {
501501
volumes[idx].getVolumeMap(volumeMap);
502502
}
@@ -653,7 +653,7 @@ public MetaDataInputStream getMetaDataInputStream(Block b)
653653
FSVolumeSet volumes;
654654
private HashMap<Block,ActiveFile> ongoingCreates = new HashMap<Block,ActiveFile>();
655655
private int maxBlocksPerDir = 0;
656-
HashMap<Block,DatanodeBlockInfo> volumeMap = null;
656+
HashMap<Block,ReplicaInfo> volumeMap = null;
657657
static Random random = new Random();
658658

659659
// Used for synchronizing access to usage stats
@@ -669,7 +669,7 @@ public FSDataset(DataStorage storage, Configuration conf) throws IOException {
669669
volArray[idx] = new FSVolume(storage.getStorageDir(idx).getCurrentDir(), conf);
670670
}
671671
volumes = new FSVolumeSet(volArray);
672-
volumeMap = new HashMap<Block, DatanodeBlockInfo>();
672+
volumeMap = new HashMap<Block, ReplicaInfo>();
673673
volumes.getVolumeMap(volumeMap);
674674
registerMBean(storage.getStorageID());
675675
}
@@ -742,7 +742,7 @@ public synchronized InputStream getBlockInputStream(Block b, long seekOffset) th
742742
public synchronized BlockInputStreams getTmpInputStreams(Block b,
743743
long blkOffset, long ckoff) throws IOException {
744744

745-
DatanodeBlockInfo info = volumeMap.get(b);
745+
ReplicaInfo info = volumeMap.get(b);
746746
if (info == null) {
747747
throw new IOException("Block " + b + " does not exist in volumeMap.");
748748
}
@@ -777,7 +777,7 @@ private BlockWriteStreams createBlockWriteStreams( File f , File metafile) throw
777777
* @return - true if the specified block was detached
778778
*/
779779
public boolean detachBlock(Block block, int numLinks) throws IOException {
780-
DatanodeBlockInfo info = null;
780+
ReplicaInfo info = null;
781781

782782
synchronized (this) {
783783
info = volumeMap.get(block);
@@ -1006,12 +1006,12 @@ public BlockWriteStreams writeToBlock(Block b, boolean isRecovery) throws IOExce
10061006
v = volumes.getNextVolume(blockSize);
10071007
// create temporary file to hold block in the designated volume
10081008
f = createTmpFile(v, b);
1009-
volumeMap.put(b, new DatanodeBlockInfo(v));
1009+
volumeMap.put(b, new ReplicaInfo(v));
10101010
} else if (f != null) {
10111011
DataNode.LOG.info("Reopen already-open Block for append " + b);
10121012
// create or reuse temporary file to hold block in the designated volume
10131013
v = volumeMap.get(b).getVolume();
1014-
volumeMap.put(b, new DatanodeBlockInfo(v));
1014+
volumeMap.put(b, new ReplicaInfo(v));
10151015
} else {
10161016
// reopening block for appending to it.
10171017
DataNode.LOG.info("Reopen Block for append " + b);
@@ -1042,7 +1042,7 @@ public BlockWriteStreams writeToBlock(Block b, boolean isRecovery) throws IOExce
10421042
" to tmp dir " + f);
10431043
}
10441044
}
1045-
volumeMap.put(b, new DatanodeBlockInfo(v));
1045+
volumeMap.put(b, new ReplicaInfo(v));
10461046
}
10471047
if (f == null) {
10481048
DataNode.LOG.warn("Block " + b + " reopen failed " +
@@ -1147,7 +1147,7 @@ public synchronized void finalizeBlock(Block b) throws IOException {
11471147

11481148
File dest = null;
11491149
dest = v.addBlock(b, f);
1150-
volumeMap.put(b, new DatanodeBlockInfo(v, dest));
1150+
volumeMap.put(b, new ReplicaInfo(v, dest));
11511151
ongoingCreates.remove(b);
11521152
}
11531153

@@ -1248,7 +1248,7 @@ File validateBlockFile(Block b) {
12481248

12491249
/** {@inheritDoc} */
12501250
public void validateBlockMetadata(Block b) throws IOException {
1251-
DatanodeBlockInfo info = volumeMap.get(b);
1251+
ReplicaInfo info = volumeMap.get(b);
12521252
if (info == null) {
12531253
throw new IOException("Block " + b + " does not exist in volumeMap.");
12541254
}
@@ -1306,7 +1306,7 @@ public void invalidate(Block invalidBlks[]) throws IOException {
13061306
FSVolume v;
13071307
synchronized (this) {
13081308
f = getFile(invalidBlks[i]);
1309-
DatanodeBlockInfo dinfo = volumeMap.get(invalidBlks[i]);
1309+
ReplicaInfo dinfo = volumeMap.get(invalidBlks[i]);
13101310
if (dinfo == null) {
13111311
DataNode.LOG.warn("Unexpected error trying to delete block "
13121312
+ invalidBlks[i] +
@@ -1369,7 +1369,7 @@ public void invalidate(Block invalidBlks[]) throws IOException {
13691369
* Turn the block identifier into a filename.
13701370
*/
13711371
public synchronized File getFile(Block b) {
1372-
DatanodeBlockInfo info = volumeMap.get(b);
1372+
ReplicaInfo info = volumeMap.get(b);
13731373
if (info != null) {
13741374
return info.getFile();
13751375
}
@@ -1448,8 +1448,8 @@ public String getStorageInfo() {
14481448
* generation stamp</li>
14491449
* <li>If the block length in memory does not match the actual block file length
14501450
* then mark the block as corrupt and update the block length in memory</li>
1451-
* <li>If the file in {@link DatanodeBlockInfo} does not match the file on
1452-
* the disk, update {@link DatanodeBlockInfo} with the correct file</li>
1451+
* <li>If the file in {@link ReplicaInfo} does not match the file on
1452+
* the disk, update {@link ReplicaInfo} with the correct file</li>
14531453
* </ul>
14541454
*
14551455
* @param blockId Block that differs
@@ -1472,7 +1472,7 @@ public void checkAndUpdate(long blockId, File diskFile,
14721472
Block.getGenerationStamp(diskMetaFile.getName()) :
14731473
Block.GRANDFATHER_GENERATION_STAMP;
14741474

1475-
DatanodeBlockInfo memBlockInfo = volumeMap.get(block);
1475+
ReplicaInfo memBlockInfo = volumeMap.get(block);
14761476
if (diskFile == null || !diskFile.exists()) {
14771477
if (memBlockInfo == null) {
14781478
// Block file does not exist and block does not exist in memory
@@ -1507,7 +1507,7 @@ public void checkAndUpdate(long blockId, File diskFile,
15071507
*/
15081508
if (memBlockInfo == null) {
15091509
// Block is missing in memory - add the block to volumeMap
1510-
DatanodeBlockInfo diskBlockInfo = new DatanodeBlockInfo(vol, diskFile);
1510+
ReplicaInfo diskBlockInfo = new ReplicaInfo(vol, diskFile);
15111511
Block diskBlock = new Block(diskFile, diskFile.length(), diskGS);
15121512
volumeMap.put(diskBlock, diskBlockInfo);
15131513
if (datanode.blockScanner != null) {
@@ -1540,7 +1540,7 @@ public void checkAndUpdate(long blockId, File diskFile,
15401540
+ memFile.getAbsolutePath()
15411541
+ " does not exist. Updating it to the file found during scan "
15421542
+ diskFile.getAbsolutePath());
1543-
DatanodeBlockInfo info = volumeMap.remove(memBlock);
1543+
ReplicaInfo info = volumeMap.remove(memBlock);
15441544
info.setFile(diskFile);
15451545
memFile = diskFile;
15461546

@@ -1571,7 +1571,7 @@ public void checkAndUpdate(long blockId, File diskFile,
15711571
DataNode.LOG.warn("Updating generation stamp for block " + blockId
15721572
+ " from " + memBlock.getGenerationStamp() + " to " + gs);
15731573

1574-
DatanodeBlockInfo info = volumeMap.remove(memBlock);
1574+
ReplicaInfo info = volumeMap.remove(memBlock);
15751575
memBlock.setGenerationStamp(gs);
15761576
volumeMap.put(memBlock, info);
15771577
}
@@ -1583,7 +1583,7 @@ public void checkAndUpdate(long blockId, File diskFile,
15831583
corruptBlock = new Block(memBlock);
15841584
DataNode.LOG.warn("Updating size of block " + blockId + " from "
15851585
+ memBlock.getNumBytes() + " to " + memFile.length());
1586-
DatanodeBlockInfo info = volumeMap.remove(memBlock);
1586+
ReplicaInfo info = volumeMap.remove(memBlock);
15871587
memBlock.setNumBytes(memFile.length());
15881588
volumeMap.put(memBlock, info);
15891589
}

src/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -32,19 +32,19 @@
3232
* This class is used by the datanode to maintain the map from a block
3333
* to its metadata.
3434
*/
35-
class DatanodeBlockInfo {
35+
class ReplicaInfo {
3636

3737
private FSVolume volume; // volume where the block belongs
3838
private File file; // block file
3939
private boolean detached; // copy-on-write done for block
4040

41-
DatanodeBlockInfo(FSVolume vol, File file) {
41+
ReplicaInfo(FSVolume vol, File file) {
4242
this.volume = vol;
4343
this.file = file;
4444
detached = false;
4545
}
4646

47-
DatanodeBlockInfo(FSVolume vol) {
47+
ReplicaInfo(FSVolume vol) {
4848
this.volume = vol;
4949
this.file = null;
5050
detached = false;

src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -67,7 +67,7 @@ private void createFile(String fileName, long fileLen) throws IOException {
6767
/** Truncate a block file */
6868
private long truncateBlockFile() throws IOException {
6969
synchronized (fds) {
70-
for (Entry<Block, DatanodeBlockInfo> entry : fds.volumeMap.entrySet()) {
70+
for (Entry<Block, ReplicaInfo> entry : fds.volumeMap.entrySet()) {
7171
Block b = entry.getKey();
7272
File f = entry.getValue().getFile();
7373
File mf = FSDataset.getMetaFile(f, b);
@@ -87,7 +87,7 @@ private long truncateBlockFile() throws IOException {
8787
/** Delete a block file */
8888
private long deleteBlockFile() {
8989
synchronized(fds) {
90-
for (Entry<Block, DatanodeBlockInfo> entry : fds.volumeMap.entrySet()) {
90+
for (Entry<Block, ReplicaInfo> entry : fds.volumeMap.entrySet()) {
9191
Block b = entry.getKey();
9292
File f = entry.getValue().getFile();
9393
File mf = FSDataset.getMetaFile(f, b);
@@ -104,7 +104,7 @@ private long deleteBlockFile() {
104104
/** Delete block meta file */
105105
private long deleteMetaFile() {
106106
synchronized(fds) {
107-
for (Entry<Block, DatanodeBlockInfo> entry : fds.volumeMap.entrySet()) {
107+
for (Entry<Block, ReplicaInfo> entry : fds.volumeMap.entrySet()) {
108108
Block b = entry.getKey();
109109
String blkfile = entry.getValue().getFile().getAbsolutePath();
110110
long genStamp = b.getGenerationStamp();
@@ -126,7 +126,7 @@ private long getFreeBlockId() {
126126
while (true) {
127127
id = rand.nextLong();
128128
Block b = new Block(id);
129-
DatanodeBlockInfo info = null;
129+
ReplicaInfo info = null;
130130
synchronized(fds) {
131131
info = fds.volumeMap.get(b);
132132
}
@@ -326,7 +326,7 @@ public void test() throws Exception {
326326
private void verifyAddition(long blockId, long genStamp, long size) {
327327
Block memBlock = fds.getBlockKey(blockId);
328328
assertNotNull(memBlock);
329-
DatanodeBlockInfo blockInfo;
329+
ReplicaInfo blockInfo;
330330
synchronized(fds) {
331331
blockInfo = fds.volumeMap.get(memBlock);
332332
}

0 commit comments

Comments (0)