
Commit 68a40a1

Author: Dhruba Borthakur

HDFS-532. Allow applications to know that a read request failed
because block is missing. (dhruba)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hdfs/trunk@803973 13f79535-47bb-0310-9956-ffa450edef68

1 parent 5a86a7c

File tree: 4 files changed, +229 −1 lines changed

CHANGES.txt (3 additions, 0 deletions)

@@ -150,6 +150,9 @@ Trunk (unreleased changes)
 
     HDFS-534. Include avro in ivy. (szetszwo)
 
+    HDFS-532. Allow applications to know that a read request failed
+    because block is missing. (dhruba)
+
 Release 0.20.1 - Unreleased
 
   IMPROVEMENTS
src/java/org/apache/hadoop/hdfs/BlockMissingException.java (new file, 60 additions)

/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hdfs;

import java.io.IOException;

/**
 * This exception is thrown when a read encounters a block that has no
 * locations associated with it.
 */
public class BlockMissingException extends IOException {

  private static final long serialVersionUID = 1L;

  private String filename;
  private long offset;

  /**
   * An exception that indicates that a file is corrupted.
   * @param filename name of the corrupted file
   * @param description a description of the corruption details
   * @param offset offset into the file at which the missing block was encountered
   */
  public BlockMissingException(String filename, String description, long offset) {
    super(description);
    this.filename = filename;
    this.offset = offset;
  }

  /**
   * Returns the name of the corrupted file.
   * @return name of the corrupted file
   */
  public String getFile() {
    return filename;
  }

  /**
   * Returns the offset at which this file is corrupted.
   * @return offset into the corrupted file
   */
  public long getOffset() {
    return offset;
  }
}
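How an application might use the new class: a minimal, hedged sketch, not part of this commit. The path and the handling policy are hypothetical; DFSClient only throws BlockMissingException once the change below is in place. The point is that a reader can now distinguish a missing block from other I/O failures and recover the file name and offset programmatically:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.BlockMissingException;

public class MissingBlockAwareReader {
  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.get(new Configuration());
    Path path = new Path("/user/example/data.bin");   // hypothetical path
    FSDataInputStream in = fs.open(path);
    byte[] buf = new byte[4096];
    try {
      while (in.read(buf) >= 0) {
        // consume data
      }
    } catch (BlockMissingException e) {
      // The block is known to the namenode but no replica could be read.
      // The application can react specifically (re-replicate, rebuild from
      // parity, flag the file) instead of seeing a generic IOException.
      System.err.println("Missing block in " + e.getFile() +
                         " at offset " + e.getOffset());
    } finally {
      in.close();
    }
  }
}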

src/java/org/apache/hadoop/hdfs/DFSClient.java (2 additions, 1 deletion)

@@ -1906,7 +1906,8 @@ private DNAddrPair chooseDataNode(LocatedBlock block)
       } catch (IOException ie) {
         String blockInfo = block.getBlock() + " file=" + src;
         if (failures >= maxBlockAcquireFailures) {
-          throw new IOException("Could not obtain block: " + blockInfo);
+          throw new BlockMissingException(src, "Could not obtain block: " + blockInfo,
+                                          block.getStartOffset());
         }
 
         if (nodes == null || nodes.length == 0) {
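Because BlockMissingException extends IOException, this change is backward compatible: existing callers of DFSClient that catch IOException keep working unchanged, while callers that want to treat a missing block specially can catch the subtype first and read the file name and offset from the exception instead of parsing the message string.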
TestBlockMissingException.java (new file, 164 additions)

/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs;

import java.io.File;
import java.io.IOException;

import junit.framework.TestCase;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

public class TestBlockMissingException extends TestCase {
  final static Log LOG = LogFactory.getLog("org.apache.hadoop.hdfs.TestBlockMissingException");
  final static int NUM_DATANODES = 3;

  Configuration conf;
  MiniDFSCluster dfs = null;
  DistributedFileSystem fileSys = null;

  /**
   * Verify that reading a file whose block has been removed from all
   * datanodes fails with BlockMissingException.
   */
  public void testBlockMissingException() throws Exception {
    LOG.info("Test testBlockMissingException started.");
    long blockSize = 1024L;
    int numBlocks = 4;
    conf = new Configuration();
    try {
      dfs = new MiniDFSCluster(conf, NUM_DATANODES, true, null);
      dfs.waitActive();
      fileSys = (DistributedFileSystem)dfs.getFileSystem();
      Path file1 = new Path("/user/dhruba/raidtest/file1");
      createOldFile(fileSys, file1, 1, numBlocks, blockSize);

      // extract block locations from the file system; wait till file is closed.
      LocatedBlocks locations = null;
      locations = fileSys.dfs.getNamenode().getBlockLocations(file1.toString(),
                                                              0, numBlocks * blockSize);
      // remove the first block of the file
      LOG.info("Remove first block of file");
      corruptBlock(file1, locations.get(0).getBlock());

      // validate that the system throws BlockMissingException
      validateFile(fileSys, file1);
    } finally {
      if (fileSys != null) fileSys.close();
      if (dfs != null) dfs.shutdown();
    }
    LOG.info("Test testBlockMissingException completed.");
  }

  //
  // creates a file and populates it with data.
  //
  private void createOldFile(FileSystem fileSys, Path name, int repl, int numBlocks, long blocksize)
    throws IOException {
    FSDataOutputStream stm = fileSys.create(name, true,
                                            fileSys.getConf().getInt("io.file.buffer.size", 4096),
                                            (short)repl, blocksize);
    // fill data into file
    final byte[] b = new byte[(int)blocksize];
    for (int i = 0; i < numBlocks; i++) {
      stm.write(b);
    }
    stm.close();
  }

  //
  // verifies that reading the file throws BlockMissingException
  //
  private void validateFile(FileSystem fileSys, Path name)
    throws IOException {

    FSDataInputStream stm = fileSys.open(name);
    final byte[] b = new byte[4192];
    int num = 0;
    boolean gotException = false;

    try {
      while (num >= 0) {
        num = stm.read(b);
        if (num < 0) {
          break;
        }
      }
    } catch (BlockMissingException e) {
      gotException = true;
    }
    stm.close();
    assertTrue("Expected BlockMissingException ", gotException);
  }

  /*
   * The data directories for a datanode
   */
  private File[] getDataNodeDirs(int i) throws IOException {
    File base_dir = new File(System.getProperty("test.build.data"), "dfs/");
    File data_dir = new File(base_dir, "data");
    File dir1 = new File(data_dir, "data"+(2*i+1));
    File dir2 = new File(data_dir, "data"+(2*i+2));
    if (dir1.isDirectory() && dir2.isDirectory()) {
      File[] dir = new File[2];
      dir[0] = new File(dir1, "current");
      dir[1] = new File(dir2, "current");
      return dir;
    }
    return new File[0];
  }

  //
  // Corrupt the specified block of the file by deleting its replicas
  // from the datanodes' local storage directories.
  //
  void corruptBlock(Path file, Block blockNum) throws IOException {
    long id = blockNum.getBlockId();

    // Now deliberately remove the data blocks from the block directories.
    for (int i = 0; i < NUM_DATANODES; i++) {
      File[] dirs = getDataNodeDirs(i);

      for (int j = 0; j < dirs.length; j++) {
        File[] blocks = dirs[j].listFiles();
        assertTrue("Blocks do not exist in data-dir", (blocks != null) && (blocks.length > 0));
        for (int idx = 0; idx < blocks.length; idx++) {
          if (blocks[idx].getName().startsWith("blk_" + id) &&
              !blocks[idx].getName().endsWith(".meta")) {
            blocks[idx].delete();
            LOG.info("Deleted block " + blocks[idx]);
          }
        }
      }
    }
  }

}
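The test above only asserts that the exception is thrown. A hedged sketch of an additional check, not in the commit, that would also exercise the new accessors; it assumes DFSClient reports the path string passed to open() and the start offset of the missing first block:

  // Hypothetical companion to validateFile(): also verify the metadata
  // carried by the new exception. Assumes e.getFile() is the path passed
  // to open() and that the missing (first) block starts at offset 0.
  private void validateExceptionDetails(FileSystem fileSys, Path name)
    throws IOException {
    FSDataInputStream stm = fileSys.open(name);
    try {
      stm.read(new byte[4192]);
      fail("Expected BlockMissingException");
    } catch (BlockMissingException e) {
      assertEquals(name.toUri().getPath(), e.getFile());
      assertEquals(0L, e.getOffset());
    } finally {
      stm.close();
    }
  }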
