
Commit bee5a6a

HDFS-7943. Append cannot handle the last block with length greater than the preferred block size. Contributed by Jing Zhao.

Jing9 committed Mar 19, 2015
1 parent 8234fd0 commit bee5a6a
Showing 3 changed files with 44 additions and 2 deletions.
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (3 additions, 0 deletions)
@@ -1199,6 +1199,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-7945. The WebHdfs system on DN does not honor the length parameter.
     (wheat9)

+    HDFS-7943. Append cannot handle the last block with length greater than
+    the preferred block size. (jing9)
+
   BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS

     HDFS-7720. Quota by Storage Type API, tools and ClientNameNode
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
@@ -34,6 +34,16 @@

 import static org.apache.hadoop.util.Time.now;

+/**
+ * Restrictions for a concat operation:
+ * <pre>
+ * 1. the src file and the target file are in the same dir
+ * 2. all the source files are not in snapshot
+ * 3. any source file cannot be the same with the target file
+ * 4. source files cannot be under construction or empty
+ * 5. source file's preferred block size cannot be greater than the target file
+ * </pre>
+ */
 class FSDirConcatOp {

   static HdfsFileStatus concat(FSDirectory fsd, String target, String[] srcs,
@@ -123,14 +133,25 @@ private static INodeFile[] verifySrcFiles(FSDirectory fsd, String[] srcs,
         throw new SnapshotException("Concat: the source file " + src
             + " is referred by some other reference in some snapshot.");
       }
+      // source file cannot be the same with the target file
       if (srcINode == targetINode) {
         throw new HadoopIllegalArgumentException("concat: the src file " + src
             + " is the same with the target file " + targetIIP.getPath());
       }
+      // source file cannot be under construction or empty
       if(srcINodeFile.isUnderConstruction() || srcINodeFile.numBlocks() == 0) {
         throw new HadoopIllegalArgumentException("concat: source file " + src
             + " is invalid or empty or underConstruction");
       }
+      // source file's preferred block size cannot be greater than the target
+      // file
+      if (srcINodeFile.getPreferredBlockSize() >
+          targetINode.getPreferredBlockSize()) {
+        throw new HadoopIllegalArgumentException("concat: source file " + src
+            + " has preferred block size " + srcINodeFile.getPreferredBlockSize()
+            + " which is greater than the target file's preferred block size "
+            + targetINode.getPreferredBlockSize());
+      }
       si.add(srcINodeFile);
     }

@@ -143,9 +164,10 @@ private static INodeFile[] verifySrcFiles(FSDirectory fsd, String[] srcs,
     return si.toArray(new INodeFile[si.size()]);
   }

-  private static QuotaCounts computeQuotaDeltas(FSDirectory fsd, INodeFile target, INodeFile[] srcList) {
+  private static QuotaCounts computeQuotaDeltas(FSDirectory fsd,
+      INodeFile target, INodeFile[] srcList) {
     QuotaCounts deltas = new QuotaCounts.Builder().build();
-    short targetRepl = target.getBlockReplication();
+    final short targetRepl = target.getBlockReplication();
     for (INodeFile src : srcList) {
       short srcRepl = src.getBlockReplication();
       long fileSize = src.computeFileSize();
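The effect of the new verifySrcFiles check is easiest to see from the client side. The following is a minimal, hypothetical sketch, not part of this commit: it assumes a running HDFS cluster reachable through the default Configuration, and the /demo paths, buffer size, replication factor, and file sizes are illustrative only. Concat's other restrictions from the javadoc above (same directory, non-empty sources, and so on) still apply, so both files are given some data before the call.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class ConcatBlockSizeSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);

        Path trg = new Path("/demo/target");
        Path src = new Path("/demo/src");
        long blockSize = dfs.getDefaultBlockSize(trg);

        // Target with the default preferred block size, source with twice that.
        writeFile(dfs, trg, blockSize);
        writeFile(dfs, src, blockSize * 2);

        try {
          dfs.concat(trg, new Path[] { src });
        } catch (Exception e) {
          // With this change the NameNode is expected to reject the request
          // because the source's preferred block size exceeds the target's;
          // the message contains "preferred block size" (see the test below).
          System.out.println("concat rejected: " + e.getMessage());
        }
      }

      private static void writeFile(DistributedFileSystem dfs, Path p,
          long blockSize) throws Exception {
        FSDataOutputStream out = dfs.create(p, true, 4096, (short) 1, blockSize);
        out.write(new byte[1024]); // concat also rejects empty sources
        out.close();
      }
    }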
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java
@@ -44,6 +44,7 @@
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -388,6 +389,22 @@ public void testIllegalArg() throws IOException {
     } catch (Exception e) {
       // exspected
     }
+
+    // the source file's preferred block size cannot be greater than the target
+    {
+      final Path src1 = new Path(parentDir, "src1");
+      DFSTestUtil.createFile(dfs, src1, fileLen, REPL_FACTOR, 0L);
+      final Path src2 = new Path(parentDir, "src2");
+      // create a file whose preferred block size is greater than the target
+      DFSTestUtil.createFile(dfs, src2, 1024, fileLen,
+          dfs.getDefaultBlockSize(trg) * 2, REPL_FACTOR, 0L);
+      try {
+        dfs.concat(trg, new Path[] {src1, src2});
+        fail("didn't fail for src with greater preferred block size");
+      } catch (Exception e) {
+        GenericTestUtils.assertExceptionContains("preferred block size", e);
+      }
+    }
   }

   /**
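For context on why the guard exists: per the JIRA title, a concat whose source has a larger preferred block size can leave the target file with a last block longer than the target's own preferred block size, and a subsequent append to that file could not be handled. Below is a hedged sketch of that sequence; it is hypothetical (names assumed, setup of the two non-empty files not shown) and is not code from this commit.

    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    class AppendAfterConcatSketch {
      // Assumption: trg was created with a smaller preferred block size than
      // src (say 64 MB vs 128 MB), and both files already contain data.
      static void sketch(DistributedFileSystem dfs, Path trg, Path src)
          throws Exception {
        // With this commit the NameNode rejects the concat up front, with
        // "preferred block size" in the exception message.
        dfs.concat(trg, new Path[] { src });

        // Before the fix the concat could succeed, leaving trg's last block
        // (inherited from src) longer than trg's preferred block size; this
        // append then failed on such a file, the symptom in the JIRA title.
        FSDataOutputStream out = dfs.append(trg);
        out.write(new byte[16]);
        out.close();
      }
    }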
