diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 385c39b1f8b37..d9d9e1d440e7d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1199,6 +1199,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-7945. The WebHdfs system on DN does not honor the length parameter.
     (wheat9)
 
+    HDFS-7943. Append cannot handle the last block with length greater than
+    the preferred block size. (jing9)
+
   BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
     HDFS-7720. Quota by Storage Type API, tools and ClientNameNode
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
index 5ccd3eafb36c7..31a6af7b7bcf2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java
@@ -34,6 +34,16 @@
 
 import static org.apache.hadoop.util.Time.now;
 
+/**
+ * Restrictions for a concat operation:
+ * <pre>
+ * 1. the src file and the target file must be in the same directory
+ * 2. none of the source files can be in a snapshot
+ * 3. no source file can be the same as the target file
+ * 4. source files cannot be under construction or empty
+ * 5. a source file's preferred block size cannot be greater than the target's
+ * </pre>
+ */
 class FSDirConcatOp {
 
   static HdfsFileStatus concat(FSDirectory fsd, String target, String[] srcs,
@@ -123,14 +133,25 @@ private static INodeFile[] verifySrcFiles(FSDirectory fsd, String[] srcs,
         throw new SnapshotException("Concat: the source file " + src
             + " is referred by some other reference in some snapshot.");
       }
+      // source file cannot be the same with the target file
       if (srcINode == targetINode) {
         throw new HadoopIllegalArgumentException("concat: the src file " + src
             + " is the same with the target file " + targetIIP.getPath());
       }
+      // source file cannot be under construction or empty
       if(srcINodeFile.isUnderConstruction() || srcINodeFile.numBlocks() == 0) {
         throw new HadoopIllegalArgumentException("concat: source file " + src
             + " is invalid or empty or underConstruction");
       }
+      // source file's preferred block size cannot be greater than the target
+      // file
+      if (srcINodeFile.getPreferredBlockSize() >
+          targetINode.getPreferredBlockSize()) {
+        throw new HadoopIllegalArgumentException("concat: source file " + src
+            + " has preferred block size " + srcINodeFile.getPreferredBlockSize()
+            + " which is greater than the target file's preferred block size "
+            + targetINode.getPreferredBlockSize());
+      }
       si.add(srcINodeFile);
     }
 
@@ -143,9 +164,10 @@ private static INodeFile[] verifySrcFiles(FSDirectory fsd, String[] srcs,
     return si.toArray(new INodeFile[si.size()]);
   }
 
-  private static QuotaCounts computeQuotaDeltas(FSDirectory fsd, INodeFile target, INodeFile[] srcList) {
+  private static QuotaCounts computeQuotaDeltas(FSDirectory fsd,
+      INodeFile target, INodeFile[] srcList) {
     QuotaCounts deltas = new QuotaCounts.Builder().build();
-    short targetRepl = target.getBlockReplication();
+    final short targetRepl = target.getBlockReplication();
     for (INodeFile src : srcList) {
       short srcRepl = src.getBlockReplication();
       long fileSize = src.computeFileSize();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java
index ddf5a3e23d83b..e1c3c0f5f67db 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHDFSConcat.java
@@ -44,6 +44,7 @@
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -388,6 +389,22 @@ public void testIllegalArg() throws IOException {
     } catch (Exception e) {
       // exspected
     }
+
+    // the source file's preferred block size cannot be greater than the target
+    {
+      final Path src1 = new Path(parentDir, "src1");
+      DFSTestUtil.createFile(dfs, src1, fileLen, REPL_FACTOR, 0L);
+      final Path src2 = new Path(parentDir, "src2");
+      // create a file whose preferred block size is greater than the target
+      DFSTestUtil.createFile(dfs, src2, 1024, fileLen,
+          dfs.getDefaultBlockSize(trg) * 2, REPL_FACTOR, 0L);
+      try {
+        dfs.concat(trg, new Path[] {src1, src2});
+        fail("didn't fail for src with greater preferred block size");
+      } catch (Exception e) {
+        GenericTestUtils.assertExceptionContains("preferred block size", e);
+      }
+    }
   }
 
   /**
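
Reviewer note (not part of the patch): below is a minimal sketch of the scenario the new check rejects, for anyone who wants to see it end to end. The DFSTestUtil.createFile overloads and the DistributedFileSystem.concat call are the same ones the test hunk above uses; everything else (the class name ConcatPreferredBlockSizeRepro, the /trg and /src paths, the 512-byte file lengths, and the single-replica MiniDFSCluster setup) is invented for illustration. With this patch applied, the concat call is expected to fail fast with a HadoopIllegalArgumentException mentioning "preferred block size", rather than producing a target whose last block is longer than its own preferred block size, which is exactly the state a later append could not handle (HDFS-7943).

// Illustrative sketch only; names, paths, and sizes are assumptions.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class ConcatPreferredBlockSizeRepro {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
      DistributedFileSystem dfs = cluster.getFileSystem();
      Path trg = new Path("/trg");
      Path src = new Path("/src");
      // Target file created with the default preferred block size.
      DFSTestUtil.createFile(dfs, trg, 512L, (short) 1, 0L);
      // Source file whose preferred block size is twice the target's,
      // mirroring the setup in the test hunk above.
      DFSTestUtil.createFile(dfs, src, 1024, 512L,
          dfs.getDefaultBlockSize(trg) * 2, (short) 1, 0L);
      try {
        dfs.concat(trg, new Path[] { src });
        System.out.println("concat unexpectedly succeeded");
      } catch (Exception e) {
        // With this patch the NameNode rejects the request up front;
        // the message is expected to mention "preferred block size".
        System.out.println("concat rejected: " + e.getMessage());
      }
    } finally {
      cluster.shutdown();
    }
  }
}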