diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
index c60acaa003150..ee0bf8a5fb165 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
@@ -80,8 +80,12 @@ private static void verifyQuotaForRename(FSDirectory fsd, INodesInPath src,
     // Assume dstParent existence check done by callers.
     INode dstParent = dst.getINode(-2);
     // Use the destination parent's storage policy for quota delta verify.
+    final boolean isSrcSetSp = src.getLastINode().isSetStoragePolicy();
+    final byte storagePolicyID = isSrcSetSp ?
+        src.getLastINode().getLocalStoragePolicyID() :
+        dstParent.getStoragePolicyID();
     final QuotaCounts delta = src.getLastINode()
-        .computeQuotaUsage(bsps, dstParent.getStoragePolicyID(), false,
+        .computeQuotaUsage(bsps, storagePolicyID, false,
         Snapshot.CURRENT_STATE_ID);
 
     // Reduce the required quota by dst that is being removed
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 7b902d5ff1b3d..fc17eaebf7f19 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -1363,9 +1363,13 @@ public INodesInPath addLastINode(INodesInPath existing, INode inode,
     // always verify inode name
     verifyINodeName(inode.getLocalNameBytes());
 
+    final boolean isSrcSetSp = inode.isSetStoragePolicy();
+    final byte storagePolicyID = isSrcSetSp ?
+        inode.getLocalStoragePolicyID() :
+        parent.getStoragePolicyID();
     final QuotaCounts counts = inode
         .computeQuotaUsage(getBlockStoragePolicySuite(),
-            parent.getStoragePolicyID(), false, Snapshot.CURRENT_STATE_ID);
+            storagePolicyID, false, Snapshot.CURRENT_STATE_ID);
     updateCount(existing, pos, counts, checkQuota);
 
     boolean isRename = (inode.getParent() != null);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
index 03f01eb32eef1..8e417fe43aa5c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
@@ -340,6 +340,16 @@ public boolean isFile() {
     return false;
   }
 
+  /**
+   * Check if this inode itself has a storage policy set.
+   */
+  public boolean isSetStoragePolicy() {
+    if (isSymlink()) {
+      return false;
+    }
+    return getLocalStoragePolicyID() != HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
+  }
+
   /** Cast this inode to an {@link INodeFile}. */
   public INodeFile asFile() {
     throw new IllegalStateException("Current inode is not a file: "
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
index 79088d3be8555..e14ea4dc26579 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java
@@ -44,6 +44,7 @@
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.QuotaUsage;
 import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.client.impl.LeaseRenewer;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@@ -958,6 +959,44 @@ public void testQuotaByStorageType() throws Exception {
         6 * fileSpace);
   }
 
+  @Test
+  public void testRenameInodeWithStorageType() throws IOException {
+    final int size = 64;
+    final short repl = 1;
+    final Path foo = new Path("/foo");
+    final Path bs1 = new Path(foo, "bs1");
+    final Path wow = new Path(bs1, "wow");
+    final Path bs2 = new Path(foo, "bs2");
+    final Path wow2 = new Path(bs2, "wow2");
+    final Path wow3 = new Path(bs2, "wow3");
+
+    dfs.mkdirs(bs1, FsPermission.getDirDefault());
+    dfs.mkdirs(bs2, FsPermission.getDirDefault());
+    dfs.setQuota(bs1, 1000, 434217728);
+    dfs.setQuota(bs2, 1000, 434217728);
+    // file wow3 without storage policy
+    DFSTestUtil.createFile(dfs, wow3, size, repl, 0);
+
+    dfs.setStoragePolicy(bs2, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
+
+    DFSTestUtil.createFile(dfs, wow, size, repl, 0);
+    DFSTestUtil.createFile(dfs, wow2, size, repl, 0);
+    assertTrue("Without storage policy, typeConsumed should be 0.",
+        dfs.getQuotaUsage(bs1).getTypeConsumed(StorageType.SSD) == 0);
+    assertTrue("With storage policy, typeConsumed should not be 0.",
+        dfs.getQuotaUsage(bs2).getTypeConsumed(StorageType.SSD) != 0);
+    // wow3 has no storage policy, so renaming it will not change dst typeConsumed
+    dfs.rename(wow3, bs1);
+    assertTrue("Rename src without storage policy, dst typeConsumed should not be changed.",
+        dfs.getQuotaUsage(bs1).getTypeConsumed(StorageType.SSD) == 0);
+
+    dfs.rename(bs2, bs1);
+    long dstTypeConsumed =
+        dfs.getQuotaUsage(bs1).getTypeConsumed(StorageType.SSD);
+    assertTrue("Rename src with storage policy, dst typeConsumed should not be 0.",
+        dstTypeConsumed != 0);
+  }
+
   private static void checkContentSummary(final ContentSummary expected,
       final ContentSummary computed) {
     assertEquals(expected.toString(), computed.toString());