From 3816597de7c3b4d05cc6f058fd6f510964e88c78 Mon Sep 17 00:00:00 2001
From: "zengqiang.xu"
Date: Fri, 5 Aug 2022 23:29:27 +0800
Subject: [PATCH] HDFS-16720. Import detailed class instead of import * in
 hadoop-hdfs module

---
 .../BlockPlacementPolicyDefault.java          | 12 ++++-
 ...BlockPlacementPolicyRackFaultTolerant.java |  8 ++-
 .../BlockPlacementPolicyWithNodeGroup.java    | 11 +++-
 .../blockmanagement/DatanodeManager.java      | 12 ++++-
 .../server/datanode/BlockPoolManager.java     |  8 ++-
 .../hdfs/server/datanode/FileIoProvider.java  | 14 ++++-
 .../checker/StorageLocationChecker.java       | 10 +++-
 .../impl/RamDiskReplicaLruTracker.java        |  6 ++-
 .../hadoop/hdfs/server/mover/Mover.java       | 22 +++++++-
 .../hdfs/server/namenode/ImageServlet.java    | 16 +++++-
 .../server/namenode/SecondaryNameNode.java    |  6 ++-
 .../namenode/ha/StandbyCheckpointer.java      |  9 +++-
 .../DirectoryWithSnapshotFeature.java         |  8 ++-
 .../web/resources/NamenodeWebHdfsMethods.java | 53 ++++++++++++++++++-
 14 files changed, 177 insertions(+), 18 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index 39a40f52ae5f7..d2077798cc1dc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -23,7 +23,17 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_CONSIDERLOADBYSTORAGETYPE_KEY;
 import static org.apache.hadoop.util.Time.monotonicNow;
 
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.EnumMap;
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
 import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.util.Preconditions;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRackFaultTolerant.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRackFaultTolerant.java
index a3b3f482e8c23..714dbdb1ed373 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRackFaultTolerant.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRackFaultTolerant.java
@@ -23,7 +23,13 @@
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.net.NodeBase;
 
-import java.util.*;
+import java.util.Collection;
+import java.util.EnumMap;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
 
 /**
  * The class is responsible for choosing the desired number of targets
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java
index 39f15191534d1..bf639f2fbc75d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java
@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
-import java.util.*;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -27,6 +25,15 @@
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.net.NodeBase;
 
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.EnumMap;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
 /** The class is responsible for choosing the desired number of targets
  * for placing block replicas on environment with node-group layer.
  * The replica placement strategy is adjusted to:
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index 237daed0960ff..9e7b527dbde8e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -68,7 +68,17 @@
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.net.UnknownHostException;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.TimeUnit;
 import java.util.function.Consumer;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java
index 073576546c790..76a8302de1a06 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java
@@ -20,7 +20,13 @@
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.security.PrivilegedExceptionAction;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
 import java.util.concurrent.CopyOnWriteArrayList;
 
 import org.apache.hadoop.classification.InterfaceAudience;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FileIoProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FileIoProvider.java
index 552f5199f1d3a..fdc4eeec43215 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FileIoProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FileIoProvider.java
@@ -53,7 +53,19 @@
 import java.nio.file.Path;
 import java.util.List;
 
-import static org.apache.hadoop.hdfs.server.datanode.FileIoProvider.OPERATION.*;
+import static org.apache.hadoop.hdfs.server.datanode.FileIoProvider.OPERATION.OPEN;
+import static org.apache.hadoop.hdfs.server.datanode.FileIoProvider.OPERATION.EXISTS;
+import static org.apache.hadoop.hdfs.server.datanode.FileIoProvider.OPERATION.LIST;
+import static org.apache.hadoop.hdfs.server.datanode.FileIoProvider.OPERATION.DELETE;
+import static org.apache.hadoop.hdfs.server.datanode.FileIoProvider.OPERATION.MOVE;
+import static org.apache.hadoop.hdfs.server.datanode.FileIoProvider.OPERATION.MKDIRS;
+import static org.apache.hadoop.hdfs.server.datanode.FileIoProvider.OPERATION.TRANSFER;
+import static org.apache.hadoop.hdfs.server.datanode.FileIoProvider.OPERATION.SYNC;
+import static org.apache.hadoop.hdfs.server.datanode.FileIoProvider.OPERATION.FADVISE;
+import static org.apache.hadoop.hdfs.server.datanode.FileIoProvider.OPERATION.READ;
+import static org.apache.hadoop.hdfs.server.datanode.FileIoProvider.OPERATION.WRITE;
+import static org.apache.hadoop.hdfs.server.datanode.FileIoProvider.OPERATION.FLUSH;
+import static org.apache.hadoop.hdfs.server.datanode.FileIoProvider.OPERATION.NATIVE_COPY;
 
 /**
  * This class abstracts out various file IO operations performed by the
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/StorageLocationChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/StorageLocationChecker.java
index 3d49dd5362864..29d22727f09e3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/StorageLocationChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/StorageLocationChecker.java
@@ -18,8 +18,6 @@
 package org.apache.hadoop.hdfs.server.datanode.checker;
 
-import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
-
 import org.apache.hadoop.thirdparty.com.google.common.collect.Maps;
 import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListenableFuture;
 import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
 
@@ -55,6 +53,14 @@
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_PERMISSION_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DISK_CHECK_TIMEOUT_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY;
+
 /**
  * A utility class that encapsulates checking storage locations during DataNode
  * startup.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskReplicaLruTracker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskReplicaLruTracker.java
index aebedaab0ef8d..d0b8b646c81f8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskReplicaLruTracker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskReplicaLruTracker.java
@@ -25,7 +25,11 @@
 import org.apache.hadoop.util.Time;
 
 import java.io.File;
-import java.util.*;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.Map;
+import java.util.Queue;
 
 /**
  * An implementation of RamDiskReplicaTracker that uses an LRU
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
index c8c43967dd0f8..e0965df61d09f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
@@ -17,7 +17,14 @@
  */
 package org.apache.hadoop.hdfs.server.mover;
 
-import org.apache.commons.cli.*;
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.CommandLineParser;
+import org.apache.commons.cli.GnuParser;
+import org.apache.commons.cli.Option;
+import org.apache.commons.cli.OptionBuilder;
+import org.apache.commons.cli.OptionGroup;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.ParseException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
@@ -67,7 +74,18 @@
 import java.net.InetSocketAddress;
 import java.net.URI;
 import java.text.DateFormat;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Date;
+import java.util.EnumMap;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageServlet.java
index 442c1aba95b1c..b523c06cfd2ce 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageServlet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageServlet.java
@@ -17,6 +17,20 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.Collections;
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.SortedSet;
+import java.util.TreeSet;
 import java.util.concurrent.TimeUnit;
 import org.apache.hadoop.hdfs.server.aliasmap.InMemoryAliasMap;
 import org.apache.hadoop.hdfs.server.common.Util;
@@ -29,8 +43,6 @@
 import java.net.HttpURLConnection;
 import java.security.PrivilegedExceptionAction;
 
-import java.util.*;
-import java.io.*;
 
 import javax.servlet.ServletContext;
 import javax.servlet.ServletException;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
index e95200b35aaa5..b85c8a33ca7e3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
@@ -27,7 +27,11 @@
 import java.net.URL;
 import java.security.PrivilegedAction;
 import java.security.PrivilegedExceptionAction;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Date;
+import java.util.Iterator;
+import java.util.List;
 
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.CommandLineParser;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java
index ec848668d2561..ebe155e39d570 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java
@@ -27,7 +27,14 @@
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.concurrent.*;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Future;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
index 3893f42fde2de..1b7b530dc7a2f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java
@@ -30,7 +30,13 @@
 
 import java.io.DataOutput;
 import java.io.IOException;
-import java.util.*;
+import java.util.ArrayDeque;
+import java.util.ArrayList;
+import java.util.Deque;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
 
 import static org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.NO_SNAPSHOT_ID;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
index a3250c213caf0..9631e63a7bc11 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
@@ -59,6 +59,58 @@
 import org.apache.hadoop.fs.QuotaUsage;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.web.resources.AccessTimeParam;
+import org.apache.hadoop.hdfs.web.resources.AclPermissionParam;
+import org.apache.hadoop.hdfs.web.resources.BlockSizeParam;
+import org.apache.hadoop.hdfs.web.resources.BufferSizeParam;
+import org.apache.hadoop.hdfs.web.resources.ConcatSourcesParam;
+import org.apache.hadoop.hdfs.web.resources.CreateFlagParam;
+import org.apache.hadoop.hdfs.web.resources.CreateParentParam;
+import org.apache.hadoop.hdfs.web.resources.DelegationParam;
+import org.apache.hadoop.hdfs.web.resources.DeleteOpParam;
+import org.apache.hadoop.hdfs.web.resources.DestinationParam;
+import org.apache.hadoop.hdfs.web.resources.DoAsParam;
+import org.apache.hadoop.hdfs.web.resources.ECPolicyParam;
+import org.apache.hadoop.hdfs.web.resources.ExcludeDatanodesParam;
+import org.apache.hadoop.hdfs.web.resources.FsActionParam;
+import org.apache.hadoop.hdfs.web.resources.GetOpParam;
+import org.apache.hadoop.hdfs.web.resources.GroupParam;
+import org.apache.hadoop.hdfs.web.resources.HttpOpParam;
+import org.apache.hadoop.hdfs.web.resources.LengthParam;
+import org.apache.hadoop.hdfs.web.resources.ModificationTimeParam;
+import org.apache.hadoop.hdfs.web.resources.NameSpaceQuotaParam;
+import org.apache.hadoop.hdfs.web.resources.NamenodeAddressParam;
+import org.apache.hadoop.hdfs.web.resources.NewLengthParam;
+import org.apache.hadoop.hdfs.web.resources.NoRedirectParam;
+import org.apache.hadoop.hdfs.web.resources.OffsetParam;
+import org.apache.hadoop.hdfs.web.resources.OldSnapshotNameParam;
+import org.apache.hadoop.hdfs.web.resources.OverwriteParam;
+import org.apache.hadoop.hdfs.web.resources.OwnerParam;
+import org.apache.hadoop.hdfs.web.resources.Param;
+import org.apache.hadoop.hdfs.web.resources.PermissionParam;
+import org.apache.hadoop.hdfs.web.resources.PostOpParam;
+import org.apache.hadoop.hdfs.web.resources.PutOpParam;
+import org.apache.hadoop.hdfs.web.resources.RecursiveParam;
+import org.apache.hadoop.hdfs.web.resources.RenameOptionSetParam;
+import org.apache.hadoop.hdfs.web.resources.RenewerParam;
+import org.apache.hadoop.hdfs.web.resources.ReplicationParam;
+import org.apache.hadoop.hdfs.web.resources.SnapshotDiffIndexParam;
+import org.apache.hadoop.hdfs.web.resources.SnapshotDiffStartPathParam;
+import org.apache.hadoop.hdfs.web.resources.SnapshotNameParam;
+import org.apache.hadoop.hdfs.web.resources.StartAfterParam;
+import org.apache.hadoop.hdfs.web.resources.StoragePolicyParam;
+import org.apache.hadoop.hdfs.web.resources.StorageSpaceQuotaParam;
+import org.apache.hadoop.hdfs.web.resources.StorageTypeParam;
+import org.apache.hadoop.hdfs.web.resources.TokenArgumentParam;
+import org.apache.hadoop.hdfs.web.resources.TokenKindParam;
+import org.apache.hadoop.hdfs.web.resources.TokenServiceParam;
+import org.apache.hadoop.hdfs.web.resources.UnmaskedPermissionParam;
+import org.apache.hadoop.hdfs.web.resources.UriFsPathParam;
+import org.apache.hadoop.hdfs.web.resources.UserParam;
+import org.apache.hadoop.hdfs.web.resources.XAttrEncodingParam;
+import org.apache.hadoop.hdfs.web.resources.XAttrNameParam;
+import org.apache.hadoop.hdfs.web.resources.XAttrSetFlagParam;
+import org.apache.hadoop.hdfs.web.resources.XAttrValueParam;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -103,7 +155,6 @@
 import org.apache.hadoop.hdfs.web.ParamFilter;
 import org.apache.hadoop.hdfs.web.WebHdfsConstants;
 import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
-import org.apache.hadoop.hdfs.web.resources.*;
 import org.apache.hadoop.http.JettyUtils;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.ExternalCall;