Commit 16ca8b7

amahussein authored and jbrennan333 committed
HDFS-15717. Improve fsck logging. (#2529) Contributed by Kihwal Lee and Ahmed Hussein
(cherry picked from commit be35fa1)
1 parent 71bda1a commit 16ca8b7

File tree: 4 files changed (+41 / -25 lines)

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

Lines changed: 10 additions & 4 deletions
@@ -6251,13 +6251,19 @@ boolean isExternalInvocation() {
   private static UserGroupInformation getRemoteUser() throws IOException {
     return NameNode.getRemoteUser();
   }
-
+
   /**
-   * Log fsck event in the audit log
+   * Log fsck event in the audit log.
+   *
+   * @param succeeded Whether authorization succeeded.
+   * @param src Path of affected source file.
+   * @param remoteAddress Remote address of the request.
+   * @throws IOException if {@link #getRemoteUser()} fails.
    */
-  void logFsckEvent(String src, InetAddress remoteAddress) throws IOException {
+  void logFsckEvent(boolean succeeded, String src, InetAddress remoteAddress)
+      throws IOException {
     if (isAuditEnabled()) {
-      logAuditEvent(true, getRemoteUser(),
+      logAuditEvent(succeeded, getRemoteUser(),
                     remoteAddress,
                     "fsck", src, null, null);
     }
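
Net of this hunk, the audit helper takes the authorization outcome from its caller instead of hard-coding true. For reference, the method after the patch reads as follows (reconstructed from the diff above, with the full Javadoc trimmed):

  void logFsckEvent(boolean succeeded, String src, InetAddress remoteAddress)
      throws IOException {
    if (isAuditEnabled()) {
      // Audit the fsck request, recording whether authorization succeeded.
      logAuditEvent(succeeded, getRemoteUser(),
                    remoteAddress,
                    "fsck", src, null, null);
    }
  }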

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java

Lines changed: 18 additions & 14 deletions
@@ -55,21 +55,25 @@ public void doGet(HttpServletRequest request, HttpServletResponse response

     final UserGroupInformation ugi = getUGI(request, conf);
     try {
-      ugi.doAs(new PrivilegedExceptionAction<Object>() {
-        @Override
-        public Object run() throws Exception {
-          NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context);
-
-          final FSNamesystem namesystem = nn.getNamesystem();
-          final BlockManager bm = namesystem.getBlockManager();
-          final int totalDatanodes =
-              namesystem.getNumberOfDatanodes(DatanodeReportType.LIVE);
-          new NamenodeFsck(conf, nn,
-              bm.getDatanodeManager().getNetworkTopology(), pmap, out,
-              totalDatanodes, remoteAddress).fsck();
-
-          return null;
+      ugi.doAs((PrivilegedExceptionAction<Object>) () -> {
+        NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context);
+
+        final FSNamesystem namesystem = nn.getNamesystem();
+        final BlockManager bm = namesystem.getBlockManager();
+        final int totalDatanodes =
+            namesystem.getNumberOfDatanodes(DatanodeReportType.LIVE);
+        NamenodeFsck fsck = new NamenodeFsck(conf, nn,
+            bm.getDatanodeManager().getNetworkTopology(), pmap, out,
+            totalDatanodes, remoteAddress);
+        String auditSource = fsck.getAuditSource();
+        boolean success = false;
+        try {
+          fsck.fsck();
+          success = true;
+        } finally {
+          namesystem.logFsckEvent(success, auditSource, remoteAddress);
         }
+        return null;
       });
     } catch (InterruptedException e) {
       response.sendError(400, e.getMessage());
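
The servlet now records the outcome instead of assuming success: it fetches the audit source up front, runs fsck() inside try/finally, and logs with a success flag that stays false if fsck() throws (for example, the AccessControlException from the new superuser check). A minimal stand-alone sketch of that record-in-finally pattern is below; AuditSink and runAudited are illustrative names, not Hadoop APIs.

public final class AuditedRun {

  /** Illustrative audit sink; Hadoop's audit logger has a different API. */
  interface AuditSink {
    void logEvent(boolean succeeded, String source);
  }

  /**
   * Runs an operation and always emits exactly one audit record for it,
   * marked successful only if the operation returned normally.
   */
  static void runAudited(Runnable operation, String source, AuditSink audit) {
    boolean success = false;
    try {
      operation.run();
      success = true;
    } finally {
      audit.logEvent(success, source);
    }
  }

  public static void main(String[] args) {
    AuditSink sink = (ok, src) ->
        System.out.println("cmd=fsck src=" + src + " success=" + ok);
    runAudited(() -> { /* stand-in for the fsck call */ }, "/", sink);
  }
}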

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java

Lines changed: 12 additions & 7 deletions
@@ -155,6 +155,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
   private boolean showMaintenanceState = false;
   private long staleInterval;
   private Tracer tracer;
+  private String auditSource;

   /**
    * True if we encountered an internal error during FSCK, such as not being
@@ -186,7 +187,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {

   String path = "/";

-  private String blockIds = null;
+  private String[] blockIds = null;

   // We return back N files that are corrupt; the list of files returned is
   // ordered by block id; to allow continuation support, pass in the last block
@@ -262,11 +263,17 @@ else if (key.equals("replicadetails")) {
       } else if (key.equals("includeSnapshots")) {
         this.snapshottableDirs = new ArrayList<String>();
       } else if (key.equals("blockId")) {
-        this.blockIds = pmap.get("blockId")[0];
+        this.blockIds = pmap.get("blockId")[0].split(" ");
       } else if (key.equals("replicate")) {
         this.doReplicate = true;
       }
     }
+    this.auditSource = (blockIds != null)
+        ? "blocksIds=" + Arrays.asList(blockIds) : path;
+  }
+
+  public String getAuditSource() {
+    return auditSource;
   }

   /**
@@ -368,18 +375,18 @@ private void printDatanodeReplicaStatus(Block block,
   /**
    * Check files on DFS, starting from the indicated path.
    */
-  public void fsck() {
+  public void fsck() throws AccessControlException {
     final long startTime = Time.monotonicNow();
     try {
       if(blockIds != null) {
-        String[] blocks = blockIds.split(" ");
+        namenode.getNamesystem().checkSuperuserPrivilege();
         StringBuilder sb = new StringBuilder();
         sb.append("FSCK started by " +
             UserGroupInformation.getCurrentUser() + " from " +
             remoteAddress + " at " + new Date());
         out.println(sb);
         sb.append(" for blockIds: \n");
-        for (String blk: blocks) {
+        for (String blk: blockIds) {
           if(blk == null || !blk.contains(Block.BLOCK_FILE_PREFIX)) {
             out.println("Incorrect blockId format: " + blk);
             continue;
@@ -389,7 +396,6 @@ public void fsck() {
           sb.append(blk + "\n");
         }
         LOG.info("{}", sb.toString());
-        namenode.getNamesystem().logFsckEvent("/", remoteAddress);
         out.flush();
         return;
       }
@@ -398,7 +404,6 @@ public void fsck() {
         + " from " + remoteAddress + " for path " + path + " at " + new Date();
     LOG.info(msg);
     out.println(msg);
-    namenode.getNamesystem().logFsckEvent(path, remoteAddress);

     if (snapshottableDirs != null) {
       SnapshottableDirectoryStatus[] snapshotDirs =
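
With blockIds now stored as a String[] (split on spaces when the blockId parameter is parsed), the audit source is computed once in the constructor: a block-ID request is audited as a "blocksIds=[...]" string and everything else as the requested path. A small stand-alone illustration of that derivation follows; AuditSourceDemo is an illustrative class, not part of Hadoop.

import java.util.Arrays;

public class AuditSourceDemo {

  // Mirrors the ternary added to the NamenodeFsck constructor.
  static String auditSource(String[] blockIds, String path) {
    return (blockIds != null)
        ? "blocksIds=" + Arrays.asList(blockIds) : path;
  }

  public static void main(String[] args) {
    // A request such as "blockId=blk_1 blk_2" is split on spaces.
    String[] blockIds = "blk_1 blk_2".split(" ");
    System.out.println(auditSource(blockIds, "/"));      // blocksIds=[blk_1, blk_2]
    System.out.println(auditSource(null, "/user/data")); // /user/data
  }
}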

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java

Lines changed: 1 addition & 0 deletions
@@ -254,6 +254,7 @@ private void setupAuditLogs() throws IOException {
       file.delete();
     }
     Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger();
+    logger.removeAllAppenders();
     logger.setLevel(Level.INFO);
     PatternLayout layout = new PatternLayout("%m%n");
     RollingFileAppender appender =
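
The single added line guards the audit-log fixture against appender buildup: clearing any appenders already attached to the shared audit logger keeps earlier configuration, or a repeated call to setupAuditLogs(), from producing duplicate audit lines during verification. A minimal log4j 1.x sketch of that reset-then-attach sequence, with illustrative logger and file names:

import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.apache.log4j.PatternLayout;
import org.apache.log4j.RollingFileAppender;

public class AuditLogSetup {

  // Resets a logger so the test writes to exactly one fresh appender.
  static void setupAuditLog(Logger logger, String logFile) throws Exception {
    logger.removeAllAppenders();      // drop appenders left by earlier setup
    logger.setLevel(Level.INFO);
    PatternLayout layout = new PatternLayout("%m%n");
    RollingFileAppender appender = new RollingFileAppender(layout, logFile);
    logger.addAppender(appender);
  }

  public static void main(String[] args) throws Exception {
    Logger audit = Logger.getLogger("hdfs.audit.demo"); // illustrative logger name
    setupAuditLog(audit, "build/test-audit.log");       // illustrative path
    audit.info("cmd=fsck src=/ success=true");
  }
}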
