Skip to content

HADOOP-18631 (ADDENDUM) Use LogCapturer to match audit log pattern and remove hdfs async audit log configs #5451

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 9 commits into from
Mar 17, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -733,43 +733,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final String DFS_NAMENODE_DEFAULT_AUDIT_LOGGER_NAME = "default";
public static final String DFS_NAMENODE_AUDIT_LOG_TOKEN_TRACKING_ID_KEY = "dfs.namenode.audit.log.token.tracking.id";
public static final boolean DFS_NAMENODE_AUDIT_LOG_TOKEN_TRACKING_ID_DEFAULT = false;
/**
* Deprecated. Use log4j properties instead.
* Set system env variable HDFS_AUDIT_LOGGER, which in turn assigns the value to
* "hdfs.audit.logger" for log4j properties to determine log level and appender.
*/
@Deprecated
public static final String DFS_NAMENODE_AUDIT_LOG_ASYNC_KEY = "dfs.namenode.audit.log.async";
@Deprecated
public static final boolean DFS_NAMENODE_AUDIT_LOG_ASYNC_DEFAULT = false;

/**
* Deprecated. Use log4j properties instead.
* Set value to Async appender "blocking" property as part of log4j properties configuration.
* <p>
* For example,
* log4j.appender.ASYNCAPPENDER=org.apache.log4j.AsyncAppender
* log4j.appender.ASYNCAPPENDER.blocking=false
*/
@Deprecated
public static final String DFS_NAMENODE_AUDIT_LOG_ASYNC_BLOCKING_KEY =
"dfs.namenode.audit.log.async.blocking";
@Deprecated
public static final boolean DFS_NAMENODE_AUDIT_LOG_ASYNC_BLOCKING_DEFAULT = true;

/**
* Deprecated. Use log4j properties instead.
* Set value to Async appender "bufferSize" property as part of log4j properties configuration.
* <p>
* For example,
* log4j.appender.ASYNCAPPENDER=org.apache.log4j.AsyncAppender
* log4j.appender.ASYNCAPPENDER.bufferSize=128
*/
@Deprecated
public static final String DFS_NAMENODE_AUDIT_LOG_ASYNC_BUFFER_SIZE_KEY =
"dfs.namenode.audit.log.async.buffer.size";
@Deprecated
public static final int DFS_NAMENODE_AUDIT_LOG_ASYNC_BUFFER_SIZE_DEFAULT = 128;
public static final String DFS_NAMENODE_AUDIT_LOG_DEBUG_CMDLIST = "dfs.namenode.audit.log.debug.cmdlist";
public static final String DFS_NAMENODE_METRICS_LOGGER_PERIOD_SECONDS_KEY =
"dfs.namenode.metrics.logger.period.seconds";
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -48,8 +48,6 @@
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_STANDBY_CHECKPOINTS_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_STANDBY_CHECKPOINTS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_AUDIT_LOGGERS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_ASYNC_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_ASYNC_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_TOKEN_TRACKING_ID_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_TOKEN_TRACKING_ID_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_DEFAULT;
Expand Down Expand Up @@ -1069,11 +1067,11 @@ static FSNamesystem loadFromDisk(Configuration conf) throws IOException {
}
}

@SuppressWarnings("deprecation")
private static void checkForAsyncLogEnabledByOldConfigs(Configuration conf) {
if (conf.getBoolean(DFS_NAMENODE_AUDIT_LOG_ASYNC_KEY, DFS_NAMENODE_AUDIT_LOG_ASYNC_DEFAULT)) {
LOG.warn("Use log4j properties to enable async log for audit logs. {} is deprecated",
DFS_NAMENODE_AUDIT_LOG_ASYNC_KEY);
// dfs.namenode.audit.log.async is no longer in use. Use log4j properties instead.
if (conf.getBoolean("dfs.namenode.audit.log.async", false)) {
LOG.warn("Use log4j properties to enable async log for audit logs. "
+ "dfs.namenode.audit.log.async is no longer in use.");
}
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -5099,35 +5099,6 @@
</description>
</property>

<property>
<name>dfs.namenode.audit.log.async</name>
<value>false</value>
<description>
If true, enables asynchronous audit log.
</description>
</property>

<property>
<name>dfs.namenode.audit.log.async.blocking</name>
<value>true</value>
<description>
Only used when asynchronous audit log is enabled. Sets whether audit log async
appender should wait if there is no space available in the event buffer or
immediately return. Default value is true.
</description>
</property>

<property>
<name>dfs.namenode.audit.log.async.buffer.size</name>
<value>128</value>
<description>
Only used when asynchronous audit log is enabled. Sets the number of audit
logs allowed in the event buffer before the calling thread is blocked
(if dfs.namenode.audit.log.async.blocking is true) or until logs are
summarized and discarded. Default value is 128.
</description>
</property>

<property>
<name>dfs.namenode.audit.log.token.tracking.id</name>
<value>false</value>
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -20,12 +20,7 @@

import static org.junit.Assert.*;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.PrintWriter;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
Expand All @@ -46,12 +41,15 @@
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
import org.apache.log4j.Appender;
import org.apache.log4j.AsyncAppender;
import org.apache.log4j.Logger;

import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
Expand All @@ -66,11 +64,10 @@ public class TestAuditLogs {

private static final org.slf4j.Logger LOG = LoggerFactory.getLogger(TestAuditLogs.class);

private static final File AUDIT_LOG_FILE =
new File(System.getProperty("hadoop.log.dir"), "hdfs-audit.log");

final boolean useAsyncEdits;

private static LogCapturer auditLogCapture;

@Parameters
public static Collection<Object[]> data() {
Collection<Object[]> params = new ArrayList<>();
Expand Down Expand Up @@ -111,9 +108,6 @@ public TestAuditLogs(boolean useAsyncEdits) {

@Before
public void setupCluster() throws Exception {
try (PrintWriter writer = new PrintWriter(AUDIT_LOG_FILE)) {
writer.print("");
}
// must configure prior to instantiating the namesystem because it
// will reconfigure the logger if async is enabled
conf = new HdfsConfiguration();
Expand All @@ -132,21 +126,15 @@ public void setupCluster() throws Exception {
"org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit");
@SuppressWarnings("unchecked")
List<Appender> appenders = Collections.list(logger.getAllAppenders());
assertEquals(1, appenders.size());
assertTrue(appenders.get(0) instanceof AsyncAppender);

fnames = util.getFileNames(fileName);
util.waitReplication(fs, fileName, (short)3);
userGroupInfo = UserGroupInformation.createUserForTesting(username, groups);
LOG.info("Audit log file: {}, exists: {}, length: {}", AUDIT_LOG_FILE, AUDIT_LOG_FILE.exists(),
AUDIT_LOG_FILE.length());
}

@After
public void teardownCluster() throws Exception {
try (PrintWriter writer = new PrintWriter(AUDIT_LOG_FILE)) {
writer.print("");
}
util.cleanup(fs, "/srcdat");
if (fs != null) {
fs.close();
Expand All @@ -158,6 +146,17 @@ public void teardownCluster() throws Exception {
}
}

@BeforeClass
public static void beforeClass() {
// Attach a LogCapturer to the namenode audit logger once for the whole test
// class, replacing the previous approach of reading hdfs-audit.log from disk.
auditLogCapture = LogCapturer.captureLogs(FSNamesystem.AUDIT_LOG);
}

@AfterClass
public static void afterClass() {
// Detach the capturer so the audit logger is restored after all tests run.
auditLogCapture.stopCapturing();
}


/** test that allowed operation puts proper entry in audit log */
@Test
public void testAuditAllowed() throws Exception {
Expand Down Expand Up @@ -273,54 +272,47 @@ public void testAuditCharacterEscape() throws Exception {
verifySuccessCommandsAuditLogs(1, "foo", "cmd=create");
}

private void verifySuccessCommandsAuditLogs(int leastExpected, String file, String cmd)
throws IOException {

try (BufferedReader reader = new BufferedReader(new FileReader(AUDIT_LOG_FILE))) {
String line;
int success = 0;
while ((line = reader.readLine()) != null) {
assertNotNull(line);
LOG.info("Line: {}", line);
if (SUCCESS_PATTERN.matcher(line).matches() && line.contains(file) && line.contains(
cmd)) {
assertTrue("Expected audit event not found in audit log",
AUDIT_PATTERN.matcher(line).matches());
LOG.info("Successful verification. Log line: {}", line);
success++;
}
private void verifySuccessCommandsAuditLogs(int leastExpected, String file, String cmd) {
String[] auditLogOutputLines = auditLogCapture.getOutput().split("\\n");
int success = 0;
for (String auditLogLine : auditLogOutputLines) {
if (!auditLogLine.contains("allowed=")) {
continue;
}
if (success < leastExpected) {
throw new AssertionError(
"Least expected: " + leastExpected + ". Actual success: " + success);
String line = "allowed=" + auditLogLine.split("allowed=")[1];
LOG.info("Line: {}", line);
if (SUCCESS_PATTERN.matcher(line).matches() && line.contains(file) && line.contains(cmd)) {
assertTrue("Expected audit event not found in audit log",
AUDIT_PATTERN.matcher(line).matches());
LOG.info("Successful verification. Log line: {}", line);
success++;
}
}
if (success < leastExpected) {
throw new AssertionError(
"Least expected: " + leastExpected + ". Actual success: " + success);
}
}

private void verifyFailedCommandsAuditLogs(int leastExpected, String file, String cmd)
throws IOException {

try (BufferedReader reader = new BufferedReader(new FileReader(AUDIT_LOG_FILE))) {
String line;
int success = 0;
while ((line = reader.readLine()) != null) {
assertNotNull(line);
LOG.info("Line: {}", line);
if (FAILURE_PATTERN.matcher(line).matches() && line.contains(file) && line.contains(
cmd)) {
assertTrue("Expected audit event not found in audit log",
AUDIT_PATTERN.matcher(line).matches());
LOG.info("Failure verification. Log line: {}", line);
success++;
}
private void verifyFailedCommandsAuditLogs(int expected, String file, String cmd) {
String[] auditLogOutputLines = auditLogCapture.getOutput().split("\\n");
int success = 0;
for (String auditLogLine : auditLogOutputLines) {
if (!auditLogLine.contains("allowed=")) {
continue;
}
assertEquals("Expected: " + leastExpected + ". Actual failure: " + success, leastExpected,
success);
if (success < leastExpected) {
throw new AssertionError(
"Least expected: " + leastExpected + ". Actual success: " + success);
String line = "allowed=" + auditLogLine.split("allowed=")[1];
LOG.info("Line: {}", line);
if (FAILURE_PATTERN.matcher(line).matches() && line.contains(file) && line.contains(
cmd)) {
assertTrue("Expected audit event not found in audit log",
AUDIT_PATTERN.matcher(line).matches());
LOG.info("Failure verification. Log line: {}", line);
success++;
}
}
assertEquals("Expected: " + expected + ". Actual failure: " + success, expected,
success);
}

}
Loading