Configure JMS Server logs on PV and verify #1924

Merged
merged 5 commits into from Sep 17, 2020

@@ -384,8 +384,6 @@ private void rollingRestartDomainAndVerify() {
String newRestartVersion = patchDomainResourceWithNewRestartVersion(domainUid, domainNamespace);
logger.info("New restart version : {0}", newRestartVersion);

-assertTrue(assertDoesNotThrow(
-    () -> (verifyRollingRestartOccurred(pods, 1, domainNamespace)),
-    "More than one pod was restarted at same time"), "Rolling restart failed");
+assertTrue(verifyRollingRestartOccurred(pods, 1, domainNamespace), "Rolling restart failed");
}
}
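
The same call-site simplification recurs throughout this PR. As a minimal sketch of why the old double wrapper was misleading (JUnit 5 semantics; flakyCheck is a hypothetical stand-in for verifyRollingRestartOccurred, not code from this PR): if the wrapped call threw, assertDoesNotThrow failed with the message reserved for the too-many-pods case, so the report blamed the wrong cause.

    import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
    import static org.junit.jupiter.api.Assertions.assertTrue;

    class WrapperSketch {

      // hypothetical stand-in that fails with an infrastructure error
      static boolean flakyCheck() {
        throw new IllegalStateException("Kubernetes API call failed");
      }

      public static void main(String[] args) {
        // Old pattern: the IllegalStateException surfaces as
        // "More than one pod was restarted at same time ==> Unexpected exception thrown",
        // misattributing an API error to a rolling-restart violation.
        assertTrue(assertDoesNotThrow(WrapperSketch::flakyCheck,
            "More than one pod was restarted at same time"),
            "Rolling restart failed");
      }
    }

With the wrapper removed, an exception propagates with its own message, and the assertTrue message is used only when the check actually returns false.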
@@ -287,27 +287,8 @@ public void beforeEach() {
@DisplayName("Check the server logs are written on PV, look for string RUNNING in server log")
public void testServerLogsAreOnPV() {

-// check server logs are written on PV
-String command = "grep RUNNING /shared/logs/" + adminServerName + ".log";
-logger.info("Checking server logs are written on PV by running the command {0} on pod {1}, namespace {2}",
-    command, adminServerPodName, domainNamespace);
-V1Pod adminPod = assertDoesNotThrow(() ->
-    Kubernetes.getPod(domainNamespace, null, adminServerPodName),
-    "Could not get the admin pod in namespace " + domainNamespace);
-
-ExecResult result = assertDoesNotThrow(() -> Kubernetes.exec(adminPod, null, true,
-    "/bin/sh", "-c", command),
-    String.format("Could not execute the command %s in pod %s, namespace %s",
-    command, adminServerPodName, domainNamespace));
-logger.info("Command {0} returned with exit value {1}, stderr {2}, stdout {3}",
-    command, result.exitValue(), result.stderr(), result.stdout());
-
-// checking for exitValue 0 for success fails sometimes as k8s exec api returns non-zero exit value even on success,
-// so checking for exitValue non-zero and stderr not empty for failure, otherwise its success
-assertFalse(result.exitValue() != 0 && result.stderr() != null && !result.stderr().isEmpty(),
-    String.format("Command %s failed with exit value %s, stderr %s, stdout %s",
-    command, result.exitValue(), result.stderr(), result.stdout()));
-
+// check server logs are written on PV and look for string RUNNING in the log
+checkLogsOnPV("grep RUNNING /shared/logs/" + adminServerName + ".log", adminServerPodName);
}

/**
@@ -391,10 +372,8 @@ public void testMiiDeleteSystemResources() {
String newRestartVersion = patchDomainResourceWithNewRestartVersion(domainUid, domainNamespace);
logger.log(Level.INFO, "New restart version is {0}", newRestartVersion);

-assertTrue(assertDoesNotThrow(
-    () -> (verifyRollingRestartOccurred(pods, 1, domainNamespace)),
-    "More than one pod was restarted at same time"),
-    "Rolling restart failed");
+assertTrue(verifyRollingRestartOccurred(pods, 1, domainNamespace),
+    "Rolling restart failed");

// Even if pods are created, need the service to be created
for (int i = 1; i <= replicaCount; i++) {
@@ -411,12 +390,13 @@ public void testMiiDeleteSystemResources() {

/**
 * Start a WebLogic domain using model-in-image.
- * Create a configmap with sparse JDBC/JMS model files.
+ * Create a configmap with sparse JDBC/JMS model files that use the LOG_HOME environment variable (which points to a directory on the PV) for the JMS Server log file.
 * Patch the domain resource with the configmap.
 * Update the restart version of the domain resource.
 * Verify rolling restart of the domain by comparing PodCreationTimestamp
 * for all the server pods before and after rolling restart.
 * Verify SystemResource configurations using Rest API call to admin server.
+ * Verify JMS Server logs are written on the PV.
 */
@Test
@Order(4)
@@ -455,10 +435,8 @@ public void testMiiAddSystemResources() {
String newRestartVersion = patchDomainResourceWithNewRestartVersion(domainUid, domainNamespace);
logger.log(Level.INFO, "New restart version is {0}", newRestartVersion);

-assertTrue(assertDoesNotThrow(
-    () -> (verifyRollingRestartOccurred(pods, 1, domainNamespace)),
-    "More than one pod was restarted at same time"),
-    "Rolling restart failed");
+assertTrue(verifyRollingRestartOccurred(pods, 1, domainNamespace),
+    "Rolling restart failed");

// Even if pods are created, need the service to be created
for (int i = 1; i <= replicaCount; i++) {
@@ -474,6 +452,9 @@ public void testMiiAddSystemResources() {
assertTrue(checkSystemResourceConfiguration("JMSSystemResources",
"TestClusterJmsModule2", "200"), "JMSSystemResources not found");
logger.info("Found the JMSSystemResource configuration");

+// check JMS logs are written on PV
+checkLogsOnPV("ls -ltr /shared/logs/*jms_messages.log", managedServerPrefix + "1");
}

/**
@@ -521,9 +502,7 @@ public void testMiiAddDynmicClusteriWithNoReplica() {
String newRestartVersion = patchDomainResourceWithNewRestartVersion(domainUid, domainNamespace);
logger.log(Level.INFO, "New restart version : {0}", newRestartVersion);

-assertTrue(assertDoesNotThrow(
-    () -> (verifyRollingRestartOccurred(pods, 1, domainNamespace)),
-    "More than one pod was restarted at same time"),
+assertTrue(verifyRollingRestartOccurred(pods, 1, domainNamespace),
"Rolling restart failed");

// The ServerNamePrefix for the new configured cluster is config-server
@@ -599,9 +578,7 @@ public void testMiiAddDynamicCluster() {
// Check if the admin server pod has been restarted
// by comparing the PodCreationTime before and after rolling restart

-assertTrue(assertDoesNotThrow(
-    () -> (verifyRollingRestartOccurred(pods, 1, domainNamespace)),
-    "More than one pod was restarted at same time"),
+assertTrue(verifyRollingRestartOccurred(pods, 1, domainNamespace),
"Rolling restart failed");

// The ServerNamePrefix for the new dynamic cluster is dynamic-server
@@ -676,9 +653,7 @@ public void testMiiAddConfiguredCluster() {
String newRestartVersion = patchDomainResourceWithNewRestartVersion(domainUid, domainNamespace);
logger.log(Level.INFO, "New restart version : {0}", newRestartVersion);

-assertTrue(assertDoesNotThrow(
-    () -> (verifyRollingRestartOccurred(pods, 1, domainNamespace)),
-    "More than one pod was restarted at same time"),
+assertTrue(verifyRollingRestartOccurred(pods, 1, domainNamespace),
"Rolling restart failed");

// The ServerNamePrefix for the new configured cluster is config-server
@@ -744,9 +719,7 @@ public void testMiiUpdateWebLogicCredential() {
logger.info("Wait for domain {0} admin server pod {1} in namespace {2} to be restarted",
domainUid, adminServerPodName, domainNamespace);

-assertTrue(assertDoesNotThrow(
-    () -> (verifyRollingRestartOccurred(pods, 1, domainNamespace)),
-    "More than one pod was restarted at same time"),
+assertTrue(verifyRollingRestartOccurred(pods, 1, domainNamespace),
"Rolling restart failed");

// check if the new credentials are valid and the old credentials are not valid any more
@@ -1030,4 +1003,27 @@ private static void createJobToChangePermissionsOnPvHostPath(String pvName, Stri
}
}

+private void checkLogsOnPV(String commandToExecuteInsidePod, String podName) {
+  logger.info("Checking logs are written on PV by running the command {0} on pod {1}, namespace {2}",
+      commandToExecuteInsidePod, podName, domainNamespace);
+  V1Pod serverPod = assertDoesNotThrow(() ->
+      Kubernetes.getPod(domainNamespace, null, podName),
+      String.format("Could not get the server pod %s in namespace %s",
+      podName, domainNamespace));
+
+  ExecResult result = assertDoesNotThrow(() -> Kubernetes.exec(serverPod, null, true,
+      "/bin/sh", "-c", commandToExecuteInsidePod),
+      String.format("Could not execute the command %s in pod %s, namespace %s",
+      commandToExecuteInsidePod, podName, domainNamespace));
+  logger.info("Command {0} returned with exit value {1}, stderr {2}, stdout {3}",
+      commandToExecuteInsidePod, result.exitValue(), result.stderr(), result.stdout());
+
+  // checking for exitValue 0 for success sometimes fails because the k8s exec API can return
+  // a non-zero exit value even on success, so treat the command as failed only when the exit
+  // value is non-zero AND stderr is non-empty; otherwise treat it as a success
+  assertFalse(result.exitValue() != 0 && result.stderr() != null && !result.stderr().isEmpty(),
+      String.format("Command %s failed with exit value %s, stderr %s, stdout %s",
+      commandToExecuteInsidePod, result.exitValue(), result.stderr(), result.stdout()));
+}
+
}
@@ -300,9 +300,7 @@ public void testOperatorRestartWhenPodRoll() {
logger.info("Wait for domain {0} server pods in namespace {1} to be restarted",
domainUid, domainNamespace);

-assertTrue(assertDoesNotThrow(
-    () -> (verifyRollingRestartOccurred(pods, 1, domainNamespace)),
-    "More than one pod was restarted at same time"),
+assertTrue(verifyRollingRestartOccurred(pods, 1, domainNamespace),
"Rolling restart failed");

for (int i = 1; i <= replicaCount; i++) {
@@ -97,7 +97,7 @@ public static void initAll(@Namespaces(2) List<String> namespaces) {
// get a unique operator namespace
logger.info("Getting a unique namespace for operator");
assertNotNull(namespaces.get(0), "Namespace list is null");
-String opNamespace = namespaces.get(0);
+final String opNamespace = namespaces.get(0);

// get a unique domain namespace
logger.info("Getting a unique namespace for WebLogic domain");
@@ -215,9 +215,7 @@ public void testServerPodsRestartByChangingResource() {
// verify the server pods are rolling restarted and back to ready state
logger.info("Verifying rolling restart occurred for domain {0} in namespace {1}",
domainUid, domainNamespace);
-assertTrue(assertDoesNotThrow(
-    () -> verifyRollingRestartOccurred(podsWithTimeStamps, 1, domainNamespace),
-    "More than one pod was restarted at same time"),
+assertTrue(verifyRollingRestartOccurred(podsWithTimeStamps, 1, domainNamespace),
String.format("Rolling restart failed for domain %s in namespace %s", domainUid, domainNamespace));

}
@@ -272,9 +270,7 @@ public void testServerPodsRestartByChangingIncludeServerOutInPodLog() {
// verify the server pods are rolling restarted and back to ready state
logger.info("Verifying rolling restart occurred for domain {0} in namespace {1}",
domainUid, domainNamespace);
-assertTrue(assertDoesNotThrow(
-    () -> verifyRollingRestartOccurred(podsWithTimeStamps, 1, domainNamespace),
-    "More than one pod was restarted at same time"),
+assertTrue(verifyRollingRestartOccurred(podsWithTimeStamps, 1, domainNamespace),
String.format("Rolling restart failed for domain %s in namespace %s", domainUid, domainNamespace));

}
@@ -340,9 +336,7 @@ public void testServerPodsRestartByChangingEnvProperty() {
// verify the server pods are rolling restarted and back to ready state
logger.info("Verifying rolling restart occurred for domain {0} in namespace {1}",
domainUid, domainNamespace);
-assertTrue(assertDoesNotThrow(
-    () -> verifyRollingRestartOccurred(podsWithTimeStamps, 1, domainNamespace),
-    "More than one pod was restarted at same time"),
+assertTrue(verifyRollingRestartOccurred(podsWithTimeStamps, 1, domainNamespace),
String.format("Rolling restart failed for domain %s in namespace %s", domainUid, domainNamespace));

}
@@ -407,9 +401,7 @@ public void testServerPodsRestartByChaningPodSecurityContext() {
// verify the server pods are rolling restarted and back to ready state
logger.info("Verifying rolling restart occurred for domain {0} in namespace {1}",
domainUid, domainNamespace);
-assertTrue(assertDoesNotThrow(
-    () -> verifyRollingRestartOccurred(podsWithTimeStamps, 1, domainNamespace),
-    "More than one pod was restarted at same time"),
+assertTrue(verifyRollingRestartOccurred(podsWithTimeStamps, 1, domainNamespace),
String.format("Rolling restart failed for domain %s in namespace %s", domainUid, domainNamespace));

}
@@ -465,9 +457,7 @@ public void testServerPodsRestartByChangingImagePullPolicy() {
// verify the server pods are rolling restarted and back to ready state
logger.info("Verifying rolling restart occurred for domain {0} in namespace {1}",
domainUid, domainNamespace);
-assertTrue(assertDoesNotThrow(
-    () -> verifyRollingRestartOccurred(podsWithTimeStamps, 1, domainNamespace),
-    "More than one pod was restarted at same time"),
+assertTrue(verifyRollingRestartOccurred(podsWithTimeStamps, 1, domainNamespace),
String.format("Rolling restart failed for domain %s in namespace %s", domainUid, domainNamespace));

}
@@ -15,7 +15,6 @@
import static java.util.concurrent.TimeUnit.SECONDS;
import static oracle.weblogic.kubernetes.utils.ThreadSafeLogger.getLogger;
import static org.awaitility.Awaitility.with;
-import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;

public class Pod {

@@ -48,8 +47,7 @@ public static boolean verifyRollingRestartOccurred(Map<String, DateTime> pods, i
namespace,
condition.getElapsedTimeInMS(),
condition.getRemainingTimeInMS()))
-.until(assertDoesNotThrow(() -> podRestarted(entry.getKey(), pods, maxUnavailable, namespace),
-    String.format("pod %s didn't restart in namespace %s", entry.getKey(), namespace)));
+.until(podRestarted(entry.getKey(), pods, maxUnavailable, namespace));

// check pods are in ready status
retry
@@ -60,8 +58,7 @@ public static boolean verifyRollingRestartOccurred(Map<String, DateTime> pods, i
namespace,
condition.getElapsedTimeInMS(),
condition.getRemainingTimeInMS()))
-.until(assertDoesNotThrow(() -> podReady(namespace, null, entry.getKey()),
-    String.format("pod %s didn't become ready in namespace %s", entry.getKey(), namespace)));
+.until(podReady(namespace, null, entry.getKey()));
}

return true;
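A note on the two changes above: Awaitility's until(Callable) re-invokes the callable on every poll, so the condition itself is already retried; assuming podRestarted and podReady return a Callable&lt;Boolean&gt; (as the until(...) usage implies), the removed assertDoesNotThrow guarded only the one-time construction of that callable, not its evaluation. A minimal sketch of the pattern (the isPodReady probe is hypothetical, not code from this PR):

    import static java.util.concurrent.TimeUnit.MINUTES;
    import static java.util.concurrent.TimeUnit.SECONDS;
    import static org.awaitility.Awaitility.with;

    import java.util.concurrent.Callable;

    class AwaitSketch {

      // hypothetical probe; the returned Callable is re-evaluated at every poll
      static Callable<Boolean> isPodReady(String podName) {
        return () -> queryPodReadyViaApi(podName);
      }

      static boolean queryPodReadyViaApi(String podName) {
        return true; // stand-in for a Kubernetes API call
      }

      public static void main(String[] args) {
        with().pollInterval(10, SECONDS)
            .atMost(5, MINUTES)
            .await()
            .until(isPodReady("managed-server1"));
      }
    }

If the callable throws during a poll, Awaitility propagates the exception by default (unless ignoreExceptions() is configured), so failures still surface without the extra assertion layer.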
@@ -6,12 +6,14 @@ resources:
       Target: 'cluster-1'
   JMSServer:
     TestClusterJmsServer2:
-      ProductionPausedAtStartup: false
-      ConsumptionPausedAtStartup: false
-      Target: 'cluster-1'
-      PersistentStore: 'TestClusterFileStore2'
-      InsertionPausedAtStartup: false
-      MessageCompressionOptions: GZIP_DEFAULT_COMPRESSION
+      JmsMessageLogFile:
+        FileName: "@@ENV:LOG_HOME@@/jms_messages.log"
+      ProductionPausedAtStartup: false
+      ConsumptionPausedAtStartup: false
+      Target: 'cluster-1'
+      PersistentStore: 'TestClusterFileStore2'
+      InsertionPausedAtStartup: false
+      MessageCompressionOptions: GZIP_DEFAULT_COMPRESSION

   JMSSystemResource:
     TestClusterJmsModule2:
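
A closing note on the model change above: WebLogic Deploy Tooling resolves the @@ENV:LOG_HOME@@ token from an environment variable visible to the server process, which is why the JMS message log lands on the PV that testMiiAddSystemResources later checks with "ls -ltr /shared/logs/*jms_messages.log". A sketch of the corresponding env var (illustrative only; this diff does not show the domain resource, and the /shared/logs value is inferred from the test commands), using the Kubernetes Java client models already imported by these tests:

    import io.kubernetes.client.openapi.models.V1EnvVar;

    class LogHomeEnvSketch {

      // the env var that @@ENV:LOG_HOME@@ in the model file resolves against
      static V1EnvVar logHomeEnv() {
        return new V1EnvVar()
            .name("LOG_HOME")
            .value("/shared/logs"); // assumed PV mount path, per the test commands
      }

      public static void main(String[] args) {
        System.out.println(logHomeEnv());
      }
    }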