@@ -287,27 +287,8 @@ public void beforeEach() {
  @DisplayName("Check the server logs are written on PV, look for string RUNNING in server log")
  public void testServerLogsAreOnPV() {

-    // check server logs are written on PV
-    String command = "grep RUNNING /shared/logs/" + adminServerName + ".log";
-    logger.info("Checking server logs are written on PV by running the command {0} on pod {1}, namespace {2}",
-        command, adminServerPodName, domainNamespace);
-    V1Pod adminPod = assertDoesNotThrow(() ->
-        Kubernetes.getPod(domainNamespace, null, adminServerPodName),
-        "Could not get the admin pod in namespace " + domainNamespace);
-
-    ExecResult result = assertDoesNotThrow(() -> Kubernetes.exec(adminPod, null, true,
-        "/bin/sh", "-c", command),
-        String.format("Could not execute the command %s in pod %s, namespace %s",
-            command, adminServerPodName, domainNamespace));
-    logger.info("Command {0} returned with exit value {1}, stderr {2}, stdout {3}",
-        command, result.exitValue(), result.stderr(), result.stdout());
-
-    // checking for exitValue 0 for success fails sometimes as k8s exec api returns non-zero exit value even on success,
-    // so checking for exitValue non-zero and stderr not empty for failure, otherwise its success
-    assertFalse(result.exitValue() != 0 && result.stderr() != null && !result.stderr().isEmpty(),
-        String.format("Command %s failed with exit value %s, stderr %s, stdout %s",
-            command, result.exitValue(), result.stderr(), result.stdout()));
-
+    // check server logs are written on PV and look for string RUNNING in log
+    checkLogsOnPV("grep RUNNING /shared/logs/" + adminServerName + ".log", adminServerPodName);
  }

  /**
@@ -391,10 +372,8 @@ public void testMiiDeleteSystemResources() {
    String newRestartVersion = patchDomainResourceWithNewRestartVersion(domainUid, domainNamespace);
    logger.log(Level.INFO, "New restart version is {0}", newRestartVersion);

-    assertTrue(assertDoesNotThrow(
-        () -> (verifyRollingRestartOccurred(pods, 1, domainNamespace)),
-        "More than one pod was restarted at same time"),
-        "Rolling restart failed");
+    assertTrue(verifyRollingRestartOccurred(pods, 1, domainNamespace),
+        "Rolling restart failed");

    // Even if pods are created, need the service to be created
    for (int i = 1; i <= replicaCount; i++) {
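Review note: the same simplification recurs in every rolling-restart check in this diff. Since `verifyRollingRestartOccurred` returns a plain boolean, wrapping it in `assertDoesNotThrow` added nothing; a single `assertTrue` carries the failure message on its own. A minimal self-contained sketch of the resulting shape — the stub below stands in for the real helper, and its exact signature is an assumption:

```java
import static org.junit.jupiter.api.Assertions.assertTrue;

import java.util.Map;
import org.junit.jupiter.api.Test;

class RollingRestartAssertionSketch {

  // Stub standing in for the real helper, which compares pod creation timestamps
  // before and after the restart; the signature here is illustrative only.
  static boolean verifyRollingRestartOccurred(Map<String, ?> pods, int maxUnavailable, String ns) {
    return true;
  }

  @Test
  void rollingRestartNeedsOnlyOneAssertion() {
    Map<String, ?> pods = Map.of();
    // assertTrue alone reports the failure; no assertDoesNotThrow wrapper is needed
    // because the check returns a boolean and declares no checked exception.
    assertTrue(verifyRollingRestartOccurred(pods, 1, "mii-ns"),
        "Rolling restart failed");
  }
}
```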
@@ -411,12 +390,13 @@ public void testMiiDeleteSystemResources() {

  /**
   * Start a WebLogic domain using model-in-image.
-   * Create a configmap with sparse JDBC/JMS model files.
+   * Create a configmap with sparse JDBC/JMS model files that use the LOG_HOME environment variable (which points to the PV) for the JMS server log file.
   * Patch the domain resource with the configmap.
   * Update the restart version of the domain resource.
   * Verify rolling restart of the domain by comparing PodCreationTimestamp
   * for all the server pods before and after rolling restart.
   * Verify SystemResource configurations using Rest API call to admin server.
+   * Verify JMS Server logs are written on PV.
   */
  @Test
  @Order(4)
@@ -455,10 +435,8 @@ public void testMiiAddSystemResources() {
    String newRestartVersion = patchDomainResourceWithNewRestartVersion(domainUid, domainNamespace);
    logger.log(Level.INFO, "New restart version is {0}", newRestartVersion);

-    assertTrue(assertDoesNotThrow(
-        () -> (verifyRollingRestartOccurred(pods, 1, domainNamespace)),
-        "More than one pod was restarted at same time"),
-        "Rolling restart failed");
+    assertTrue(verifyRollingRestartOccurred(pods, 1, domainNamespace),
+        "Rolling restart failed");

    // Even if pods are created, need the service to be created
    for (int i = 1; i <= replicaCount; i++) {
@@ -474,6 +452,9 @@ public void testMiiAddSystemResources() {
    assertTrue(checkSystemResourceConfiguration("JMSSystemResources",
        "TestClusterJmsModule2", "200"), "JMSSystemResources not found");
    logger.info("Found the JMSSystemResource configuration");
+
+    // check JMS logs are written on PV
+    checkLogsOnPV("ls -ltr /shared/logs/*jms_messages.log", managedServerPrefix + "1");
  }

  /**
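Review note: with the shared helper in place, the two on-PV log checks (the admin server log in `testServerLogsAreOnPV` and the JMS messages log above) differ only in the shell command and the target pod. The PR keeps them as separate tests, but as a design alternative they could be folded into one JUnit 5 parameterized test; the pod names below are illustrative placeholders, not values taken from this change:

```java
import java.util.stream.Stream;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;

class LogsOnPvParameterizedSketch {

  // Each argument pair: a shell command to run inside the pod, and the pod to run it in.
  static Stream<Arguments> logChecks() {
    return Stream.of(
        Arguments.arguments("grep RUNNING /shared/logs/admin-server.log", "domain1-admin-server"),
        Arguments.arguments("ls -ltr /shared/logs/*jms_messages.log", "domain1-managed-server1"));
  }

  @ParameterizedTest
  @MethodSource("logChecks")
  void logsAreWrittenOnPv(String command, String podName) {
    checkLogsOnPV(command, podName); // delegates to the helper added in this diff
  }

  // Stub so the sketch compiles standalone; the real helper execs into the pod.
  private void checkLogsOnPV(String command, String podName) {
  }
}
```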
@@ -521,9 +502,7 @@ public void testMiiAddDynmicClusteriWithNoReplica() {
    String newRestartVersion = patchDomainResourceWithNewRestartVersion(domainUid, domainNamespace);
    logger.log(Level.INFO, "New restart version : {0}", newRestartVersion);

-    assertTrue(assertDoesNotThrow(
-        () -> (verifyRollingRestartOccurred(pods, 1, domainNamespace)),
-        "More than one pod was restarted at same time"),
+    assertTrue(verifyRollingRestartOccurred(pods, 1, domainNamespace),
        "Rolling restart failed");

    // The ServerNamePrefix for the new configured cluster is config-server
@@ -599,9 +578,7 @@ public void testMiiAddDynamicCluster() {
    // Check if the admin server pod has been restarted
    // by comparing the PodCreationTime before and after rolling restart

-    assertTrue(assertDoesNotThrow(
-        () -> (verifyRollingRestartOccurred(pods, 1, domainNamespace)),
-        "More than one pod was restarted at same time"),
+    assertTrue(verifyRollingRestartOccurred(pods, 1, domainNamespace),
        "Rolling restart failed");

    // The ServerNamePrefix for the new dynamic cluster is dynamic-server
@@ -676,9 +653,7 @@ public void testMiiAddConfiguredCluster() {
    String newRestartVersion = patchDomainResourceWithNewRestartVersion(domainUid, domainNamespace);
    logger.log(Level.INFO, "New restart version : {0}", newRestartVersion);

-    assertTrue(assertDoesNotThrow(
-        () -> (verifyRollingRestartOccurred(pods, 1, domainNamespace)),
-        "More than one pod was restarted at same time"),
+    assertTrue(verifyRollingRestartOccurred(pods, 1, domainNamespace),
        "Rolling restart failed");

    // The ServerNamePrefix for the new configured cluster is config-server
@@ -744,9 +719,7 @@ public void testMiiUpdateWebLogicCredential() {
    logger.info("Wait for domain {0} admin server pod {1} in namespace {2} to be restarted",
        domainUid, adminServerPodName, domainNamespace);

-    assertTrue(assertDoesNotThrow(
-        () -> (verifyRollingRestartOccurred(pods, 1, domainNamespace)),
-        "More than one pod was restarted at same time"),
+    assertTrue(verifyRollingRestartOccurred(pods, 1, domainNamespace),
        "Rolling restart failed");

    // check if the new credentials are valid and the old credentials are not valid any more
@@ -1030,4 +1003,27 @@ private static void createJobToChangePermissionsOnPvHostPath(String pvName, Stri
    }
  }

+  private void checkLogsOnPV(String commandToExecuteInsidePod, String podName) {
+    logger.info("Checking logs are written on PV by running the command {0} on pod {1}, namespace {2}",
+        commandToExecuteInsidePod, podName, domainNamespace);
+    V1Pod serverPod = assertDoesNotThrow(() ->
+        Kubernetes.getPod(domainNamespace, null, podName),
+        String.format("Could not get the server pod %s in namespace %s",
+            podName, domainNamespace));
+
+    ExecResult result = assertDoesNotThrow(() -> Kubernetes.exec(serverPod, null, true,
+        "/bin/sh", "-c", commandToExecuteInsidePod),
+        String.format("Could not execute the command %s in pod %s, namespace %s",
+            commandToExecuteInsidePod, podName, domainNamespace));
+    logger.info("Command {0} returned with exit value {1}, stderr {2}, stdout {3}",
+        commandToExecuteInsidePod, result.exitValue(), result.stderr(), result.stdout());
+
+    // checking for exit value 0 to detect success sometimes fails, as the k8s exec api can return
+    // a non-zero exit value even on success; treat as failure only when the exit value is non-zero and stderr is not empty
+    assertFalse(result.exitValue() != 0 && result.stderr() != null && !result.stderr().isEmpty(),
+        String.format("Command %s failed with exit value %s, stderr %s, stdout %s",
+            commandToExecuteInsidePod, result.exitValue(), result.stderr(), result.stdout()));
+
+  }
+
}
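Review note: the success heuristic in `checkLogsOnPV` is worth spelling out, since it inverts the usual "exit code 0 means success" convention. Because the k8s exec API can report a non-zero exit value even when the command succeeded, a run is treated as failed only when the exit value is non-zero and stderr is non-empty. A tiny standalone illustration of that predicate (the sample stderr message is hypothetical):

```java
public class ExecSuccessHeuristicSketch {

  // Mirrors the predicate inside the assertFalse above: a command counts as failed
  // only when BOTH the exit value is non-zero AND stderr carries output.
  static boolean failed(int exitValue, String stderr) {
    return exitValue != 0 && stderr != null && !stderr.isEmpty();
  }

  public static void main(String[] args) {
    System.out.println(failed(0, ""));   // false: clean success
    System.out.println(failed(137,encode "")); // false: spurious non-zero exit, quiet stderr -> treated as success
    System.out.println(failed(2, "grep: /shared/logs/admin-server.log: No such file or directory")); // true
  }
}
```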