test: add realtime logs to track test execution progress
Signed-off-by: Yang Chiu <yang.chiu@suse.com>
yangchiu authored and David Ko committed Sep 22, 2023
1 parent 318f1dd commit 03ab273
Showing 19 changed files with 156 additions and 187 deletions.
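The changes below swap the standard Python logging module (and bare print calls) in the e2e keyword and library layers for a logging helper imported from utility.utility, so progress messages are emitted while a test case is still running. The helper itself is not part of this diff; the sketch below is only an illustration of what such a realtime logger could look like, assuming it just needs to timestamp a message and push it straight to the console (here via Robot Framework's robot.api.logger.console, which writes to stdout immediately).

# Hypothetical sketch of the utility.utility.logging helper used by this commit;
# the actual implementation lives outside this diff.
from datetime import datetime

from robot.api import logger


def logging(msg):
    # Timestamp the message and write it directly to the console so it is
    # visible in real time, instead of only after the test finishes.
    logger.console(f"{datetime.now()} {msg}")

Call sites introduced in this commit then use it as a plain function, for example logging(f"Created volume {volume_name}").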
5 changes: 1 addition & 4 deletions e2e/libs/keywords/common_keywords.py
@@ -1,20 +1,17 @@
from utility.utility import init_k8s_api_client
from node_exec import NodeExec
import logging

class common_keywords:

def __init__(self):
logging.warn("initialize common_keywords class")
pass

def init_k8s_api_client(self):
init_k8s_api_client()

def init_node_exec(self, test_name):
namespace = test_name.lower().replace(' ', '-')[:63]
logging.warn(f"namespace = {namespace}")
NodeExec.get_instance().set_namespace(namespace)

def cleanup_node_exec(self):
logging.info('cleaning up resources')
NodeExec.get_instance().cleanup()
6 changes: 2 additions & 4 deletions e2e/libs/keywords/engine_keywords.py
@@ -1,5 +1,4 @@
import logging

from utility.utility import logging
from common_keywords import common_keywords

class engine_keywords:
@@ -8,8 +7,7 @@ def __init__(self):
self.engine = common_keywords.engine_instance

def get_engine_state(self, volume_name, node_name):
logging.info(
f"getting the volume {volume_name} engine on the node {node_name} state")
logging(f"Getting the volume {volume_name} engine on the node {node_name} state")

resp = self.engine.get_engine(volume_name, node_name)
if resp == "" or resp is None:
2 changes: 0 additions & 2 deletions e2e/libs/keywords/node_keywords.py
@@ -3,12 +3,10 @@
from utility.utility import wait_for_all_instance_manager_running
from robot.libraries.BuiltIn import BuiltIn
from node import Node
import logging

class node_keywords:

def __init__(self):
logging.warn("initialize node_keywords class")
self.node = Node()

def reboot_volume_node(self, volume_name):
6 changes: 4 additions & 2 deletions e2e/libs/keywords/pod_keywords.py
@@ -1,13 +1,15 @@
import logging
import time

from node import Nodes

retry_count = 200
retry_interval = 5

class pod_keywords:

#TODO
# keywords layer can only call lower implementation layer to complete its work
# and should not have any business logic here

def wait_all_pods_evicted(self, node_index):
node_name = Nodes.get_name_by_index(int(node_index))

7 changes: 3 additions & 4 deletions e2e/libs/keywords/recurring_job_keywords.py
@@ -1,14 +1,13 @@
from utility.utility import get_test_case_namespace, generate_volume_name
from utility.utility import get_node, list_nodes
from utility.utility import get_test_pod_running_node, get_test_pod_not_running_node
from utility.utility import logging
from robot.libraries.BuiltIn import BuiltIn
from recurring_job import RecurringJob
import logging

class recurring_job_keywords:

def __init__(self):
logging.warn("initialize recurring_job_keywords class")
self.recurring_job = RecurringJob()


@@ -17,15 +16,15 @@ def create_snapshot_recurring_job_for_volume(self, volume_name):
self.recurring_job.create(job_name, task="snapshot")
self.recurring_job.add_to_volume(job_name, volume_name)
self.recurring_job.get(job_name)
logging.warn(f'==> create recurring job {job_name} for volume {volume_name}')
logging(f'Created recurring job {job_name} for volume {volume_name}')


def create_backup_recurring_job_for_volume(self, volume_name):
job_name = volume_name + '-bak'
self.recurring_job.create(job_name, task="backup")
self.recurring_job.add_to_volume(job_name, volume_name)
self.recurring_job.get(job_name)
logging.warn(f'==> create recurring job {job_name} for volume {volume_name}')
logging(f'Created recurring job {job_name} for volume {volume_name}')


def check_recurring_jobs_work(self, volume_name):
19 changes: 9 additions & 10 deletions e2e/libs/keywords/replica_keywords.py
@@ -1,5 +1,4 @@
import logging

from utility.utility import logging
from node import Nodes
from common_keywords import common_keywords

@@ -11,27 +10,27 @@ def __init__(self):

def delete_replica(self, volume_name, node_index):
node_name = Nodes.get_name_by_index(int(node_index))
logging.info(
f"deleting volume {volume_name}'s replica on the node {node_name}")
logging(f"Deleting volume {volume_name}'s replica on the node {node_name}")
self.replica.delete_replica(volume_name, node_name)

def wait_for_replica_rebuilding_start(self, volume_name, node_index):
node_name = Nodes.get_name_by_index(int(node_index))
logging.info(
f"waiting the {volume_name} replica on node {node_name} rebuilding start")
logging(f"Waiting volume {volume_name}'s replica on node {node_name} rebuilding start")
self.replica.wait_for_replica_rebuilding_start(volume_name, node_name)

def wait_for_replica_rebuilding_complete(self, volume_name, node_index):
node_name = Nodes.get_name_by_index(int(node_index))
logging.info(
f"waiting the {volume_name} replica on node {node_name} rebuilding complete")
logging(f"Waiting volume {volume_name}'s replica on node {node_name} rebuilding complete")
self.replica.wait_for_replica_rebuilding_complete(
volume_name, node_name)

#TODO
# keywords layer can only call lower implementation layer to complete its work
# and should not have any business logic here

def get_replica_state(self, volume_name, node_index):
node_name = Nodes.get_name_by_index(int(node_index))
logging.info(
f"getting the volume {volume_name} replica on the node {node_name} state")
logging(f"Getting volume {volume_name}'s replica on the node {node_name} state")

resp = self.replica.get_replica(volume_name, node_name)
assert resp != "", f"failed to get the volume {volume_name} replicas"
21 changes: 7 additions & 14 deletions e2e/libs/keywords/volume_keywords.py
@@ -1,33 +1,31 @@
from utility.utility import get_test_case_namespace, generate_volume_name
from utility.utility import get_node, list_nodes
from utility.utility import get_test_pod_running_node, get_test_pod_not_running_node
from utility.utility import logging
from robot.libraries.BuiltIn import BuiltIn
from volume import Volume
import logging

class volume_keywords:

def __init__(self):
logging.warn("initialize volume_keywords class")
self.volume = Volume()


def create_volume(self, size, replica_count):
volume_name = generate_volume_name()
self.volume.create(volume_name, size, replica_count)
logging.info(f'==> create volume {volume_name}')
logging(f'Created volume {volume_name}')
return volume_name


def attach_volume(self, volume_name):
attach_node = get_test_pod_not_running_node()
logging.info(f'==> attach volume {volume_name} to {attach_node}')
logging(f'Attached volume {volume_name} to {attach_node}')
self.volume.attach(volume_name, attach_node)


def get_volume_node(self, volume_name):
volume = self.volume.get(volume_name)
print(volume)
return volume['spec']['nodeID']
# return volume.controllers[0].hostId

@@ -42,7 +40,6 @@ def get_replica_node(self, volume_name):


def write_volume_random_data(self, volume_name, size_in_mb):
print('write_volume_random_data')
return self.volume.write_random_data(volume_name, size_in_mb)


@@ -51,23 +48,21 @@ def keep_writing_data(self, volume_name):


def check_data(self, volume_name, checksum):
print(f"check volume {volume_name} data with checksum {checksum}")
logging(f"Checking volume {volume_name} data with checksum {checksum}")
self.volume.check_data(volume_name, checksum)


def delete_replica(self, volume_name, replica_node):
if str(replica_node).isdigit():
replica_node = get_node(replica_node)
logging.info(f"==> delete volume {volume_name}'s replica\
on node {replica_node}")
logging(f"Deleting volume {volume_name}'s replica on node {replica_node}")
self.volume.delete_replica(volume_name, replica_node)


def wait_for_replica_rebuilding_start(self, volume_name, replica_node):
if str(replica_node).isdigit():
replica_node = get_node(replica_node)
logging.info(f"==> wait for volume {volume_name}'s replica\
on node {replica_node} rebuilding started")
logging(f"Waiting for volume {volume_name}'s replica on node {replica_node} rebuilding started")
self.volume.wait_for_replica_rebuilding_start(
volume_name,
replica_node
@@ -77,8 +72,7 @@ def wait_for_replica_rebuilding_start(self, volume_name, replica_node):
def wait_for_replica_rebuilding_complete(self, volume_name, replica_node):
if str(replica_node).isdigit():
replica_node = get_node(replica_node)
logging.info(f"==> wait for volume {volume_name}'s replica\
on node {replica_node} rebuilding completed")
logging(f"Waiting for volume {volume_name}'s replica on node {replica_node} rebuilding completed")
self.volume.wait_for_replica_rebuilding_complete(
volume_name,
replica_node
@@ -91,5 +85,4 @@ def wait_for_volume_healthy(self, volume_name):
self.volume.wait_for_volume_healthy(volume_name)

def cleanup_volumes(self, volume_names):
logging.warn(f"cleanup volumes {volume_names}")
self.volume.cleanup(volume_names)
8 changes: 3 additions & 5 deletions e2e/libs/keywords/workload_keywords.py
@@ -1,18 +1,17 @@
from workload.workload import *
import logging

class workload_keywords:

def __init__(self):
logging.warn("initialize workload_keywords class")
pass

def init_storageclasses(self):
create_storageclass('longhorn-test')
create_storageclass('strict-local')
create_storageclass('longhorn-test-strict-local')

def cleanup_storageclasses(self):
delete_storageclass('longhorn-test')
delete_storageclass('strict-local')
delete_storageclass('longhorn-test-strict-local')

def create_deployment(self, volume_type="rwo", option=""):
pvc_name = create_pvc(volume_type, option)
@@ -36,7 +35,6 @@ def write_pod_random_data(self, pod, size_in_mb):
return write_pod_random_data(pod, size_in_mb)

def check_pod_data(self, pod_name, checksum):
print(f"check pod {pod_name} data with checksum {checksum}")
check_pod_data(pod_name, checksum)

def cleanup_deployments(self, deployment_names):
30 changes: 13 additions & 17 deletions e2e/libs/node/node.py
@@ -1,7 +1,7 @@
from kubernetes import client
import yaml
import time
import logging
from utility.utility import logging
from utility.utility import apply_cr_from_yaml, get_cr
from utility.utility import wait_for_cluster_ready
from utility.utility import list_nodes
@@ -16,59 +16,55 @@ def __init__(self):
with open('/tmp/instance_mapping', 'r') as f:
self.mapping = yaml.safe_load(f)
self.aws_client = boto3.client('ec2')
#logging.warn(f"describe_instances = {self.aws_client.describe_instances()}")

def reboot_all_nodes(self, shut_down_time_in_sec=60):
instance_ids = [value for value in self.mapping.values()]
print(instance_ids)

resp = self.aws_client.stop_instances(InstanceIds=instance_ids)
print(resp)
logging(f"Stopping instances {instance_ids} response: {resp}")
waiter = self.aws_client.get_waiter('instance_stopped')
waiter.wait(InstanceIds=instance_ids)
print(f"all instances stopped")
logging(f"Stopped instances")

time.sleep(shut_down_time_in_sec)

resp = self.aws_client.start_instances(InstanceIds=instance_ids)
print(resp)
logging(f"Starting instances {instance_ids} response: {resp}")
waiter = self.aws_client.get_waiter('instance_running')
waiter.wait(InstanceIds=instance_ids)
wait_for_cluster_ready()
print(f"all instances running")
logging(f"Started instances")

def reboot_node(self, reboot_node_name, shut_down_time_in_sec=60):
instance_ids = [self.mapping[reboot_node_name]]
print(instance_ids)

resp = self.aws_client.stop_instances(InstanceIds=instance_ids)
print(resp)
logging(f"Stopping instances {instance_ids} response: {resp}")
waiter = self.aws_client.get_waiter('instance_stopped')
waiter.wait(InstanceIds=instance_ids)
print(f"instances {instance_ids} stopped")
logging(f"Stopped instances")

time.sleep(shut_down_time_in_sec)

resp = self.aws_client.start_instances(InstanceIds=instance_ids)
print(resp)
logging(f"Starting instances {instance_ids} response: {resp}")
waiter = self.aws_client.get_waiter('instance_running')
waiter.wait(InstanceIds=instance_ids)
print(f"instances {instance_ids} running")
logging(f"Started instances")

def reboot_all_worker_nodes(self, shut_down_time_in_sec=60):
instance_ids = [self.mapping[value] for value in list_nodes()]
print(instance_ids)

resp = self.aws_client.stop_instances(InstanceIds=instance_ids)
print(resp)
logging(f"Stopping instances {instance_ids} response: {resp}")
waiter = self.aws_client.get_waiter('instance_stopped')
waiter.wait(InstanceIds=instance_ids)
print(f"instances {instance_ids} stopped")
logging(f"Stopped instances")

time.sleep(shut_down_time_in_sec)

resp = self.aws_client.start_instances(InstanceIds=instance_ids)
print(resp)
logging(f"Starting instances {instance_ids} response: {resp}")
waiter = self.aws_client.get_waiter('instance_running')
waiter.wait(InstanceIds=instance_ids)
print(f"instances {instance_ids} running")
logging(f"Started instances")
5 changes: 3 additions & 2 deletions e2e/libs/node_exec/node_exec.py
@@ -1,9 +1,9 @@
from kubernetes import client
from kubernetes.stream import stream
import time
import logging
from utility.utility import wait_delete_pod
from utility.utility import wait_delete_ns
from utility.utility import logging

DEFAULT_POD_TIMEOUT = 180
DEFAULT_POD_INTERVAL = 1
@@ -39,10 +39,11 @@ def set_namespace(self, namespace):
self.core_api.create_namespace(
body=namespace_manifest
)
logging(f"Created namespace {namespace}")

def cleanup(self):
for pod in self.node_exec_pod.values():
logging.warn(f"==> cleanup pod {pod.metadata.name} {pod.metadata.uid}")
logging(f"Cleaning up pod {pod.metadata.name} {pod.metadata.uid}")
res = self.core_api.delete_namespaced_pod(
name=pod.metadata.name,
namespace=self.namespace,