@@ -38,6 +38,9 @@ def setUpClass(cls):
         # set a single K8s wrapper for all tests
         k8s = cls.k8s = K8s()
 
+        # remove existing local storage class and create hostpath class
+        k8s.api.storage_v1_api.delete_storage_class("standard")
+
         # operator deploys pod service account there on start up
         # needed for test_multi_namespace_support()
         cls.namespace = "test"
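
Note that delete_storage_class("standard") will raise if the class is absent. A minimal sketch of a more forgiving variant, tolerating the 404 the kubernetes client raises when the class is already gone (the helper name and the try/except are assumptions, not part of this change):

from kubernetes.client.rest import ApiException

def delete_storage_class_if_present(storage_v1_api, name="standard"):
    # delete_storage_class() raises ApiException(status=404) when the
    # class does not exist; swallow only that case
    try:
        storage_v1_api.delete_storage_class(name)
    except ApiException as e:
        if e.status != 404:
            raise
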
@@ -54,7 +57,8 @@ def setUpClass(cls):
                          "configmap.yaml",
                          "postgres-operator.yaml",
                          "infrastructure-roles.yaml",
-                         "infrastructure-roles-new.yaml"]:
+                         "infrastructure-roles-new.yaml",
+                         "e2e-storage-class.yaml"]:
             result = k8s.create_with_kubectl("manifests/" + filename)
             print("stdout: {}, stderr: {}".format(result.stdout, result.stderr))
 
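
The new e2e-storage-class.yaml is applied with the other manifests via kubectl; its contents are not part of this diff. A hypothetical Python-client equivalent of such a hostpath class (the name "standard" and the provisioner are assumptions):

from kubernetes import client, config

config.load_kube_config()
# hypothetical stand-in for e2e-storage-class.yaml; the real manifest's
# name and provisioner are not shown in this diff
hostpath_class = client.V1StorageClass(
    api_version="storage.k8s.io/v1",
    kind="StorageClass",
    metadata=client.V1ObjectMeta(name="standard"),
    provisioner="kubernetes.io/host-path")
client.StorageV1Api().create_storage_class(hostpath_class)
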
@@ -600,8 +604,8 @@ def test_infrastructure_roles(self):
         get_config_cmd = "wget --quiet -O - localhost:8080/config"
         result = k8s.exec_with_kubectl(operator_pod.metadata.name, get_config_cmd)
         roles_dict = (json.loads(result.stdout)
-                          .get("controller", {})
-                          .get("InfrastructureRoles"))
+                      .get("controller", {})
+                      .get("InfrastructureRoles"))
 
         self.assertTrue("robot_zmon_acid_monitoring_new" in roles_dict)
         role = roles_dict["robot_zmon_acid_monitoring_new"]
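
The realigned chain walks the operator's debug /config JSON down to controller.InfrastructureRoles. A sketch of the same access pattern with an explicit default at the last step, so a missing key fails the membership check instead of raising on None (the {} default is an assumption; the test itself relies on the key being present):

import json

config_json = json.loads(result.stdout)  # result comes from exec_with_kubectl above
roles_dict = (config_json
              .get("controller", {})
              .get("InfrastructureRoles", {}))  # {} default keeps the lookup total
assert "robot_zmon_acid_monitoring_new" in roles_dict
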
@@ -685,12 +689,13 @@ def get_failover_targets(self, master_node, replica_nodes):
         If all pods live on the same node, failover will happen to other worker(s)
         '''
         k8s = self.k8s
+        k8s_master_exclusion = 'kubernetes.io/hostname!=postgres-operator-e2e-tests-control-plane'
 
         failover_targets = [x for x in replica_nodes if x != master_node]
         if len(failover_targets) == 0:
-            nodes = k8s.api.core_v1.list_node()
+            nodes = k8s.api.core_v1.list_node(label_selector=k8s_master_exclusion)
             for n in nodes.items:
-                if "node-role.kubernetes.io/master" not in n.metadata.labels and n.metadata.name != master_node:
+                if n.metadata.name != master_node:
                     failover_targets.append(n.metadata.name)
 
         return failover_targets
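
The label selector moves the control-plane exclusion to the API server: kubernetes.io/hostname is a well-known label set on every node, which makes the old client-side check of node-role.kubernetes.io/master unnecessary. A self-contained sketch of the same query (the kubeconfig loading and the node name are assumptions tied to the kind e2e cluster):

from kubernetes import client, config

config.load_kube_config()
# let the API server filter out the kind control-plane node by hostname
selector = "kubernetes.io/hostname!=postgres-operator-e2e-tests-control-plane"
workers = [n.metadata.name
           for n in client.CoreV1Api().list_node(label_selector=selector).items]
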
@@ -738,8 +743,7 @@ def assert_distributed_pods(self, master_node, replica_nodes, cluster_label):
             }
         }
         k8s.update_config(patch_enable_antiaffinity)
-        self.assert_failover(
-            master_node, len(replica_nodes), failover_targets, cluster_label)
+        self.assert_failover(master_node, len(replica_nodes), failover_targets, cluster_label)
 
         # now disable pod anti-affinity again, which will cause yet another failover
         patch_disable_antiaffinity = {
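
patch_enable_antiaffinity and patch_disable_antiaffinity are mostly cut off by this hunk; judging from the surrounding update_config() calls they are configmap patches. A hypothetical shape, assuming the operator's enable_pod_antiaffinity setting (not confirmed by this diff):

# hypothetical configmap patch toggled around the failover assertions
patch_enable_antiaffinity = {
    "data": {
        "enable_pod_antiaffinity": "true",
    }
}
k8s.update_config(patch_enable_antiaffinity)  # k8s is the suite's wrapper
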
@@ -767,6 +771,7 @@ def __init__(self):
         self.batch_v1_beta1 = client.BatchV1beta1Api()
         self.custom_objects_api = client.CustomObjectsApi()
         self.policy_v1_beta1 = client.PolicyV1beta1Api()
+        self.storage_v1_api = client.StorageV1Api()
 
 
 class K8s:
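
With storage_v1_api on the wrapper, tests can manage storage classes directly. A quick standalone check that the expected class survives setup (an assumption: run against the same kubeconfig as the e2e suite):

from kubernetes import client, config

config.load_kube_config()
storage_v1_api = client.StorageV1Api()  # same API the wrapper now exposes
print([sc.metadata.name for sc in storage_v1_api.list_storage_class().items])
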
@@ -944,8 +949,8 @@ def create_with_kubectl(self, path):
 
     def exec_with_kubectl(self, pod, cmd):
         return subprocess.run(["./exec.sh", pod, cmd],
-                stdout=subprocess.PIPE,
-                stderr=subprocess.PIPE)
+                              stdout=subprocess.PIPE,
+                              stderr=subprocess.PIPE)
 
     def get_effective_pod_image(self, pod_name, namespace='default'):
         '''