Re-enable upgrade testing (whamcloud#873)
Fixes #717.
Fixes #803.
Fixes #882.
Fixes #883.

Add upgrade testing back to Jenkins and ensure it works from 4.x -> 5.0

Signed-off-by: Joe Grund <jgrund@whamcloud.io>
jgrund authored Apr 24, 2019
1 parent 627779f commit adcaed2
Showing 18 changed files with 222 additions and 452 deletions.
2 changes: 1 addition & 1 deletion Makefile
@@ -270,7 +270,7 @@ tests/framework/utils/defaults.sh chroma-bundles/chroma_support.repo.in: substs
ssi_tests: tests/framework/utils/defaults.sh chroma-bundles/chroma_support.repo.in
CHROMA_DIR=$$PWD tests/framework/integration/shared_storage_configuration/full_cluster/jenkins_steps/main $@

upgrade_tests:
upgrade_tests: tests/framework/utils/defaults.sh chroma-bundles/chroma_support.repo.in
tests/framework/integration/installation_and_upgrade/jenkins_steps/main $@

efs_tests: tests/framework/utils/defaults.sh chroma-bundles/chroma_support.repo.in
8 changes: 7 additions & 1 deletion chroma_core/models/host.py
@@ -1491,6 +1491,11 @@ def run(self, kwargs):
)


class RemovePackagesStep(Step):
def run(self, kwargs):
self.invoke_agent_expect_result(kwargs["host"], "remove_packages", {"packages": kwargs["packages"]})


class UpdateJob(Job):
host = models.ForeignKey(ManagedHost)

@@ -1512,8 +1517,9 @@ def get_steps(self):
base_repo_url = os.path.join(str(settings.SERVER_HTTP_URL), "repo")

return [
(UpdatePackagesStep, {"host": self.host, "enablerepos": [], "packages": ["python2-iml-agent"]}),
(UpdateYumFileStep, {"host": self.host, "filename": REPO_FILENAME, "file_contents": repo_file_contents}),
(UpdatePackagesStep, {"host": self.host, "enablerepos": [], "packages": ["python2-iml-agent"]}),
(RemovePackagesStep, {"host": self.host, "packages": ["lustre-all-dkms"]}),
(
UpdatePackagesStep,
{"host": self.host, "enablerepos": [], "packages": list(self.host.server_profile.packages)},
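For readability, here is a small standalone sketch of the ordering these steps now take in UpdateJob.get_steps(): the repo file is rewritten first, the agent package is updated, lustre-all-dkms is removed, and only then are the profile packages updated. FakeAgent, the trimmed-down step classes, the host name "oss1", and the example repo filename are illustrative stand-ins, not the actual chroma_core Step/Job API.

class FakeAgent:
    def invoke(self, host, command, args):
        # Stand-in for invoke_agent_expect_result(): just echo what would be sent.
        print("%s <- %s %r" % (host, command, args))


class UpdateYumFileStep:
    def run(self, agent, kwargs):
        agent.invoke(kwargs["host"], "write_file", {"filename": kwargs["filename"]})


class UpdatePackagesStep:
    def run(self, agent, kwargs):
        agent.invoke(kwargs["host"], "update_packages", {"packages": kwargs["packages"]})


class RemovePackagesStep:
    # Mirrors the new step added above: ask the agent to remove the named packages.
    def run(self, agent, kwargs):
        agent.invoke(kwargs["host"], "remove_packages", {"packages": kwargs["packages"]})


steps = [
    (UpdateYumFileStep, {"host": "oss1", "filename": "manager.repo"}),
    (UpdatePackagesStep, {"host": "oss1", "packages": ["python2-iml-agent"]}),
    (RemovePackagesStep, {"host": "oss1", "packages": ["lustre-all-dkms"]}),
    (UpdatePackagesStep, {"host": "oss1", "packages": ["example-profile-package"]}),
]

agent = FakeAgent()
for step_cls, kwargs in steps:
    step_cls().run(agent, kwargs)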
9 changes: 6 additions & 3 deletions chroma_core/models/target.py
@@ -972,10 +972,13 @@ def run(self, kwargs):

@classmethod
def describe(cls, kwargs):
if kwargs["start_target"] is True:
return help_text["mounting_target_on_node"] % (kwargs["target"], kwargs["active_volume_node"].host)
if kwargs["active_volume_node"] is None:
return help_text["export_target_from_nodes"] % kwargs["target"]
else:
return help_text["moving_target_to_node"] % (kwargs["target"], kwargs["active_volume_node"].host)
if kwargs["start_target"] is True:
return help_text["mounting_target_on_node"] % (kwargs["target"], kwargs["active_volume_node"].host)
else:
return help_text["moving_target_to_node"] % (kwargs["target"], kwargs["active_volume_node"].host)

@classmethod
def create_parameters(cls, target, host, start_target):
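The change above adds an explicit branch for the case where there is no active volume node (the target is being exported) before deciding between the "mounting" and "moving" messages. A runnable, illustrative-only reduction of the new branching; the messages are stand-ins for the real help_text entries, and the active volume node is simplified to a plain host name:

help_text = {  # stand-in messages, not the actual chroma_core help_text
    "export_target_from_nodes": "Exporting target %s from nodes",
    "mounting_target_on_node": "Mounting %s on node %s",
    "moving_target_to_node": "Moving %s to node %s",
}

def describe(kwargs):
    if kwargs["active_volume_node"] is None:
        return help_text["export_target_from_nodes"] % kwargs["target"]
    if kwargs["start_target"] is True:
        return help_text["mounting_target_on_node"] % (kwargs["target"], kwargs["active_volume_node"])
    return help_text["moving_target_to_node"] % (kwargs["target"], kwargs["active_volume_node"])

print(describe({"target": "fs-OST0000", "active_volume_node": None, "start_target": False}))
print(describe({"target": "fs-OST0000", "active_volume_node": "oss2", "start_target": True}))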
@@ -10,7 +10,7 @@
"%(agent)s %(options)s -a %(address)s -u %(port)s -l %(username)s -k %(home)s/.ssh/id_rsa -o monitor",
"outlet_query_template":
"%(agent)s %(options)s -a %(address)s -u %(port)s -l %(username)s -k %(home)s/.ssh/id_rsa -o status -n %(identifier)s",
"default_username": "brian",
"default_username": "@VMHOST_ACCOUNT@",
"default_password": "",
"model": "Virtual PDU",
"powercycle_template":
@@ -43,6 +43,10 @@
{
"backend_filesystem": "zfs",
"path_index": 4
},
{
"backend_filesystem": "ldiskfs",
"path_index": 5
}
],
"power_distribution_units": [
@@ -54,16 +58,17 @@
],
"lustre_clients": [
{
"nodename": "vm2",
"nodename": "@HOSTNAME@vm@CLUSTER@2",
"device_paths": [],
"destroy_command": "virsh destroy vm2",
"fqdn": "vm2",
"start_command": "virsh start vm2",
"status_command": "virsh domstate vm2",
"host": "host",
"destroy_command": "virsh destroy @HOSTNAME@vm@CLUSTER@2",
"fqdn": "@HOSTNAME@vm@CLUSTER@2@DOMAINNAME@",
"start_command": "virsh start @HOSTNAME@vm@CLUSTER@2",
"status_command": "virsh domstate @HOSTNAME@vm@CLUSTER@2",
"host": "@VMHOST@",
"lustre_client_version": "",
"address": "vm2",
"ip_address": "10.73.10.32",
"address": "@HOSTNAME@vm@CLUSTER@2@DOMAINNAME@",
"ip_address": "@VM2_IPADDRESS@",
"lnet_address": "@VM2_LNETADDRESS@",
"distro": "el7.4"
}
],
@@ -76,144 +81,152 @@
},
"provision": true,
"hosts": {
"host": {
"@VMHOST@": {
"virsh_as_root": false,
"cluster_num": 0,
"nodename": "host",
"ip_address": "192.168.121.1",
"fqdn": "host",
"address": "host"
"cluster_num": @CLUSTER_NUM@,
"nodename": "@VMHOST@@DOMAINNAME@",
"ip_address": "@HOST_IP_ADDRESS@",
"fqdn": "@VMHOST@@DOMAINNAME@",
"address": "@VMHOST@@DOMAINNAME@"
}
},
"failover_is_configured": true,
"test_runners": [
{
"nodename": "vm4",
"nodename": "@HOSTNAME@vm@CLUSTER@4",
"device_paths": [],
"repos": ["chroma"],
"destroy_command": "virsh destroy vm4",
"fqdn": "vm4",
"start_command": "virsh start vm4",
"status_command": "virsh domstate vm4",
"host": "host",
"address": "vm4",
"ip_address": "10.73.10.8",
"destroy_command": "virsh destroy @HOSTNAME@vm@CLUSTER@4",
"fqdn": "@HOSTNAME@vm@CLUSTER@4@DOMAINNAME@",
"start_command": "virsh start @HOSTNAME@vm@CLUSTER@4",
"status_command": "virsh domstate @HOSTNAME@vm@CLUSTER@4",
"host": "@VMHOST@",
"address": "@HOSTNAME@vm@CLUSTER@4@DOMAINNAME@",
"ip_address": "@VM4_IPADDRESS@",
"distro": "el7.4"
}
],
"lustre_servers": [
{
"bridges": ["a"],
"firewall_enabled": true,
"nodename": "vm5",
"nodename": "@HOSTNAME@vm@CLUSTER@5",
"device_paths": [
"/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_target1",
"/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_target2",
"/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_target3",
"/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_target4",
"/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_target5"
"/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_target5",
"/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_target6"
],
"corosync_config": {
"mcast_port": "4242",
"ring1_iface": "eth3"
},
"repos": ["chroma"],
"destroy_command": "virsh destroy vm5",
"fqdn": "vm5",
"start_command": "virsh start vm5",
"status_command": "virsh domstate vm5",
"host": "host",
"destroy_command": "virsh destroy @HOSTNAME@vm@CLUSTER@5",
"fqdn": "@HOSTNAME@vm@CLUSTER@5@DOMAINNAME@",
"start_command": "virsh start @HOSTNAME@vm@CLUSTER@5",
"status_command": "virsh domstate @HOSTNAME@vm@CLUSTER@5",
"host": "@VMHOST@",
"selinux_enabled": true,
"root_password": "vagrant",
"address": "vm5",
"ip_address": "10.73.10.21",
"address": "@HOSTNAME@vm@CLUSTER@5@DOMAINNAME@",
"ip_address": "@VM5_IPADDRESS@",
"lnet_address": "@VM5_LNETADDRESS@",
"distro": "el7.4"
},
{
"bridges": ["a"],
"firewall_enabled": true,
"nodename": "vm6",
"nodename": "@HOSTNAME@vm@CLUSTER@6",
"device_paths": [
"/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_target1",
"/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_target2",
"/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_target3",
"/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_target4",
"/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_target5"
"/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_target5",
"/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_target6"
],
"corosync_config": {
"mcast_port": "4242",
"ring1_iface": "eth3"
},
"repos": ["chroma"],
"destroy_command": "virsh destroy vm6",
"fqdn": "vm6",
"start_command": "virsh start vm6",
"status_command": "virsh domstate vm6",
"host": "host",
"destroy_command": "virsh destroy @HOSTNAME@vm@CLUSTER@6",
"fqdn": "@HOSTNAME@vm@CLUSTER@6@DOMAINNAME@",
"start_command": "virsh start @HOSTNAME@vm@CLUSTER@6",
"status_command": "virsh domstate @HOSTNAME@vm@CLUSTER@6",
"host": "@VMHOST@",
"selinux_enabled": false,
"root_password": "vagrant",
"address": "vm6",
"ip_address": "10.73.10.22",
"address": "@HOSTNAME@vm@CLUSTER@6@DOMAINNAME@",
"ip_address": "@VM6_IPADDRESS@",
"lnet_address": "@VM6_LNETADDRESS@",
"distro": "el7.4"
},
{
"bridges": ["b"],
"firewall_enabled": true,
"nodename": "vm7",
"nodename": "@HOSTNAME@vm@CLUSTER@7",
"device_paths": [
"/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_target1",
"/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_target2",
"/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_target3",
"/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_target4",
"/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_target5"
"/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_target5",
"/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_target6"
],
"corosync_config": {
"mcast_port": "4244",
"ring1_iface": "eth3"
},
"repos": ["chroma"],
"destroy_command": "virsh destroy vm7",
"fqdn": "vm7",
"start_command": "virsh start vm7",
"status_command": "virsh domstate vm7",
"host": "host",
"destroy_command": "virsh destroy @HOSTNAME@vm@CLUSTER@7",
"fqdn": "@HOSTNAME@vm@CLUSTER@7@DOMAINNAME@",
"start_command": "virsh start @HOSTNAME@vm@CLUSTER@7",
"status_command": "virsh domstate @HOSTNAME@vm@CLUSTER@7",
"host": "@VMHOST@",
"selinux_enabled": false,
"root_password": "vagrant",
"address": "vm7",
"ip_address": "10.73.10.23",
"address": "@HOSTNAME@vm@CLUSTER@7@DOMAINNAME@",
"ip_address": "@VM7_IPADDRESS@",
"lnet_address": "@VM7_LNETADDRESS@",
"distro": "el7.4"
},
{
"bridges": ["b"],
"firewall_enabled": true,
"nodename": "vm8",
"nodename": "@HOSTNAME@vm@CLUSTER@8",
"device_paths": [
"/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_target1",
"/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_target2",
"/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_target3",
"/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_target4",
"/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_target5"
"/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_target5",
"/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_target6"
],
"corosync_config": {
"mcast_port": "4244",
"ring1_iface": "eth3"
},
"repos": ["chroma"],
"destroy_command": "virsh destroy vm8",
"fqdn": "vm8",
"start_command": "virsh start vm8",
"status_command": "virsh domstate vm8",
"host": "host",
"destroy_command": "virsh destroy @HOSTNAME@vm@CLUSTER@8",
"fqdn": "@HOSTNAME@vm@CLUSTER@8@DOMAINNAME@",
"start_command": "virsh start @HOSTNAME@vm@CLUSTER@8",
"status_command": "virsh domstate @HOSTNAME@vm@CLUSTER@8",
"host": "@VMHOST@",
"selinux_enabled": false,
"root_password": "vagrant",
"address": "vm8",
"ip_address": "10.73.10.24",
"address": "@HOSTNAME@vm@CLUSTER@8@DOMAINNAME@",
"ip_address": "@VM8_IPADDRESS@",
"lnet_address": "@VM8_LNETADDRESS@",
"distro": "el7.4"
},
{
"profile": "posix_copytool_worker",
"firewall_enabled": true,
"nodename": "vm9",
"nodename": "@HOSTNAME@vm@CLUSTER@9",
"device_paths": [
"/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_disk1",
"/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_disk2",
@@ -222,22 +235,23 @@
"/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_disk5"
],
"repos": ["chroma"],
"destroy_command": "virsh destroy vm8",
"fqdn": "vm9",
"start_command": "virsh start vm9",
"status_command": "virsh domstate vm9",
"host": "host",
"destroy_command": "virsh destroy @HOSTNAME@vm@CLUSTER@8",
"fqdn": "@HOSTNAME@vm@CLUSTER@9@DOMAINNAME@",
"start_command": "virsh start @HOSTNAME@vm@CLUSTER@9",
"status_command": "virsh domstate @HOSTNAME@vm@CLUSTER@9",
"host": "@VMHOST@",
"selinux_enabled": true,
"root_password": "vagrant",
"address": "vm9",
"ip_address": "10.73.10.231",
"address": "@HOSTNAME@vm@CLUSTER@9@DOMAINNAME@",
"ip_address": "@VM9_IPADDRESS@",
"lnet_address": "@VM9_LNETADDRESS@",
"distro": "el7.4"
}
],
"pdu_outlets": [],
"chroma_managers": [
{
"server_http_url": "https://vm3/",
"server_http_url": "https://@HOSTNAME@vm@CLUSTER@3@DOMAINNAME@/",
"firewall_enabled": true,
"users": [
{
Expand All @@ -247,17 +261,17 @@
"email": "nobody@example.com"
}
],
"nodename": "vm3",
"nodename": "@HOSTNAME@vm@CLUSTER@3",
"device_paths": [],
"repos": ["chroma"],
"destroy_command": "virsh destroy vm3",
"fqdn": "vm3",
"start_command": "virsh start vm3",
"status_command": "virsh domstate vm3",
"host": "host",
"destroy_command": "virsh destroy @HOSTNAME@vm@CLUSTER@3",
"fqdn": "@HOSTNAME@vm@CLUSTER@3@DOMAINNAME@",
"start_command": "virsh start @HOSTNAME@vm@CLUSTER@3",
"status_command": "virsh domstate @HOSTNAME@vm@CLUSTER@3",
"host": "@VMHOST@",
"selinux_enabled": true,
"address": "vm3",
"ip_address": "10.73.10.10",
"address": "@HOSTNAME@vm@CLUSTER@3@DOMAINNAME@",
"ip_address": "@VM3_IPADDRESS@",
"distro": "el7.4"
}
]
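The hard-coded host names and addresses in this cluster config are replaced by @TOKEN@ placeholders so the template can be filled in per test cluster (the Makefile hunk above shows the test config targets depending on a substs step). A minimal, illustrative-only sketch of that kind of token substitution; the token values and the substitute() helper here are hypothetical and are not the repository's actual substs machinery:

import re

substitutions = {  # example values; the real ones come from the test environment
    "HOSTNAME": "node",
    "CLUSTER": "1",
    "DOMAINNAME": ".example.com",
    "VMHOST": "buildhost",
    "VM5_IPADDRESS": "10.0.0.15",
}

def substitute(text):
    # Replace every @TOKEN@ with its value, leaving unknown tokens untouched.
    return re.sub(r"@([A-Z0-9_]+)@", lambda m: substitutions.get(m.group(1), m.group(0)), text)

print(substitute('"address": "@HOSTNAME@vm@CLUSTER@5@DOMAINNAME@"'))   # -> "address": "nodevm15.example.com"
print(substitute('"ip_address": "@VM5_IPADDRESS@"'))                   # -> "ip_address": "10.0.0.15"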