Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[proxmox_vm_info] Return empty list when requested VM doesn't exist #7049

Merged
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Next Next commit
Address review comments
  • Loading branch information
UnderGreen committed Aug 2, 2023
commit 20891d97924dce5893b8e2cd1e080194d00ea02f
34 changes: 16 additions & 18 deletions plugins/modules/proxmox_vm_info.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,13 +34,12 @@
vmid:
description:
- Restrict results to a specific virtual machine by using its ID.
- If VM with vmid doesn't exist in a cluster then resulted list will be empty.
- If a VM with the specified vmid does not exist in a cluster, then the resulting list will be empty.
type: int
name:
description:
- Restrict results to a specific virtual machine by using its name.
- If VM with name doesn't exist in a cluster then resulted list will be empty.
- If multiple virtual machines have the same name then vmid must be used instead.
- Restrict results to specific virtual machines by using their name.
- If no VM with the specified name exists in a cluster, then the resulting list will be empty.
type: str
extends_documentation_fragment:
- community.general.proxmox.documentation
Expand Down Expand Up @@ -155,13 +154,14 @@ def get_vms_from_cluster_resources(self):
msg="Failed to retrieve VMs information from cluster resources: %s" % e
)

def get_vms_from_nodes(self, vms_unfiltered, type, vmid=None, node=None):
def get_vms_from_nodes(self, vms_unfiltered, type, vmid=None, name=None, node=None):
vms = []
for vm in vms_unfiltered:
if (
type != vm["type"]
or (node and vm["node"] != node)
or (vmid and int(vm["vmid"]) != vmid)
or (name and vm["name"] != name)
):
continue
vms.append(vm)
Expand All @@ -181,15 +181,15 @@ def get_vms_from_nodes(self, vms_unfiltered, type, vmid=None, node=None):

return vms

def get_qemu_vms(self, vms_unfiltered, vmid=None, node=None):
def get_qemu_vms(self, vms_unfiltered, vmid=None, name=None, node=None):
try:
return self.get_vms_from_nodes(vms_unfiltered, "qemu", vmid, node)
return self.get_vms_from_nodes(vms_unfiltered, "qemu", vmid, name, node)
except Exception as e:
self.module.fail_json(msg="Failed to retrieve QEMU VMs information: %s" % e)

def get_lxc_vms(self, vms_unfiltered, vmid=None, node=None):
def get_lxc_vms(self, vms_unfiltered, vmid=None, name=None, node=None):
try:
return self.get_vms_from_nodes(vms_unfiltered, "lxc", vmid, node)
return self.get_vms_from_nodes(vms_unfiltered, "lxc", vmid, name, node)
except Exception as e:
self.module.fail_json(msg="Failed to retrieve LXC VMs information: %s" % e)

Expand Down Expand Up @@ -224,22 +224,20 @@ def main():
if node and proxmox.get_node(node) is None:
module.fail_json(msg="Node %s doesn't exist in PVE cluster" % node)

if not vmid and name:
vmid = proxmox.get_vmid(name, ignore_missing=True)
if vmid is not None:
vmid = int(vmid)

vms_cluster_resources = proxmox.get_vms_from_cluster_resources()
vms = []

if type == "lxc":
vms = proxmox.get_lxc_vms(vms_cluster_resources, vmid, node)
vms = proxmox.get_lxc_vms(vms_cluster_resources, vmid, name, node)
elif type == "qemu":
vms = proxmox.get_qemu_vms(vms_cluster_resources, vmid, node)
vms = proxmox.get_qemu_vms(vms_cluster_resources, vmid, name, node)
else:
vms = proxmox.get_qemu_vms(
vms_cluster_resources, vmid, node
) + proxmox.get_lxc_vms(vms_cluster_resources, vmid, node)
vms_cluster_resources,
vmid,
name,
node,
) + proxmox.get_lxc_vms(vms_cluster_resources, vmid, name, node)

result["proxmox_vms"] = vms
module.exit_json(**result)
Expand Down
90 changes: 83 additions & 7 deletions tests/unit/plugins/modules/test_proxmox_vm_info.py
Original file line number Diff line number Diff line change
Expand Up @@ -113,6 +113,27 @@
"uptime": 0,
"vmid": 103,
},
{
"cpu": 0,
"disk": 0,
"diskread": 0,
"diskwrite": 0,
"id": "lxc/104",
"maxcpu": 2,
"maxdisk": 10737418240,
"maxmem": 536870912,
"mem": 0,
"name": "test-lxc.home.arpa",
"netin": 0,
"netout": 0,
"node": NODE2,
"pool": "pool1",
"status": "stopped",
"template": 0,
"type": "lxc",
"uptime": 0,
"vmid": 104,
},
]
RAW_LXC_OUTPUT = [
{
Expand Down Expand Up @@ -154,6 +175,25 @@
"uptime": 161,
"vmid": "102",
},
{
"cpu": 0,
"cpus": 2,
"disk": 0,
"diskread": 0,
"diskwrite": 0,
"maxdisk": 10737418240,
"maxmem": 536870912,
"maxswap": 536870912,
"mem": 0,
"name": "test-lxc.home.arpa",
"netin": 0,
"netout": 0,
"status": "stopped",
"swap": 0,
"type": "lxc",
"uptime": 0,
"vmid": "104",
},
]
RAW_QEMU_OUTPUT = [
{
Expand Down Expand Up @@ -283,6 +323,30 @@
"uptime": 0,
"vmid": 103,
},
{
"cpu": 0,
"cpus": 2,
"disk": 0,
"diskread": 0,
"diskwrite": 0,
"id": "lxc/104",
"maxcpu": 2,
"maxdisk": 10737418240,
"maxmem": 536870912,
"maxswap": 536870912,
"mem": 0,
"name": "test-lxc.home.arpa",
"netin": 0,
"netout": 0,
"node": NODE2,
"pool": "pool1",
"status": "stopped",
"swap": 0,
"template": False,
"type": "lxc",
"uptime": 0,
"vmid": 104,
},
]


Expand Down Expand Up @@ -408,9 +472,9 @@ def test_get_specific_vm_information(self):
assert len(result["proxmox_vms"]) == 1

def test_get_specific_vm_information_by_using_name(self):
name = "test-lxc.home.arpa"
name = "test1-lxc.home.arpa"
self.connect_mock.return_value.cluster.resources.get.return_value = [
{"name": name, "vmid": "102"}
{"name": name, "vmid": "103"}
]

with pytest.raises(AnsibleExitJson) as exc_info:
Expand All @@ -422,6 +486,22 @@ def test_get_specific_vm_information_by_using_name(self):
assert result["proxmox_vms"] == expected_output
assert len(result["proxmox_vms"]) == 1

def test_get_multiple_vms_with_the_same_name(self):
name = "test-lxc.home.arpa"
self.connect_mock.return_value.cluster.resources.get.return_value = [
{"name": name, "vmid": "102"},
{"name": name, "vmid": "104"},
]

with pytest.raises(AnsibleExitJson) as exc_info:
expected_output = [vm for vm in EXPECTED_VMS_OUTPUT if vm["name"] == name]
set_module_args(get_module_args(type="all", name=name))
self.module.main()

result = exc_info.value.args[0]
assert result["proxmox_vms"] == expected_output
assert len(result["proxmox_vms"]) == 2

def test_get_all_lxc_vms_from_specific_node(self):
with pytest.raises(AnsibleExitJson) as exc_info:
expected_output = [
Expand Down Expand Up @@ -452,11 +532,7 @@ def test_get_all_qemu_vms_from_specific_node(self):

def test_get_all_vms_from_specific_node(self):
with pytest.raises(AnsibleExitJson) as exc_info:
expected_output = [
vm
for vm in EXPECTED_VMS_OUTPUT
if vm["node"] == NODE1
]
expected_output = [vm for vm in EXPECTED_VMS_OUTPUT if vm["node"] == NODE1]
set_module_args(get_module_args(node=NODE1))
self.module.main()

Expand Down