
Commit

Merge branch 'trunk' into trunk
Kami authored Oct 28, 2023
2 parents aeea7c8 + a9f0dcf commit 5530f54
Showing 5 changed files with 138 additions and 2 deletions.
8 changes: 8 additions & 0 deletions CHANGES.rst
@@ -26,6 +26,14 @@ Compute
(#1946)
[Michael Galaxy - @mraygalaxy2]

- [ECS] Add VPC IP and Elastic IP to ECS node as private and public IP.
(#1954)
[Victor Herrero Otal - @vicwicker]

- [Azure ARM] When deleting a node, also delete the VM's OS disk if it is a managed disk.
(#1957)
[Victor Herrero Otal - @vicwicker]

Storage
~~~~~~~

11 changes: 11 additions & 0 deletions docs/upgrade_notes.rst
@@ -5,6 +5,17 @@ This page describes how to upgrade from a previous version to a new version
which contains backward incompatible or semi-incompatible changes and how to
preserve the old behavior when this is possible.

Libcloud 3.9.0
--------------

* [AZURE ARM] Added a new ``ex_destroy_os_disk`` argument to ``destroy_node()`` which
  also deletes the node's managed OS disk as part of the node's deletion. It defaults
  to ``True``. The previous behavior can be restored by setting the argument to
  ``False`` in the call:

.. sourcecode:: python

destroy_node(..., ex_destroy_os_disk=False)
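
For illustration, a fuller call might look like the sketch below. The driver
credentials and the node name are placeholders (not part of this change); only
the ``ex_destroy_os_disk`` keyword argument is new in this release:

.. sourcecode:: python

    # Illustrative sketch only -- the tenant/subscription ids, key, secret and
    # the node name are placeholder values, not defaults shipped with Libcloud.
    from libcloud.compute.providers import get_driver
    from libcloud.compute.types import Provider

    cls = get_driver(Provider.AZURE_ARM)
    driver = cls(
        tenant_id="my-tenant-id",
        subscription_id="my-subscription-id",
        key="my-application-id",
        secret="my-client-secret",
    )

    node = [n for n in driver.list_nodes() if n.name == "test_vm"][0]

    # Keep the managed OS disk around instead of deleting it with the VM
    driver.destroy_node(node, ex_destroy_os_disk=False)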

Libcloud 3.8.0
--------------

27 changes: 25 additions & 2 deletions libcloud/compute/drivers/azure_arm.py
@@ -808,6 +808,7 @@ def destroy_node(
ex_destroy_vhd=True,
ex_poll_qty=10,
ex_poll_wait=10,
ex_destroy_os_disk=True,
):
"""
Destroy a node.
@@ -830,12 +831,16 @@ def destroy_node(
:param ex_poll_wait: Delay in seconds between retries (default 10).
:type node: ``int``
:param ex_destroy_os_disk: Destroy the OS disk associated with
this node if it is a managed disk (default True).
:type ex_destroy_os_disk: ``bool``
:return: True if the destroy was successful, raises exception
otherwise.
:rtype: ``bool``
"""

do_node_polling = ex_destroy_nic or ex_destroy_vhd
do_node_polling = ex_destroy_nic or ex_destroy_vhd or ex_destroy_os_disk

# This returns a 202 (Accepted) which means that the delete happens
# asynchronously.
@@ -856,7 +861,7 @@ def destroy_node(
raise

# Poll until the node actually goes away (otherwise attempt to delete
# NIC and VHD will fail with "resource in use" errors).
# NIC, VHD and OS Disk will fail with "resource in use" errors).
retries = ex_poll_qty
while do_node_polling and retries > 0:
try:
@@ -912,6 +917,24 @@ def destroy_node(
raise
time.sleep(10)

# Optionally destroy the OS managed disk
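# The managed disk id (when present) is nested under
# properties.storageProfile.osDisk.managedDisk in node.extra, as in the
# test fixture below.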
managed_disk = (
node.extra.get("properties", {})
.get("storageProfile", {})
.get("osDisk", {})
.get("managedDisk")
)
if ex_destroy_os_disk and managed_disk is not None:
managed_disk_id = managed_disk["id"]
try:
self.connection.request(
managed_disk_id, params={"api-version": DISK_API_VERSION}, method="DELETE"
)
except BaseHTTPError as h:
if h.code not in (202, 204):
# 202 or 204 mean destroy is accepted (but deferred) or already destroyed
raise

return True

def create_volume(
@@ -0,0 +1,51 @@
{
"properties": {
"vmId": "99999999-9999-9999-9999-999999999999",
"hardwareProfile": {
"vmSize": "Standard_A1"
},
"storageProfile": {
"osDisk": {
"osType": "Linux",
"name": "test-disk-1",
"createOption": "FromImage",
"caching": "ReadWrite",
"managedDisk": {
"storageAccountType": "Standard_LRS",
"id": "/subscriptions/99999999-9999-9999-9999-999999999999/resourceGroups/000000/providers/Microsoft.Compute/disks/test-disk-1"
}
},
"dataDisks": [
{
"lun": 0,
"name": "DD0-9999",
"createOption": "Attach",
"caching": "None",
"managedDisk": {
"storageAccountType": "Standard_LRS",
"id": "/subscriptions/99999999-9999-9999-9999-999999999999/resourceGroups/000000/providers/Microsoft.Compute/disks/DD0-9999"
},
"diskSizeGB": 8
}
]
},
"networkProfile": {
"networkInterfaces": [
{
"id": "/subscriptions/99999999-9999-9999-9999-999999999999/resourceGroups/0000/providers/Microsoft.Network/networkInterfaces/test-nic",
"properties": {
"primary": true
}
}
]
},
"provisioningState": "Succeeded"
},
"type": "Microsoft.Compute/virtualMachines",
"location": "eastus",
"tags": {
"tag_key1": "tag_val1"
},
"id": "/subscriptions/99999999-9999-9999-9999-999999999999/resourceGroups/0000/providers/Microsoft.Compute/virtualMachines/test_vm",
"name": "test_vm"
}
43 changes: 43 additions & 0 deletions libcloud/test/compute/test_azure_arm.py
@@ -376,6 +376,49 @@ def error(e, **kwargs):
self.assertTrue(ret)
self.assertEqual(4, time_sleep_mock.call_count) # Retries

@mock.patch("time.sleep", return_value=None)
def test_destroy_node__os_managed_disk(self, time_sleep_mock):
def error(e, **kwargs):
raise e(**kwargs)

node = self.driver.list_nodes()[0]

# ok responses
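# (Assumption about the test harness: AzureMockHttp serves these canned
# responses in the order destroy_node issues its requests -- VM delete,
# node poll, NIC delete, then the managed OS disk delete.)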
for response in (200, 202, 204):
AzureMockHttp.responses = [
# OK to the DELETE request
lambda f: (httplib.OK, None, {}, "OK"),
# 404 - Node destroyed
lambda f: error(BaseHTTPError, code=404, message="Not found"),
# 200 - NIC destroyed
lambda f: (httplib.OK, None, {}, "OK"),
# 200 - managed OS disk destroyed
lambda f: (httplib.OK, None, {}, "OK")
if response == 200
else error(BaseHTTPError, code=response, message="Deleted or deferred"),
]
ret = self.driver.destroy_node(node, ex_destroy_os_disk=True)
self.assertTrue(ret)

@mock.patch("time.sleep", return_value=None)
def test_destroy_node__os_managed_disk_error(self, time_sleep_mock):
def error(e, **kwargs):
raise e(**kwargs)

node = self.driver.list_nodes()[0]
AzureMockHttp.responses = [
# OK to the DELETE request
lambda f: (httplib.OK, None, {}, "OK"),
# 404 - Node destroyed
lambda f: error(BaseHTTPError, code=404, message="Not found"),
# 200 - NIC destroyed
lambda f: (httplib.OK, None, {}, "OK"),
# 500 - transient error when trying to destroy the OS disk
lambda f: error(BaseHTTPError, code=500, message="Cloud weather"),
]
with self.assertRaises(BaseHTTPError):
self.driver.destroy_node(node, ex_destroy_os_disk=True)

@mock.patch("time.sleep", return_value=None)
def test_destroy_node__destroy_nic_retries(self, time_sleep_mock):
def error(e, **kwargs):