From 55ee06248481ed0304881747009172913a001f63 Mon Sep 17 00:00:00 2001 From: Franck Cuny Date: Thu, 13 Mar 2014 21:36:58 -0700 Subject: [PATCH 001/315] Add `ex_set_volume_auto_delete` to the GCE driver. Sets the auto-delete flag for a volume attached to a node. Signed-off-by: Tomaz Muraus --- libcloud/compute/drivers/gce.py | 27 +++++++++++++++++++ ...instances_node_name_setDiskAutoDelete.json | 16 +++++++++++ ...erations_operation_volume_auto_delete.json | 16 +++++++++++ libcloud/test/compute/test_gce.py | 19 +++++++++++++ 4 files changed, 78 insertions(+) create mode 100644 libcloud/test/compute/fixtures/gce/zones_us_central1_a_instances_node_name_setDiskAutoDelete.json create mode 100644 libcloud/test/compute/fixtures/gce/zones_us_central1_a_operations_operation_volume_auto_delete.json diff --git a/libcloud/compute/drivers/gce.py b/libcloud/compute/drivers/gce.py index c3c00d4ad4..abe264e634 100644 --- a/libcloud/compute/drivers/gce.py +++ b/libcloud/compute/drivers/gce.py @@ -1868,6 +1868,33 @@ def detach_volume(self, volume, ex_node=None): data='ignored') return True + def ex_set_volume_auto_delete(self, volume, node, auto_delete=True): + """ + Sets the auto-delete flag for a volume attached to a node. + + :param volume: Volume object to auto-delete + :type volume: :class:`StorageVolume` + + :param node: Node object to auto-delete volume from + :type node: :class:`Node` + + :keyword auto_delete: Flag to set for the auto-delete value + :type auto_delete: ``bool`` (default True) + + :return: True if successful + :rtype: ``bool`` + """ + request = '/zones/%s/instances/%s/setDiskAutoDelete' % ( + node.extra['zone'].name, node.name + ) + delete_params = { + 'deviceName': volume, + 'autoDelete': auto_delete, + } + self.connection.async_request(request, method='POST', + params=delete_params) + return True + def ex_destroy_address(self, address): """ Destroy a static address. 
diff --git a/libcloud/test/compute/fixtures/gce/zones_us_central1_a_instances_node_name_setDiskAutoDelete.json b/libcloud/test/compute/fixtures/gce/zones_us_central1_a_instances_node_name_setDiskAutoDelete.json new file mode 100644 index 0000000000..4087a420c4 --- /dev/null +++ b/libcloud/test/compute/fixtures/gce/zones_us_central1_a_instances_node_name_setDiskAutoDelete.json @@ -0,0 +1,16 @@ +{ + "kind": "compute#operation", + "id": "14265294323024381703", + "name": "operation-volume-auto-delete", + "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a", + "operationType": "setDiskAutoDelete", + "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/dev-test", + "targetId": "4313186599918690450", + "status": "PENDING", + "user": "user@developer.gserviceaccount.com", + "progress": 0, + "insertTime": "2014-03-13T21:50:57.612-07:00", + "startTime": "2014-03-13T21:50:57.717-07:00", + "endTime": "2014-03-13T21:50:58.047-07:00", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-volume-auto-delete" +} diff --git a/libcloud/test/compute/fixtures/gce/zones_us_central1_a_operations_operation_volume_auto_delete.json b/libcloud/test/compute/fixtures/gce/zones_us_central1_a_operations_operation_volume_auto_delete.json new file mode 100644 index 0000000000..dc8231c84b --- /dev/null +++ b/libcloud/test/compute/fixtures/gce/zones_us_central1_a_operations_operation_volume_auto_delete.json @@ -0,0 +1,16 @@ +{ + "kind": "compute#operation", + "id": "14265294323024381703", + "name": "operation-volume-auto-delete", + "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a", + "operationType": "setDiskAutoDelete", + "targetLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/instances/dev-test", + "targetId": "4313186599918690450", + "status": "DONE", + "user": 
"user@developer.gserviceaccount.com", + "progress": 100, + "insertTime": "2014-03-13T21:50:57.612-07:00", + "startTime": "2014-03-13T21:50:57.717-07:00", + "endTime": "2014-03-13T21:50:58.047-07:00", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/operations/operation-volume-auto-delete" +} diff --git a/libcloud/test/compute/test_gce.py b/libcloud/test/compute/test_gce.py index a24be1d1db..91fd9d7c9b 100644 --- a/libcloud/test/compute/test_gce.py +++ b/libcloud/test/compute/test_gce.py @@ -464,6 +464,13 @@ def test_destroy_volume(self): destroyed = disk.destroy() self.assertTrue(destroyed) + def test_ex_set_volume_auto_delete(self): + node = self.driver.ex_get_node('node-name') + volume = node.extra['boot_disk'] + auto_delete = self.driver.ex_set_volume_auto_delete( + volume, node) + self.assertTrue(auto_delete) + def test_destroy_volume_snapshot(self): snapshot = self.driver.ex_get_snapshot('lcsnapshot') destroyed = snapshot.destroy() @@ -876,6 +883,18 @@ def _zones_us_central1_a_operations_operation_zones_us_central1_a_disks_lcdisk_d 'operations_operation_zones_us-central1-a_disks_lcdisk_delete.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + def _zones_us_central1_a_instances_node_name_setDiskAutoDelete( + self, method, url, body, headers): + body = self.fixtures.load( + 'zones_us_central1_a_instances_node_name_setDiskAutoDelete.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + + def _zones_us_central1_a_operations_operation_volume_auto_delete( + self, method, url, body, headers): + body = self.fixtures.load( + 'zones_us_central1_a_operations_operation_volume_auto_delete.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + def _zones_us_central1_a_operations_operation_zones_us_central1_a_disks_lcdisk_createSnapshot_post( self, method, url, body, headers): body = self.fixtures.load( From 
428e14823934a4dfe9ef8a76c283c242f410e165 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Fri, 21 Mar 2014 10:14:47 +0100 Subject: [PATCH 002/315] Update CHANGES. --- CHANGES.rst | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGES.rst b/CHANGES.rst index cd754a23a5..d2b73cca08 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -69,6 +69,10 @@ Compute (GITHUB-258) [Franck Cuny] +- Add new ex_set_volume_auto_delete method to the GCE driver, + (GITHUB-264) + [Franck Cuny] + Load Balancer ~~~~~~~~~~~~~ From ae7d6aebb7fafbc1ea2368da89e8ecfbaf6e62d8 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Fri, 21 Mar 2014 13:39:24 +0100 Subject: [PATCH 003/315] Modify update_record method so user doesn't need to specify all the arguments. --- libcloud/dns/drivers/route53.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/libcloud/dns/drivers/route53.py b/libcloud/dns/drivers/route53.py index 38abca84cc..09dc250b4d 100644 --- a/libcloud/dns/drivers/route53.py +++ b/libcloud/dns/drivers/route53.py @@ -190,7 +190,18 @@ def create_record(self, name, zone, type, data, extra=None): return Record(id=id, name=name, type=type, data=data, zone=zone, driver=self, extra=extra) - def update_record(self, record, name, type, data, extra): + def update_record(self, record, name=None, type=None, data=None, + extra=None): + + if not name: + name = record.name + + if not type: + type = record.type + + if not extra: + extra = record.extra + batch = [ ('DELETE', record.name, record.type, record.data, record.extra), ('CREATE', name, type, data, extra)] From 692c5283459e7f6a6ccf064292fa4c0d1a7e9508 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Fri, 21 Mar 2014 14:31:18 +0100 Subject: [PATCH 004/315] Update Route53 driver so update_record method works correctly for records with multiple values. 
--- libcloud/dns/drivers/route53.py | 128 ++++++++++++++++++++++++++++++-- 1 file changed, 120 insertions(+), 8 deletions(-) diff --git a/libcloud/dns/drivers/route53.py b/libcloud/dns/drivers/route53.py index 09dc250b4d..5cc8664477 100644 --- a/libcloud/dns/drivers/route53.py +++ b/libcloud/dns/drivers/route53.py @@ -21,6 +21,7 @@ import hmac import datetime import uuid +import copy from libcloud.utils.py3 import httplib from hashlib import sha1 @@ -190,9 +191,71 @@ def create_record(self, name, zone, type, data, extra=None): return Record(id=id, name=name, type=type, data=data, zone=zone, driver=self, extra=extra) + def _update_single_value_record(self, record, name=None, type=None, + data=None, extra=None): + batch = [ + ('DELETE', record.name, record.type, record.data, record.extra), + ('CREATE', name, type, data, extra) + ] + + return self._post_changeset(record.zone, batch) + + def _update_multi_value_record(self, record, name=None, type=None, + data=None, extra=None): + other_records = record.extra.get('_other_records', []) + + attrs = {'xmlns': NAMESPACE} + changeset = ET.Element('ChangeResourceRecordSetsRequest', attrs) + batch = ET.SubElement(changeset, 'ChangeBatch') + changes = ET.SubElement(batch, 'Changes') + + # Delete existing records + change = ET.SubElement(changes, 'Change') + ET.SubElement(change, 'Action').text = 'DELETE' + + rrs = ET.SubElement(change, 'ResourceRecordSet') + ET.SubElement(rrs, 'Name').text = record.name + '.' + \ + record.zone.domain + ET.SubElement(rrs, 'Type').text = self.RECORD_TYPE_MAP[record.type] + ET.SubElement(rrs, 'TTL').text = str(record.extra.get('ttl', '0')) + + rrecs = ET.SubElement(rrs, 'ResourceRecords') + + rrec = ET.SubElement(rrecs, 'ResourceRecord') + ET.SubElement(rrec, 'Value').text = record.data + + for other_record in other_records: + rrec = ET.SubElement(rrecs, 'ResourceRecord') + ET.SubElement(rrec, 'Value').text = other_record['data'] + + # Re-create new (updated) records. 
Since we are updating a multi value + # record, only a single record is updated and others are left as is. + change = ET.SubElement(changes, 'Change') + ET.SubElement(change, 'Action').text = 'CREATE' + + rrs = ET.SubElement(change, 'ResourceRecordSet') + ET.SubElement(rrs, 'Name').text = name + '.' + record.zone.domain + ET.SubElement(rrs, 'Type').text = self.RECORD_TYPE_MAP[type] + ET.SubElement(rrs, 'TTL').text = str(extra.get('ttl', '0')) + + rrecs = ET.SubElement(rrs, 'ResourceRecords') + + rrec = ET.SubElement(rrecs, 'ResourceRecord') + ET.SubElement(rrec, 'Value').text = data + + for other_record in other_records: + rrec = ET.SubElement(rrecs, 'ResourceRecord') + ET.SubElement(rrec, 'Value').text = other_record['data'] + + uri = API_ROOT + 'hostedzone/' + record.zone.id + '/rrset' + data = ET.tostring(changeset) + self.connection.set_context({'zone_id': record.zone.id}) + response = self.connection.request(uri, method='POST', data=data) + + return response.status == httplib.OK + def update_record(self, record, name=None, type=None, data=None, extra=None): - if not name: name = record.name @@ -202,10 +265,20 @@ def update_record(self, record, name=None, type=None, data=None, if not extra: extra = record.extra - batch = [ - ('DELETE', record.name, record.type, record.data, record.extra), - ('CREATE', name, type, data, extra)] - self._post_changeset(record.zone, batch) + # Multiple value records need to be handled specially - we need to + # pass values for other records as well + multiple_value_record = record.extra.get('_multi_value', False) + other_records = record.extra.get('_other_records', []) + + if multiple_value_record and other_records: + self._update_multi_value_record(record=record, name=name, + type=type, data=data, + extra=extra) + else: + self._update_single_value_record(record=record, name=name, + type=type, data=data, + extra=extra) + id = ':'.join((self.RECORD_TYPE_MAP[type], name)) return Record(id=id, name=name, type=type, data=data, 
zone=record.zone, driver=self, extra=extra) @@ -247,7 +320,7 @@ def _post_changeset(self, zone, changes_list): ET.SubElement(change, 'Action').text = action rrs = ET.SubElement(change, 'ResourceRecordSet') - ET.SubElement(rrs, 'Name').text = name + "." + zone.domain + ET.SubElement(rrs, 'Name').text = name + '.' + zone.domain ET.SubElement(rrs, 'Type').text = self.RECORD_TYPE_MAP[type_] ET.SubElement(rrs, 'TTL').text = str(extra.get('ttl', '0')) @@ -258,7 +331,9 @@ def _post_changeset(self, zone, changes_list): uri = API_ROOT + 'hostedzone/' + zone.id + '/rrset' data = ET.tostring(changeset) self.connection.set_context({'zone_id': zone.id}) - self.connection.request(uri, method='POST', data=data) + response = self.connection.request(uri, method='POST', data=data) + + return response.status == httplib.OK def _to_zones(self, data): zones = [] @@ -294,8 +369,45 @@ def _to_records(self, data, zone): record_set = elem.findall(fixxpath( xpath='ResourceRecords/ResourceRecord', namespace=NAMESPACE)) + record_count = len(record_set) + multiple_value_record = (record_count > 1) + + record_set_records = [] + for index, record in enumerate(record_set): - records.append(self._to_record(elem, zone, index)) + # Need to special handling for records with multiple values for + # update to work correctly + record = self._to_record(elem=elem, zone=zone, index=index) + record.extra['_multi_value'] = multiple_value_record + + if multiple_value_record: + record.extra['_other_records'] = [] + + record_set_records.append(record) + + # Store reference to other records so update works correctly + if multiple_value_record: + for index in range(0, len(record_set_records)): + record = record_set_records[index] + + for other_index, other_record in \ + enumerate(record_set_records): + if index == other_index: + # Skip current record + continue + + extra = copy.deepcopy(other_record.extra) + extra.pop('_multi_value') + extra.pop('_other_records') + + item = {'name': other_record.name, + 'data': 
other_record.data, + 'type': other_record.type, + 'extra': extra} + record.extra['_other_records'].append(item) + + records.extend(record_set_records) + return records def _to_record(self, elem, zone, index=0): From 5e5eaeab4c4d24bae199f5bc061fcc3baab336cd Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Fri, 21 Mar 2014 14:46:44 +0100 Subject: [PATCH 005/315] Use shorter notation. --- libcloud/dns/drivers/route53.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/libcloud/dns/drivers/route53.py b/libcloud/dns/drivers/route53.py index 5cc8664477..86e37544c0 100644 --- a/libcloud/dns/drivers/route53.py +++ b/libcloud/dns/drivers/route53.py @@ -185,6 +185,7 @@ def delete_zone(self, zone, ex_delete_records=False): return response.status in [httplib.OK] def create_record(self, name, zone, type, data, extra=None): + extra = extra or {} batch = [('CREATE', name, type, data, extra)] self._post_changeset(zone, batch) id = ':'.join((self.RECORD_TYPE_MAP[type], name)) @@ -256,11 +257,9 @@ def _update_multi_value_record(self, record, name=None, type=None, def update_record(self, record, name=None, type=None, data=None, extra=None): - if not name: - name = record.name - - if not type: - type = record.type + name = name or record.name + type = type or record.type + extra = extra or record.extra if not extra: extra = record.extra From 9ececf6531dcf10c80615f0f0fe51ea4aab85c65 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Fri, 21 Mar 2014 14:46:53 +0100 Subject: [PATCH 006/315] Update CHANGES. --- CHANGES.rst | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/CHANGES.rst b/CHANGES.rst index d2b73cca08..95f7ffa7fa 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -80,6 +80,13 @@ Load Balancer (LIBCLOUD-522, GITHUB-253) [Rahul Ranjan] +DNS +~~~ + +- Fix update_record method in the Route53 driver so it works correctly for + records with multiple values. 
+ [Tomaz Muraus] + Changes with Apache Libcloud 0.14.1 ----------------------------------- From 243e97f0202f7e2040163da359249a5766db08cd Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Fri, 21 Mar 2014 14:48:54 +0100 Subject: [PATCH 007/315] Re-order methods. --- libcloud/dns/drivers/route53.py | 106 ++++++++++++++++---------------- 1 file changed, 53 insertions(+), 53 deletions(-) diff --git a/libcloud/dns/drivers/route53.py b/libcloud/dns/drivers/route53.py index 86e37544c0..19a5bdf5d8 100644 --- a/libcloud/dns/drivers/route53.py +++ b/libcloud/dns/drivers/route53.py @@ -192,6 +192,59 @@ def create_record(self, name, zone, type, data, extra=None): return Record(id=id, name=name, type=type, data=data, zone=zone, driver=self, extra=extra) + def update_record(self, record, name=None, type=None, data=None, + extra=None): + name = name or record.name + type = type or record.type + extra = extra or record.extra + + if not extra: + extra = record.extra + + # Multiple value records need to be handled specially - we need to + # pass values for other records as well + multiple_value_record = record.extra.get('_multi_value', False) + other_records = record.extra.get('_other_records', []) + + if multiple_value_record and other_records: + self._update_multi_value_record(record=record, name=name, + type=type, data=data, + extra=extra) + else: + self._update_single_value_record(record=record, name=name, + type=type, data=data, + extra=extra) + + id = ':'.join((self.RECORD_TYPE_MAP[type], name)) + return Record(id=id, name=name, type=type, data=data, zone=record.zone, + driver=self, extra=extra) + + def delete_record(self, record): + try: + r = record + batch = [('DELETE', r.name, r.type, r.data, r.extra)] + self._post_changeset(record.zone, batch) + except InvalidChangeBatch: + raise RecordDoesNotExistError(value='', driver=self, + record_id=r.id) + return True + + def ex_delete_all_records(self, zone): + """ + Remove all the records for the provided zone. 
+ + :param zone: Zone to delete records for. + :type zone: :class:`Zone` + """ + deletions = [] + for r in zone.list_records(): + if r.type in (RecordType.NS, RecordType.SOA): + continue + deletions.append(('DELETE', r.name, r.type, r.data, r.extra)) + + if deletions: + self._post_changeset(zone, deletions) + def _update_single_value_record(self, record, name=None, type=None, data=None, extra=None): batch = [ @@ -255,59 +308,6 @@ def _update_multi_value_record(self, record, name=None, type=None, return response.status == httplib.OK - def update_record(self, record, name=None, type=None, data=None, - extra=None): - name = name or record.name - type = type or record.type - extra = extra or record.extra - - if not extra: - extra = record.extra - - # Multiple value records need to be handled specially - we need to - # pass values for other records as well - multiple_value_record = record.extra.get('_multi_value', False) - other_records = record.extra.get('_other_records', []) - - if multiple_value_record and other_records: - self._update_multi_value_record(record=record, name=name, - type=type, data=data, - extra=extra) - else: - self._update_single_value_record(record=record, name=name, - type=type, data=data, - extra=extra) - - id = ':'.join((self.RECORD_TYPE_MAP[type], name)) - return Record(id=id, name=name, type=type, data=data, zone=record.zone, - driver=self, extra=extra) - - def delete_record(self, record): - try: - r = record - batch = [('DELETE', r.name, r.type, r.data, r.extra)] - self._post_changeset(record.zone, batch) - except InvalidChangeBatch: - raise RecordDoesNotExistError(value='', driver=self, - record_id=r.id) - return True - - def ex_delete_all_records(self, zone): - """ - Remove all the records for the provided zone. - - :param zone: Zone to delete records for. 
- :type zone: :class:`Zone` - """ - deletions = [] - for r in zone.list_records(): - if r.type in (RecordType.NS, RecordType.SOA): - continue - deletions.append(('DELETE', r.name, r.type, r.data, r.extra)) - - if deletions: - self._post_changeset(zone, deletions) - def _post_changeset(self, zone, changes_list): attrs = {'xmlns': NAMESPACE} changeset = ET.Element('ChangeResourceRecordSetsRequest', attrs) From cd680cc760d98560ff7ace1e2ecf5e7aef664d05 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Fri, 21 Mar 2014 15:20:01 +0100 Subject: [PATCH 008/315] Add ex_create_multi_value_record method to the Route53 driver which allows user to create a record with multiple values with a single call. --- CHANGES.rst | 4 +++ libcloud/dns/drivers/route53.py | 47 +++++++++++++++++++++++++++++++++ 2 files changed, 51 insertions(+) diff --git a/CHANGES.rst b/CHANGES.rst index 95f7ffa7fa..744b7fce49 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -87,6 +87,10 @@ DNS records with multiple values. [Tomaz Muraus] +- Add ex_create_multi_value_record method to the Route53 driver which allows + user to create a record with multiple values with a single call. + [Tomaz Muraus] + Changes with Apache Libcloud 0.14.1 ----------------------------------- diff --git a/libcloud/dns/drivers/route53.py b/libcloud/dns/drivers/route53.py index 19a5bdf5d8..d07bb6cddb 100644 --- a/libcloud/dns/drivers/route53.py +++ b/libcloud/dns/drivers/route53.py @@ -229,6 +229,53 @@ def delete_record(self, record): record_id=r.id) return True + def ex_create_multi_value_record(self, name, zone, type, data, extra=None): + """ + Create a record with multiple values with a single call. + + :return: A list of created records. 
+ :rtype: ``list`` of :class:`libcloud.dns.base.Record` + """ + extra = extra or {} + + attrs = {'xmlns': NAMESPACE} + changeset = ET.Element('ChangeResourceRecordSetsRequest', attrs) + batch = ET.SubElement(changeset, 'ChangeBatch') + changes = ET.SubElement(batch, 'Changes') + + change = ET.SubElement(changes, 'Change') + ET.SubElement(change, 'Action').text = 'CREATE' + + rrs = ET.SubElement(change, 'ResourceRecordSet') + ET.SubElement(rrs, 'Name').text = name + '.' + zone.domain + ET.SubElement(rrs, 'Type').text = self.RECORD_TYPE_MAP[type] + ET.SubElement(rrs, 'TTL').text = str(extra.get('ttl', '0')) + + rrecs = ET.SubElement(rrs, 'ResourceRecords') + + # Value is provided as a multi line string + values = [value.strip() for value in data.split('\n') if + value.strip()] + + for value in values: + rrec = ET.SubElement(rrecs, 'ResourceRecord') + ET.SubElement(rrec, 'Value').text = value + + uri = API_ROOT + 'hostedzone/' + zone.id + '/rrset' + data = ET.tostring(changeset) + self.connection.set_context({'zone_id': zone.id}) + self.connection.request(uri, method='POST', data=data) + + id = ':'.join((self.RECORD_TYPE_MAP[type], name)) + + records = [] + for value in values: + record = Record(id=id, name=name, type=type, data=value, zone=zone, + driver=self, extra=extra) + records.append(record) + + return records + def ex_delete_all_records(self, zone): """ Remove all the records for the provided zone. From 49536da89271b1c0eeb3c1babeb00284bf074477 Mon Sep 17 00:00:00 2001 From: Franck Cuny Date: Fri, 21 Mar 2014 20:34:54 -0700 Subject: [PATCH 009/315] Fix links to Google Storage's documentation. Closes #268. 
Signed-off-by: Tomaz Muraus --- docs/storage/drivers/google_storage.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/storage/drivers/google_storage.rst b/docs/storage/drivers/google_storage.rst index e9b89d3ae6..4ac3e01e46 100644 --- a/docs/storage/drivers/google_storage.rst +++ b/docs/storage/drivers/google_storage.rst @@ -20,5 +20,5 @@ API Docs :members: :inherited-members: -.. _`XML API v1.0`: https://developers.google.com/storage/docs/reference/v1/apiversion1 -.. _`official documentation`: https://developers.google.com/storage/docs/reference/v1/apiversion1#new +.. _`XML API v1.0`: https://developers.google.com/storage/docs/reference-guide +.. _`official documentation`: https://developers.google.com/storage/docs/reference-guide From dcfe4d9f88dcc948f13a5d6d291837fc64fa51ec Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Wed, 26 Mar 2014 12:02:54 +0100 Subject: [PATCH 010/315] Add ex_revoke_security_group_ingress method to the CloudStack driver and a work-around for a nasty inconsistency bug in CloudStack. --- CHANGES.rst | 3 +++ libcloud/common/cloudstack.py | 5 ++++- libcloud/compute/drivers/cloudstack.py | 15 +++++++++++++++ 3 files changed, 22 insertions(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index 744b7fce49..b98df33825 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -73,6 +73,9 @@ Compute (GITHUB-264) [Franck Cuny] +- Add ex_revoke_security_group_ingress method to the CloudStack driver. 
+ [Chris DeRamus, Tomaz Muraus] + Load Balancer ~~~~~~~~~~~~~ diff --git a/libcloud/common/cloudstack.py b/libcloud/common/cloudstack.py index 65810af488..99699d0699 100644 --- a/libcloud/common/cloudstack.py +++ b/libcloud/common/cloudstack.py @@ -156,7 +156,10 @@ def _sync_request(self, command, action=None, params=None, data=None, result = self.request(action=self.driver.path, params=params, data=data, headers=headers, method=method) - command = command.lower() + 'response' + command = command.lower() + + if command not in ['revokesecuritygroupingress']: + command = command + 'response' if command not in result.object: raise MalformedResponseError( diff --git a/libcloud/compute/drivers/cloudstack.py b/libcloud/compute/drivers/cloudstack.py index 83e09b89cb..c68c884874 100644 --- a/libcloud/compute/drivers/cloudstack.py +++ b/libcloud/compute/drivers/cloudstack.py @@ -1752,6 +1752,21 @@ def ex_authorize_security_group_ingress(self, securitygroupname, params=args, method='GET')['securitygroup'] + def ex_revoke_security_group_ingress(self, rule_id): + """ + Revoke/delete an ingress security rule + + :param rule_id: The ID of the ingress security rule + :type rule_id: ``str`` + + :rtype: ``bool`` + """ + + self._async_request(command='revokeSecurityGroupIngress', + params={'id': rule_id}, + method='GET') + return True + def ex_register_iso(self, name, url, location=None, **kwargs): """ Registers an existing ISO by URL. From 121daef8096b5bdaf6c03451ef74294b6c5f8222 Mon Sep 17 00:00:00 2001 From: Franck Cuny Date: Sun, 30 Mar 2014 10:21:09 -0700 Subject: [PATCH 011/315] Sort values in RECORD_TYPE_MAP. Having the values sorted makes it easier to spot duplicates. 
Closes #270 Signed-off-by: Tomaz Muraus --- libcloud/dns/drivers/gandi.py | 10 +++++----- libcloud/dns/drivers/hostvirtual.py | 2 +- libcloud/dns/drivers/rackspace.py | 4 ++-- libcloud/dns/drivers/route53.py | 9 ++++----- libcloud/dns/drivers/zerigo.py | 8 ++++---- 5 files changed, 16 insertions(+), 17 deletions(-) diff --git a/libcloud/dns/drivers/gandi.py b/libcloud/dns/drivers/gandi.py index 8a8f68a0a8..48a6ed5d9e 100644 --- a/libcloud/dns/drivers/gandi.py +++ b/libcloud/dns/drivers/gandi.py @@ -88,16 +88,16 @@ class GandiDNSDriver(BaseGandiDriver, DNSDriver): connectionCls = GandiDNSConnection RECORD_TYPE_MAP = { - RecordType.NS: 'NS', - RecordType.MX: 'MX', RecordType.A: 'A', RecordType.AAAA: 'AAAA', RecordType.CNAME: 'CNAME', - RecordType.TXT: 'TXT', - RecordType.SRV: 'SRV', + RecordType.LOC: 'LOC', + RecordType.MX: 'MX', + RecordType.NS: 'NS', RecordType.SPF: 'SPF', + RecordType.SRV: 'SRV', + RecordType.TXT: 'TXT', RecordType.WKS: 'WKS', - RecordType.LOC: 'LOC', } def _to_zone(self, zone): diff --git a/libcloud/dns/drivers/hostvirtual.py b/libcloud/dns/drivers/hostvirtual.py index 116d3850b0..1d9eec2ce6 100644 --- a/libcloud/dns/drivers/hostvirtual.py +++ b/libcloud/dns/drivers/hostvirtual.py @@ -65,9 +65,9 @@ class HostVirtualDNSDriver(DNSDriver): RecordType.AAAA: 'AAAA', RecordType.CNAME: 'CNAME', RecordType.MX: 'MX', - RecordType.TXT: 'TXT', RecordType.NS: 'SPF', RecordType.SRV: 'SRV', + RecordType.TXT: 'TXT', } def __init__(self, key, secure=True, host=None, port=None): diff --git a/libcloud/dns/drivers/rackspace.py b/libcloud/dns/drivers/rackspace.py index ea7dfef1e5..fc7da74220 100644 --- a/libcloud/dns/drivers/rackspace.py +++ b/libcloud/dns/drivers/rackspace.py @@ -145,9 +145,9 @@ def __init__(self, key, secret=None, secure=True, host=None, port=None, RecordType.CNAME: 'CNAME', RecordType.MX: 'MX', RecordType.NS: 'NS', - RecordType.TXT: 'TXT', - RecordType.SRV: 'SRV', RecordType.PTR: 'PTR', + RecordType.SRV: 'SRV', + RecordType.TXT: 'TXT', } def 
iterate_zones(self): diff --git a/libcloud/dns/drivers/route53.py b/libcloud/dns/drivers/route53.py index d07bb6cddb..f1a8f34920 100644 --- a/libcloud/dns/drivers/route53.py +++ b/libcloud/dns/drivers/route53.py @@ -103,17 +103,16 @@ class Route53DNSDriver(DNSDriver): connectionCls = Route53Connection RECORD_TYPE_MAP = { - RecordType.NS: 'NS', - RecordType.MX: 'MX', RecordType.A: 'A', RecordType.AAAA: 'AAAA', RecordType.CNAME: 'CNAME', - RecordType.TXT: 'TXT', - RecordType.SRV: 'SRV', + RecordType.MX: 'MX', + RecordType.NS: 'NS', RecordType.PTR: 'PTR', RecordType.SOA: 'SOA', RecordType.SPF: 'SPF', - RecordType.TXT: 'TXT' + RecordType.SRV: 'SRV', + RecordType.TXT: 'TXT', } def iterate_zones(self): diff --git a/libcloud/dns/drivers/zerigo.py b/libcloud/dns/drivers/zerigo.py index 892b4fbedb..0af7a9f453 100644 --- a/libcloud/dns/drivers/zerigo.py +++ b/libcloud/dns/drivers/zerigo.py @@ -134,15 +134,15 @@ class ZerigoDNSDriver(DNSDriver): RecordType.A: 'A', RecordType.AAAA: 'AAAA', RecordType.CNAME: 'CNAME', + RecordType.GEO: 'GEO', RecordType.MX: 'MX', - RecordType.REDIRECT: 'REDIRECT', - RecordType.TXT: 'TXT', - RecordType.SRV: 'SRV', RecordType.NAPTR: 'NAPTR', RecordType.NS: 'NS', RecordType.PTR: 'PTR', + RecordType.REDIRECT: 'REDIRECT', RecordType.SPF: 'SPF', - RecordType.GEO: 'GEO', + RecordType.SRV: 'SRV', + RecordType.TXT: 'TXT', RecordType.URL: 'URL', } From ecd63e84b2eb87b5b6f5fdbd197126f18df77822 Mon Sep 17 00:00:00 2001 From: Franck Cuny Date: Sun, 30 Mar 2014 11:13:21 -0700 Subject: [PATCH 012/315] flake8's --exclude flag doesn't like / Signed-off-by: Tomaz Muraus --- tox.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index 157031a401..4f581e2a5e 100644 --- a/tox.ini +++ b/tox.ini @@ -47,7 +47,7 @@ commands = python ../contrib/generate_provider_feature_matrix_table.py [testenv:lint] deps = flake8 -commands = flake8 --exclude="test/" libcloud/ +commands = flake8 --exclude="test" libcloud/ flake8 --max-line-length=160 
libcloud/test/ flake8 demos/ flake8 --ignore=E902 docs/examples/ From e66146fe11a05a3110dec13b6e956a6c3562aa70 Mon Sep 17 00:00:00 2001 From: Franck Cuny Date: Sun, 30 Mar 2014 11:17:34 -0700 Subject: [PATCH 013/315] Fix for PEP8's E265. pep8 1.5 introduced a new test (E265) to ensure there's at least one space before block comment. Signed-off-by: Tomaz Muraus --- docs/examples/compute/openstack/hpcloud.py | 4 +- libcloud/common/base.py | 6 +- libcloud/compute/drivers/abiquo.py | 2 +- libcloud/compute/drivers/dreamhost.py | 2 +- libcloud/compute/drivers/ecp.py | 68 +++++++++++----------- libcloud/compute/drivers/hostvirtual.py | 2 +- libcloud/compute/drivers/nephoscale.py | 16 ++--- libcloud/compute/drivers/openstack.py | 2 +- libcloud/compute/drivers/opsource.py | 5 +- libcloud/compute/drivers/rimuhosting.py | 2 +- libcloud/compute/drivers/softlayer.py | 2 +- libcloud/loadbalancer/drivers/rackspace.py | 2 +- libcloud/storage/drivers/s3.py | 4 +- libcloud/test/compute/test_gridspot.py | 2 +- libcloud/test/compute/test_openstack.py | 10 ++-- libcloud/test/dns/test_route53.py | 2 +- libcloud/test/test_connection.py | 4 +- 17 files changed, 68 insertions(+), 67 deletions(-) diff --git a/docs/examples/compute/openstack/hpcloud.py b/docs/examples/compute/openstack/hpcloud.py index 0b226b5c8e..cbfda3d9a1 100644 --- a/docs/examples/compute/openstack/hpcloud.py +++ b/docs/examples/compute/openstack/hpcloud.py @@ -8,7 +8,7 @@ OpenStack = get_driver(Provider.OPENSTACK) -#HP Cloud US West +# HP Cloud US West driver = OpenStack('your_auth_username', 'your_auth_password', ex_force_auth_version='2.0_password', ex_force_auth_url=HPCLOUD_AUTH_URL_USWEST, @@ -16,7 +16,7 @@ ex_force_service_region='region-a.geo-1', ex_force_service_name='Compute') -#HP Cloud US East +# HP Cloud US East driver = OpenStack('your_auth_username', 'your_auth_password', ex_force_auth_version='2.0_password', ex_force_auth_url=HPCLOUD_AUTH_URL_USEAST, diff --git a/libcloud/common/base.py 
b/libcloud/common/base.py index 178c218705..8537c07815 100644 --- a/libcloud/common/base.py +++ b/libcloud/common/base.py @@ -259,7 +259,7 @@ def reason(self): return self._reason -#TODO: Move this to a better location/package +# TODO: Move this to a better location/package class LoggingConnection(): """ Debug class to log all HTTP(s) requests as they could be made @@ -402,7 +402,7 @@ class Connection(object): """ A Base Connection class to derive from. """ - #conn_classes = (LoggingHTTPSConnection) + # conn_classes = (LoggingHTTPSConnection) conn_classes = (LibcloudHTTPConnection, LibcloudHTTPSConnection) responseCls = Response @@ -523,7 +523,7 @@ def connect(self, host=None, port=None, base_url=None): # which proxies to your endpoint, and lets you easily capture # connections in cleartext when you setup the proxy to do SSL # for you - #connection = self.conn_classes[False]("127.0.0.1", 8080) + # connection = self.conn_classes[False]("127.0.0.1", 8080) self.connection = connection diff --git a/libcloud/compute/drivers/abiquo.py b/libcloud/compute/drivers/abiquo.py index b3e2d2efb8..398087bec3 100644 --- a/libcloud/compute/drivers/abiquo.py +++ b/libcloud/compute/drivers/abiquo.py @@ -573,7 +573,7 @@ def _to_node(self, vm, driver): repo_link = get_href(image_element, 'datacenterrepository') image = self._to_nodeimage(image_element, self, repo_link) - ## Fill the 'ips' data + # Fill the 'ips' data private_ips = [] public_ips = [] nics_element = self.connection.request(get_href(vm, 'nics')).object diff --git a/libcloud/compute/drivers/dreamhost.py b/libcloud/compute/drivers/dreamhost.py index d0a2396a7e..cba445c39f 100644 --- a/libcloud/compute/drivers/dreamhost.py +++ b/libcloud/compute/drivers/dreamhost.py @@ -116,7 +116,7 @@ def add_default_params(self, params): """ params['key'] = self.key params['format'] = self.format - #params['unique_id'] = generate_unique_id() + # params['unique_id'] = generate_unique_id() return params diff --git 
a/libcloud/compute/drivers/ecp.py b/libcloud/compute/drivers/ecp.py index 021e7b0ea0..79b57fa5b2 100644 --- a/libcloud/compute/drivers/ecp.py +++ b/libcloud/compute/drivers/ecp.py @@ -38,7 +38,7 @@ from libcloud.compute.types import Provider, NodeState, InvalidCredsError from libcloud.utils.networking import is_private_subnet -#Defaults +# Defaults API_HOST = '' API_PORT = (80, 443) @@ -65,7 +65,7 @@ def success(self): def parse_error(self): return self.error - #Interpret the json responses - no error checking required + # Interpret the json responses - no error checking required def parse_body(self): return json.loads(self.body) @@ -83,7 +83,7 @@ class ECPConnection(ConnectionUserAndKey): port = API_PORT def add_default_headers(self, headers): - #Authentication + # Authentication username = self.user_id password = self.key base64string = base64.encodestring( @@ -98,7 +98,7 @@ def _encode_multipart_formdata(self, fields): Based on Wade Leftwich's function: http://code.activestate.com/recipes/146306/ """ - #use a random boundary that does not appear in the fields + # use a random boundary that does not appear in the fields boundary = '' while boundary in ''.join(fields): boundary = binascii.hexlify(os.urandom(16)).decode('utf-8') @@ -133,17 +133,17 @@ def list_nodes(self): :rtype: ``list`` of :class:`Node` """ - #Make the call + # Make the call res = self.connection.request('/rest/hosting/vm/list').parse_body() - #Put together a list of node objects + # Put together a list of node objects nodes = [] for vm in res['vms']: node = self._to_node(vm) - if not node is None: + if node is not None: nodes.append(node) - #And return it + # And return it return nodes def _to_node(self, vm): @@ -152,11 +152,11 @@ def _to_node(self, vm): This returns only running VMs. 
""" - #Check state + # Check state if not vm['state'] == "running": return None - #IPs + # IPs iplist = [interface['ip'] for interface in vm['interfaces'] if interface['ip'] != '127.0.0.1'] @@ -173,7 +173,7 @@ def _to_node(self, vm): else: public_ips.append(ip) - #Create the node object + # Create the node object n = Node( id=vm['uuid'], name=vm['name'], @@ -192,8 +192,8 @@ def reboot_node(self, node): @inherits: :class:`NodeDriver.reboot_node` """ - #Turn the VM off - #Black magic to make the POST requests work + # Turn the VM off + # Black magic to make the POST requests work d = self.connection._encode_multipart_formdata({'action': 'stop'}) self.connection.request( '/rest/hosting/vm/%s' % node.id, @@ -203,9 +203,9 @@ def reboot_node(self, node): ).parse_body() node.state = NodeState.REBOOTING - #Wait for it to turn off and then continue (to turn it on again) + # Wait for it to turn off and then continue (to turn it on again) while node.state == NodeState.REBOOTING: - #Check if it's off. + # Check if it's off. response = self.connection.request( '/rest/hosting/vm/%s' % node.id ).parse_body() @@ -214,8 +214,8 @@ def reboot_node(self, node): else: time.sleep(5) - #Turn the VM back on. - #Black magic to make the POST requests work + # Turn the VM back on. 
+ # Black magic to make the POST requests work d = self.connection._encode_multipart_formdata({'action': 'start'}) self.connection.request( '/rest/hosting/vm/%s' % node.id, @@ -234,8 +234,8 @@ def destroy_node(self, node): @inherits: :class:`NodeDriver.destroy_node` """ - #Shut down first - #Black magic to make the POST requests work + # Shut down first + # Black magic to make the POST requests work d = self.connection._encode_multipart_formdata({'action': 'stop'}) self.connection.request( '/rest/hosting/vm/%s' % node.id, @@ -244,11 +244,11 @@ def destroy_node(self, node): data=d[1] ).parse_body() - #Ensure there was no applicationl level error + # Ensure there was no applicationl level error node.state = NodeState.PENDING - #Wait for the VM to turn off before continuing + # Wait for the VM to turn off before continuing while node.state == NodeState.PENDING: - #Check if it's off. + # Check if it's off. response = self.connection.request( '/rest/hosting/vm/%s' % node.id ).parse_body() @@ -257,8 +257,8 @@ def destroy_node(self, node): else: time.sleep(5) - #Delete the VM - #Black magic to make the POST requests work + # Delete the VM + # Black magic to make the POST requests work d = self.connection._encode_multipart_formdata({'action': 'delete'}) self.connection.request( '/rest/hosting/vm/%s' % (node.id), @@ -276,11 +276,11 @@ def list_images(self, location=None): @inherits: :class:`NodeDriver.list_images` """ - #Make the call + # Make the call response = self.connection.request( '/rest/hosting/ptemplate/list').parse_body() - #Turn the response into an array of NodeImage objects + # Turn the response into an array of NodeImage objects images = [] for ptemplate in response['packages']: images.append(NodeImage( @@ -298,11 +298,11 @@ def list_sizes(self, location=None): @inherits: :class:`NodeDriver.list_sizes` """ - #Make the call + # Make the call response = self.connection.request( '/rest/hosting/htemplate/list').parse_body() - #Turn the response into an array of 
NodeSize objects + # Turn the response into an array of NodeSize objects sizes = [] for htemplate in response['templates']: sizes.append(NodeSize( @@ -346,15 +346,15 @@ def create_node(self, **kwargs): :rtype: :class:`Node` """ - #Find out what network to put the VM on. + # Find out what network to put the VM on. res = self.connection.request( '/rest/hosting/network/list').parse_body() - #Use the first / default network because there is no way to specific - #which one + # Use the first / default network because there is no way to specific + # which one network = res['networks'][0]['uuid'] - #Prepare to make the VM + # Prepare to make the VM data = { 'name': str(kwargs['name']), 'package': str(kwargs['image'].id), @@ -363,7 +363,7 @@ def create_node(self, **kwargs): 'disk': '' } - #Black magic to make the POST requests work + # Black magic to make the POST requests work d = self.connection._encode_multipart_formdata(data) response = self.connection.request( '/rest/hosting/vm/', @@ -372,7 +372,7 @@ def create_node(self, **kwargs): data=d[1] ).parse_body() - #Create a node object and return it. + # Create a node object and return it. 
n = Node( id=response['machine_id'], name=data['name'], diff --git a/libcloud/compute/drivers/hostvirtual.py b/libcloud/compute/drivers/hostvirtual.py index ebef915cff..e9fa6c80a5 100644 --- a/libcloud/compute/drivers/hostvirtual.py +++ b/libcloud/compute/drivers/hostvirtual.py @@ -211,7 +211,7 @@ def reboot_node(self, node): def destroy_node(self, node): params = { 'mbpkgid': node.id, - #'reason': 'Submitted through Libcloud API' + # 'reason': 'Submitted through Libcloud API' } result = self.connection.request( diff --git a/libcloud/compute/drivers/nephoscale.py b/libcloud/compute/drivers/nephoscale.py index efe5089347..1888ac8f50 100644 --- a/libcloud/compute/drivers/nephoscale.py +++ b/libcloud/compute/drivers/nephoscale.py @@ -48,10 +48,10 @@ VALID_RESPONSE_CODES = [httplib.OK, httplib.ACCEPTED, httplib.CREATED, httplib.NO_CONTENT] -#used in create_node and specifies how many times to get the list of nodes and -#check if the newly created node is there. This is because when a request is -#sent to create a node, NephoScale replies with the job id, and not the node -#itself thus we don't have the ip addresses, that are required in deploy_node +# used in create_node and specifies how many times to get the list of nodes and +# check if the newly created node is there. 
This is because when a request is +# sent to create a node, NephoScale replies with the job id, and not the node +# itself thus we don't have the ip addresses, that are required in deploy_node CONNECT_ATTEMPTS = 10 @@ -388,9 +388,9 @@ def create_node(self, name, size, image, server_key=None, if not nowait: return node else: - #try to get the created node public ips, for use in deploy_node - #At this point we don't have the id of the newly created Node, - #so search name in nodes + # try to get the created node public ips, for use in deploy_node + # At this point we don't have the id of the newly created Node, + # so search name in nodes created_node = False while connect_attempts > 0: nodes = self.list_nodes() @@ -411,7 +411,7 @@ def _to_node(self, data): public_ips = [] private_ips = [] ip_addresses = data.get('ipaddresses', '') - #E.g. "ipaddresses": "198.120.14.6, 10.132.60.1" + # E.g. "ipaddresses": "198.120.14.6, 10.132.60.1" if ip_addresses: for ip in ip_addresses.split(','): ip = ip.replace(' ', '') diff --git a/libcloud/compute/drivers/openstack.py b/libcloud/compute/drivers/openstack.py index c7d0b093f1..83a5dadf11 100644 --- a/libcloud/compute/drivers/openstack.py +++ b/libcloud/compute/drivers/openstack.py @@ -1989,7 +1989,7 @@ def _to_node(self, api_node): public_ips.extend(ips) else: for ip in ips: - #is_private_subnet does not check for ipv6 + # is_private_subnet does not check for ipv6 try: if is_private_subnet(ip): private_ips.append(ip) diff --git a/libcloud/compute/drivers/opsource.py b/libcloud/compute/drivers/opsource.py index c38b042430..a2024c3828 100644 --- a/libcloud/compute/drivers/opsource.py +++ b/libcloud/compute/drivers/opsource.py @@ -46,7 +46,7 @@ # x implement list_images() (only support Base OS images, # no customer images yet) # x implement list_locations() -# x implement ex_* extension functions for opsource-specific featurebody =s +# x implement ex_* extension functions for opsource-specific featurebody # x ex_graceful_shutdown # 
x ex_start_node # x ex_power_off @@ -67,7 +67,8 @@ # - delete customer images # - modify customer images # - add "pending-servers" in list_nodes() -# - implement various ex_* extension functions for opsource-specific features +# - implement various ex_* extension functions for opsource-specific +# features # - ex_modify_server() # - ex_add_storage_to_server() # - ex_snapshot_server() (create's customer image) diff --git a/libcloud/compute/drivers/rimuhosting.py b/libcloud/compute/drivers/rimuhosting.py index 32fbb744dd..35bd9e4050 100644 --- a/libcloud/compute/drivers/rimuhosting.py +++ b/libcloud/compute/drivers/rimuhosting.py @@ -288,7 +288,7 @@ def create_node(self, **kwargs): data['instantiation_options']['password'] = auth.password if 'ex_billing_oid' in kwargs: - #TODO check for valid oid. + # TODO check for valid oid. data['billing_oid'] = kwargs['ex_billing_oid'] if 'ex_host_server_oid' in kwargs: diff --git a/libcloud/compute/drivers/softlayer.py b/libcloud/compute/drivers/softlayer.py index 3f89938e4b..ced4a34b81 100644 --- a/libcloud/compute/drivers/softlayer.py +++ b/libcloud/compute/drivers/softlayer.py @@ -341,7 +341,7 @@ def create_node(self, **kwargs): price=None, driver=self.connection.driver)) ex_size_data = SL_TEMPLATES.get(int(size.id)) or {} - #plan keys are ints + # plan keys are ints cpu_count = kwargs.get('ex_cpus') or ex_size_data.get('cpus') or \ DEFAULT_CPU_SIZE ram = kwargs.get('ex_ram') or ex_size_data.get('ram') or \ diff --git a/libcloud/loadbalancer/drivers/rackspace.py b/libcloud/loadbalancer/drivers/rackspace.py index 13be22c0ca..1807ed6af3 100644 --- a/libcloud/loadbalancer/drivers/rackspace.py +++ b/libcloud/loadbalancer/drivers/rackspace.py @@ -404,7 +404,7 @@ def ex_create_balancer(self, name, members, protocol='http', balancer_attrs.update({ 'nodes': [self._member_attributes(member) for member in members], }) - #balancer_attrs['nodes'] = ['fu'] + # balancer_attrs['nodes'] = ['fu'] balancer_object = {"loadBalancer": 
balancer_attrs} resp = self.connection.request('/loadbalancers', diff --git a/libcloud/storage/drivers/s3.py b/libcloud/storage/drivers/s3.py index 69dcafc6b6..816493936d 100644 --- a/libcloud/storage/drivers/s3.py +++ b/libcloud/storage/drivers/s3.py @@ -817,8 +817,8 @@ def _put_object(self, container, object_name, upload_func, # TODO: Let the underlying exceptions bubble up and capture the SIGPIPE # here. - #SIGPIPE is thrown if the provided container does not exist or the user - # does not have correct permission + # SIGPIPE is thrown if the provided container does not exist or the + # user does not have correct permission result_dict = self._upload_object( object_name=object_name, content_type=content_type, upload_func=upload_func, upload_func_kwargs=upload_func_kwargs, diff --git a/libcloud/test/compute/test_gridspot.py b/libcloud/test/compute/test_gridspot.py index 2fe80f76b9..0160f8e4e5 100644 --- a/libcloud/test/compute/test_gridspot.py +++ b/libcloud/test/compute/test_gridspot.py @@ -48,7 +48,7 @@ def test_invalid_creds(self): GridspotMockHttp.type = 'BAD_AUTH' try: self.driver.list_nodes() - # Above command should have thrown an InvalidCredsException + # Above command should have thrown an InvalidCredsException self.assertTrue(False) except InvalidCredsError: self.assertTrue(True) diff --git a/libcloud/test/compute/test_openstack.py b/libcloud/test/compute/test_openstack.py index bdc6979a35..ca379b28f8 100644 --- a/libcloud/test/compute/test_openstack.py +++ b/libcloud/test/compute/test_openstack.py @@ -301,7 +301,7 @@ class OpenStack_1_0_Tests(unittest.TestCase, TestCaseMixin): driver_klass = OpenStack_1_0_NodeDriver driver_args = OPENSTACK_PARAMS driver_kwargs = {} - #driver_kwargs = {'ex_force_auth_version': '1.0'} + # driver_kwargs = {'ex_force_auth_version': '1.0'} @classmethod def create_driver(self): @@ -961,15 +961,15 @@ def test_list_nodes(self): node = nodes[0] self.assertEqual('12065', node.id) - #test public IPv4 + # test public IPv4 
self.assertTrue('12.16.18.28' in node.public_ips) self.assertTrue('50.57.94.35' in node.public_ips) - #test public IPv6 + # test public IPv6 self.assertTrue( '2001:4801:7808:52:16:3eff:fe47:788a' in node.public_ips) - #test private IPv4 + # test private IPv4 self.assertTrue('10.182.64.34' in node.private_ips) - #test private IPv6 + # test private IPv6 self.assertTrue( 'fec0:4801:7808:52:16:3eff:fe60:187d' in node.private_ips) diff --git a/libcloud/test/dns/test_route53.py b/libcloud/test/dns/test_route53.py index bc11125a15..299420381e 100644 --- a/libcloud/test/dns/test_route53.py +++ b/libcloud/test/dns/test_route53.py @@ -220,7 +220,7 @@ def _2012_02_29_hostedzone_47234(self, method, url, body, headers): return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _2012_02_29_hostedzone(self, method, url, body, headers): - #print method, url, body, headers + # print method, url, body, headers if method == "POST": body = self.fixtures.load("create_zone.xml") return (httplib.CREATED, body, {}, httplib.responses[httplib.OK]) diff --git a/libcloud/test/test_connection.py b/libcloud/test/test_connection.py index c17ff4d7c1..ae09c7ae65 100644 --- a/libcloud/test/test_connection.py +++ b/libcloud/test/test_connection.py @@ -52,7 +52,7 @@ def test_content_length(self): con = Connection() con.connection = Mock() - ## GET method + # GET method # No data, no content length should be present con.request('/test', method='GET', data=None) call_kwargs = con.connection.request.call_args[1] @@ -69,7 +69,7 @@ def test_content_length(self): call_kwargs = con.connection.request.call_args[1] self.assertEqual(call_kwargs['headers']['Content-Length'], '1') - ## POST, PUT method + # POST, PUT method # No data, content length should be present for method in ['POST', 'PUT', 'post', 'put']: con.request('/test', method=method, data=None) From 51af25e74d239d30198450f44314f448ca6a6b53 Mon Sep 17 00:00:00 2001 From: Franck Cuny Date: Sun, 30 Mar 2014 11:20:04 -0700 Subject: [PATCH 
014/315] Fix PEP8's E713 and E714. Two new tests were introduced with pep8 1.5 to report when operators 'not in' and 'is not' are recommended. Signed-off-by: Tomaz Muraus --- libcloud/compute/base.py | 2 +- libcloud/compute/drivers/abiquo.py | 8 ++++---- libcloud/compute/drivers/cloudsigma.py | 4 ++-- libcloud/compute/drivers/ibm_sce.py | 2 +- libcloud/compute/drivers/linode.py | 2 +- libcloud/compute/drivers/rimuhosting.py | 10 +++++----- libcloud/dns/drivers/rackspace.py | 2 +- libcloud/pricing.py | 2 +- libcloud/storage/drivers/google_storage.py | 4 ++-- libcloud/storage/drivers/s3.py | 4 ++-- 10 files changed, 20 insertions(+), 20 deletions(-) diff --git a/libcloud/compute/base.py b/libcloud/compute/base.py index e901f347dc..df6d296dbf 100644 --- a/libcloud/compute/base.py +++ b/libcloud/compute/base.py @@ -865,7 +865,7 @@ def deploy_node(self, **kwargs): pass elif 'create_node' in self.features: f = self.features['create_node'] - if not 'generates_password' in f and not "password" in f: + if 'generates_password' not in f and "password" not in f: raise NotImplementedError( 'deploy_node not implemented for this driver') else: diff --git a/libcloud/compute/drivers/abiquo.py b/libcloud/compute/drivers/abiquo.py index 398087bec3..ab74778e5c 100644 --- a/libcloud/compute/drivers/abiquo.py +++ b/libcloud/compute/drivers/abiquo.py @@ -290,7 +290,7 @@ def ex_create_group(self, name, location=None): if location is None: location = self.list_locations()[0] - elif not location in self.list_locations(): + elif location not in self.list_locations(): raise LibcloudError('Location does not exist') link_vdc = self.connection.cache['locations'][location] @@ -630,7 +630,7 @@ def _define_create_node_location(self, **kwargs): location will be created. 
""" # First, get image location - if not 'image' in kwargs: + if 'image' not in kwargs: error = "'image' parameter is mandatory" raise LibcloudError(error, self) @@ -640,7 +640,7 @@ def _define_create_node_location(self, **kwargs): location = None if 'location' in kwargs: location = kwargs['location'] - if not location in self.list_locations(): + if location not in self.list_locations(): raise LibcloudError('Location does not exist') # Check if the image is compatible with any of the locations or @@ -669,7 +669,7 @@ def _define_create_node_group(self, xml_loc, loc, **kwargs): If we can not find any group, create it into argument 'location' """ - if not 'group_name' in kwargs: + if 'group_name' not in kwargs: group_name = NodeGroup.DEFAULT_GROUP_NAME else: group_name = kwargs['group_name'] diff --git a/libcloud/compute/drivers/cloudsigma.py b/libcloud/compute/drivers/cloudsigma.py index 833dea2027..ccad8b11c7 100644 --- a/libcloud/compute/drivers/cloudsigma.py +++ b/libcloud/compute/drivers/cloudsigma.py @@ -918,7 +918,7 @@ def _parse_errors_from_body(self, body): return None for item in body: - if not 'error_type' in item: + if 'error_type' not in item: # Unrecognized error continue @@ -991,7 +991,7 @@ class CloudSigma_2_0_NodeDriver(CloudSigmaNodeDriver): def __init__(self, key, secret, secure=True, host=None, port=None, region=DEFAULT_REGION, **kwargs): - if not region in API_ENDPOINTS_2_0: + if region not in API_ENDPOINTS_2_0: raise ValueError('Invalid region: %s' % (region)) if not secure: diff --git a/libcloud/compute/drivers/ibm_sce.py b/libcloud/compute/drivers/ibm_sce.py index 830fa6a2f6..bcf73b9a8d 100644 --- a/libcloud/compute/drivers/ibm_sce.py +++ b/libcloud/compute/drivers/ibm_sce.py @@ -64,7 +64,7 @@ def add_default_headers(self, headers): headers['Accept'] = 'text/xml' headers['Authorization'] = ('Basic %s' % (base64.b64encode( b('%s:%s' % (self.user_id, self.key))).decode('utf-8'))) - if not 'Content-Type' in headers: + if 'Content-Type' not in 
headers: headers['Content-Type'] = 'text/xml' return headers diff --git a/libcloud/compute/drivers/linode.py b/libcloud/compute/drivers/linode.py index c25e3638ec..428ce3c7ba 100644 --- a/libcloud/compute/drivers/linode.py +++ b/libcloud/compute/drivers/linode.py @@ -244,7 +244,7 @@ def create_node(self, **kwargs): if not ssh and not root: raise LinodeException(0xFB, "Need SSH key or root password") - if not root is None and len(root) < 6: + if root is not None and len(root) < 6: raise LinodeException(0xFB, "Root password is too short") # Swap size diff --git a/libcloud/compute/drivers/rimuhosting.py b/libcloud/compute/drivers/rimuhosting.py index 35bd9e4050..ebe6bc9890 100644 --- a/libcloud/compute/drivers/rimuhosting.py +++ b/libcloud/compute/drivers/rimuhosting.py @@ -299,29 +299,29 @@ def create_node(self, **kwargs): kwargs['ex_vps_order_oid_to_clone'] if 'ex_num_ips' in kwargs and int(kwargs['ex_num_ips']) > 1: - if not 'ex_extra_ip_reason' in kwargs: + if 'ex_extra_ip_reason' not in kwargs: raise RimuHostingException( 'Need an reason for having an extra IP') else: - if not 'ip_request' in data: + if 'ip_request' not in data: data['ip_request'] = {} data['ip_request']['num_ips'] = int(kwargs['ex_num_ips']) data['ip_request']['extra_ip_reason'] = \ kwargs['ex_extra_ip_reason'] if 'ex_memory_mb' in kwargs: - if not 'vps_parameters' in data: + if 'vps_parameters' not in data: data['vps_parameters'] = {} data['vps_parameters']['memory_mb'] = kwargs['ex_memory_mb'] if 'ex_disk_space_mb' in kwargs: - if not 'ex_vps_parameters' in data: + if 'ex_vps_parameters' not in data: data['vps_parameters'] = {} data['vps_parameters']['disk_space_mb'] = \ kwargs['ex_disk_space_mb'] if 'ex_disk_space_2_mb' in kwargs: - if not 'vps_parameters' in data: + if 'vps_parameters' not in data: data['vps_parameters'] = {} data['vps_parameters']['disk_space_2_mb'] =\ kwargs['ex_disk_space_2_mb'] diff --git a/libcloud/dns/drivers/rackspace.py b/libcloud/dns/drivers/rackspace.py index 
fc7da74220..55f834f7f9 100644 --- a/libcloud/dns/drivers/rackspace.py +++ b/libcloud/dns/drivers/rackspace.py @@ -210,7 +210,7 @@ def create_zone(self, domain, type='master', ttl=None, extra=None): extra = extra if extra else {} # Email address is required - if not 'email' in extra: + if 'email' not in extra: raise ValueError('"email" key must be present in extra dictionary') payload = {'name': domain, 'emailAddress': extra['email'], diff --git a/libcloud/pricing.py b/libcloud/pricing.py index dd8de96771..abf71e61da 100644 --- a/libcloud/pricing.py +++ b/libcloud/pricing.py @@ -79,7 +79,7 @@ def get_pricing(driver_type, driver_name, pricing_file_path=None): :return: Dictionary with pricing where a key name is size ID and the value is a price. """ - if not driver_type in VALID_PRICING_DRIVER_TYPES: + if driver_type not in VALID_PRICING_DRIVER_TYPES: raise AttributeError('Invalid driver type: %s', driver_type) if driver_name in PRICING_DATA[driver_type]: diff --git a/libcloud/storage/drivers/google_storage.py b/libcloud/storage/drivers/google_storage.py index 6dd60545dc..9caca9b9ee 100644 --- a/libcloud/storage/drivers/google_storage.py +++ b/libcloud/storage/drivers/google_storage.py @@ -90,10 +90,10 @@ def _get_aws_auth_param(self, method, headers, params, expires, elif key.lower().startswith('x-goog-'): extension_header_values[key.lower()] = value.strip() - if not 'content-md5' in special_header_values: + if 'content-md5' not in special_header_values: special_header_values['content-md5'] = '' - if not 'content-type' in special_header_values: + if 'content-type' not in special_header_values: special_header_values['content-type'] = '' keys_sorted = list(special_header_values.keys()) diff --git a/libcloud/storage/drivers/s3.py b/libcloud/storage/drivers/s3.py index 816493936d..9577f98369 100644 --- a/libcloud/storage/drivers/s3.py +++ b/libcloud/storage/drivers/s3.py @@ -137,10 +137,10 @@ def _get_aws_auth_param(self, method, headers, params, expires, elif 
key_lower.startswith('x-amz-'): amz_header_values[key.lower()] = value.strip() - if not 'content-md5' in special_header_values: + if 'content-md5' not in special_header_values: special_header_values['content-md5'] = '' - if not 'content-type' in special_header_values: + if 'content-type' not in special_header_values: special_header_values['content-type'] = '' if expires: From 95b6a68adb5cbbe071d9da509484146cff6c5626 Mon Sep 17 00:00:00 2001 From: Franck Cuny Date: Sun, 30 Mar 2014 11:22:15 -0700 Subject: [PATCH 015/315] Fix for various PEP8 errors. Fix some visual alignments errors reported by pep8. Signed-off-by: Tomaz Muraus --- libcloud/compute/drivers/cloudsigma.py | 18 ++++++++++-------- libcloud/compute/drivers/ec2.py | 6 +++--- libcloud/compute/drivers/openstack.py | 6 +++--- libcloud/compute/drivers/opsource.py | 4 ++-- libcloud/compute/drivers/rimuhosting.py | 3 +-- libcloud/pricing.py | 2 +- libcloud/storage/base.py | 4 ++-- libcloud/test/compute/test_vcloud.py | 7 ++++--- libcloud/test/loadbalancer/test_rackspace.py | 2 +- 9 files changed, 27 insertions(+), 25 deletions(-) diff --git a/libcloud/compute/drivers/cloudsigma.py b/libcloud/compute/drivers/cloudsigma.py index ccad8b11c7..5d97434e4e 100644 --- a/libcloud/compute/drivers/cloudsigma.py +++ b/libcloud/compute/drivers/cloudsigma.py @@ -145,7 +145,7 @@ class CloudSigma_1_0_NodeDriver(CloudSigmaNodeDriver): connectionCls = CloudSigma_1_0_Connection IMAGING_TIMEOUT = 20 * 60 # Default timeout (in seconds) for the drive - # imaging process + # imaging process NODE_STATE_MAP = { 'active': NodeState.RUNNING, @@ -974,11 +974,12 @@ class CloudSigma_2_0_NodeDriver(CloudSigmaNodeDriver): website = 'http://www.cloudsigma.com/' connectionCls = CloudSigma_2_0_Connection - DRIVE_TRANSITION_TIMEOUT = 500 # Default drive transition timeout in - # seconds - DRIVE_TRANSITION_SLEEP_INTERVAL = 5 # How long to sleep between different - # polling periods while waiting for - # drive transition + # Default drive 
transition timeout in seconds + DRIVE_TRANSITION_TIMEOUT = 500 + + # How long to sleep between different polling periods while waiting for + # drive transition + DRIVE_TRANSITION_SLEEP_INTERVAL = 5 NODE_STATE_MAP = { 'starting': NodeState.PENDING, @@ -1106,8 +1107,9 @@ def create_node(self, name, size, image, ex_metadata=None, vnc_password = get_secure_random_string(size=12) drive_name = '%s-drive' % (name) - drive_size = (size.disk * 1024 * 1024 * 1024) # size is specified in - # GB + + # size is specified in GB + drive_size = (size.disk * 1024 * 1024 * 1024) if not is_installation_cd: # 1. Clone library drive so we can use it diff --git a/libcloud/compute/drivers/ec2.py b/libcloud/compute/drivers/ec2.py index e2899f1271..4d75fa3b2f 100644 --- a/libcloud/compute/drivers/ec2.py +++ b/libcloud/compute/drivers/ec2.py @@ -3593,9 +3593,9 @@ def _to_interface(self, element, name=None): 'private_dns': findtext(element=item, xpath='privateDnsName', namespace=NAMESPACE), - 'primary': findtext(element=item, - xpath='primary', - namespace=NAMESPACE)}) + 'primary': findtext(element=item, + xpath='primary', + namespace=NAMESPACE)}) # Build our attachment dictionary which we will add into extra later attributes_map = \ diff --git a/libcloud/compute/drivers/openstack.py b/libcloud/compute/drivers/openstack.py index 83a5dadf11..38223eec1e 100644 --- a/libcloud/compute/drivers/openstack.py +++ b/libcloud/compute/drivers/openstack.py @@ -861,8 +861,7 @@ def get_meta_dict(el): 'uri': "https://%s%s/servers/%s" % ( self.connection.host, self.connection.request_path, el.get('id')), - 'metadata': metadata, - }) + 'metadata': metadata}) return n def _to_sizes(self, object): @@ -1022,7 +1021,8 @@ def __init__(self, id, name, cidr, driver, extra=None): def __repr__(self): return '' % (self.id, - self.name, self.cidr,) + self.name, + self.cidr,) class OpenStackSecurityGroup(object): diff --git a/libcloud/compute/drivers/opsource.py b/libcloud/compute/drivers/opsource.py index 
a2024c3828..9d43ca45f8 100644 --- a/libcloud/compute/drivers/opsource.py +++ b/libcloud/compute/drivers/opsource.py @@ -359,8 +359,8 @@ def list_images(self, location=None): @inherits: :class:`NodeDriver.list_images` """ - return self._to_base_images(self.connection.request('base/image') - .object) + return self._to_base_images( + self.connection.request('base/image').object) def list_sizes(self, location=None): return [ diff --git a/libcloud/compute/drivers/rimuhosting.py b/libcloud/compute/drivers/rimuhosting.py index ebe6bc9890..edb344b27e 100644 --- a/libcloud/compute/drivers/rimuhosting.py +++ b/libcloud/compute/drivers/rimuhosting.py @@ -159,8 +159,7 @@ def _to_node(self, order): state=NodeState.RUNNING, public_ips=( [order['allocated_ips']['primary_ip']] - + order['allocated_ips']['secondary_ips'] - ), + + order['allocated_ips']['secondary_ips']), private_ips=[], driver=self.connection.driver, extra={ diff --git a/libcloud/pricing.py b/libcloud/pricing.py index abf71e61da..ebe99a8aa5 100644 --- a/libcloud/pricing.py +++ b/libcloud/pricing.py @@ -189,7 +189,7 @@ def download_pricing_file(file_url=DEFAULT_FILE_URL, if not os.path.exists(dir_name): # Verify a valid path is provided msg = ('Can\'t write to %s, directory %s, doesn\'t exist' % - (file_path, dir_name)) + (file_path, dir_name)) raise ValueError(msg) if os.path.exists(file_path) and os.path.isdir(file_path): diff --git a/libcloud/storage/base.py b/libcloud/storage/base.py index b5e88ff3c0..136d70123e 100644 --- a/libcloud/storage/base.py +++ b/libcloud/storage/base.py @@ -742,7 +742,7 @@ def _stream_data(self, response, iterator, chunked=False, chunk = '' if chunked: response.connection.connection.send(b('%X\r\n' % - (len(chunk)))) + (len(chunk)))) response.connection.connection.send(chunk) response.connection.connection.send(b('\r\n')) response.connection.connection.send(b('0\r\n\r\n')) @@ -754,7 +754,7 @@ def _stream_data(self, response, iterator, chunked=False, try: if chunked: 
response.connection.connection.send(b('%X\r\n' % - (len(chunk)))) + (len(chunk)))) response.connection.connection.send(b(chunk)) response.connection.connection.send(b('\r\n')) else: diff --git a/libcloud/test/compute/test_vcloud.py b/libcloud/test/compute/test_vcloud.py index b44162a421..fc801bebbf 100644 --- a/libcloud/test/compute/test_vcloud.py +++ b/libcloud/test/compute/test_vcloud.py @@ -478,9 +478,10 @@ class VCloud_1_5_MockHttp(MockHttp, unittest.TestCase): fixtures = ComputeFileFixtures('vcloud_1_5') def request(self, method, url, body=None, headers=None, raw=False): - self.assertTrue(url.startswith('/api/'), ('"%s" is invalid. Needs to ' - 'start with "/api". The passed URL should be just ' - 'the path, not full URL.', url)) + self.assertTrue(url.startswith('/api/'), + ('"%s" is invalid. Needs to ' + 'start with "/api". The passed URL should be just ' + 'the path, not full URL.', url)) super(VCloud_1_5_MockHttp, self).request(method, url, body, headers, raw) diff --git a/libcloud/test/loadbalancer/test_rackspace.py b/libcloud/test/loadbalancer/test_rackspace.py index 1d183ec600..5d6a0cfbfb 100644 --- a/libcloud/test/loadbalancer/test_rackspace.py +++ b/libcloud/test/loadbalancer/test_rackspace.py @@ -158,7 +158,7 @@ def test_create_balancer(self): Member( None, '10.1.0.10', 80, extra={'condition': MemberCondition.DISABLED, - 'weight': 10}), + 'weight': 10}), Member(None, '10.1.0.11', 80)) ) From e780066b2f8942732a4df66142455a020f44d787 Mon Sep 17 00:00:00 2001 From: Franck Cuny Date: Fri, 4 Apr 2014 21:31:59 -0700 Subject: [PATCH 016/315] Fix for various PEP8 errors. pep8 1.5.2 started to report errors when a new line follows the parameter equal, and when a variable is redefined in a list comprehension. 
Signed-off-by: Tomaz Muraus --- .../cloudstack/security_groups_management.py | 10 +++++----- libcloud/common/cloudstack.py | 11 +++-------- libcloud/common/openstack.py | 4 ++-- libcloud/compute/drivers/cloudstack.py | 19 +++++-------------- libcloud/compute/drivers/openstack.py | 15 +++++---------- libcloud/test/compute/test_cloudsigma_v2_0.py | 4 ++-- libcloud/utils/misc.py | 2 +- 7 files changed, 23 insertions(+), 42 deletions(-) diff --git a/docs/examples/compute/cloudstack/security_groups_management.py b/docs/examples/compute/cloudstack/security_groups_management.py index 5e12a13c71..be89b3385b 100644 --- a/docs/examples/compute/cloudstack/security_groups_management.py +++ b/docs/examples/compute/cloudstack/security_groups_management.py @@ -17,11 +17,11 @@ # Authorize an ingress rule on a security group # If `startport` is used alone, this will be the only port open # If `endport` is also used then the entire range will be authorized -sg = driver.ex_authorize_security_group_ingress(securitygroupname= - 'test-security-group', - protocol='tcp', - startport='22', - cidrlist='0.0.0.0/0') +sg = driver.ex_authorize_security_group_ingress( + securitygroupname='test-security-group', protocol='tcp', startport='22', + cidrlist='0.0.0.0/0' +) + pprint(sg) # Delete a security group we have previously created diff --git a/libcloud/common/cloudstack.py b/libcloud/common/cloudstack.py index 99699d0699..efd31c8277 100644 --- a/libcloud/common/cloudstack.py +++ b/libcloud/common/cloudstack.py @@ -107,14 +107,9 @@ def _async_request(self, command, action=None, params=None, data=None, # Command is specified as part of GET call context['command'] = command - result = super(CloudStackConnection, self).async_request(action=action, - params=params, - data=data, - headers= - headers, - method=method, - context= - context) + result = super(CloudStackConnection, self).async_request( + action=action, params=params, data=data, headers=headers, + method=method, context=context) return 
result['jobresult'] def get_request_kwargs(self, action, params=None, data='', headers=None, diff --git a/libcloud/common/openstack.py b/libcloud/common/openstack.py index 5c4f02b09d..158a9f5f8c 100644 --- a/libcloud/common/openstack.py +++ b/libcloud/common/openstack.py @@ -604,8 +604,8 @@ def _populate_hosts_and_request_paths(self): self.auth_user_info = osa.auth_user_info # Pull out and parse the service catalog - osc = OpenStackServiceCatalog(osa.urls, ex_force_auth_version= - self._auth_version) + osc = OpenStackServiceCatalog( + osa.urls, ex_force_auth_version=self._auth_version) self.service_catalog = osc url = self._ex_force_base_url or self.get_endpoint() diff --git a/libcloud/compute/drivers/cloudstack.py b/libcloud/compute/drivers/cloudstack.py index c68c884874..cd0cc3ef83 100644 --- a/libcloud/compute/drivers/cloudstack.py +++ b/libcloud/compute/drivers/cloudstack.py @@ -289,20 +289,11 @@ def ex_create_port_forwarding_rule(self, address, """ Add a port forwarding rule for port or ports. 
""" - return self.driver.ex_create_port_forwarding_rule(node=self, - address= - address, - private_port= - private_port, - public_port= - public_port, - protocol=protocol, - public_end_port= - public_end_port, - private_end_port= - private_end_port, - openfirewall= - openfirewall) + return self.driver.ex_create_port_forwarding_rule( + node=self, address=address, private_port=private_port, + public_port=public_port, protocol=protocol, + public_end_port=public_end_port, private_end_port=private_end_port, + openfirewall=openfirewall) def ex_delete_ip_forwarding_rule(self, rule): """ diff --git a/libcloud/compute/drivers/openstack.py b/libcloud/compute/drivers/openstack.py index 38223eec1e..398803046a 100644 --- a/libcloud/compute/drivers/openstack.py +++ b/libcloud/compute/drivers/openstack.py @@ -1662,16 +1662,11 @@ def _to_security_group_rule(self, obj): group = obj['group'].get('name', None) tenant_id = obj['group'].get('tenant_id', None) - return OpenStackSecurityGroupRule(id=obj['id'], - parent_group_id= - obj['parent_group_id'], - ip_protocol=obj['ip_protocol'], - from_port=obj['from_port'], - to_port=obj['to_port'], - driver=self, - ip_range=ip_range, - group=group, - tenant_id=tenant_id) + return OpenStackSecurityGroupRule( + id=obj['id'], parent_group_id=obj['parent_group_id'], + ip_protocol=obj['ip_protocol'], from_port=obj['from_port'], + to_port=obj['to_port'], driver=self, ip_range=ip_range, + group=group, tenant_id=tenant_id) def _to_security_groups(self, obj): security_groups = obj['security_groups'] diff --git a/libcloud/test/compute/test_cloudsigma_v2_0.py b/libcloud/test/compute/test_cloudsigma_v2_0.py index 6258a29c78..4bc66a5b90 100644 --- a/libcloud/test/compute/test_cloudsigma_v2_0.py +++ b/libcloud/test/compute/test_cloudsigma_v2_0.py @@ -388,8 +388,8 @@ def test_ex_list_subscriptions_resource_filterting(self): def test_ex_toggle_subscription_auto_renew(self): subscription = self.driver.ex_list_subscriptions()[0] - status = 
self.driver.ex_toggle_subscription_auto_renew(subscription= - subscription) + status = self.driver.ex_toggle_subscription_auto_renew( + subscription=subscription) self.assertTrue(status) def test_ex_list_capabilities(self): diff --git a/libcloud/utils/misc.py b/libcloud/utils/misc.py index 21978e138b..fbca4cb100 100644 --- a/libcloud/utils/misc.py +++ b/libcloud/utils/misc.py @@ -166,7 +166,7 @@ def str2dicts(data): value = line[whitespace + 1:] d.update({key: value}) - list_data = [value for value in list_data if value != {}] + list_data = [val for val in list_data if val != {}] return list_data From 502846370576fa3b11ad0cff7f4ad72b3743be73 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Mon, 7 Apr 2014 08:55:07 -0600 Subject: [PATCH 017/315] Allow user to pass ex_ebs_optimized argument to the create_node method in the EC2 driver. Closes #272 --- CHANGES.rst | 5 +++++ libcloud/compute/drivers/ec2.py | 6 ++++++ 2 files changed, 11 insertions(+) diff --git a/CHANGES.rst b/CHANGES.rst index b98df33825..616656a41d 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -76,6 +76,11 @@ Compute - Add ex_revoke_security_group_ingress method to the CloudStack driver. [Chris DeRamus, Tomaz Muraus] +- Allow user to pass ex_ebs_optimized argument to the create_node method + in the EC2 driver. 
+ (GITHUB-272) + [zerthimon] + Load Balancer ~~~~~~~~~~~~~ diff --git a/libcloud/compute/drivers/ec2.py b/libcloud/compute/drivers/ec2.py index 4d75fa3b2f..bac7f8d1bb 100644 --- a/libcloud/compute/drivers/ec2.py +++ b/libcloud/compute/drivers/ec2.py @@ -1362,6 +1362,9 @@ def create_node(self, **kwargs): :keyword ex_iamprofile: Name or ARN of IAM profile :type ex_iamprofile: ``str`` + + :keyword ex_ebs_optimized: EBS-Optimized if True + :type ex_ebs_optimized: ``bool`` """ image = kwargs["image"] size = kwargs["size"] @@ -1430,6 +1433,9 @@ def create_node(self, **kwargs): else: params['IamInstanceProfile.Name'] = kwargs['ex_iamprofile'] + if 'ex_ebs_optimized' in kwargs: + params['EbsOptimized'] = kwargs['ex_ebs_optimized'] + object = self.connection.request(self.path, params=params).object nodes = self._to_nodes(object, 'instancesSet/item') From e36f05a5f1705186d0534d202607a4f460070e23 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Mon, 7 Apr 2014 09:44:06 -0600 Subject: [PATCH 018/315] Update CHANGES. --- CHANGES.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index 616656a41d..92166b35e1 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -61,15 +61,15 @@ Compute (LIBCLOUD-503, GITHUB-235) [Markos Gogoulos] -- Ad new ex_delete_image and ex_deprecate_image method to the GCE driver, +- Ad new ex_delete_image and ex_deprecate_image method to the GCE driver. (GITHUB-260) [Franck Cuny] -- Ad new ex_copy_image method to the GCE driver, +- Ad new ex_copy_image method to the GCE driver. (GITHUB-258) [Franck Cuny] -- Ad new ex_set_volume_auto_delete method to the GCE driver, +- Ad new ex_set_volume_auto_delete method to the GCE driver. (GITHUB-264) [Franck Cuny] From dc2c0dfa4a0b8d3f5fcbafcaf572720e755ad78a Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Mon, 7 Apr 2014 09:48:13 -0600 Subject: [PATCH 019/315] Update EC2 instance and pricing information. 
Closes #256 --- libcloud/compute/drivers/ec2.py | 14 ++++++++++++++ libcloud/data/pricing.json | 9 +++++++++ 2 files changed, 23 insertions(+) diff --git a/libcloud/compute/drivers/ec2.py b/libcloud/compute/drivers/ec2.py index bac7f8d1bb..ad51e00c64 100644 --- a/libcloud/compute/drivers/ec2.py +++ b/libcloud/compute/drivers/ec2.py @@ -247,6 +247,13 @@ 'disk': 240, 'bandwidth': None }, + 'hs1.4xlarge': { + 'id': 'hs1.4xlarge', + 'name': 'High Storage Quadruple Extra Large Instance', + 'ram': 61952, + 'disk': 2048, + 'bandwidth': None + }, 'hs1.8xlarge': { 'id': 'hs1.8xlarge', 'name': 'High Storage Eight Extra Large Instance', @@ -316,6 +323,7 @@ 'cg1.4xlarge', 'g2.2xlarge', 'cr1.8xlarge', + 'hs1.4xlarge', 'hs1.8xlarge', 'i2.xlarge', 'i2.2xlarge', @@ -377,6 +385,8 @@ 'c3.2xlarge', 'c3.4xlarge', 'c3.8xlarge', + 'hs1.4xlarge', + 'hs1.8xlarge', 'cc2.8xlarge', 'i2.xlarge', 'i2.2xlarge', @@ -410,6 +420,8 @@ 'c3.2xlarge', 'c3.4xlarge', 'c3.8xlarge', + 'hs1.4xlarge', + 'hs1.8xlarge', 'cc2.8xlarge', 'i2.xlarge', 'i2.2xlarge', @@ -475,6 +487,8 @@ 'c3.2xlarge', 'c3.4xlarge', 'c3.8xlarge', + 'hs1.4xlarge', + 'hs1.8xlarge', 'i2.xlarge', 'i2.2xlarge', 'i2.4xlarge', diff --git a/libcloud/data/pricing.json b/libcloud/data/pricing.json index 1d862d4cdf..d6202bac81 100644 --- a/libcloud/data/pricing.json +++ b/libcloud/data/pricing.json @@ -90,6 +90,7 @@ "cc1.4xlarge": 1.3, "cc2.8xlarge": 2.4, "cr1.8xlarge": 3.50, + "hs1.4xlarge": 3.1, "hs1.8xlarge": 4.6, "i2.xlarge": 0.85, "i2.2xlarge": 1.71, @@ -118,6 +119,8 @@ "m3.large": 0.248, "m3.xlarge": 0.495, "m3.2xlarge": 0.990, + "hs1.4xlarge": 3.1, + "hs1.8xlarge": 4.6, "i2.xlarge": 0.85, "i2.2xlarge": 1.71, "i2.4xlarge": 3.41, @@ -145,6 +148,8 @@ "m3.large": 0.225, "m3.xlarge": 0.45, "m3.2xlarge": 0.90, + "hs1.4xlarge": 3.1, + "hs1.8xlarge": 4.6, "cc2.8xlarge": 2.400, "i2.xlarge": 0.85, "i2.2xlarge": 1.71, @@ -173,6 +178,8 @@ "m3.large": 0.248, "m3.xlarge": 0.495, "m3.2xlarge": 0.990, + "hs1.4xlarge": 3.1, + "hs1.8xlarge": 4.6, 
"cc2.8xlarge": 2.7, "i2.xlarge": 0.85, "i2.2xlarge": 1.71, @@ -228,6 +235,8 @@ "m3.large": 0.342, "m3.xlarge": 0.684, "m3.2xlarge": 1.368, + "hs1.4xlarge": 3.440, + "hs1.8xlarge": 5.670, "i2.xlarge": 0.85, "i2.2xlarge": 1.71, "i2.4xlarge": 3.41, From dbe902715c9dacbec9d5495ebc6cc1e0de28bfb4 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Mon, 7 Apr 2014 09:49:40 -0600 Subject: [PATCH 020/315] Update tox lint step to also verify syntax in the pricing.json file. --- tox.ini | 1 + 1 file changed, 1 insertion(+) diff --git a/tox.ini b/tox.ini index 4f581e2a5e..66dc270fb1 100644 --- a/tox.ini +++ b/tox.ini @@ -52,3 +52,4 @@ commands = flake8 --exclude="test" libcloud/ flake8 demos/ flake8 --ignore=E902 docs/examples/ flake8 --ignore=E902 contrib/ + python -mjson.tool libcloud/data/pricing.json From 72c25dd9717a6c69967734a9dddf13f800e60843 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Mon, 7 Apr 2014 10:03:07 -0600 Subject: [PATCH 021/315] Fix broken test. --- libcloud/test/compute/test_ec2.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/libcloud/test/compute/test_ec2.py b/libcloud/test/compute/test_ec2.py index 1a3cb20ed7..4a2b4d7c1e 100644 --- a/libcloud/test/compute/test_ec2.py +++ b/libcloud/test/compute/test_ec2.py @@ -372,7 +372,7 @@ def test_list_sizes(self): self.assertTrue('m2.4xlarge' in ids) if region_name == 'us-east-1': - self.assertEqual(len(sizes), 29) + self.assertEqual(len(sizes), 30) self.assertTrue('cg1.4xlarge' in ids) self.assertTrue('cc1.4xlarge' in ids) self.assertTrue('cc2.8xlarge' in ids) @@ -386,7 +386,7 @@ def test_list_sizes(self): elif region_name == 'ap-southeast-2': self.assertEqual(len(sizes), 24) elif region_name == 'eu-west-1': - self.assertEqual(len(sizes), 25) + self.assertEqual(len(sizes), 27) self.driver.region_name = region_old From 342471bb94938128a32945c52a48ccb3a2062171 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Mon, 7 Apr 2014 10:18:49 -0600 Subject: [PATCH 022/315] Fix lint. 
--- libcloud/compute/drivers/linode.py | 4 ++-- libcloud/test/secrets.py-dist | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/libcloud/compute/drivers/linode.py b/libcloud/compute/drivers/linode.py index 428ce3c7ba..c58f56a1c1 100644 --- a/libcloud/compute/drivers/linode.py +++ b/libcloud/compute/drivers/linode.py @@ -89,8 +89,8 @@ def __init__(self, key): # Converts Linode's state from DB to a NodeState constant. LINODE_STATES = { - -2: NodeState.UNKNOWN, # Boot Failed - -1: NodeState.PENDING, # Being Created + (-2): NodeState.UNKNOWN, # Boot Failed + (-1): NodeState.PENDING, # Being Created 0: NodeState.PENDING, # Brand New 1: NodeState.RUNNING, # Running 2: NodeState.TERMINATED, # Powered Off diff --git a/libcloud/test/secrets.py-dist b/libcloud/test/secrets.py-dist index 2195b33ef2..773a2c25b1 100644 --- a/libcloud/test/secrets.py-dist +++ b/libcloud/test/secrets.py-dist @@ -23,7 +23,7 @@ EC2_PARAMS = ('access_id', 'secret') ECP_PARAMS = ('user_name', 'password') GANDI_PARAMS = ('user',) GCE_PARAMS = ('email_address', 'key') # Service Account Authentication -#GCE_PARAMS = ('client_id', 'client_secret') # Installed App Authentication +# GCE_PARAMS = ('client_id', 'client_secret') # Installed App Authentication GCE_KEYWORD_PARAMS = {'project': 'project_name'} HOSTINGCOM_PARAMS = ('user', 'secret') IBM_PARAMS = ('user', 'secret') From c842f35f85c7fe9dea90c5a25f66711bdceaceb2 Mon Sep 17 00:00:00 2001 From: Chris DeRamus Date: Mon, 31 Mar 2014 15:38:34 -0400 Subject: [PATCH 023/315] Updating the pricing information to match new AWS pricing changes effective April 1st, 2014 Signed-off-by: Tomaz Muraus --- libcloud/data/pricing.json | 436 +++++++++++++++++++++---------------- 1 file changed, 250 insertions(+), 186 deletions(-) diff --git a/libcloud/data/pricing.json b/libcloud/data/pricing.json index d6202bac81..5a2e12310f 100644 --- a/libcloud/data/pricing.json +++ b/libcloud/data/pricing.json @@ -15,7 +15,16 @@ "5": 0.240, "6": 0.480, "7": 
0.960, - "8": 1.800 + "8": 1.800, + "performance1-1": 0.040, + "performance1-2": 0.080, + "performance1-4": 0.160, + "performance1-8": 0.320, + "performance2-15": 0.680, + "performance2-30": 1.360, + "performance2-60": 2.720, + "performance2-90": 4.080, + "performance2-120": 5.440 }, "rackspacenovaus": { @@ -44,7 +53,16 @@ "5": 0.258, "6": 0.516, "7": 0.967, - "8": 1.612 + "8": 1.612, + "performance1-1": 0.040, + "performance1-2": 0.080, + "performance1-4": 0.160, + "performance1-8": 0.320, + "performance2-15": 0.680, + "performance2-30": 1.360, + "performance2-60": 2.720, + "performance2-90": 4.080, + "performance2-120": 5.440 }, "rackspacenovasyd": { @@ -54,7 +72,16 @@ "5": 0.288, "6": 0.576, "7": 1.080, - "8": 1.440 + "8": 1.440, + "performance1-1": 0.040, + "performance1-2": 0.080, + "performance1-4": 0.160, + "performance1-8": 0.320, + "performance2-15": 0.680, + "performance2-30": 1.360, + "performance2-60": 2.720, + "performance2-90": 4.080, + "performance2-120": 5.440 }, "dreamhost": { @@ -66,225 +93,262 @@ }, "ec2_us_east": { - "t1.micro": 0.02, - "m1.small": 0.065, - "m1.medium": 0.13, - "m1.large": 0.26, - "m1.xlarge": 0.52, - "c1.medium": 0.17, - "c1.xlarge": 0.68, - "c3.large": 0.150, - "c3.xlarge": 0.300, - "c3.2xlarge": 0.600, - "c3.4xlarge": 1.200, - "c3.8xlarge": 2.400, - "m2.xlarge": 0.50, - "m2.2xlarge": 1.0, - "m2.4xlarge": 2.0, "m3.medium": 0.113, - "m3.large": 0.225, + "m3.large": 0.140, "m3.xlarge": 0.45, "m3.2xlarge": 0.90, - "cg1.4xlarge": 2.1, + "m1.small": 0.044, + "m1.medium": 0.087, + "m1.large": 0.175, + "m1.xlarge": 0.350, + "c3.large": 0.105, + "c3.xlarge": 0.210, + "c3.2xlarge": 0.420, + "c3.4xlarge": 0.840, + "c3.8xlarge": 1.680, + "c1.medium": 0.130, + "c1.xlarge": 0.520, + "cc2.8xlarge": 2.00, "g2.2xlarge": 0.650, - "cc1.4xlarge": 1.3, - "cc2.8xlarge": 2.4, + "cg1.4xlarge": 2.1, + "r3.large": 0.175, + "r3.xlarge": 0.350, + "r3.2xlarge": 0.700, + "r3.4xlarge": 1.400, + "r3.8xlarge": 2.800, + "m2.xlarge": 0.245, + "m2.2xlarge": 
0.490, + "m2.4xlarge": 0.980, "cr1.8xlarge": 3.50, - "hs1.4xlarge": 3.1, - "hs1.8xlarge": 4.6, - "i2.xlarge": 0.85, - "i2.2xlarge": 1.71, + "i2.xlarge": 0.853, + "i2.2xlarge": 1.705, "i2.4xlarge": 3.41, - "i2.8xlarge": 6.82 + "i2.8xlarge": 6.82, + "hs1.8xlarge": 4.6, + "hi1.4xlarge": 3.10, + "t1.micro": 0.02 }, "ec2_us_west": { - "t1.micro": 0.025, - "m1.small": 0.095, - "m1.medium": 0.180, - "m1.large": 0.38, - "m1.xlarge": 0.76, - "c1.medium": 0.19, - "c1.xlarge": 0.76, - "g2.2xlarge": 0.702, - "c3.large": 0.171, - "c3.xlarge": 0.342, - "c3.2xlarge": 0.633, - "c3.4xlarge": 1.366, - "c3.8xlarge": 2.732, - "m2.xlarge": 0.57, - "m2.2xlarge": 1.14, - "m2.4xlarge": 2.28, "m3.medium": 0.124, "m3.large": 0.248, "m3.xlarge": 0.495, "m3.2xlarge": 0.990, - "hs1.4xlarge": 3.1, - "hs1.8xlarge": 4.6, - "i2.xlarge": 0.85, - "i2.2xlarge": 1.71, - "i2.4xlarge": 3.41, - "i2.8xlarge": 6.82 + "m1.small": 0.047, + "m1.medium": 0.095, + "m1.large": 0.190, + "m1.xlarge": 0.379, + "c3.large": 0.120, + "c3.xlarge": 0.239, + "c3.2xlarge": 0.478, + "c3.4xlarge": 0.956, + "c3.8xlarge": 1.912, + "c1.medium": 0.148, + "c1.xlarge": 0.592, + "g2.2xlarge": 0.702, + "r3.large": 0.195, + "r3.xlarge": 0.390, + "r3.2xlarge": 0.780, + "r3.4xlarge": 1.560, + "r3.8xlarge": 3.120, + "m2.xlarge": 0.275, + "m2.2xlarge": 0.550, + "m2.4xlarge": 1.100, + "i2.xlarge": 0.938, + "i2.2xlarge": 1.876, + "i2.4xlarge": 3.751, + "i2.8xlarge": 7.502, + "t1.micro": 0.025 }, "ec2_us_west_oregon": { - "t1.micro": 0.02, - "m1.small": 0.065, - "m1.medium": 0.13, - "m1.large": 0.26, - "m1.xlarge": 0.52, - "c1.medium": 0.17, - "c1.xlarge": 0.68, + "m3.medium": 0.070, + "m3.large": 0.140, + "m3.xlarge": 0.280, + "m3.2xlarge": 0.560, + "m1.small": 0.044, + "m1.medium": 0.087, + "m1.large": 0.175, + "m1.xlarge": 0.350, + "c3.large": 0.105, + "c3.xlarge": 0.210, + "c3.2xlarge": 0.420, + "c3.4xlarge": 0.840, + "c3.8xlarge": 1.680, + "c1.medium": 0.130, + "c1.xlarge": 0.520, + "cc2.8xlarge": 2.000, "g2.2xlarge": 0.650, - 
"c3.large": 0.150, - "c3.xlarge": 0.300, - "c3.2xlarge": 0.600, - "c3.4xlarge": 1.200, - "c3.8xlarge": 2.400, - "m2.xlarge": 0.50, - "m2.2xlarge": 1.0, - "m2.4xlarge": 2.0, - "m3.medium": 0.113, - "m3.large": 0.225, - "m3.xlarge": 0.45, - "m3.2xlarge": 0.90, - "hs1.4xlarge": 3.1, - "hs1.8xlarge": 4.6, - "cc2.8xlarge": 2.400, - "i2.xlarge": 0.85, - "i2.2xlarge": 1.71, + "r3.large": 0.175, + "r3.xlarge": 0.350, + "r3.2xlarge": 0.700, + "r3.4xlarge": 1.400, + "r3.8xlarge": 2.800, + "m2.xlarge": 0.245, + "m2.2xlarge": 0.490, + "m2.4xlarge": 0.980, + "cr1.8xlarge": 3.50, + "i2.xlarge": 0.853, + "i2.2xlarge": 1.705, "i2.4xlarge": 3.41, - "i2.8xlarge": 6.82 + "i2.8xlarge": 6.82, + "hs1.8xlarge": 4.6, + "hi1.4xlarge": 3.10, + "t1.micro": 0.02 }, "ec2_eu_west": { - "t1.micro": 0.025, - "m1.small": 0.095, - "m1.medium": 0.180, - "m1.large": 0.38, - "m1.xlarge": 0.76, - "c1.medium": 0.19, - "c1.xlarge": 0.76, + "m3.medium": 0.077, + "m3.large": 0.154, + "m3.xlarge": 0.308, + "m3.2xlarge": 0.616, + "m1.small": 0.047, + "m1.medium": 0.095, + "m1.large": 0.190, + "m1.xlarge": 0.379, + "c3.large": 0.120, + "c3.xlarge": 0.239, + "c3.2xlarge": 0.478, + "c3.4xlarge": 0.956, + "c3.8xlarge": 1.912, + "c1.medium": 0.148, + "c1.xlarge": 0.592, + "cc2.8xlarge": 2.250, "g2.2xlarge": 0.702, - "c3.large": 0.171, - "c3.xlarge": 0.342, - "c3.2xlarge": 0.683, - "c3.4xlarge": 1.366, - "c3.8xlarge": 2.732, - "m2.xlarge": 0.57, - "m2.2xlarge": 1.14, - "m2.4xlarge": 2.28, - "m3.medium": 0.124, - "m3.large": 0.248, - "m3.xlarge": 0.495, - "m3.2xlarge": 0.990, - "hs1.4xlarge": 3.1, - "hs1.8xlarge": 4.6, - "cc2.8xlarge": 2.7, - "i2.xlarge": 0.85, - "i2.2xlarge": 1.71, - "i2.4xlarge": 3.41, - "i2.8xlarge": 6.82 + "cg1.4xlarge": 2.360, + "r3.large": 0.195, + "r3.xlarge": 0.390, + "r3.2xlarge": 0.780, + "r3.4xlarge": 1.560, + "r3.8xlarge": 3.120, + "m2.xlarge": 0.275, + "m2.2xlarge": 0.550, + "m2.4xlarge": 1.100, + "cr1.8xlarge": 3.750, + "i2.xlarge": 0.938, + "i2.2xlarge": 1.876, + "i2.4xlarge": 3.751, 
+ "i2.8xlarge": 7.502, + "hs1.8xlarge": 4.900, + "hs1.4xlarge": 3.100, + "t1.micro": 0.020 }, "ec2_ap_southeast": { - "t1.micro": 0.025, - "m1.small": 0.095, - "m1.medium": 0.180, - "m1.large": 0.38, - "m1.xlarge": 0.76, - "c1.medium": 0.19, - "c1.xlarge": 0.76, - "c3.large": 0.189, - "c3.xlarge": 0.378, - "c3.2xlarge": 0.756, - "c3.4xlarge": 1.512, - "c3.8xlarge": 3.024, - "m2.xlarge": 0.57, - "m2.2xlarge": 1.14, - "m2.4xlarge": 2.28, - "m3.medium": 0.158, - "m3.large": 0.315, - "m3.xlarge": 0.630, - "m3.2xlarge": 1.260, + "m3.medium": 0.098, + "m3.large": 0.196, + "m3.xlarge": 0.392, + "m3.2xlarge": 0.784, + "m1.small": 0.058, + "m1.medium": 0.117, + "m1.large": 0.233, + "m1.xlarge": 0.467, + "c3.large": 0.132, + "c3.xlarge": 0.265, + "c3.2xlarge": 0.529, + "c3.4xlarge": 1.058, + "c3.8xlarge": 2.117, + "c1.medium": 0.164, + "c1.xlarge": 0.655, + "r3.large": 0.210, + "r3.xlarge": 0.420, + "r3.2xlarge": 0.840, + "r3.4xlarge": 1.680, + "r3.8xlarge": 3.360, + "m2.xlarge": 0.296, + "m2.2xlarge": 0.592, + "m2.4xlarge": 1.183, + "i2.xlarge": 1.018, + "i2.2xlarge": 2.035, + "i2.4xlarge": 4.070, + "i2.8xlarge": 8.140, "hs1.8xlarge": 5.570, - "i2.xlarge": 0.85, - "i2.2xlarge": 1.71, - "i2.4xlarge": 3.41, - "i2.8xlarge": 6.82 + "t1.micro": 0.025 }, "ec2_ap_northeast": { - "t1.micro": 0.027, - "m1.small": 0.10, - "m1.medium": 0.184, - "m1.large": 0.40, - "m1.xlarge": 0.80, - "c1.medium": 0.20, - "c1.xlarge": 0.80, - "g2.2xlarge": 0.943, - "c3.large": 0.192, - "c3.xlarge": 0.383, - "c3.2xlarge": 0.766, - "c3.4xlarge": 1.532, - "c3.8xlarge": 3.064, - "m2.xlarge": 0.60, - "m2.2xlarge": 1.20, - "m2.4xlarge": 2.39, - "m3.medium": 0.171, - "m3.large": 0.342, - "m3.xlarge": 0.684, - "m3.2xlarge": 1.368, - "hs1.4xlarge": 3.440, - "hs1.8xlarge": 5.670, - "i2.xlarge": 0.85, - "i2.2xlarge": 1.71, - "i2.4xlarge": 3.41, - "i2.8xlarge": 6.82 + "m3.medium": 0.101, + "m3.large": 0.203, + "m3.xlarge": 0.405, + "m3.2xlarge": 0.810, + "m1.small": 0.061, + "m1.medium": 0.122, + "m1.large": 
0.243, + "m1.xlarge": 0.486, + "c3.large": 0.128, + "c3.xlarge": 0.255, + "c3.2xlarge": 0.511, + "c3.4xlarge": 1.021, + "c3.8xlarge": 2.043, + "c1.medium": 0.158, + "c1.xlarge": 0.632, + "cc2.8xlarge": 2.350, + "g2.2xlarge": 0.898, + "r3.large": 0.210, + "r3.xlarge": 0.420, + "r3.2xlarge": 0.840, + "r3.4xlarge": 1.680, + "r3.8xlarge": 3.360, + "m2.xlarge": 0.287, + "m2.2xlarge": 0.575, + "m2.4xlarge": 1.150, + "cr1.8xlarge": 4.105, + "i2.xlarge": 1.001, + "i2.2xlarge": 2.001, + "i2.4xlarge": 4.002, + "i2.8xlarge": 8.004, + "hs1.8xlarge": 5.400, + "hi1.4xlarge": 3.276, + "t1.micro": 0.027 }, "ec2_sa_east": { - "t1.micro": 0.027, - "m1.small": 0.115, - "m1.medium": 0.230, - "m1.large": 0.46, - "m1.xlarge": 0.92, - "c1.medium": 0.23, - "c1.xlarge": 0.92, - "m2.xlarge": 0.68, - "m2.2xlarge": 1.36, - "m2.4xlarge": 2.72, - "m3.medium": 0.153, - "m3.large": 0.306, - "m3.xlarge": 0.612, - "m3.2xlarge": 1.224 + "m3.medium": 0.095, + "m3.large": 0.190, + "m3.xlarge": 0.381, + "m3.2xlarge": 0.761, + "m1.small": 0.058, + "m1.medium": 0.117, + "m1.large": 0.233, + "m1.xlarge": 0.467, + "c1.medium": 0.179, + "c1.xlarge": 0.718, + "m2.xlarge": 0.323, + "m2.2xlarge": 0.645, + "m2.4xlarge": 1.291, + "t1.micro": 0.027 }, "ec2_ap_southeast_2": { - "t1.micro": 0.020, - "m1.small": 0.085, - "m1.medium": 0.170, - "m1.large": 0.340, - "m1.xlarge": 0.680, - "c1.medium": 0.186, - "c1.xlarge": 0.744, - "c3.large": 0.189, - "c3.xlarge": 0.378, - "c3.2xlarge": 0.756, - "c3.4xlarge": 1.512, - "c3.8xlarge": 3.024, - "m2.xlarge": 0.506, - "m2.2xlarge": 1.012, - "m2.4xlarge": 2.024, - "m3.medium": 0.158, - "m3.large": 0.315, - "m3.xlarge": 0.630, - "m3.2xlarge": 1.260, + "m3.medium": 0.098, + "m3.large": 0.196, + "m3.xlarge": 0.392, + "m3.2xlarge": 0.784, + "m1.small": 0.058, + "m1.medium": 0.117, + "m1.large": 0.233, + "m1.xlarge": 0.467, + "c3.large": 0.132, + "c3.xlarge": 0.265, + "c3.2xlarge": 0.529, + "c3.4xlarge": 1.058, + "c3.8xlarge": 2.117, + "c1.medium": 0.164, + "c1.xlarge": 0.655, + 
"r3.large": 0.210, + "r3.xlarge": 0.420, + "r3.2xlarge": 0.840, + "r3.4xlarge": 1.680, + "r3.8xlarge": 3.360, + "m2.xlarge": 0.296, + "m2.2xlarge": 0.592, + "m2.4xlarge": 1.183, + "i2.xlarge": 1.018, + "i2.2xlarge": 2.035, + "i2.4xlarge": 4.070, + "i2.8xlarge": 8.140, "hs1.8xlarge": 5.570, - "i2.xlarge": 0.85, - "i2.2xlarge": 1.71, - "i2.4xlarge": 3.41, - "i2.8xlarge": 6.82 + "t1.micro": 0.020 }, "nephoscale" : { From 5521b87afa2ab3df426de797992b8765a6b2806f Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Thu, 10 Apr 2014 12:04:45 -0400 Subject: [PATCH 024/315] Add script for scraping and updating EC2 pricing data. --- contrib/scrape-ec2-prices.py | 144 +++++++++++++++++++++++++++++++++++ 1 file changed, 144 insertions(+) create mode 100755 contrib/scrape-ec2-prices.py diff --git a/contrib/scrape-ec2-prices.py b/contrib/scrape-ec2-prices.py new file mode 100755 index 0000000000..bf543f47e3 --- /dev/null +++ b/contrib/scrape-ec2-prices.py @@ -0,0 +1,144 @@ +#!/usr/bin/env python +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+ +import os +import json +import time +from collections import defaultdict, OrderedDict + +import requests + +ON_DEMAND_LINUX_URL = 'http://aws.amazon.com/ec2/pricing/json/linux-od.json' + +EC2_REGIONS = [ + 'us-east-1', + 'us-west-1', + 'us-west-2', + 'eu-west-1', + 'ap-southeast-1', + 'ap-southeast-2', + 'ap-northeast-1', + 'sa-east-1' +] + +EC2_INSTANCE_TYPES = [ + 't1.micro', + 'm1.small', + 'm1.medium', + 'm1.large', + 'm1.xlarge', + 'm2.xlarge', + 'm2.2xlarge', + 'm2.4xlarge', + 'm3.medium', + 'm3.large', + 'm3.xlarge', + 'm3.2xlarge', + 'c1.medium', + 'c1.xlarge', + 'cc1.4xlarge', + 'cc2.8xlarge', + 'c3.large', + 'c3.xlarge', + 'c3.2xlarge', + 'c3.4xlarge', + 'c3.8xlarge', + 'cg1.4xlarge', + 'g2.2xlarge', + 'cr1.8xlarge', + 'hs1.4xlarge', + 'hs1.8xlarge', + 'i2.xlarge', + 'i2.2xlarge', + 'i2.4xlarge', + 'i2.8xlarge', +] + +# Maps EC2 region name to region name used in the pricing file +REGION_NAME_MAP = { + 'us-east': 'ec2_us_east', + 'us-west': 'ec2_us_west', + 'us-west-2': 'ec2_us_west_oregon', + 'eu-ireland': 'ec2_eu_west', + 'apac-sin': 'ec2_ap_southeast', + 'apac-syd': 'ec2_ap_southeast_2', + 'apac-tokyo': 'ec2_ap_northeast', + 'sa-east-1': 'ec2_sa_east', +} + +BASE_PATH = os.path.dirname(os.path.abspath(__file__)) +PRICING_FILE_PATH = os.path.join(BASE_PATH, '../libcloud/data/pricing.json') +PRICING_FILE_PATH = os.path.abspath(PRICING_FILE_PATH) + + +def scrape_ec2_pricing(): + response = requests.get(ON_DEMAND_LINUX_URL) + data = response.json() + + regions = data['config']['regions'] + + result = defaultdict(OrderedDict) + for region_data in regions: + region_name = region_data['region'] + libcloud_region_name = REGION_NAME_MAP[region_name] + instance_types = region_data['instanceTypes'] + + for instance_type in instance_types: + sizes = instance_type['sizes'] + + for size in sizes: + price = size['valueColumns'][0]['prices']['USD'] + result[libcloud_region_name][size['size']] = price + + return result + + +def update_pricing_file(pricing_file_path, 
pricing_data): + with open(pricing_file_path, 'r') as fp: + content = fp.read() + + data = json.loads(content) + data['updated'] = int(time.time()) + data['compute'].update(pricing_data) + + # Always sort the pricing info + data = OrderedDict(sorted(data.items())) + + content = json.dumps(data, indent=4) + lines = content.splitlines() + print lines + lines = [line.rstrip() for line in lines] + content = '\n'.join(lines) + + with open(pricing_file_path, 'w') as fp: + fp.write(content) + + +def main(): + print('Scraping EC2 pricing data') + + pricing_data = scrape_ec2_pricing() + update_pricing_file(pricing_file_path=PRICING_FILE_PATH, + pricing_data=pricing_data) + + print('Pricing data updated') + + +if __name__ == '__main__': + main() From 6f3a4235b35879b74b14fb7934203cb759045550 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Thu, 10 Apr 2014 14:45:39 -0400 Subject: [PATCH 025/315] Update EC2 instance and pricing information. --- libcloud/compute/drivers/ec2.py | 5 - libcloud/data/pricing.json | 839 +++++++++++++++--------------- libcloud/test/compute/test_ec2.py | 5 +- 3 files changed, 424 insertions(+), 425 deletions(-) diff --git a/libcloud/compute/drivers/ec2.py b/libcloud/compute/drivers/ec2.py index ad51e00c64..884e8ba07d 100644 --- a/libcloud/compute/drivers/ec2.py +++ b/libcloud/compute/drivers/ec2.py @@ -313,7 +313,6 @@ 'm3.2xlarge', 'c1.medium', 'c1.xlarge', - 'cc1.4xlarge', 'cc2.8xlarge', 'c3.large', 'c3.xlarge', @@ -323,7 +322,6 @@ 'cg1.4xlarge', 'g2.2xlarge', 'cr1.8xlarge', - 'hs1.4xlarge', 'hs1.8xlarge', 'i2.xlarge', 'i2.2xlarge', @@ -385,7 +383,6 @@ 'c3.2xlarge', 'c3.4xlarge', 'c3.8xlarge', - 'hs1.4xlarge', 'hs1.8xlarge', 'cc2.8xlarge', 'i2.xlarge', @@ -420,7 +417,6 @@ 'c3.2xlarge', 'c3.4xlarge', 'c3.8xlarge', - 'hs1.4xlarge', 'hs1.8xlarge', 'cc2.8xlarge', 'i2.xlarge', @@ -487,7 +483,6 @@ 'c3.2xlarge', 'c3.4xlarge', 'c3.8xlarge', - 'hs1.4xlarge', 'hs1.8xlarge', 'i2.xlarge', 'i2.2xlarge', diff --git a/libcloud/data/pricing.json 
b/libcloud/data/pricing.json index 5a2e12310f..f054dfcb44 100644 --- a/libcloud/data/pricing.json +++ b/libcloud/data/pricing.json @@ -1,454 +1,459 @@ { "compute": { - "bluebox": { - "1gb": 0.15, - "2gb": 0.25, - "4gb": 0.35, - "8gb": 0.45 + "ec2_us_west_oregon": { + "m3.medium": "0.070", + "m3.large": "0.140", + "m3.xlarge": "0.280", + "m3.2xlarge": "0.560", + "m1.small": "0.044", + "m1.medium": "0.087", + "m1.large": "0.175", + "m1.xlarge": "0.350", + "c3.large": "0.105", + "c3.xlarge": "0.210", + "c3.2xlarge": "0.420", + "c3.4xlarge": "0.840", + "c3.8xlarge": "1.680", + "c1.medium": "0.130", + "c1.xlarge": "0.520", + "cc2.8xlarge": "2.000", + "g2.2xlarge": "0.650", + "r3.large": "0.175", + "r3.xlarge": "0.350", + "r3.2xlarge": "0.700", + "r3.4xlarge": "1.400", + "r3.8xlarge": "2.800", + "m2.xlarge": "0.245", + "m2.2xlarge": "0.490", + "m2.4xlarge": "0.980", + "cr1.8xlarge": "3.500", + "i2.xlarge": "0.853", + "i2.2xlarge": "1.705", + "i2.4xlarge": "3.410", + "i2.8xlarge": "6.820", + "hs1.8xlarge": "4.600", + "hi1.4xlarge": "3.100", + "t1.micro": "0.020" }, - - "rackspace": { - "1": 0.015, - "2": 0.030, - "3": 0.060, - "4": 0.120, - "5": 0.240, - "6": 0.480, - "7": 0.960, - "8": 1.800, - "performance1-1": 0.040, - "performance1-2": 0.080, - "performance1-4": 0.160, - "performance1-8": 0.320, - "performance2-15": 0.680, - "performance2-30": 1.360, - "performance2-60": 2.720, - "performance2-90": 4.080, - "performance2-120": 5.440 + "\"ec2_us_west": { + "m3.2xlarge": "0.616", + "r3.4xlarge": "1.560", + "m1.small": "0.047", + "c1.medium": "0.148", + "m3.large": "0.154", + "r3.2xlarge": "0.780", + "t1.micro": "0.025", + "c3.2xlarge": "0.478", + "c3.xlarge": "0.239", + "m1.large": "0.190", + "c3.8xlarge": "1.912", + "c3.4xlarge": "0.956", + "i2.8xlarge": "7.502", + "m2.2xlarge": "0.550", + "c1.xlarge": "0.592", + "g2.2xlarge": "0.702", + "m1.xlarge": "0.379", + "m2.xlarge": "0.275", + "m1.medium": "0.095", + "r3.large": "0.195", + "i2.xlarge": "0.938", + "m3.medium": 
"0.077", + "i2.4xlarge": "3.751", + "r3.xlarge": "0.390", + "i2.2xlarge": "1.876", + "c3.large": "0.120", + "r3.8xlarge": "3.120", + "m2.4xlarge": "1.100", + "m3.xlarge": "0.308" }, - - "rackspacenovaus": { - "2": 0.022, - "3": 0.060, - "4": 0.120, - "5": 0.240, - "6": 0.480, - "7": 0.960, - "8": 1.200, - "performance1-1": 0.040, - "performance1-2": 0.080, - "performance1-4": 0.160, - "performance1-8": 0.320, - "performance2-15": 0.680, - "performance2-30": 1.360, - "performance2-60": 2.720, - "performance2-90": 4.080, - "performance2-120": 5.440 + "ec2_us_west": { + "m3.medium": "0.077", + "m3.large": "0.154", + "m3.xlarge": "0.308", + "m3.2xlarge": "0.616", + "m1.small": "0.047", + "m1.medium": "0.095", + "m1.large": "0.190", + "m1.xlarge": "0.379", + "c3.large": "0.120", + "c3.xlarge": "0.239", + "c3.2xlarge": "0.478", + "c3.4xlarge": "0.956", + "c3.8xlarge": "1.912", + "c1.medium": "0.148", + "c1.xlarge": "0.592", + "g2.2xlarge": "0.702", + "r3.large": "0.195", + "r3.xlarge": "0.390", + "r3.2xlarge": "0.780", + "r3.4xlarge": "1.560", + "r3.8xlarge": "3.120", + "m2.xlarge": "0.275", + "m2.2xlarge": "0.550", + "m2.4xlarge": "1.100", + "i2.xlarge": "0.938", + "i2.2xlarge": "1.876", + "i2.4xlarge": "3.751", + "i2.8xlarge": "7.502", + "t1.micro": "0.025" + }, + "ec2_eu_west": { + "m3.medium": "0.077", + "m3.large": "0.154", + "m3.xlarge": "0.308", + "m3.2xlarge": "0.616", + "m1.small": "0.047", + "m1.medium": "0.095", + "m1.large": "0.190", + "m1.xlarge": "0.379", + "c3.large": "0.120", + "c3.xlarge": "0.239", + "c3.2xlarge": "0.478", + "c3.4xlarge": "0.956", + "c3.8xlarge": "1.912", + "c1.medium": "0.148", + "c1.xlarge": "0.592", + "cc2.8xlarge": "2.250", + "g2.2xlarge": "0.702", + "cg1.4xlarge": "2.360", + "r3.large": "0.195", + "r3.xlarge": "0.390", + "r3.2xlarge": "0.780", + "r3.4xlarge": "1.560", + "r3.8xlarge": "3.120", + "m2.xlarge": "0.275", + "m2.2xlarge": "0.550", + "m2.4xlarge": "1.100", + "cr1.8xlarge": "3.750", + "i2.xlarge": "0.938", + "i2.2xlarge": 
"1.876", + "i2.4xlarge": "3.751", + "i2.8xlarge": "7.502", + "hs1.8xlarge": "4.900", + "hi1.4xlarge": "3.100", + "t1.micro": "0.020" }, - "rackspacenovalon": { - "2": 0.032, + "performance2-60": 2.72, + "performance2-120": 5.44, + "performance1-1": 0.04, + "performance2-15": 0.68, + "performance1-4": 0.16, + "performance2-30": 1.36, + "performance2-90": 4.08, "3": 0.064, + "2": 0.032, + "performance1-2": 0.08, "4": 0.129, - "5": 0.258, - "6": 0.516, "7": 0.967, - "8": 1.612, - "performance1-1": 0.040, - "performance1-2": 0.080, - "performance1-4": 0.160, - "performance1-8": 0.320, - "performance2-15": 0.680, - "performance2-30": 1.360, - "performance2-60": 2.720, - "performance2-90": 4.080, - "performance2-120": 5.440 + "6": 0.516, + "5": 0.258, + "performance1-8": 0.32, + "8": 1.612 }, - - "rackspacenovasyd": { - "2": 0.026, - "3": 0.072, - "4": 0.144, - "5": 0.288, - "6": 0.576, - "7": 1.080, - "8": 1.440, - "performance1-1": 0.040, - "performance1-2": 0.080, - "performance1-4": 0.160, - "performance1-8": 0.320, - "performance2-15": 0.680, - "performance2-30": 1.360, - "performance2-60": 2.720, - "performance2-90": 4.080, - "performance2-120": 5.440 + "ec2_ap_southeast_2": { + "m3.medium": "0.098", + "m3.large": "0.196", + "m3.xlarge": "0.392", + "m3.2xlarge": "0.784", + "m1.small": "0.058", + "m1.medium": "0.117", + "m1.large": "0.233", + "m1.xlarge": "0.467", + "c3.large": "0.132", + "c3.xlarge": "0.265", + "c3.2xlarge": "0.529", + "c3.4xlarge": "1.058", + "c3.8xlarge": "2.117", + "c1.medium": "0.164", + "c1.xlarge": "0.655", + "r3.large": "0.210", + "r3.xlarge": "0.420", + "r3.2xlarge": "0.840", + "r3.4xlarge": "1.680", + "r3.8xlarge": "3.360", + "m2.xlarge": "0.296", + "m2.2xlarge": "0.592", + "m2.4xlarge": "1.183", + "i2.xlarge": "1.018", + "i2.2xlarge": "2.035", + "i2.4xlarge": "4.070", + "i2.8xlarge": "8.140", + "hs1.8xlarge": "5.570", + "t1.micro": "0.020" }, - - "dreamhost": { - "minimum": 15, - "maximum": 200, - "default": 115, - "low": 50, - "high": 
150 + "vps_net": { + "1": 0.416 }, - "ec2_us_east": { - "m3.medium": 0.113, - "m3.large": 0.140, - "m3.xlarge": 0.45, - "m3.2xlarge": 0.90, - "m1.small": 0.044, - "m1.medium": 0.087, - "m1.large": 0.175, - "m1.xlarge": 0.350, - "c3.large": 0.105, - "c3.xlarge": 0.210, - "c3.2xlarge": 0.420, - "c3.4xlarge": 0.840, - "c3.8xlarge": 1.680, - "c1.medium": 0.130, - "c1.xlarge": 0.520, - "cc2.8xlarge": 2.00, - "g2.2xlarge": 0.650, - "cg1.4xlarge": 2.1, - "r3.large": 0.175, - "r3.xlarge": 0.350, - "r3.2xlarge": 0.700, - "r3.4xlarge": 1.400, - "r3.8xlarge": 2.800, - "m2.xlarge": 0.245, - "m2.2xlarge": 0.490, - "m2.4xlarge": 0.980, - "cr1.8xlarge": 3.50, - "i2.xlarge": 0.853, - "i2.2xlarge": 1.705, - "i2.4xlarge": 3.41, - "i2.8xlarge": 6.82, - "hs1.8xlarge": 4.6, - "hi1.4xlarge": 3.10, - "t1.micro": 0.02 + "m3.medium": "0.070", + "m3.large": "0.140", + "m3.xlarge": "0.280", + "m3.2xlarge": "0.560", + "m1.small": "0.044", + "m1.medium": "0.087", + "m1.large": "0.175", + "m1.xlarge": "0.350", + "c3.large": "0.105", + "c3.xlarge": "0.210", + "c3.2xlarge": "0.420", + "c3.4xlarge": "0.840", + "c3.8xlarge": "1.680", + "c1.medium": "0.130", + "c1.xlarge": "0.520", + "cc2.8xlarge": "2.000", + "g2.2xlarge": "0.650", + "cg1.4xlarge": "2.100", + "r3.large": "0.175", + "r3.xlarge": "0.350", + "r3.2xlarge": "0.700", + "r3.4xlarge": "1.400", + "r3.8xlarge": "2.800", + "m2.xlarge": "0.245", + "m2.2xlarge": "0.490", + "m2.4xlarge": "0.980", + "cr1.8xlarge": "3.500", + "i2.xlarge": "0.853", + "i2.2xlarge": "1.705", + "i2.4xlarge": "3.410", + "i2.8xlarge": "6.820", + "hs1.8xlarge": "4.600", + "hi1.4xlarge": "3.100", + "t1.micro": "0.020" }, - - "ec2_us_west": { - "m3.medium": 0.124, - "m3.large": 0.248, - "m3.xlarge": 0.495, - "m3.2xlarge": 0.990, - "m1.small": 0.047, - "m1.medium": 0.095, - "m1.large": 0.190, - "m1.xlarge": 0.379, - "c3.large": 0.120, - "c3.xlarge": 0.239, - "c3.2xlarge": 0.478, - "c3.4xlarge": 0.956, - "c3.8xlarge": 1.912, - "c1.medium": 0.148, - "c1.xlarge": 0.592, - 
"g2.2xlarge": 0.702, - "r3.large": 0.195, - "r3.xlarge": 0.390, - "r3.2xlarge": 0.780, - "r3.4xlarge": 1.560, - "r3.8xlarge": 3.120, - "m2.xlarge": 0.275, - "m2.2xlarge": 0.550, - "m2.4xlarge": 1.100, - "i2.xlarge": 0.938, - "i2.2xlarge": 1.876, - "i2.4xlarge": 3.751, - "i2.8xlarge": 7.502, - "t1.micro": 0.025 + "rackspacenovaus": { + "performance2-60": 2.72, + "performance2-120": 5.44, + "performance1-1": 0.04, + "performance2-15": 0.68, + "performance1-4": 0.16, + "performance2-30": 1.36, + "performance2-90": 4.08, + "3": 0.06, + "2": 0.022, + "performance1-2": 0.08, + "4": 0.12, + "7": 0.96, + "6": 0.48, + "5": 0.24, + "performance1-8": 0.32, + "8": 1.2 }, - - "ec2_us_west_oregon": { - "m3.medium": 0.070, - "m3.large": 0.140, - "m3.xlarge": 0.280, - "m3.2xlarge": 0.560, - "m1.small": 0.044, - "m1.medium": 0.087, - "m1.large": 0.175, - "m1.xlarge": 0.350, - "c3.large": 0.105, - "c3.xlarge": 0.210, - "c3.2xlarge": 0.420, - "c3.4xlarge": 0.840, - "c3.8xlarge": 1.680, - "c1.medium": 0.130, - "c1.xlarge": 0.520, - "cc2.8xlarge": 2.000, - "g2.2xlarge": 0.650, - "r3.large": 0.175, - "r3.xlarge": 0.350, - "r3.2xlarge": 0.700, - "r3.4xlarge": 1.400, - "r3.8xlarge": 2.800, - "m2.xlarge": 0.245, - "m2.2xlarge": 0.490, - "m2.4xlarge": 0.980, - "cr1.8xlarge": 3.50, - "i2.xlarge": 0.853, - "i2.2xlarge": 1.705, - "i2.4xlarge": 3.41, - "i2.8xlarge": 6.82, - "hs1.8xlarge": 4.6, - "hi1.4xlarge": 3.10, - "t1.micro": 0.02 + "ec2_sa_east": { + "m3.medium": "0.095", + "m3.large": "0.190", + "m3.xlarge": "0.381", + "m3.2xlarge": "0.761", + "m1.small": "0.058", + "m1.medium": "0.117", + "m1.large": "0.233", + "m1.xlarge": "0.467", + "c1.medium": "0.179", + "c1.xlarge": "0.718", + "m2.xlarge": "0.323", + "m2.2xlarge": "0.645", + "m2.4xlarge": "1.291", + "t1.micro": "0.027" }, - - "ec2_eu_west": { - "m3.medium": 0.077, - "m3.large": 0.154, - "m3.xlarge": 0.308, - "m3.2xlarge": 0.616, - "m1.small": 0.047, - "m1.medium": 0.095, - "m1.large": 0.190, - "m1.xlarge": 0.379, - "c3.large": 
0.120, - "c3.xlarge": 0.239, - "c3.2xlarge": 0.478, - "c3.4xlarge": 0.956, - "c3.8xlarge": 1.912, - "c1.medium": 0.148, - "c1.xlarge": 0.592, - "cc2.8xlarge": 2.250, - "g2.2xlarge": 0.702, - "cg1.4xlarge": 2.360, - "r3.large": 0.195, - "r3.xlarge": 0.390, - "r3.2xlarge": 0.780, - "r3.4xlarge": 1.560, - "r3.8xlarge": 3.120, - "m2.xlarge": 0.275, - "m2.2xlarge": 0.550, - "m2.4xlarge": 1.100, - "cr1.8xlarge": 3.750, - "i2.xlarge": 0.938, - "i2.2xlarge": 1.876, - "i2.4xlarge": 3.751, - "i2.8xlarge": 7.502, - "hs1.8xlarge": 4.900, - "hs1.4xlarge": 3.100, - "t1.micro": 0.020 + "cloudsigma_zrh": { + "high-cpu-medium": 0.211, + "standard-large": 0.381, + "micro-high-cpu": 0.381, + "standard-extra-large": 0.762, + "high-memory-double-extra-large": 1.383, + "micro-regular": 0.0548, + "standard-small": 0.0796, + "high-memory-extra-large": 0.642, + "high-cpu-extra-large": 0.78 }, - - "ec2_ap_southeast": { - "m3.medium": 0.098, - "m3.large": 0.196, - "m3.xlarge": 0.392, - "m3.2xlarge": 0.784, - "m1.small": 0.058, - "m1.medium": 0.117, - "m1.large": 0.233, - "m1.xlarge": 0.467, - "c3.large": 0.132, - "c3.xlarge": 0.265, - "c3.2xlarge": 0.529, - "c3.4xlarge": 1.058, - "c3.8xlarge": 2.117, - "c1.medium": 0.164, - "c1.xlarge": 0.655, - "r3.large": 0.210, - "r3.xlarge": 0.420, - "r3.2xlarge": 0.840, - "r3.4xlarge": 1.680, - "r3.8xlarge": 3.360, - "m2.xlarge": 0.296, - "m2.2xlarge": 0.592, - "m2.4xlarge": 1.183, - "i2.xlarge": 1.018, - "i2.2xlarge": 2.035, - "i2.4xlarge": 4.070, - "i2.8xlarge": 8.140, - "hs1.8xlarge": 5.570, - "t1.micro": 0.025 + "rackspacenovasyd": { + "performance2-60": 2.72, + "performance2-120": 5.44, + "performance1-1": 0.04, + "performance2-15": 0.68, + "performance1-4": 0.16, + "performance2-30": 1.36, + "performance2-90": 4.08, + "3": 0.072, + "2": 0.026, + "performance1-2": 0.08, + "4": 0.144, + "7": 1.08, + "6": 0.576, + "5": 0.288, + "performance1-8": 0.32, + "8": 1.44 }, - - "ec2_ap_northeast": { - "m3.medium": 0.101, - "m3.large": 0.203, - "m3.xlarge": 
0.405, - "m3.2xlarge": 0.810, - "m1.small": 0.061, - "m1.medium": 0.122, - "m1.large": 0.243, - "m1.xlarge": 0.486, - "c3.large": 0.128, - "c3.xlarge": 0.255, - "c3.2xlarge": 0.511, - "c3.4xlarge": 1.021, - "c3.8xlarge": 2.043, - "c1.medium": 0.158, - "c1.xlarge": 0.632, - "cc2.8xlarge": 2.350, - "g2.2xlarge": 0.898, - "r3.large": 0.210, - "r3.xlarge": 0.420, - "r3.2xlarge": 0.840, - "r3.4xlarge": 1.680, - "r3.8xlarge": 3.360, - "m2.xlarge": 0.287, - "m2.2xlarge": 0.575, - "m2.4xlarge": 1.150, - "cr1.8xlarge": 4.105, - "i2.xlarge": 1.001, - "i2.2xlarge": 2.001, - "i2.4xlarge": 4.002, - "i2.8xlarge": 8.004, - "hs1.8xlarge": 5.400, - "hi1.4xlarge": 3.276, - "t1.micro": 0.027 - }, - - "ec2_sa_east": { - "m3.medium": 0.095, - "m3.large": 0.190, - "m3.xlarge": 0.381, - "m3.2xlarge": 0.761, - "m1.small": 0.058, - "m1.medium": 0.117, - "m1.large": 0.233, - "m1.xlarge": 0.467, - "c1.medium": 0.179, - "c1.xlarge": 0.718, - "m2.xlarge": 0.323, - "m2.2xlarge": 0.645, - "m2.4xlarge": 1.291, - "t1.micro": 0.027 + "ec2_ap_northeast": { + "m3.medium": "0.101", + "m3.large": "0.203", + "m3.xlarge": "0.405", + "m3.2xlarge": "0.810", + "m1.small": "0.061", + "m1.medium": "0.122", + "m1.large": "0.243", + "m1.xlarge": "0.486", + "c3.large": "0.128", + "c3.xlarge": "0.255", + "c3.2xlarge": "0.511", + "c3.4xlarge": "1.021", + "c3.8xlarge": "2.043", + "c1.medium": "0.158", + "c1.xlarge": "0.632", + "cc2.8xlarge": "2.349", + "g2.2xlarge": "0.898", + "r3.large": "0.210", + "r3.xlarge": "0.420", + "r3.2xlarge": "0.840", + "r3.4xlarge": "1.680", + "r3.8xlarge": "3.360", + "m2.xlarge": "0.287", + "m2.2xlarge": "0.575", + "m2.4xlarge": "1.150", + "cr1.8xlarge": "4.105", + "i2.xlarge": "1.001", + "i2.2xlarge": "2.001", + "i2.4xlarge": "4.002", + "i2.8xlarge": "8.004", + "hs1.8xlarge": "5.400", + "hi1.4xlarge": "3.276", + "t1.micro": "0.026" }, - - "ec2_ap_southeast_2": { - "m3.medium": 0.098, - "m3.large": 0.196, - "m3.xlarge": 0.392, - "m3.2xlarge": 0.784, - "m1.small": 0.058, - "m1.medium": 
0.117, - "m1.large": 0.233, - "m1.xlarge": 0.467, - "c3.large": 0.132, - "c3.xlarge": 0.265, - "c3.2xlarge": 0.529, - "c3.4xlarge": 1.058, - "c3.8xlarge": 2.117, - "c1.medium": 0.164, - "c1.xlarge": 0.655, - "r3.large": 0.210, - "r3.xlarge": 0.420, - "r3.2xlarge": 0.840, - "r3.4xlarge": 1.680, - "r3.8xlarge": 3.360, - "m2.xlarge": 0.296, - "m2.2xlarge": 0.592, - "m2.4xlarge": 1.183, - "i2.xlarge": 1.018, - "i2.2xlarge": 2.035, - "i2.4xlarge": 4.070, - "i2.8xlarge": 8.140, - "hs1.8xlarge": 5.570, - "t1.micro": 0.020 + "gogrid": { + "24GB": 4.56, + "512MB": 0.095, + "8GB": 1.52, + "4GB": 0.76, + "2GB": 0.38, + "1GB": 0.19, + "16GB": 3.04 }, - - "nephoscale" : { - "1": 0.60, - "3": 0.063, - "5": 0.031, - "7": 0.125, - "9": 0.188, + "serverlove": { + "high-cpu-medium": 0.291, + "medium": 0.404, + "large": 0.534, + "small": 0.161, + "extra-large": 0.615, + "high-cpu-extra-large": 0.776 + }, + "elastichosts": { + "high-cpu-medium": 0.18, + "medium": 0.223, + "large": 0.378, + "small": 0.1, + "extra-large": 0.579, + "high-cpu-extra-large": 0.77 + }, + "rackspace": { + "performance2-60": 2.72, + "performance2-120": 5.44, + "performance1-1": 0.04, + "performance2-15": 0.68, + "performance1-4": 0.16, + "performance2-30": 1.36, + "1": 0.015, + "performance2-90": 4.08, + "3": 0.06, + "2": 0.03, + "performance1-2": 0.08, + "4": 0.12, + "7": 0.96, + "6": 0.48, + "5": 0.24, + "performance1-8": 0.32, + "8": 1.8 + }, + "nephoscale": { "11": 0.35, "27": 0.0, - "46": 0.10, "48": 0.15, + "46": 0.1, + "54": 0.938, + "56": 0.75, "50": 0.28, "52": 0.48, - "54": 0.938, - "56": 0.75 + "1": 0.6, + "3": 0.063, + "5": 0.031, + "7": 0.125, + "9": 0.188 }, - - "nimbus" : { + "nimbus": { + "m1.xlarge": 0.0, "m1.small": 0.0, - "m1.large": 0.0, - "m1.xlarge": 0.0 + "m1.large": 0.0 }, - - "cloudsigma_zrh": { - "micro-regular": 0.0548, - "micro-high-cpu": 0.381, - "standard-small": 0.0796, - "standard-large": 0.381, - "standard-extra-large": 0.762, - "high-memory-extra-large": 0.642, - 
"high-memory-double-extra-large": 1.383, - "high-cpu-medium": 0.211, - "high-cpu-extra-large": 0.780 + "gandi": { + "1": 0.02, + "small": 0.02, + "large": 0.06, + "medium": 0.03, + "x-large": 0.12 + }, + "skalicloud": { + "high-cpu-medium": 0.249, + "medium": 0.301, + "large": 0.505, + "small": 0.136, + "extra-large": 0.654, + "high-cpu-extra-large": 0.936 + }, + "bluebox": { + "4gb": 0.35, + "2gb": 0.25, + "8gb": 0.45, + "1gb": 0.15 + }, + "ec2_ap_southeast": { + "m3.medium": "0.098", + "m3.large": "0.196", + "m3.xlarge": "0.392", + "m3.2xlarge": "0.784", + "m1.small": "0.058", + "m1.medium": "0.117", + "m1.large": "0.233", + "m1.xlarge": "0.467", + "c3.large": "0.132", + "c3.xlarge": "0.265", + "c3.2xlarge": "0.529", + "c3.4xlarge": "1.058", + "c3.8xlarge": "2.117", + "c1.medium": "0.164", + "c1.xlarge": "0.655", + "r3.large": "0.210", + "r3.xlarge": "0.420", + "r3.2xlarge": "0.840", + "r3.4xlarge": "1.680", + "r3.8xlarge": "3.360", + "m2.xlarge": "0.296", + "m2.2xlarge": "0.592", + "m2.4xlarge": "1.183", + "i2.xlarge": "1.018", + "i2.2xlarge": "2.035", + "i2.4xlarge": "4.070", + "i2.8xlarge": "8.140", + "hs1.8xlarge": "5.570", + "t1.micro": "0.020" }, - "cloudsigma_lvs": { - "micro-regular": 0.0, - "micro-high-cpu": 0.0, - "standard-small": 0.0, + "high-cpu-medium": 0.0, "standard-large": 0.0, + "micro-high-cpu": 0.0, "standard-extra-large": 0.0, - "high-memory-extra-large": 0.0, "high-memory-double-extra-large": 0.0, - "high-cpu-medium": 0.0, + "micro-regular": 0.0, + "standard-small": 0.0, + "high-memory-extra-large": 0.0, "high-cpu-extra-large": 0.0 }, - - "elastichosts": { - "small": 0.100, - "medium": 0.223, - "large": 0.378, - "extra-large": 0.579, - "high-cpu-medium": 0.180, - "high-cpu-extra-large": 0.770 - }, - - "skalicloud": { - "small": 0.136, - "medium": 0.301, - "large": 0.505, - "extra-large": 0.654, - "high-cpu-medium": 0.249, - "high-cpu-extra-large": 0.936 - }, - - "serverlove": { - "small": 0.161, - "medium": 0.404, - "large": 0.534, - 
"extra-large": 0.615, - "high-cpu-medium": 0.291, - "high-cpu-extra-large": 0.776 - }, - - "gogrid": { - "512MB": 0.095, - "1GB": 0.19, - "2GB": 0.38, - "4GB": 0.76, - "8GB": 1.52, - "16GB": 3.04, - "24GB": 4.56 - }, - - "gandi": { - "1": 0.02, - "small": 0.02, - "medium": 0.03, - "large": 0.06, - "x-large": 0.12 - }, - - "vps_net": { - "1": 0.416 + "dreamhost": { + "default": 115, + "high": 150, + "minimum": 15, + "maximum": 200, + "low": 50 } }, - - "storage": { - }, - - "updated": 1349661768 -} + "storage": {}, + "updated": 1397154837 +} \ No newline at end of file diff --git a/libcloud/test/compute/test_ec2.py b/libcloud/test/compute/test_ec2.py index 4a2b4d7c1e..de2eeb3ba5 100644 --- a/libcloud/test/compute/test_ec2.py +++ b/libcloud/test/compute/test_ec2.py @@ -372,9 +372,8 @@ def test_list_sizes(self): self.assertTrue('m2.4xlarge' in ids) if region_name == 'us-east-1': - self.assertEqual(len(sizes), 30) + self.assertEqual(len(sizes), 28) self.assertTrue('cg1.4xlarge' in ids) - self.assertTrue('cc1.4xlarge' in ids) self.assertTrue('cc2.8xlarge' in ids) self.assertTrue('cr1.8xlarge' in ids) elif region_name == 'us-west-1': @@ -386,7 +385,7 @@ def test_list_sizes(self): elif region_name == 'ap-southeast-2': self.assertEqual(len(sizes), 24) elif region_name == 'eu-west-1': - self.assertEqual(len(sizes), 27) + self.assertEqual(len(sizes), 26) self.driver.region_name = region_old From b9b3f08751d47c611ce4bc8010c4644f6c6a1150 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Thu, 10 Apr 2014 14:49:08 -0400 Subject: [PATCH 026/315] Remove print, fix pricing file. 
--- contrib/scrape-ec2-prices.py | 1 - libcloud/data/pricing.json | 33 +-------------------------------- 2 files changed, 1 insertion(+), 33 deletions(-) diff --git a/contrib/scrape-ec2-prices.py b/contrib/scrape-ec2-prices.py index bf543f47e3..54a63e8b61 100755 --- a/contrib/scrape-ec2-prices.py +++ b/contrib/scrape-ec2-prices.py @@ -122,7 +122,6 @@ def update_pricing_file(pricing_file_path, pricing_data): content = json.dumps(data, indent=4) lines = content.splitlines() - print lines lines = [line.rstrip() for line in lines] content = '\n'.join(lines) diff --git a/libcloud/data/pricing.json b/libcloud/data/pricing.json index f054dfcb44..9e08d3027f 100644 --- a/libcloud/data/pricing.json +++ b/libcloud/data/pricing.json @@ -35,37 +35,6 @@ "hi1.4xlarge": "3.100", "t1.micro": "0.020" }, - "\"ec2_us_west": { - "m3.2xlarge": "0.616", - "r3.4xlarge": "1.560", - "m1.small": "0.047", - "c1.medium": "0.148", - "m3.large": "0.154", - "r3.2xlarge": "0.780", - "t1.micro": "0.025", - "c3.2xlarge": "0.478", - "c3.xlarge": "0.239", - "m1.large": "0.190", - "c3.8xlarge": "1.912", - "c3.4xlarge": "0.956", - "i2.8xlarge": "7.502", - "m2.2xlarge": "0.550", - "c1.xlarge": "0.592", - "g2.2xlarge": "0.702", - "m1.xlarge": "0.379", - "m2.xlarge": "0.275", - "m1.medium": "0.095", - "r3.large": "0.195", - "i2.xlarge": "0.938", - "m3.medium": "0.077", - "i2.4xlarge": "3.751", - "r3.xlarge": "0.390", - "i2.2xlarge": "1.876", - "c3.large": "0.120", - "r3.8xlarge": "3.120", - "m2.4xlarge": "1.100", - "m3.xlarge": "0.308" - }, "ec2_us_west": { "m3.medium": "0.077", "m3.large": "0.154", @@ -456,4 +425,4 @@ }, "storage": {}, "updated": 1397154837 -} \ No newline at end of file +} From 04884dca384de6b849bf23c2ce395bf1017894c6 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Thu, 10 Apr 2014 14:50:26 -0400 Subject: [PATCH 027/315] Add new r3 instance types to the EC2 driver. 
--- libcloud/compute/drivers/ec2.py | 66 +++++++++++++++++++++++++++++++ libcloud/test/compute/test_ec2.py | 10 ++--- 2 files changed, 71 insertions(+), 5 deletions(-) diff --git a/libcloud/compute/drivers/ec2.py b/libcloud/compute/drivers/ec2.py index 884e8ba07d..6c5e10d556 100644 --- a/libcloud/compute/drivers/ec2.py +++ b/libcloud/compute/drivers/ec2.py @@ -290,6 +290,42 @@ 'disk': 6400, 'bandwidth': None }, + # 1x SSD + 'r3.large': { + 'id': 'r3.large', + 'name': 'Memory Optimized Large instance', + 'ram': 15000, + 'disk': 32, + 'bandwidth': None + }, + 'r3.xlarge': { + 'id': 'r3.xlarge', + 'name': 'Memory Optimized Extra Large instance', + 'ram': 30500, + 'disk': 80, + 'bandwidth': None + }, + 'r3.2xlarge': { + 'id': 'r3.2xlarge', + 'name': 'Memory Optimized Double Extra Large instance', + 'ram': 61000, + 'disk': 160, + 'bandwidth': None + }, + 'r3.4xlarge': { + 'id': 'r3.4xlarge', + 'name': 'Memory Optimized Quadruple Extra Large instance', + 'ram': 122000, + 'disk': 320, + 'bandwidth': None + }, + 'r3.8xlarge': { + 'id': 'r3.8xlarge', + 'name': 'Memory Optimized Eight Extra Large instance', + 'ram': 244000, + 'disk': 320, # x2 + 'bandwidth': None + } } REGION_DETAILS = { @@ -327,6 +363,11 @@ 'i2.2xlarge', 'i2.4xlarge', 'i2.8xlarge', + 'r3.large', + 'r3.xlarge', + 'r3.2xlarge', + 'r3.4xlarge', + 'r3.8xlarge' ] }, # US West (Northern California) Region @@ -359,6 +400,11 @@ 'i2.2xlarge', 'i2.4xlarge', 'i2.8xlarge', + 'r3.large', + 'r3.xlarge', + 'r3.2xlarge', + 'r3.4xlarge', + 'r3.8xlarge' ] }, # US West (Oregon) Region @@ -389,6 +435,11 @@ 'i2.2xlarge', 'i2.4xlarge', 'i2.8xlarge', + 'r3.large', + 'r3.xlarge', + 'r3.2xlarge', + 'r3.4xlarge', + 'r3.8xlarge' ] }, # EU (Ireland) Region @@ -423,6 +474,11 @@ 'i2.2xlarge', 'i2.4xlarge', 'i2.8xlarge', + 'r3.large', + 'r3.xlarge', + 'r3.2xlarge', + 'r3.4xlarge', + 'r3.8xlarge' ] }, # Asia Pacific (Singapore) Region @@ -488,6 +544,11 @@ 'i2.2xlarge', 'i2.4xlarge', 'i2.8xlarge', + 'r3.large', + 'r3.xlarge', + 
'r3.2xlarge', + 'r3.4xlarge', + 'r3.8xlarge' ] }, # South America (Sao Paulo) Region @@ -542,6 +603,11 @@ 'i2.2xlarge', 'i2.4xlarge', 'i2.8xlarge', + 'r3.large', + 'r3.xlarge', + 'r3.2xlarge', + 'r3.4xlarge', + 'r3.8xlarge' ] }, 'nimbus': { diff --git a/libcloud/test/compute/test_ec2.py b/libcloud/test/compute/test_ec2.py index de2eeb3ba5..6d665457b5 100644 --- a/libcloud/test/compute/test_ec2.py +++ b/libcloud/test/compute/test_ec2.py @@ -372,20 +372,20 @@ def test_list_sizes(self): self.assertTrue('m2.4xlarge' in ids) if region_name == 'us-east-1': - self.assertEqual(len(sizes), 28) + self.assertEqual(len(sizes), 33) self.assertTrue('cg1.4xlarge' in ids) self.assertTrue('cc2.8xlarge' in ids) self.assertTrue('cr1.8xlarge' in ids) elif region_name == 'us-west-1': - self.assertEqual(len(sizes), 24) + self.assertEqual(len(sizes), 29) if region_name == 'us-west-2': - self.assertEqual(len(sizes), 24) + self.assertEqual(len(sizes), 29) elif region_name == 'ap-southeast-1': self.assertEqual(len(sizes), 24) elif region_name == 'ap-southeast-2': - self.assertEqual(len(sizes), 24) + self.assertEqual(len(sizes), 29) elif region_name == 'eu-west-1': - self.assertEqual(len(sizes), 26) + self.assertEqual(len(sizes), 31) self.driver.region_name = region_old From e24b498c59bea942266fa140fc158e5dbc285630 Mon Sep 17 00:00:00 2001 From: Alex Gaynor Date: Mon, 21 Apr 2014 14:57:40 -0700 Subject: [PATCH 028/315] Document and verify that we support Python 3.4 Closes #279 Signed-off-by: Tomaz Muraus --- setup.py | 1 + tox.ini | 8 +++++++- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 41f2833090..cbc2e744a9 100644 --- a/setup.py +++ b/setup.py @@ -255,4 +255,5 @@ def run(self): 'Programming Language :: Python :: 3.1', 'Programming Language :: Python :: 3.2', 'Programming Language :: Python :: 3.3', + 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: Implementation :: PyPy']) diff --git a/tox.ini b/tox.ini index 
66dc270fb1..7dee77e58e 100644 --- a/tox.ini +++ b/tox.ini @@ -1,5 +1,5 @@ [tox] -envlist = py25,py26,py27,pypy,py32,py33,lint +envlist = py25,py26,py27,pypy,py32,py33,py34,lint setenv = PIP_USE_MIRRORS=1 @@ -38,6 +38,12 @@ deps = mock deps = mock lockfile +[testenv:py34] +# At some point we can switch to use the stdlib provided mock module on +# Python3.4+ +deps = mock + lockfile + [testenv:docs] deps = sphinx basepython = python2.7 From 5f2925d4739ab43906f8a1c4f8105e5f96f50347 Mon Sep 17 00:00:00 2001 From: Chris Hannam Date: Thu, 10 Apr 2014 12:54:57 +0100 Subject: [PATCH 029/315] add a deprecated flag to a size Signed-off-by: Tomaz Muraus --- libcloud/compute/drivers/gce.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/libcloud/compute/drivers/gce.py b/libcloud/compute/drivers/gce.py index abe264e634..b6e1c16b48 100644 --- a/libcloud/compute/drivers/gce.py +++ b/libcloud/compute/drivers/gce.py @@ -3125,6 +3125,9 @@ def _to_node(self, node): extra['tags_fingerprint'] = node['tags']['fingerprint'] extra['scheduling'] = node.get('scheduling', {}) + if (machine_type.get('deprecated')): + extra['deprecated'] = True + extra['boot_disk'] = None for disk in extra['disks']: if disk.get('boot') and disk.get('type') == 'PERSISTENT': From de8e4ce7f92e90374b376f9f54ccdd1e22dd7abb Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Wed, 23 Apr 2014 11:44:26 +0200 Subject: [PATCH 030/315] Add "deprecated" attribute to the Node object in the GCE driver. Closes #276 --- CHANGES.rst | 5 +++++ libcloud/compute/drivers/gce.py | 5 +---- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index 92166b35e1..0fed4dd8ca 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -81,6 +81,11 @@ Compute (GITHUB-272) [zerthimon] +- Add "deprecated" attribute to the Node object in the Google Compute Engine + driver. 
+ (GITHUB-276) + [Chris / bassdread] + Load Balancer ~~~~~~~~~~~~~ diff --git a/libcloud/compute/drivers/gce.py b/libcloud/compute/drivers/gce.py index b6e1c16b48..c62957f7d5 100644 --- a/libcloud/compute/drivers/gce.py +++ b/libcloud/compute/drivers/gce.py @@ -3124,11 +3124,8 @@ def _to_node(self, node): extra['metadata'] = node.get('metadata', {}) extra['tags_fingerprint'] = node['tags']['fingerprint'] extra['scheduling'] = node.get('scheduling', {}) + extra['deprecated'] = True if node.get('deprecated', None) else False - if (machine_type.get('deprecated')): - extra['deprecated'] = True - - extra['boot_disk'] = None for disk in extra['disks']: if disk.get('boot') and disk.get('type') == 'PERSISTENT': bd = self._get_components_from_path(disk['source']) From 48d3bfd31fd4ae783934935ff7fa3348a290051f Mon Sep 17 00:00:00 2001 From: Roel Van Nyen Date: Tue, 22 Apr 2014 18:16:50 +0200 Subject: [PATCH 031/315] Update Softlayer driver to use "fullyQualifiedDomainName" attribute instead of "hostname" for the node name. Closes #280 Signed-off-by: Tomaz Muraus --- CHANGES.rst | 5 +++++ libcloud/compute/drivers/softlayer.py | 4 +++- libcloud/test/compute/test_softlayer.py | 4 ++-- 3 files changed, 10 insertions(+), 3 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index 0fed4dd8ca..35466b8584 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -86,6 +86,11 @@ Compute (GITHUB-276) [Chris / bassdread] +- Update Softlayer driver to use "fullyQualifiedDomainName" instead of + "hostname" attribute for the node name. 
+ (GITHUB-280) + [RoelVanNyen] + Load Balancer ~~~~~~~~~~~~~ diff --git a/libcloud/compute/drivers/softlayer.py b/libcloud/compute/drivers/softlayer.py index ced4a34b81..4fe8cdd2e3 100644 --- a/libcloud/compute/drivers/softlayer.py +++ b/libcloud/compute/drivers/softlayer.py @@ -242,12 +242,14 @@ def _to_node(self, host): return Node( id=host['id'], - name=host['hostname'], + name=host['fullyQualifiedDomainName'], state=state, public_ips=public_ips, private_ips=private_ips, driver=self, extra={ + 'hostname': host['hostname'], + 'fullyQualifiedDomainName': host['fullyQualifiedDomainName'], 'password': password, 'maxCpu': host.get('maxCpu', None), 'datacenter': host.get('datacenter', {}).get('longName', None), diff --git a/libcloud/test/compute/test_softlayer.py b/libcloud/test/compute/test_softlayer.py index 4d1d9f845e..74f4a3278b 100644 --- a/libcloud/test/compute/test_softlayer.py +++ b/libcloud/test/compute/test_softlayer.py @@ -43,7 +43,7 @@ def setUp(self): def test_list_nodes(self): nodes = self.driver.list_nodes() node = nodes[0] - self.assertEqual(node.name, 'libcloud-testing1') + self.assertEqual(node.name, 'libcloud-testing1.example.com') self.assertEqual(node.state, NodeState.RUNNING) self.assertEqual(node.extra['password'], 'L3TJVubf') @@ -73,7 +73,7 @@ def test_create_node(self): location=self.driver.list_locations()[0], size=self.driver.list_sizes()[0], image=self.driver.list_images()[0]) - self.assertEqual(node.name, 'libcloud-testing') + self.assertEqual(node.name, 'libcloud-testing.example.com') self.assertEqual(node.state, NODE_STATE_MAP['RUNNING']) def test_create_fail(self): From 5870925123b7738f976f7b56d198e97f210bba8d Mon Sep 17 00:00:00 2001 From: Jon Chen Date: Fri, 25 Apr 2014 19:02:51 -0400 Subject: [PATCH 032/315] Update Linode pricing. 
Closes #281 Signed-off-by: Tomaz Muraus --- libcloud/common/linode.py | 18 +- .../fixtures/linode/_avail_linodeplans.json | 257 +++++++++--------- .../compute/fixtures/linode/_linode_list.json | 6 +- libcloud/test/compute/test_linode.py | 2 +- 4 files changed, 137 insertions(+), 146 deletions(-) diff --git a/libcloud/common/linode.py b/libcloud/common/linode.py index 9d953f043a..f7ee22bdd2 100644 --- a/libcloud/common/linode.py +++ b/libcloud/common/linode.py @@ -31,15 +31,15 @@ API_HOST = 'api.linode.com' API_ROOT = '/' -# Constants that map a RAM figure to a PlanID (updated 6/28/10) -LINODE_PLAN_IDS = {1024: '1', - 2048: '3', - 4096: '5', - 8192: '6', - 16384: '7', - 24576: '8', - 32768: '9', - 40960: '10'} +# Constants that map a RAM figure to a PlanID (updated 4/25/14) +LINODE_PLAN_IDS = {2048: '1', + 4096: '3', + 8192: '5', + 16384: '6', + 32768: '7', + 49152: '8', + 65536: '9', + 98304: '11'} class LinodeException(Exception): diff --git a/libcloud/test/compute/fixtures/linode/_avail_linodeplans.json b/libcloud/test/compute/fixtures/linode/_avail_linodeplans.json index 4248425ff5..c492e626a6 100644 --- a/libcloud/test/compute/fixtures/linode/_avail_linodeplans.json +++ b/libcloud/test/compute/fixtures/linode/_avail_linodeplans.json @@ -1,134 +1,125 @@ { - "ERRORARRAY": [], - "DATA": [ - { - "PRICE": 20, - "RAM": 1024, - "XFER": 2000, - "PLANID": 1, - "LABEL": "Linode 1024", - "AVAIL": { - "2": 500, - "3": 500, - "4": 500, - "6": 500, - "7": 500, - "8": 500 - }, - "DISK": 48 - }, - { - "PRICE": 40, - "RAM": 2048, - "XFER": 4000, - "PLANID": 3, - "LABEL": "Linode 2048", - "AVAIL": { - "2": 500, - "3": 500, - "4": 500, - "6": 500, - "7": 500, - "8": 500 - }, - "DISK": 96 - }, - { - "PRICE": 80, - "RAM": 4096, - "XFER": 8000, - "PLANID": 5, - "LABEL": "Linode 4096", - "AVAIL": { - "2": 500, - "3": 500, - "4": 500, - "6": 500, - "7": 500, - "8": 500 - }, - "DISK": 192 - }, - { - "PRICE": 160, - "RAM": 8192, - "XFER": 16000, - "PLANID": 6, - "LABEL": "Linode 8192", 
- "AVAIL": { - "2": 500, - "3": 500, - "4": 500, - "6": 500, - "7": 500, - "8": 500 - }, - "DISK": 384 - }, - { - "PRICE": 320, - "RAM": 16384, - "XFER": 20000, - "PLANID": 7, - "LABEL": "Linode 16384", - "AVAIL": { - "2": 500, - "3": 500, - "4": 500, - "6": 500, - "7": 500, - "8": 500 - }, - "DISK": 768 - }, - { - "PRICE": 480, - "RAM": 24576, - "XFER": 20000, - "PLANID": 8, - "LABEL": "Linode 24576", - "AVAIL": { - "2": 500, - "3": 500, - "4": 500, - "6": 500, - "7": 500, - "8": 500 - }, - "DISK": 1152 - }, - { - "PRICE": 640, - "RAM": 32768, - "XFER": 20000, - "PLANID": 9, - "LABEL": "Linode 32768", - "AVAIL": { - "2": 500, - "3": 500, - "4": 500, - "6": 500, - "7": 500, - "8": 500 - }, - "DISK": 1536 - }, - { - "PRICE": 800, - "RAM": 40960, - "XFER": 20000, - "PLANID": 10, - "LABEL": "Linode 40960", - "AVAIL": { - "2": 500, - "3": 500, - "4": 500, - "6": 500, - "7": 500, - "8": 500 - }, - "DISK": 1920 - } - ], - "ACTION": "avail.linodeplans" -} \ No newline at end of file + "ERRORARRAY": [], + "DATA": [{ + "PRICE": 20.00, + "RAM": 2048, + "XFER": 3000, + "PLANID": 1, + "LABEL": "Linode 2048", + "AVAIL": { + "3": 500, + "2": 500, + "7": 500, + "6": 500, + "4": 500, + "8": 500 + }, + "DISK": 48 + }, { + "PRICE": 40.00, + "RAM": 4096, + "XFER": 4000, + "PLANID": 3, + "LABEL": "Linode 4096", + "AVAIL": { + "3": 500, + "2": 500, + "7": 500, + "6": 500, + "4": 500, + "8": 500 + }, + "DISK": 96 + }, { + "PRICE": 80.00, + "RAM": 8192, + "XFER": 8000, + "PLANID": 5, + "LABEL": "Linode 8192", + "AVAIL": { + "3": 500, + "2": 500, + "7": 500, + "6": 500, + "4": 500, + "8": 500 + }, + "DISK": 192 + }, { + "PRICE": 160.00, + "RAM": 16384, + "XFER": 16000, + "PLANID": 6, + "LABEL": "Linode 16384", + "AVAIL": { + "3": 500, + "2": 500, + "7": 500, + "6": 500, + "4": 500, + "8": 500 + }, + "DISK": 384 + }, { + "PRICE": 320.00, + "RAM": 32768, + "XFER": 20000, + "PLANID": 7, + "LABEL": "Linode 32768", + "AVAIL": { + "3": 500, + "2": 500, + "7": 500, + "6": 500, + "4": 500, + "8": 
500 + }, + "DISK": 768 + }, { + "PRICE": 480.00, + "RAM": 49152, + "XFER": 20000, + "PLANID": 8, + "LABEL": "Linode 49152", + "AVAIL": { + "3": 500, + "2": 500, + "7": 500, + "6": 500, + "4": 500, + "8": 500 + }, + "DISK": 1152 + }, { + "PRICE": 640.00, + "RAM": 65536, + "XFER": 20000, + "PLANID": 9, + "LABEL": "Linode 65536", + "AVAIL": { + "3": 500, + "2": 500, + "7": 500, + "6": 500, + "4": 500, + "8": 500 + }, + "DISK": 1536 + }, { + "PRICE": 960.00, + "RAM": 98304, + "XFER": 20000, + "PLANID": 11, + "LABEL": "Linode 98304", + "AVAIL": { + "3": 500, + "2": 500, + "7": 500, + "6": 500, + "4": 500, + "8": 500 + }, + "DISK": 1920 + }], + "ACTION": "avail.linodeplans" +} diff --git a/libcloud/test/compute/fixtures/linode/_linode_list.json b/libcloud/test/compute/fixtures/linode/_linode_list.json index 4f21714cf4..4baf897257 100644 --- a/libcloud/test/compute/fixtures/linode/_linode_list.json +++ b/libcloud/test/compute/fixtures/linode/_linode_list.json @@ -13,7 +13,7 @@ "STATUS": 1, "ALERT_DISKIO_ENABLED": 1, "CREATE_DT": "2012-05-04 19:31:30.0", - "TOTALHD": 98304, + "TOTALHD": 49152, "ALERT_BWQUOTA_THRESHOLD": 80, "TOTALRAM": 2048, "ALERT_BWIN_THRESHOLD": 5, @@ -22,11 +22,11 @@ "ALERT_BWOUT_ENABLED": 1, "BACKUPSENABLED": 1, "ALERT_CPU_THRESHOLD": 90, - "PLANID": "3", + "PLANID": "1", "BACKUPWEEKLYDAY": 0, "LABEL": "api-node3", "LPM_DISPLAYGROUP": "test", - "TOTALXFER": 4000 + "TOTALXFER": 3000 } ], "ACTION": "linode.list" diff --git a/libcloud/test/compute/test_linode.py b/libcloud/test/compute/test_linode.py index b3328787af..4c7c80f10c 100644 --- a/libcloud/test/compute/test_linode.py +++ b/libcloud/test/compute/test_linode.py @@ -43,7 +43,7 @@ def test_list_nodes(self): node = nodes[0] self.assertEqual(node.id, "8098") self.assertEqual(node.name, 'api-node3') - self.assertEqual(node.extra['PLANID'], '3') + self.assertEqual(node.extra['PLANID'], '1') self.assertTrue('75.127.96.245' in node.public_ips) self.assertEqual(node.private_ips, []) From 
71fffcbc7108c0f019aab6726392663f33468181 Mon Sep 17 00:00:00 2001 From: Lior Goikhburg Date: Mon, 21 Apr 2014 17:49:14 +0400 Subject: [PATCH 033/315] Add destination(target) instance tags support to GCE driver Closes #278 Signed-off-by: Tomaz Muraus --- libcloud/compute/drivers/gce.py | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/libcloud/compute/drivers/gce.py b/libcloud/compute/drivers/gce.py index c62957f7d5..690dd03f3b 100644 --- a/libcloud/compute/drivers/gce.py +++ b/libcloud/compute/drivers/gce.py @@ -168,13 +168,14 @@ def __repr__(self): class GCEFirewall(UuidMixin): """A GCE Firewall rule class.""" def __init__(self, id, name, allowed, network, source_ranges, source_tags, - driver, extra=None): + target_tags, driver, extra=None): self.id = str(id) self.name = name self.network = network self.allowed = allowed self.source_ranges = source_ranges self.source_tags = source_tags + self.target_tags = target_tags self.driver = driver self.extra = extra UuidMixin.__init__(self) @@ -986,7 +987,8 @@ def ex_create_healthcheck(self, name, host=None, path=None, port=None, return self.ex_get_healthcheck(name) def ex_create_firewall(self, name, allowed, network='default', - source_ranges=None, source_tags=None): + source_ranges=None, source_tags=None, + target_tags=None): """ Create a firewall on a network. @@ -1020,9 +1022,14 @@ def ex_create_firewall(self, name, allowed, network='default', ['0.0.0.0/0'] :type source_ranges: ``list`` of ``str`` - :keyword source_tags: A list of instance tags which the rules apply + :keyword source_tags: A list of source instance tags the rules apply + to. :type source_tags: ``list`` of ``str`` + :keyword target_tags: A list of target instance tags the rules apply + to. 
+ :type target_tags: ``list`` of ``str`` + :return: Firewall object :rtype: :class:`GCEFirewall` """ @@ -1038,6 +1045,8 @@ def ex_create_firewall(self, name, allowed, network='default', firewall_data['sourceRanges'] = source_ranges or ['0.0.0.0/0'] if source_tags is not None: firewall_data['sourceTags'] = source_tags + if target_tags is not None: + firewall_data['targetTags'] = target_tags request = '/global/firewalls' @@ -1519,6 +1528,8 @@ def ex_update_firewall(self, firewall): firewall_data['sourceRanges'] = firewall.source_ranges if firewall.source_tags: firewall_data['sourceTags'] = firewall.source_tags + if firewall.target_tags: + firewall_data['targetTags'] = firewall.target_tags if firewall.extra['description']: firewall_data['description'] = firewall.extra['description'] @@ -3008,11 +3019,13 @@ def _to_firewall(self, firewall): network = self.ex_get_network(extra['network_name']) source_ranges = firewall.get('sourceRanges') source_tags = firewall.get('sourceTags') + target_tags = firewall.get('targetTags') return GCEFirewall(id=firewall['id'], name=firewall['name'], allowed=firewall.get('allowed'), network=network, source_ranges=source_ranges, source_tags=source_tags, + target_tags=target_tags, driver=self, extra=extra) def _to_forwarding_rule(self, forwarding_rule): From 8c331063e2e313ec07d6608f4d35401cca69cbbe Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Sat, 26 Apr 2014 17:32:21 +0200 Subject: [PATCH 034/315] Update CHANGES. --- CHANGES.rst | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/CHANGES.rst b/CHANGES.rst index 35466b8584..7150322a77 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -91,6 +91,11 @@ Compute (GITHUB-280) [RoelVanNyen] +- Allow user to specify target tags using target_tags attribute when creating + a firewall rule in the GCE driver. 
+ (GITHUB-278) + [zerthimon] + Load Balancer ~~~~~~~~~~~~~ From d69adbd886965c7b176b9713cdad2f954d041c3c Mon Sep 17 00:00:00 2001 From: Alex Gaynor Date: Sat, 26 Apr 2014 10:37:42 -0700 Subject: [PATCH 035/315] Fixed many typos in the code and docs. Found using: https://github.com/intgr/topy Closes #282 Signed-off-by: Tomaz Muraus --- CHANGES.rst | 10 +++++----- demos/gce_lb_demo.py | 2 +- docs/compute/drivers/cloudsigma.rst | 2 +- docs/compute/drivers/gce.rst | 2 +- docs/development.rst | 2 +- docs/dns/drivers/hostvirtual.rst | 2 +- libcloud/__init__.py | 2 +- libcloud/common/base.py | 2 +- libcloud/common/google.py | 4 ++-- libcloud/compute/base.py | 8 ++++---- libcloud/compute/deployment.py | 4 ++-- libcloud/compute/drivers/abiquo.py | 2 +- libcloud/compute/drivers/cloudframes.py | 2 +- libcloud/compute/drivers/cloudsigma.py | 2 +- libcloud/compute/drivers/ec2.py | 4 ++-- libcloud/compute/drivers/elasticstack.py | 2 +- libcloud/compute/drivers/gce.py | 2 +- libcloud/compute/drivers/gogrid.py | 2 +- libcloud/compute/drivers/opsource.py | 2 +- libcloud/compute/ssh.py | 8 ++++---- libcloud/loadbalancer/drivers/rackspace.py | 2 +- libcloud/storage/base.py | 4 ++-- libcloud/storage/drivers/cloudfiles.py | 2 +- libcloud/storage/drivers/local.py | 2 +- libcloud/test/common/test_cloudstack.py | 2 +- libcloud/test/compute/test_ec2.py | 2 +- libcloud/test/compute/test_ibm_sce.py | 4 ++-- libcloud/test/test_connection.py | 2 +- libcloud/utils/dist.py | 2 +- libcloud/utils/networking.py | 2 +- 30 files changed, 45 insertions(+), 45 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index 7150322a77..1ba169f676 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -226,7 +226,7 @@ Compute authentication to not allow insecure connections (secure constructor kwarg being set to False) by default. - This way credentials can't accidentaly be sent in plain text over the + This way credentials can't accidentally be sent in plain text over the write. 
Affected drivers: Bluebox, Joyent, NephoScale, OpSource, VPSNet @@ -1609,7 +1609,7 @@ Storage CloudFiles driver. [Tomaz Muraus] -- Fix a bug with content_type and and encoding of object and path names in +- Fix a bug with content_type and encoding of object and path names in the Atmos driver. [Russell Keith-Magee] @@ -1787,7 +1787,7 @@ Compute Node extra dictionary. (LIBCLOUD-163) [Chris Gilmer] -- Alow users to use a list of tuples for the query string parameters inside +- Allow users to use a list of tuples for the query string parameters inside the OpenStack connection classes. This way same key can be specified multiple times (LIBCLOUD-153) [Dave King] @@ -2273,7 +2273,7 @@ Compute available [Tomaz Muraus] -- Fix chmod argument value which is pased to the sftpclient.put +- Fix chmod argument value which is passed to the sftpclient.put method; GITHUB-17 [John Carr] @@ -2437,7 +2437,7 @@ Changes with Apache Libcloud 0.4.2 (Released January 18, 2011) [Peter Herndon] - EC2 Driver availability zones, via ex_list_availability_zones; - list_locations rewrite to include availablity zones + list_locations rewrite to include availability zones [Tomaz Muraus] - EC2 Driver Idempotency capability in create_node; LIBCLOUD-69 diff --git a/demos/gce_lb_demo.py b/demos/gce_lb_demo.py index e977ac7b22..8d0fa5388f 100755 --- a/demos/gce_lb_demo.py +++ b/demos/gce_lb_demo.py @@ -96,7 +96,7 @@ def get_gce_driver(): def get_gcelb_driver(gce_driver=None): # The GCE Load Balancer driver uses the GCE Compute driver for all of its # API calls. You can either provide the driver directly, or provide the - # same authentication information so the the LB driver can get its own + # same authentication information so the LB driver can get its own # Compute driver. 
if gce_driver: driver = get_driver_lb(Provider_lb.GCE)(gce_driver=gce_driver) diff --git a/docs/compute/drivers/cloudsigma.rst b/docs/compute/drivers/cloudsigma.rst index 95d0864b8f..aa17f41429 100644 --- a/docs/compute/drivers/cloudsigma.rst +++ b/docs/compute/drivers/cloudsigma.rst @@ -217,7 +217,7 @@ server. This examples shows how to do that upon server creation. Add a tag to the server ~~~~~~~~~~~~~~~~~~~~~~~ -CloudSigma allows you to ogranize resources such as servers and drivers by +CloudSigma allows you to organize resources such as servers and drivers by tagging them. This example shows how to do that. .. literalinclude:: /examples/compute/cloudsigma/tag_server.py diff --git a/docs/compute/drivers/gce.rst b/docs/compute/drivers/gce.rst index dc76204053..a6b6b71b7e 100644 --- a/docs/compute/drivers/gce.rst +++ b/docs/compute/drivers/gce.rst @@ -13,7 +13,7 @@ It is a part of Google Cloud Platform. Google Compute Engine features: * High-performance virtual machines -* Minute-level billing (10-minute minumum) +* Minute-level billing (10-minute minimum) * Fast VM provisioning * Native Load Balancing diff --git a/docs/development.rst b/docs/development.rst index b3ef500b96..3bd9ea4c6c 100644 --- a/docs/development.rst +++ b/docs/development.rst @@ -443,7 +443,7 @@ Contributing Bigger Changes ~~~~~~~~~~~~~~~~~~~~~~~~~~~ If you are contributing a bigger change (e.g. large new feature or a new -provider driver) you need to have have signed Apache Individual Contributor +provider driver) you need to have signed Apache Individual Contributor License Agreement (ICLA) in order to have your patch accepted. 
You can find more information on how to sign and file an ICLA on the diff --git a/docs/dns/drivers/hostvirtual.rst b/docs/dns/drivers/hostvirtual.rst index 56e5e01219..fd1337e7b1 100644 --- a/docs/dns/drivers/hostvirtual.rst +++ b/docs/dns/drivers/hostvirtual.rst @@ -1,7 +1,7 @@ HostVirtual DNS Driver Documentation ==================================== -`Host Virtual`_ is is a cloud hosting provider that operates dual-stack IPv4 +`Host Virtual`_ is a cloud hosting provider that operates dual-stack IPv4 and IPv6 IaaS clouds in 15 locations worldwide. Instantiating a driver diff --git a/libcloud/__init__.py b/libcloud/__init__.py index 6a86bb37f8..04de4532b5 100644 --- a/libcloud/__init__.py +++ b/libcloud/__init__.py @@ -51,7 +51,7 @@ def _init_once(): """ Utility function that is ran once on Library import. - This checks for the LIBCLOUD_DEBUG enviroment variable, which if it exists + This checks for the LIBCLOUD_DEBUG environment variable, which if it exists is where we will log debug information about the provider transports. """ path = os.getenv('LIBCLOUD_DEBUG') diff --git a/libcloud/common/base.py b/libcloud/common/base.py index 8537c07815..5848c59c88 100644 --- a/libcloud/common/base.py +++ b/libcloud/common/base.py @@ -544,7 +544,7 @@ def user_agent_append(self, token): """ Append a token to a user agent string. - Users of the library should call this to uniquely identify thier + Users of the library should call this to uniquely identify their requests to a provider. :type token: ``str`` diff --git a/libcloud/common/google.py b/libcloud/common/google.py index a850751391..9d8000ce34 100644 --- a/libcloud/common/google.py +++ b/libcloud/common/google.py @@ -403,7 +403,7 @@ def __init__(self, user_id, key, *args, **kwargs): """ if SHA256 is None: raise GoogleAuthError('PyCrypto library required for ' - 'Service Accout Authentication.') + 'Service Account Authentication.') # Check to see if 'key' is a file and read the file if it is. 
keypath = os.path.expanduser(key) is_file_path = os.path.exists(keypath) and os.path.isfile(keypath) @@ -454,7 +454,7 @@ def refresh_token(self, token_info): Service Account authentication doesn't supply a "refresh token" so this simply gets a new token using the email address/key. - :param token_info: Dictionary contining token information. + :param token_info: Dictionary containing token information. (Not used, but here for compatibility) :type token_info: ``dict`` diff --git a/libcloud/compute/base.py b/libcloud/compute/base.py index df6d296dbf..6d24f3f5f7 100644 --- a/libcloud/compute/base.py +++ b/libcloud/compute/base.py @@ -751,7 +751,7 @@ def create_node(self, **kwargs): :type image: :class:`.NodeImage` :param location: Which data center to create a node in. If empty, - undefined behavoir will be selected. (optional) + undefined behavior will be selected. (optional) :type location: :class:`.NodeLocation` :param auth: Initial authentication information for the node @@ -810,7 +810,7 @@ def deploy_node(self, **kwargs): existing implementation should be able to handle most such. :param deploy: Deployment to run once machine is online and - availble to SSH. + available to SSH. :type deploy: :class:`Deployment` :param ssh_username: Optional name of the account which is used @@ -919,7 +919,7 @@ def deploy_node(self, **kwargs): e = sys.exc_info()[1] deploy_error = e else: - # Script sucesfully executed, don't try alternate username + # Script successfully executed, don't try alternate username deploy_error = None break @@ -991,7 +991,7 @@ def create_volume(self, size, name, location=None, snapshot=None): :type name: ``str`` :param location: Which data center to create a volume in. If - empty, undefined behavoir will be selected. + empty, undefined behavior will be selected. 
(optional) :type location: :class:`.NodeLocation` diff --git a/libcloud/compute/deployment.py b/libcloud/compute/deployment.py index 5cfc2c64f3..018c5f86dc 100644 --- a/libcloud/compute/deployment.py +++ b/libcloud/compute/deployment.py @@ -136,7 +136,7 @@ def __init__(self, script, args=None, name=None, delete=False): :type name: ``str`` :keyword name: Name of the script to upload it as, if not specified, - a random name will be choosen. + a random name will be chosen. :type delete: ``bool`` :keyword delete: Whether to delete the script on completion. @@ -210,7 +210,7 @@ def __init__(self, script_file, args=None, name=None, delete=False): :type name: ``str`` :keyword name: Name of the script to upload it as, if not specified, - a random name will be choosen. + a random name will be chosen. :type delete: ``bool`` :keyword delete: Whether to delete the script on completion. diff --git a/libcloud/compute/drivers/abiquo.py b/libcloud/compute/drivers/abiquo.py index ab74778e5c..edf2f25484 100644 --- a/libcloud/compute/drivers/abiquo.py +++ b/libcloud/compute/drivers/abiquo.py @@ -99,7 +99,7 @@ def create_node(self, **kwargs): :type image: :class:`NodeImage` :keyword location: Which data center to create a node in. If empty, - undefined behavoir will be selected. (optional) + undefined behavior will be selected. (optional) :type location: :class:`NodeLocation` :keyword group_name: Which group this node belongs to. 
If empty, diff --git a/libcloud/compute/drivers/cloudframes.py b/libcloud/compute/drivers/cloudframes.py index 68f74cc04e..9902f5dbb9 100644 --- a/libcloud/compute/drivers/cloudframes.py +++ b/libcloud/compute/drivers/cloudframes.py @@ -18,7 +18,7 @@ """ -# (name, ram, disk, bandwith, price, vcpus) +# (name, ram, disk, bandwidth, price, vcpus) SIZES = [ ('512mb_1core_10gb', 512, 10, 512, 0.025, 1), ('1024mb_1core_20gb', 1024, 20, 512, 0.05, 1), diff --git a/libcloud/compute/drivers/cloudsigma.py b/libcloud/compute/drivers/cloudsigma.py index 5d97434e4e..55e472f1e1 100644 --- a/libcloud/compute/drivers/cloudsigma.py +++ b/libcloud/compute/drivers/cloudsigma.py @@ -673,7 +673,7 @@ def __init__(self, http_code, error_type, error_msg, error_point, driver): :param error_msg: A description of the error that occurred. :type error_msg: ``str`` - :param error_point: Point at which the error occured. Can be None. + :param error_point: Point at which the error occurred. Can be None. :type error_point: ``str`` or ``None`` """ super(CloudSigmaError, self).__init__(http_code=http_code, diff --git a/libcloud/compute/drivers/ec2.py b/libcloud/compute/drivers/ec2.py index 6c5e10d556..692282ea47 100644 --- a/libcloud/compute/drivers/ec2.py +++ b/libcloud/compute/drivers/ec2.py @@ -1219,7 +1219,7 @@ def __repr__(self): class ElasticIP(object): """ - Represents information about an elastic IP adddress + Represents information about an elastic IP address :param ip: The elastic IP address :type ip: ``str`` @@ -2035,7 +2035,7 @@ def ex_list_security_groups(self): def ex_create_security_group(self, name, description, vpc_id=None): """ - Creates a new Security Group in EC2-Classic or a targetted VPC. + Creates a new Security Group in EC2-Classic or a targeted VPC. :param name: The name of the security group to Create. This must be unique. 
diff --git a/libcloud/compute/drivers/elasticstack.py b/libcloud/compute/drivers/elasticstack.py index cced69cd1e..da3863a3ef 100644 --- a/libcloud/compute/drivers/elasticstack.py +++ b/libcloud/compute/drivers/elasticstack.py @@ -254,7 +254,7 @@ def create_node(self, **kwargs): if nic_model not in ('e1000', 'rtl8139', 'virtio'): raise ElasticStackException('Invalid NIC model specified') - # check that drive size is not smaller then pre installed image size + # check that drive size is not smaller than pre installed image size # First we create a drive with the specified size drive_data = {} diff --git a/libcloud/compute/drivers/gce.py b/libcloud/compute/drivers/gce.py index 690dd03f3b..0b340b318e 100644 --- a/libcloud/compute/drivers/gce.py +++ b/libcloud/compute/drivers/gce.py @@ -592,7 +592,7 @@ def __init__(self, user_id, key, datacenter=None, project=None, def ex_list_addresses(self, region=None): """ - Return a list of static addreses for a region or all. + Return a list of static addresses for a region or all. :keyword region: The region to return addresses from. For example: 'us-central1'. If None, will return addresses from diff --git a/libcloud/compute/drivers/gogrid.py b/libcloud/compute/drivers/gogrid.py index f9c0259207..cc242527b2 100644 --- a/libcloud/compute/drivers/gogrid.py +++ b/libcloud/compute/drivers/gogrid.py @@ -258,7 +258,7 @@ def ex_create_node_nowait(self, **kwargs): """Don't block until GoGrid allocates id for a node but return right away with id == None. - The existance of this method is explained by the fact + The existence of this method is explained by the fact that GoGrid assigns id to a node only few minutes after creation. 
diff --git a/libcloud/compute/drivers/opsource.py b/libcloud/compute/drivers/opsource.py index 9d43ca45f8..d40155c0e2 100644 --- a/libcloud/compute/drivers/opsource.py +++ b/libcloud/compute/drivers/opsource.py @@ -36,7 +36,7 @@ # Roadmap / TODO: # # 0.1 - Basic functionality: create, delete, start, stop, reboot - servers -# (base OS images only, no customer images suported yet) +# (base OS images only, no customer images supported yet) # x implement list_nodes() # x implement create_node() (only support Base OS images, # no customer images yet) diff --git a/libcloud/compute/ssh.py b/libcloud/compute/ssh.py index b200238810..7fb1af2149 100644 --- a/libcloud/compute/ssh.py +++ b/libcloud/compute/ssh.py @@ -80,8 +80,8 @@ def connect(self): """ Connect to the remote node over SSH. - :return: True if the connection has been successfuly established, False - otherwise. + :return: True if the connection has been successfully established, + False otherwise. :rtype: ``bool`` """ raise NotImplementedError( @@ -116,7 +116,7 @@ def delete(self, path): :type path: ``str`` :keyword path: File path on the remote node. - :return: True if the file has been successfuly deleted, False + :return: True if the file has been successfully deleted, False otherwise. :rtype: ``bool`` """ @@ -139,7 +139,7 @@ def close(self): """ Shutdown connection to the remote node. - :return: True if the connection has been successfuly closed, False + :return: True if the connection has been successfully closed, False otherwise. :rtype: ``bool`` """ diff --git a/libcloud/loadbalancer/drivers/rackspace.py b/libcloud/loadbalancer/drivers/rackspace.py index 1807ed6af3..bf2a13c617 100644 --- a/libcloud/loadbalancer/drivers/rackspace.py +++ b/libcloud/loadbalancer/drivers/rackspace.py @@ -157,7 +157,7 @@ class RackspaceConnectionThrottle(object): before applying throttling. :type min_connections: ``int`` - :param max_connections: Maximum number of of connections per IP address. 
+ :param max_connections: Maximum number of connections per IP address. (Must be between 0 and 100000, 0 allows an unlimited number of connections.) :type max_connections: ``int`` diff --git a/libcloud/storage/base.py b/libcloud/storage/base.py index 136d70123e..f12b906409 100644 --- a/libcloud/storage/base.py +++ b/libcloud/storage/base.py @@ -667,7 +667,7 @@ def _upload_data(self, response, data, calculate_hash=True): :param data: Data to upload. :type calculate_hash: ``bool`` - :param calculate_hash: True to calculate hash of the transfered data. + :param calculate_hash: True to calculate hash of the transferred data. (defauls to True). :rtype: ``tuple`` @@ -713,7 +713,7 @@ def _stream_data(self, response, iterator, chunked=False, (defauls to False). :type calculate_hash: ``bool`` - :param calculate_hash: True to calculate hash of the transfered data. + :param calculate_hash: True to calculate hash of the transferred data. (defauls to True). :type chunk_size: ``int`` diff --git a/libcloud/storage/drivers/cloudfiles.py b/libcloud/storage/drivers/cloudfiles.py index ff3e676648..a828490934 100644 --- a/libcloud/storage/drivers/cloudfiles.py +++ b/libcloud/storage/drivers/cloudfiles.py @@ -790,7 +790,7 @@ def _encode_object_name(self, name): return name def _to_container_list(self, response): - # @TODO: Handle more then 10k containers - use "lazy list"? + # @TODO: Handle more than 10k containers - use "lazy list"? 
for container in response: extra = {'object_count': int(container['count']), 'size': int(container['bytes'])} diff --git a/libcloud/storage/drivers/local.py b/libcloud/storage/drivers/local.py index cfcf4108ab..5e326f902e 100644 --- a/libcloud/storage/drivers/local.py +++ b/libcloud/storage/drivers/local.py @@ -253,7 +253,7 @@ def get_container_cdn_url(self, container, check=False): :param container: Container instance :type container: :class:`Container` - :param check: Indicates if the path's existance must be checked + :param check: Indicates if the path's existence must be checked :type check: ``bool`` :return: A CDN URL for this container. diff --git a/libcloud/test/common/test_cloudstack.py b/libcloud/test/common/test_cloudstack.py index 5a86757c20..2412d5cd92 100644 --- a/libcloud/test/common/test_cloudstack.py +++ b/libcloud/test/common/test_cloudstack.py @@ -36,7 +36,7 @@ class CloudStackMockDriver(object): - host = 'nonexistant.' + host = 'nonexistent.' path = '/path' async_poll_frequency = 0 diff --git a/libcloud/test/compute/test_ec2.py b/libcloud/test/compute/test_ec2.py index 6d665457b5..9832408d39 100644 --- a/libcloud/test/compute/test_ec2.py +++ b/libcloud/test/compute/test_ec2.py @@ -849,7 +849,7 @@ def test_create_node_ex_security_groups(self): self.driver.create_node(name='foo', image=image, size=size, ex_security_groups=security_groups) - # Test old and new arguments are mutally exclusive + # Test old and new arguments are mutually exclusive self.assertRaises(ValueError, self.driver.create_node, name='foo', image=image, size=size, ex_securitygroup=security_groups, diff --git a/libcloud/test/compute/test_ibm_sce.py b/libcloud/test/compute/test_ibm_sce.py index b81416fe56..55071a43e8 100644 --- a/libcloud/test/compute/test_ibm_sce.py +++ b/libcloud/test/compute/test_ibm_sce.py @@ -127,7 +127,7 @@ def test_destroy_node(self): ret = self.driver.destroy_node(toDelete) self.assertTrue(ret) - # Delete non-existant node + # Delete non-existent node 
IBMMockHttp.type = 'DELETED' nodes = self.driver.list_nodes() # retrieves 2 nodes self.assertEqual(len(nodes), 2) @@ -313,7 +313,7 @@ def _computecloud_enterprise_api_rest_20100331_offerings_storage(self, method, u body = self.fixtures.load('list_storage_offerings.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - # This is only to accomodate the response tests built into test\__init__.py + # This is only to accommodate the response tests built into test\__init__.py def _computecloud_enterprise_api_rest_20100331_instances_26557(self, method, url, body, headers): if method == 'DELETE': body = self.fixtures.load('delete.xml') diff --git a/libcloud/test/test_connection.py b/libcloud/test/test_connection.py index ae09c7ae65..ed8999db1a 100644 --- a/libcloud/test/test_connection.py +++ b/libcloud/test/test_connection.py @@ -64,7 +64,7 @@ def test_content_length(self): self.assertTrue('Content-Length' not in call_kwargs['headers']) # 'a' as data, content length should be present (data in GET is not - # corect, but anyways) + # correct, but anyways) con.request('/test', method='GET', data='a') call_kwargs = con.connection.request.call_args[1] self.assertEqual(call_kwargs['headers']['Content-Length'], '1') diff --git a/libcloud/utils/dist.py b/libcloud/utils/dist.py index 3b9b8a7108..4477e2ae2d 100644 --- a/libcloud/utils/dist.py +++ b/libcloud/utils/dist.py @@ -20,7 +20,7 @@ import os import fnmatch -# Names that are exluded from globbing results: +# Names that are excluded from globbing results: EXCLUDE_NAMES = ['{arch}', 'CVS', '.cvsignore', '_darcs', 'RCS', 'SCCS', '.svn'] EXCLUDE_PATTERNS = ['*.py[cdo]', '*.s[ol]', '.#*', '*~', '*.py'] diff --git a/libcloud/utils/networking.py b/libcloud/utils/networking.py index d9678edb6c..f7dca9bbc5 100644 --- a/libcloud/utils/networking.py +++ b/libcloud/utils/networking.py @@ -62,7 +62,7 @@ def is_public_subnet(ip): def is_valid_ip_address(address, family=socket.AF_INET): """ - Check if the provided address is 
valid IPv4 or IPv6 adddress. + Check if the provided address is valid IPv4 or IPv6 address. :param address: IPv4 or IPv6 ddress to check. :type address: ``str`` From d2604aff4db0a4c6683e04550a94e9ee4c0d4274 Mon Sep 17 00:00:00 2001 From: Matthew Lehman Date: Mon, 14 Apr 2014 11:56:12 -0400 Subject: [PATCH 036/315] Added support for generic image management at Rackspace and EC2. Closes #277 Signed-off-by: Tomaz Muraus --- CHANGES.rst | 5 + .../_supported_methods_image_management.rst | 119 ++++++++++++++++++ libcloud/compute/base.py | 99 +++++++++++++-- libcloud/compute/drivers/ec2.py | 56 +++++++-- libcloud/compute/drivers/openstack.py | 38 +++++- libcloud/test/compute/test_ec2.py | 37 ++++-- libcloud/test/compute/test_openstack.py | 20 +-- 7 files changed, 322 insertions(+), 52 deletions(-) create mode 100644 docs/compute/_supported_methods_image_management.rst diff --git a/CHANGES.rst b/CHANGES.rst index 1ba169f676..89bf70e561 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -96,6 +96,11 @@ Compute (GITHUB-278) [zerthimon] +- Add new standard API for image management and initial implementation for the + EC2 and Rackspace driver. 
+ (GITHUB-277) + [Matt Lehman] + Load Balancer ~~~~~~~~~~~~~ diff --git a/docs/compute/_supported_methods_image_management.rst b/docs/compute/_supported_methods_image_management.rst new file mode 100644 index 0000000000..1e2d2c05da --- /dev/null +++ b/docs/compute/_supported_methods_image_management.rst @@ -0,0 +1,119 @@ +===================================== ============ ============= ============== ============= ============= +Provider create image delete image get image list images copy image +===================================== ============ ============= ============== ============= ============= +`Abiquo`_ no no no yes no +`Bluebox Blocks`_ no no no yes no +`Brightbox`_ no no no yes no +`CloudFrames`_ no no no yes no +`CloudSigma (API v2.0)`_ no no no yes no +`CloudStack`_ no no no yes no +`Digital Ocean`_ no no no yes no +`Dreamhost`_ no no no yes no +`Amazon EC2`_ yes yes yes yes yes +`Amazon EC2 (ap-northeast-1)`_ yes yes yes yes yes +`Amazon EC2 (ap-southeast-1)`_ yes yes yes yes yes +`Amazon EC2 (ap-southeast-2)`_ yes yes yes yes yes +`Amazon EC2 (eu-west-1)`_ yes yes yes yes yes +`Amazon EC2 (eu-west-1)`_ yes yes yes yes yes +`Amazon EC2 (sa-east-1)`_ yes yes yes yes yes +`Amazon EC2`_ yes yes yes yes yes +`Amazon EC2 (us-west-1)`_ yes yes yes yes yes +`Amazon EC2 (us-west-2)`_ yes yes yes yes yes +`Enomaly Elastic Computing Platform`_ no no no yes no +`ElasticHosts`_ no no no yes no +`ElasticHosts (syd-y)`_ no no no yes no +`ElasticHosts (tor-p)`_ no no no yes no +`ElasticHosts (cn-1)`_ no no no yes no +`ElasticHosts (lon-p)`_ no no no yes no +`ElasticHosts (lon-b)`_ no no no yes no +`ElasticHosts (sat-p)`_ no no no yes no +`ElasticHosts (lax-p)`_ no no no yes no +`ElasticHosts (sjc-c)`_ no no no yes no +`Eucalyptus`_ yes yes yes yes yes +`Exoscale`_ no no no no no +`Gandi`_ no no no yes no +`Google Compute Engine`_ no no no yes no +`GoGrid`_ no no no yes no +`HostVirtual`_ no no no yes no +`IBM SmartCloud Enterprise`_ no no no yes no +`Ikoula`_ no no 
no no no +`Joyent`_ no no no yes no +`KTUCloud`_ no no no yes no +`Libvirt`_ no no no no no +`Linode`_ no no no yes no +`NephoScale`_ no no no yes no +`Nimbus`_ yes yes yes yes yes +`Ninefold`_ no no no no no +`OpenNebula (v3.8)`_ no no no yes no +`OpenStack`_ yes yes yes yes no +`Opsource`_ no no no yes no +`Rackspace Cloud (Next Gen)`_ yes yes yes yes no +`Rackspace Cloud (First Gen)`_ yes yes yes yes no +`RimuHosting`_ no no no yes no +`ServerLove`_ no no no no no +`skalicloud`_ no no no no no +`SoftLayer`_ no no no yes no +`vCloud`_ no no no yes no +`VCL`_ no no no yes no +`vCloud`_ no no no yes no +`Voxel VoxCLOUD`_ no no no yes no +`vps.net`_ no no no yes no +===================================== ============ ============= ============== ============= ============= + +.. _`Abiquo`: http://www.abiquo.com/ +.. _`Bluebox Blocks`: http://bluebox.net +.. _`Brightbox`: http://www.brightbox.co.uk/ +.. _`CloudFrames`: http://www.cloudframes.net/ +.. _`CloudSigma (API v2.0)`: http://www.cloudsigma.com/ +.. _`CloudStack`: http://cloudstack.org/ +.. _`Digital Ocean`: https://www.digitalocean.com +.. _`Dreamhost`: http://dreamhost.com/ +.. _`Amazon EC2`: http://aws.amazon.com/ec2/ +.. _`Amazon EC2 (ap-northeast-1)`: http://aws.amazon.com/ec2/ +.. _`Amazon EC2 (ap-southeast-1)`: http://aws.amazon.com/ec2/ +.. _`Amazon EC2 (ap-southeast-2)`: http://aws.amazon.com/ec2/ +.. _`Amazon EC2 (eu-west-1)`: http://aws.amazon.com/ec2/ +.. _`Amazon EC2 (eu-west-1)`: http://aws.amazon.com/ec2/ +.. _`Amazon EC2 (sa-east-1)`: http://aws.amazon.com/ec2/ +.. _`Amazon EC2`: http://aws.amazon.com/ec2/ +.. _`Amazon EC2 (us-west-1)`: http://aws.amazon.com/ec2/ +.. _`Amazon EC2 (us-west-2)`: http://aws.amazon.com/ec2/ +.. _`Enomaly Elastic Computing Platform`: http://www.enomaly.com/ +.. _`ElasticHosts`: http://www.elastichosts.com/ +.. _`ElasticHosts (syd-y)`: http://www.elastichosts.com/ +.. _`ElasticHosts (tor-p)`: http://www.elastichosts.com/ +.. 
_`ElasticHosts (cn-1)`: http://www.elastichosts.com/ +.. _`ElasticHosts (lon-p)`: http://www.elastichosts.com/ +.. _`ElasticHosts (lon-b)`: http://www.elastichosts.com/ +.. _`ElasticHosts (sat-p)`: http://www.elastichosts.com/ +.. _`ElasticHosts (lax-p)`: http://www.elastichosts.com/ +.. _`ElasticHosts (sjc-c)`: http://www.elastichosts.com/ +.. _`Eucalyptus`: http://www.eucalyptus.com/ +.. _`Exoscale`: https://www.exoscale.ch/ +.. _`Gandi`: http://www.gandi.net/ +.. _`Google Compute Engine`: https://cloud.google.com/ +.. _`GoGrid`: http://www.gogrid.com/ +.. _`HostVirtual`: http://www.vr.org +.. _`IBM SmartCloud Enterprise`: http://ibm.com/services/us/en/cloud-enterprise/ +.. _`Ikoula`: http://express.ikoula.co.uk/cloudstack +.. _`Joyent`: http://www.joyentcloud.com +.. _`KTUCloud`: https://ucloudbiz.olleh.com/ +.. _`Libvirt`: http://libvirt.org/ +.. _`Linode`: http://www.linode.com/ +.. _`NephoScale`: http://www.nephoscale.com +.. _`Nimbus`: http://www.nimbusproject.org/ +.. _`Ninefold`: http://ninefold.com/ +.. _`OpenNebula (v3.8)`: http://opennebula.org/ +.. _`OpenStack`: http://openstack.org/ +.. _`Opsource`: http://www.opsource.net/ +.. _`Rackspace Cloud (Next Gen)`: http://www.rackspace.com +.. _`Rackspace Cloud (First Gen)`: http://www.rackspace.com +.. _`RimuHosting`: http://rimuhosting.com/ +.. _`ServerLove`: http://www.serverlove.com/ +.. _`skalicloud`: http://www.skalicloud.com/ +.. _`SoftLayer`: http://www.softlayer.com/ +.. _`vCloud`: http://www.vmware.com/products/vcloud/ +.. _`VCL`: http://incubator.apache.org/vcl/ +.. _`vCloud`: http://www.vmware.com/products/vcloud/ +.. _`Voxel VoxCLOUD`: http://www.voxel.net/ +.. 
_`vps.net`: http://vps.net/ \ No newline at end of file diff --git a/libcloud/compute/base.py b/libcloud/compute/base.py index 6d24f3f5f7..6c34186f40 100644 --- a/libcloud/compute/base.py +++ b/libcloud/compute/base.py @@ -645,19 +645,6 @@ def list_nodes(self): raise NotImplementedError( 'list_nodes not implemented for this driver') - def list_images(self, location=None): - """ - List images on a provider - - :param location: The location at which to list images - :type location: :class:`.NodeLocation` - - :return: list of node image objects - :rtype: ``list`` of :class:`.NodeImage` - """ - raise NotImplementedError( - 'list_images not implemented for this driver') - def list_sizes(self, location=None): """ List sizes on a provider @@ -1065,6 +1052,92 @@ def destroy_volume_snapshot(self, snapshot): raise NotImplementedError( 'destroy_volume_snapshot not implemented for this driver') + ## + # Image management methods + ## + + def list_images(self, location=None): + """ + List images on a provider. + + :param location: The location at which to list images. + :type location: :class:`.NodeLocation` + + :return: list of node image objects. + :rtype: ``list`` of :class:`.NodeImage` + """ + raise NotImplementedError( + 'list_images not implemented for this driver') + + def create_image(self, node, name, description=None): + """ + Creates an image from a node object. + + :param node: Node to run the task on. + :type node: :class:`.Node` + + :param name: name for new image. + :type name: ``str`` + + :param description: description for new image. + :type name: ``description`` + + :rtype: :class:`.NodeImage`: + :return: NodeImage instance on success. + + """ + raise NotImplementedError( + 'create_image not implemented for this driver') + + def delete_image(self, node_image): + """ + Deletes a node image from a provider. + + :param node_image: Node image object. + :type node_image: :class:`.NodeImage` + + :return: ``True`` if delete_image was successful, ``False`` otherwise. 
+ :rtype: ``bool`` + """ + + raise NotImplementedError( + 'delete_image not implemented for this driver') + + def get_image(self, image_id): + """ + Returns a single node image from a provider. + + :param image_id: Node to run the task on. + :type image_id: ``str`` + + :rtype :class:`.NodeImage`: + :return: NodeImage instance on success. + """ + raise NotImplementedError( + 'get_image not implemented for this driver') + + def copy_image(self, source_region, node_image, name, description=None): + """ + Copies an image from a source region to the current region. + + :param source_region: Region to copy the node from. + :type source_region: ``str`` + + :param node_image: NodeImage to copy. + :type node_image: :class`.NodeImage`: + + :param name: name for new image. + :type name: ``str`` + + :param description: description for new image. + :type name: ``str`` + + :rtype: :class:`.NodeImage`: + :return: NodeImage instance on success. + """ + raise NotImplementedError( + 'copy_image not implemented for this driver') + ## # SSH key pair management methods ## diff --git a/libcloud/compute/drivers/ec2.py b/libcloud/compute/drivers/ec2.py index 692282ea47..0a57b0bb1e 100644 --- a/libcloud/compute/drivers/ec2.py +++ b/libcloud/compute/drivers/ec2.py @@ -1329,6 +1329,7 @@ def list_images(self, location=None, ex_image_ids=None, ex_owner=None, ex_executableby=None): """ List all images + @inherits: :class:`NodeDriver.list_images` Ex_image_ids parameter is used to filter the list of images that should be returned. 
Only the images @@ -1376,6 +1377,22 @@ def list_images(self, location=None, ex_image_ids=None, ex_owner=None, ) return images + def get_image(self, image_id): + """ + Get an image based on a image_id + + :param image_id: Image identifier + :type image_id: ``str`` + + :return: A NodeImage object + :rtype: :class:`NodeImage` + + """ + images = self.list_images(ex_image_ids=[image_id]) + image = images[0] + + return image + def list_locations(self): locations = [] for index, availability_zone in \ @@ -1716,11 +1733,13 @@ def delete_key_pair(self, key_pair): namespace=NAMESPACE) return element == 'true' - def ex_copy_image(self, source_region, image, name=None, description=None): + def copy_image(self, image, source_region, name=None, description=None): """ Copy an Amazon Machine Image from the specified source region to the current region. + @inherits: :class:`NodeDriver.copy_image` + :param source_region: The region where the image resides :type source_region: ``str`` @@ -1751,11 +1770,13 @@ def ex_copy_image(self, source_region, image, name=None, description=None): return image - def ex_create_image_from_node(self, node, name, block_device_mapping, - reboot=False, description=None): + def create_image(self, node, name, description=None, reboot=False, + block_device_mapping=None): """ Create an Amazon Machine Image based off of an EBS-backed instance. + @inherits: :class:`NodeDriver.create_image` + :param node: Instance of ``Node`` :type node: :class: `Node` @@ -1768,8 +1789,9 @@ def ex_create_image_from_node(self, node, name, block_device_mapping, :type block_device_mapping: ``list`` of ``dict`` :param reboot: Whether or not to shutdown the instance before - creation. By default Amazon sets this to false - to ensure a clean image. + creation. Amazon calls this NoReboot and + sets it to false by default to ensure a + clean image. 
:type reboot: ``bool`` :param description: An optional description for the new image @@ -1794,19 +1816,29 @@ def ex_create_image_from_node(self, node, name, block_device_mapping, if description is not None: params['Description'] = description - params.update(self._get_block_device_mapping_params( - block_device_mapping)) + if block_device_mapping is not None: + params.update(self._get_block_device_mapping_params( + block_device_mapping)) image = self._to_image( self.connection.request(self.path, params=params).object) return image - def ex_destroy_image(self, image): - params = { - 'Action': 'DeregisterImage', - 'ImageId': image.id - } + def delete_image(self, image): + """ + Deletes an image at Amazon given a NodeImage object + + @inherits: :class:`NodeDriver.delete_image` + + :param image: Instance of ``NodeImage`` + :type image: :class: `NodeImage` + + :rtype: ``bool`` + """ + params = {'Action': 'DeregisterImage', + 'ImageId': image.id} + response = self.connection.request(self.path, params=params).object return self._get_boolean(response) diff --git a/libcloud/compute/drivers/openstack.py b/libcloud/compute/drivers/openstack.py index 398803046a..2aca45c024 100644 --- a/libcloud/compute/drivers/openstack.py +++ b/libcloud/compute/drivers/openstack.py @@ -287,6 +287,8 @@ def ex_get_volume(self, volumeId): def list_images(self, location=None, ex_only_active=True): """ + Lists all active images + @inherits: :class:`NodeDriver.list_images` :param ex_only_active: True if list only active @@ -296,6 +298,22 @@ def list_images(self, location=None, ex_only_active=True): return self._to_images( self.connection.request('/images/detail').object, ex_only_active) + def get_image(self, image_id): + """ + Get an image based on a image_id + + @inherits: :class:`NodeDriver.get_image` + + :param image_id: Image identifier + :type image_id: ``str`` + + :return: A NodeImage object + :rtype: :class:`NodeImage` + + """ + return self._to_image(self.connection.request( + '/images/%s' % 
(image_id,)).object['image']) + def list_sizes(self, location=None): return self._to_sizes( self.connection.request('/flavors/detail').object) @@ -912,9 +930,11 @@ def _to_absolute(el): return {"rate": rate, "absolute": absolute} - def ex_save_image(self, node, name): + def create_image(self, node, name, description=None, reboot=True): """Create an image for node. + @inherits: :class:`NodeDriver.create_image` + :param node: node to use as a base for image :type node: :class:`Node` @@ -935,9 +955,11 @@ def ex_save_image(self, node, name): self.connection.request("/images", method="POST", data=ET.tostring(image_elm)).object) - def ex_delete_image(self, image): + def delete_image(self, image): """Delete an image for node. + @inherits: :class:`NodeDriver.delete_image` + :param image: the image to be deleted :type image: :class:`NodeImage` @@ -1444,7 +1466,7 @@ def ex_revert_resize(self, node): resp = self._node_action(node, 'revertResize') return resp.status == httplib.ACCEPTED - def ex_save_image(self, node, name, metadata=None): + def create_image(self, node, name, metadata=None): """ Creates a new image. 
@@ -1465,7 +1487,7 @@ def ex_save_image(self, node, name, metadata=None): resp = self._node_action(node, 'createImage', name=name, **optional_params) image_id = self._extract_image_id_from_url(resp.headers['location']) - return self.ex_get_image(image_id=image_id) + return self.get_image(image_id=image_id) def ex_set_server_name(self, node, name): """ @@ -1927,10 +1949,12 @@ def ex_get_size(self, size_id): return self._to_size(self.connection.request( '/flavors/%s' % (size_id,)) .object['flavor']) - def ex_get_image(self, image_id): + def get_image(self, image_id): """ Get a NodeImage + @inherits: :class:`NodeDriver.get_image` + :param image_id: ID of the image which should be used :type image_id: ``str`` @@ -1939,10 +1963,12 @@ def ex_get_image(self, image_id): return self._to_image(self.connection.request( '/images/%s' % (image_id,)).object['image']) - def ex_delete_image(self, image): + def delete_image(self, image): """ Delete a NodeImage + @inherits: :class:`NodeDriver.delete_image` + :param image: image witch should be used :type image: :class:`NodeImage` diff --git a/libcloud/test/compute/test_ec2.py b/libcloud/test/compute/test_ec2.py index 9832408d39..64010f40d9 100644 --- a/libcloud/test/compute/test_ec2.py +++ b/libcloud/test/compute/test_ec2.py @@ -447,14 +447,21 @@ def test_list_images_with_executable_by(self): self.assertEqual(len(images), 2) - def test_ex_copy_image(self): + def test_get_image(self): + image = self.driver.get_image('ami-57ba933a') + self.assertEqual(image.id, 'ami-57ba933a') + self.assertEqual(image.name, 'Test Image') + self.assertEqual(image.extra['architecture'], 'x86_64') + self.assertEqual(len(image.extra['block_device_mapping']), 2) + + def test_copy_image(self): image = self.driver.list_images()[0] - resp = self.driver.ex_copy_image('us-east-1', image, - name='Faux Image', - description='Test Image Copy') + resp = self.driver.copy_image(image, 'us-east-1', + name='Faux Image', + description='Test Image Copy') 
self.assertEqual(resp.id, 'ami-4db38224') - def test_ex_create_image_from_node(self): + def test_create_image(self): node = self.driver.list_nodes()[0] mapping = [{'VirtualName': None, @@ -463,17 +470,25 @@ def test_ex_create_image_from_node(self): 'DeleteOnTermination': 'true'}, 'DeviceName': '/dev/sda1'}] - resp = self.driver.ex_create_image_from_node(node, - 'New Image', - mapping, - description='New EBS Image') + resp = self.driver.create_image(node, + 'New Image', + description='New EBS Image', + block_device_mapping=mapping) self.assertEqual(resp.id, 'ami-e9b38280') - def ex_destroy_image(self): + def test_create_image_no_mapping(self): + node = self.driver.list_nodes()[0] + + resp = self.driver.create_image(node, + 'New Image', + description='New EBS Image') + self.assertEqual(resp.id, 'ami-e9b38280') + + def delete_image(self): images = self.driver.list_images() image = images[0] - resp = self.driver.ex_destroy_image(image) + resp = self.driver.delete_image(image) self.assertTrue(resp) def ex_register_image(self): diff --git a/libcloud/test/compute/test_openstack.py b/libcloud/test/compute/test_openstack.py index ca379b28f8..ef5dfc7824 100644 --- a/libcloud/test/compute/test_openstack.py +++ b/libcloud/test/compute/test_openstack.py @@ -537,17 +537,17 @@ def test_ex_limits(self): self.assertTrue("rate" in limits) self.assertTrue("absolute" in limits) - def test_ex_save_image(self): + def test_create_image(self): node = Node(id=444222, name=None, state=None, public_ips=None, private_ips=None, driver=self.driver) - image = self.driver.ex_save_image(node, "imgtest") + image = self.driver.create_image(node, "imgtest") self.assertEqual(image.name, "imgtest") self.assertEqual(image.id, "12345") - def test_ex_delete_image(self): + def test_delete_image(self): image = NodeImage(id=333111, name='Ubuntu 8.10 (intrepid)', driver=self.driver) - ret = self.driver.ex_delete_image(image) + ret = self.driver.delete_image(image) self.assertTrue(ret) def 
test_ex_list_ip_addresses(self): @@ -1158,8 +1158,8 @@ def test_ex_revert_resize(self): e = sys.exc_info()[1] self.fail('An error was raised: ' + repr(e)) - def test_ex_save_image(self): - image = self.driver.ex_save_image(self.node, 'new_image') + def test_create_image(self): + image = self.driver.create_image(self.node, 'new_image') self.assertEqual(image.name, 'new_image') self.assertEqual(image.id, '4949f9ee-2421-4c81-8b49-13119446008b') @@ -1214,19 +1214,19 @@ def test_ex_get_size(self): self.assertEqual(size.id, size_id) self.assertEqual(size.name, '15.5GB slice') - def test_ex_get_image(self): + def test_get_image(self): image_id = '13' - image = self.driver.ex_get_image(image_id) + image = self.driver.get_image(image_id) self.assertEqual(image.id, image_id) self.assertEqual(image.name, 'Windows 2008 SP2 x86 (B24)') self.assertEqual(image.extra['serverId'], None) self.assertEqual(image.extra['minDisk'], "5") self.assertEqual(image.extra['minRam'], "256") - def test_ex_delete_image(self): + def test_delete_image(self): image = NodeImage( id='26365521-8c62-11f9-2c33-283d153ecc3a', name='My Backup', driver=self.driver) - result = self.driver.ex_delete_image(image) + result = self.driver.delete_image(image) self.assertTrue(result) def test_extract_image_id_from_url(self): From 08d7d95dbe5e2d545b1d66a2744ae6adf4226a16 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Mon, 28 Apr 2014 14:07:19 +0200 Subject: [PATCH 037/315] Update script to support new image manegement API and re-generate the file. 
--- .../generate_provider_feature_matrix_table.py | 15 ++- .../_supported_methods_image_management.rst | 124 +++++++++--------- 2 files changed, 75 insertions(+), 64 deletions(-) diff --git a/contrib/generate_provider_feature_matrix_table.py b/contrib/generate_provider_feature_matrix_table.py index 6d93f7ac3d..54ab7fedd7 100755 --- a/contrib/generate_provider_feature_matrix_table.py +++ b/contrib/generate_provider_feature_matrix_table.py @@ -47,6 +47,8 @@ 'compute_main': ['list_nodes', 'create_node', 'reboot_node', 'destroy_node', 'list_images', 'list_sizes', 'deploy_node'], + 'compute_image_management': ['list_images', 'get_image', + 'create_image', 'delete_image', 'copy_image'], 'compute_block_storage': ['list_volumes', 'create_volume', 'destroy_volume', 'attach_volume', 'detach_volume', @@ -82,6 +84,13 @@ 'list_sizes': 'list sizes', 'deploy_node': 'deploy node' }, + 'compute_image_management': { + 'list_images': 'list images', + 'get_image': 'get image', + 'create_image': 'create image', + 'copy_image': 'copy image', + 'delete_image': 'delete image' + }, 'compute_block_storage': { 'list_volumes': 'list volumes', 'create_volume': 'create volume', @@ -156,8 +165,8 @@ def get_provider_api_names(Provider): def generate_providers_table(api): result = {} - if api in ['compute_main', 'compute_block_storage', - 'compute_key_pair_management']: + if api in ['compute_main', 'compute_image_management', + 'compute_block_storage', 'compute_key_pair_management']: driver = NodeDriver drivers = COMPUTE_DRIVERS provider = ComputeProvider @@ -354,6 +363,8 @@ def generate_tables(): if api == 'compute_main': file_name_2 = '_supported_methods_main.rst' + elif api == 'compute_image_management': + file_name_2 = '_supported_methods_image_management.rst' elif api == 'compute_block_storage': file_name_2 = '_supported_methods_block_storage.rst' elif api == 'compute_key_pair_management': diff --git a/docs/compute/_supported_methods_image_management.rst 
b/docs/compute/_supported_methods_image_management.rst index 1e2d2c05da..787d8586e0 100644 --- a/docs/compute/_supported_methods_image_management.rst +++ b/docs/compute/_supported_methods_image_management.rst @@ -1,64 +1,64 @@ -===================================== ============ ============= ============== ============= ============= -Provider create image delete image get image list images copy image -===================================== ============ ============= ============== ============= ============= -`Abiquo`_ no no no yes no -`Bluebox Blocks`_ no no no yes no -`Brightbox`_ no no no yes no -`CloudFrames`_ no no no yes no -`CloudSigma (API v2.0)`_ no no no yes no -`CloudStack`_ no no no yes no -`Digital Ocean`_ no no no yes no -`Dreamhost`_ no no no yes no -`Amazon EC2`_ yes yes yes yes yes -`Amazon EC2 (ap-northeast-1)`_ yes yes yes yes yes -`Amazon EC2 (ap-southeast-1)`_ yes yes yes yes yes -`Amazon EC2 (ap-southeast-2)`_ yes yes yes yes yes -`Amazon EC2 (eu-west-1)`_ yes yes yes yes yes -`Amazon EC2 (eu-west-1)`_ yes yes yes yes yes -`Amazon EC2 (sa-east-1)`_ yes yes yes yes yes -`Amazon EC2`_ yes yes yes yes yes -`Amazon EC2 (us-west-1)`_ yes yes yes yes yes -`Amazon EC2 (us-west-2)`_ yes yes yes yes yes -`Enomaly Elastic Computing Platform`_ no no no yes no -`ElasticHosts`_ no no no yes no -`ElasticHosts (syd-y)`_ no no no yes no -`ElasticHosts (tor-p)`_ no no no yes no -`ElasticHosts (cn-1)`_ no no no yes no -`ElasticHosts (lon-p)`_ no no no yes no -`ElasticHosts (lon-b)`_ no no no yes no -`ElasticHosts (sat-p)`_ no no no yes no -`ElasticHosts (lax-p)`_ no no no yes no -`ElasticHosts (sjc-c)`_ no no no yes no -`Eucalyptus`_ yes yes yes yes yes -`Exoscale`_ no no no no no -`Gandi`_ no no no yes no -`Google Compute Engine`_ no no no yes no -`GoGrid`_ no no no yes no -`HostVirtual`_ no no no yes no -`IBM SmartCloud Enterprise`_ no no no yes no -`Ikoula`_ no no no no no -`Joyent`_ no no no yes no -`KTUCloud`_ no no no yes no -`Libvirt`_ no no no no no 
-`Linode`_ no no no yes no -`NephoScale`_ no no no yes no -`Nimbus`_ yes yes yes yes yes -`Ninefold`_ no no no no no -`OpenNebula (v3.8)`_ no no no yes no -`OpenStack`_ yes yes yes yes no -`Opsource`_ no no no yes no -`Rackspace Cloud (Next Gen)`_ yes yes yes yes no -`Rackspace Cloud (First Gen)`_ yes yes yes yes no -`RimuHosting`_ no no no yes no -`ServerLove`_ no no no no no -`skalicloud`_ no no no no no -`SoftLayer`_ no no no yes no -`vCloud`_ no no no yes no -`VCL`_ no no no yes no -`vCloud`_ no no no yes no -`Voxel VoxCLOUD`_ no no no yes no -`vps.net`_ no no no yes no -===================================== ============ ============= ============== ============= ============= +===================================== =========== ========= ============ ============ ========== +Provider list images get image create image delete image copy image +===================================== =========== ========= ============ ============ ========== +`Abiquo`_ yes no no no no +`Bluebox Blocks`_ yes no no no no +`Brightbox`_ yes no no no no +`CloudFrames`_ yes no no no no +`CloudSigma (API v2.0)`_ yes no no no no +`CloudStack`_ yes no no no no +`Digital Ocean`_ yes no no no no +`Dreamhost`_ yes no no no no +`Amazon EC2`_ yes yes yes yes yes +`Amazon EC2 (ap-northeast-1)`_ yes yes yes yes yes +`Amazon EC2 (ap-southeast-1)`_ yes yes yes yes yes +`Amazon EC2 (ap-southeast-2)`_ yes yes yes yes yes +`Amazon EC2 (eu-west-1)`_ yes yes yes yes yes +`Amazon EC2 (eu-west-1)`_ yes yes yes yes yes +`Amazon EC2 (sa-east-1)`_ yes yes yes yes yes +`Amazon EC2`_ yes yes yes yes yes +`Amazon EC2 (us-west-1)`_ yes yes yes yes yes +`Amazon EC2 (us-west-2)`_ yes yes yes yes yes +`Enomaly Elastic Computing Platform`_ yes no no no no +`ElasticHosts`_ yes no no no no +`ElasticHosts (syd-y)`_ yes no no no no +`ElasticHosts (tor-p)`_ yes no no no no +`ElasticHosts (cn-1)`_ yes no no no no +`ElasticHosts (lon-p)`_ yes no no no no +`ElasticHosts (lon-b)`_ yes no no no no +`ElasticHosts (sat-p)`_ yes 
no no no no +`ElasticHosts (lax-p)`_ yes no no no no +`ElasticHosts (sjc-c)`_ yes no no no no +`Eucalyptus`_ yes yes yes yes yes +`Exoscale`_ yes no no no no +`Gandi`_ yes no no no no +`Google Compute Engine`_ yes no no no no +`GoGrid`_ yes no no no no +`HostVirtual`_ yes no no no no +`IBM SmartCloud Enterprise`_ yes no yes no no +`Ikoula`_ yes no no no no +`Joyent`_ yes no no no no +`KTUCloud`_ yes no no no no +`Libvirt`_ no no no no no +`Linode`_ yes no no no no +`NephoScale`_ yes no no no no +`Nimbus`_ yes yes yes yes yes +`Ninefold`_ yes no no no no +`OpenNebula (v3.8)`_ yes no no no no +`OpenStack`_ yes yes no no no +`Opsource`_ yes no no no no +`Rackspace Cloud (Next Gen)`_ yes yes yes yes no +`Rackspace Cloud (First Gen)`_ yes yes yes yes no +`RimuHosting`_ yes no no no no +`ServerLove`_ yes no no no no +`skalicloud`_ yes no no no no +`SoftLayer`_ yes no no no no +`vCloud`_ yes no no no no +`VCL`_ yes no no no no +`vCloud`_ yes no no no no +`Voxel VoxCLOUD`_ yes no no no no +`vps.net`_ yes no no no no +===================================== =========== ========= ============ ============ ========== .. _`Abiquo`: http://www.abiquo.com/ .. _`Bluebox Blocks`: http://bluebox.net @@ -116,4 +116,4 @@ Provider create image delete image get image .. _`VCL`: http://incubator.apache.org/vcl/ .. _`vCloud`: http://www.vmware.com/products/vcloud/ .. _`Voxel VoxCLOUD`: http://www.voxel.net/ -.. _`vps.net`: http://vps.net/ \ No newline at end of file +.. _`vps.net`: http://vps.net/ From 865431320488b4a425171bf618ede7937df6549e Mon Sep 17 00:00:00 2001 From: Franck Cuny Date: Wed, 26 Mar 2014 09:29:34 -0700 Subject: [PATCH 038/315] Add support for Google DNS API. Google recently added a new API for DNS management. This new driver implement simple operations to create and view zones and records. 
Closes #269 Signed-off-by: Tomaz Muraus --- CHANGES.rst | 4 + libcloud/dns/drivers/google.py | 343 ++++++++++++++++++ libcloud/dns/providers.py | 2 +- libcloud/dns/types.py | 1 + .../google/get_zone_does_not_exists.json | 13 + .../dns/fixtures/google/managed_zones_1.json | 1 + .../test/dns/fixtures/google/no_record.json | 5 + libcloud/test/dns/fixtures/google/record.json | 17 + .../dns/fixtures/google/records_list.json | 1 + .../test/dns/fixtures/google/zone_create.json | 1 + .../test/dns/fixtures/google/zone_list.json | 1 + libcloud/test/dns/test_google.py | 179 +++++++++ libcloud/test/secrets.py-dist | 2 + 13 files changed, 569 insertions(+), 1 deletion(-) create mode 100644 libcloud/dns/drivers/google.py create mode 100644 libcloud/test/dns/fixtures/google/get_zone_does_not_exists.json create mode 100644 libcloud/test/dns/fixtures/google/managed_zones_1.json create mode 100644 libcloud/test/dns/fixtures/google/no_record.json create mode 100644 libcloud/test/dns/fixtures/google/record.json create mode 100644 libcloud/test/dns/fixtures/google/records_list.json create mode 100644 libcloud/test/dns/fixtures/google/zone_create.json create mode 100644 libcloud/test/dns/fixtures/google/zone_list.json create mode 100644 libcloud/test/dns/test_google.py diff --git a/CHANGES.rst b/CHANGES.rst index 89bf70e561..382c9873a2 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -119,6 +119,10 @@ DNS user to create a record with multiple values with a single call. [Tomaz Muraus] +- Add new driver for Google DNS. + (GITHUB-269) + [Franck Cuny] + Changes with Apache Libcloud 0.14.1 ----------------------------------- diff --git a/libcloud/dns/drivers/google.py b/libcloud/dns/drivers/google.py new file mode 100644 index 0000000000..aabc783098 --- /dev/null +++ b/libcloud/dns/drivers/google.py @@ -0,0 +1,343 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. 
See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +__all__ = [ + 'GoogleDNSDriver' +] + +API_VERSION = 'v1beta1' + +import re +from libcloud.common.google import GoogleResponse, GoogleBaseConnection +from libcloud.common.google import ResourceNotFoundError +from libcloud.dns.types import Provider, RecordType +from libcloud.dns.types import ZoneDoesNotExistError, RecordDoesNotExistError +from libcloud.dns.base import DNSDriver, Zone, Record + + +class GoogleDNSResponse(GoogleResponse): + pass + + +class GoogleDNSConnection(GoogleBaseConnection): + host = "www.googleapis.com" + responseCls = GoogleDNSResponse + + def __init__(self, user_id, key, secure, auth_type=None, + credential_file=None, project=None): + self.scope = [ + 'https://www.googleapis.com/auth/ndev.clouddns.readwrite' + ] + super(GoogleDNSConnection, self).\ + __init__(user_id, key, secure=secure, auth_type=auth_type, + credential_file=credential_file) + self.request_path = '/dns/%s/projects/%s' % (API_VERSION, project) + + +class GoogleDNSDriver(DNSDriver): + type = Provider.GOOGLE + name = 'Google DNS' + connectionCls = GoogleDNSConnection + website = 'https://cloud.google.com/' + + RECORD_TYPE_MAP = { + RecordType.A: 'A', + RecordType.AAAA: 'AAAA', + RecordType.CNAME: 'CNAME', + RecordType.MX: 'MX', + RecordType.NS: 'NS', + RecordType.PTR: 'PTR', + RecordType.SOA: 'SOA', + 
RecordType.SPF: 'SPF', + RecordType.SRV: 'SRV', + RecordType.TXT: 'TXT', + } + + def __init__(self, user_id, key, project=None, auth_type=None): + self.auth_type = auth_type + self.project = project + if not self.project: + raise ValueError('Project name must be specified using ' + '"project" keyword.') + super(GoogleDNSDriver, self).__init__(user_id, key) + + def iterate_zones(self): + """ + Return a generator to iterate over available zones. + + :rtype: ``generator`` of :class:`Zone` + """ + return self._get_more('zones') + + def iterate_records(self, zone): + """ + Return a generator to iterate over records for the provided zone. + + :param zone: Zone to list records for. + :type zone: :class:`Zone` + + :rtype: ``generator`` of :class:`Record` + """ + return self._get_more('records', zone=zone) + + def get_zone(self, zone_id): + """ + Return a Zone instance. + + :param zone_id: ID of the required zone + :type zone_id: ``str`` + + :rtype: :class:`Zone` + """ + request = '/managedZones/%s' % (zone_id) + + try: + response = self.connection.request(request, method='GET').object + except ResourceNotFoundError: + raise ZoneDoesNotExistError(value='', + driver=self.connection.driver, + zone_id=zone_id) + + return self._to_zone(response) + + def get_record(self, zone_id, record_id): + """ + Return a Record instance. 
+ + :param zone_id: ID of the required zone + :type zone_id: ``str`` + + :param record_id: ID of the required record + :type record_id: ``str`` + + :rtype: :class:`Record` + """ + (record_name, record_type) = record_id.split('-') + + params = { + 'name': record_name, + 'type': record_type, + } + + request = '/managedZones/%s/rrsets' % (zone_id) + + try: + response = self.connection.request(request, method='GET', + params=params).object + except ResourceNotFoundError: + raise ZoneDoesNotExistError(value='', + driver=self.connection.driver, + zone_id=zone_id) + + if len(response['rrsets']) > 0: + return self._to_record(response['rrsets'][0], zone_id) + + raise RecordDoesNotExistError(value='', driver=self.connection.driver, + record_id=record_id) + + def create_zone(self, domain, type='master', ttl=None, extra=None): + """ + Create a new zone. + + :param domain: Zone domain name (e.g. example.com.) with a \'.\' + at the end. + :type domain: ``str`` + + :param type: Zone type (master is the only one supported). + :type type: ``str`` + + :param ttl: TTL for new records. (unused) + :type ttl: ``int`` + + :param extra: Extra attributes (driver specific). (optional) + :type extra: ``dict`` + + :rtype: :class:`Zone` + """ + name = None + description = '' + + if extra: + description = extra.get('description') + name = extra.get('name') + + if name is None: + name = self._cleanup_domain(domain) + + data = { + 'dnsName': domain, + 'name': name, + 'description': description, + } + + request = '/managedZones' + response = self.connection.request(request, method='POST', + data=data).object + return self._to_zone(response) + + def create_record(self, name, zone, type, data, extra=None): + """ + Create a new record. + + :param name: Record name fully qualified, with a \'.\' at the end. + :type name: ``str`` + + :param zone: Zone where the requested record is created. + :type zone: :class:`Zone` + + :param type: DNS record type (A, AAAA, ...). 
+ :type type: :class:`RecordType` + + :param data: Data for the record (depends on the record type). + :type data: ``str`` + + :param extra: Extra attributes. (optional) + :type extra: ``dict`` + + :rtype: :class:`Record` + """ + ttl = data.get('ttl', None) + rrdatas = data.get('rrdatas', []) + + data = { + 'additions': [ + { + 'name': name, + 'type': type, + 'ttl': int(ttl), + 'rrdatas': rrdatas, + } + ] + } + request = '/managedZones/%s/changes' % (zone.id) + response = self.connection.request(request, method='POST', + data=data).object + return self._to_record(response['additions'][0], zone) + + def delete_zone(self, zone): + """ + Delete a zone. + + Note: This will delete all the records belonging to this zone. + + :param zone: Zone to delete. + :type zone: :class:`Zone` + + :rtype: ``bool`` + """ + request = '/managedZones/%s' % (zone.id) + response = self.connection.request(request, method='DELETE') + return response.success() + + def delete_record(self, record): + """ + Delete a record. + + :param record: Record to delete. 
+ :type record: :class:`Record` + + :rtype: ``bool`` + """ + data = { + 'deletions': [ + { + 'name': record.name, + 'type': record.type, + 'rrdatas': record.data['rrdatas'], + 'ttl': record.data['ttl'] + } + ] + } + request = '/managedZones/%s/changes' % (record.zone.id) + response = self.connection.request(request, method='POST', + data=data) + return response.success() + + def _get_more(self, rtype, **kwargs): + last_key = None + exhausted = False + while not exhausted: + items, last_key, exhausted = self._get_data(rtype, last_key, + **kwargs) + for item in items: + yield item + + def _get_data(self, rtype, last_key, **kwargs): + params = {} + + if last_key: + params['pageToken'] = last_key + + if rtype == 'zones': + request = '/managedZones' + transform_func = self._to_zones + r_key = 'managedZones' + elif rtype == 'records': + zone = kwargs['zone'] + request = '/managedZones/%s/rrsets' % (zone.id) + transform_func = self._to_records + r_key = 'rrsets' + + response = self.connection.request(request, method='GET', + params=params,) + + if response.success(): + nextpage = response.object.get('nextPageToken', None) + items = transform_func(response.object.get(r_key), **kwargs) + exhausted = False if nextpage is not None else True + return items, nextpage, exhausted + else: + return [], None, True + + def _ex_connection_class_kwargs(self): + return {'auth_type': self.auth_type, + 'project': self.project} + + def _to_zones(self, response): + zones = [] + for r in response: + zones.append(self._to_zone(r)) + return zones + + def _to_zone(self, r): + extra = {} + + if 'description' in r: + extra['description'] = r.get('description') + + extra['creationTime'] = r.get('creationTime') + extra['nameServers'] = r.get('nameServers') + + return Zone(id=r['id'], domain=r['dnsName'], + type='master', ttl=0, driver=self, extra=extra) + + def _to_records(self, response, zone): + records = [] + for r in response: + records.append(self._to_record(r, zone)) + return records + + def 
_to_record(self, r, zone): + record_id = '%s-%s' % (r['name'], r['type']) + return Record(id=record_id, name=r['name'], + type=r['type'], data=r, zone=zone, + driver=self, extra={}) + + def _cleanup_domain(self, domain): + # name can only contain lower case alphanumeric characters and hyphens + domain = re.sub(r'[^a-zA-Z0-9-]', '-', domain) + if domain[-1] == '-': + domain = domain[:-1] + return domain diff --git a/libcloud/dns/providers.py b/libcloud/dns/providers.py index 53aa724f9e..64483a45ab 100644 --- a/libcloud/dns/providers.py +++ b/libcloud/dns/providers.py @@ -32,7 +32,7 @@ ('libcloud.dns.drivers.route53', 'Route53DNSDriver'), Provider.GANDI: ('libcloud.dns.drivers.gandi', 'GandiDNSDriver'), - + Provider.GOOGLE: ('libcloud.dns.drivers.google', 'GoogleDNSDriver'), # Deprecated Provider.RACKSPACE_US: ('libcloud.dns.drivers.rackspace', 'RackspaceUSDNSDriver'), diff --git a/libcloud/dns/types.py b/libcloud/dns/types.py index a08f1b0215..3b3a79e60f 100644 --- a/libcloud/dns/types.py +++ b/libcloud/dns/types.py @@ -35,6 +35,7 @@ class Provider(object): ROUTE53 = 'route53' HOSTVIRTUAL = 'hostvirtual' GANDI = 'gandi' + GOOGLE = 'google' # Deprecated RACKSPACE_US = 'rackspace_us' diff --git a/libcloud/test/dns/fixtures/google/get_zone_does_not_exists.json b/libcloud/test/dns/fixtures/google/get_zone_does_not_exists.json new file mode 100644 index 0000000000..ddd959d4bc --- /dev/null +++ b/libcloud/test/dns/fixtures/google/get_zone_does_not_exists.json @@ -0,0 +1,13 @@ +{ + "error": { + "errors": [ + { + "domain": "global", + "reason": "notFound", + "message": "The 'parameters.managedZone' resource named '2' does not exist." + } + ], + "code": 404, + "message": "The 'parameters.managedZone' resource named '2' does not exist." 
+ } +} diff --git a/libcloud/test/dns/fixtures/google/managed_zones_1.json b/libcloud/test/dns/fixtures/google/managed_zones_1.json new file mode 100644 index 0000000000..240eb10a2d --- /dev/null +++ b/libcloud/test/dns/fixtures/google/managed_zones_1.json @@ -0,0 +1 @@ +{"kind": "dns#managedZone", "name": "example-com", "nameServers": ["ns-cloud1.googledomains.com.", "ns-cloud2.googledomains.com.", "ns-cloud3.googledomains.com.", "ns-cloud4.googledomains.com."], "creationTime": "2014-03-29T23:06:00.921Z", "dnsName": "example.com.", "id": "1", "description": ""} diff --git a/libcloud/test/dns/fixtures/google/no_record.json b/libcloud/test/dns/fixtures/google/no_record.json new file mode 100644 index 0000000000..f23c44394e --- /dev/null +++ b/libcloud/test/dns/fixtures/google/no_record.json @@ -0,0 +1,5 @@ +{ + "kind": "dns#resourceRecordSetsListResponse", + "rrsets": [ + ] +} diff --git a/libcloud/test/dns/fixtures/google/record.json b/libcloud/test/dns/fixtures/google/record.json new file mode 100644 index 0000000000..c1bb80738f --- /dev/null +++ b/libcloud/test/dns/fixtures/google/record.json @@ -0,0 +1,17 @@ +{ + "kind": "dns#resourceRecordSetsListResponse", + "rrsets": [ + { + "kind": "dns#resourceRecordSet", + "name": "foo.example.com.", + "type": "A", + "ttl": 21600, + "rrdatas": [ + "ns-cloud-c1.googledomains.com.", + "ns-cloud-c2.googledomains.com.", + "ns-cloud-c3.googledomains.com.", + "ns-cloud-c4.googledomains.com." 
+ ] + } + ] +} diff --git a/libcloud/test/dns/fixtures/google/records_list.json b/libcloud/test/dns/fixtures/google/records_list.json new file mode 100644 index 0000000000..3c3f2b1b38 --- /dev/null +++ b/libcloud/test/dns/fixtures/google/records_list.json @@ -0,0 +1 @@ +{"rrsets": [{"rrdatas": ["ns-cloud-d1.googledomains.com.", "ns-cloud-d2.googledomains.com.", "ns-cloud-d3.googledomains.com.", "ns-cloud-d4.googledomains.com."], "kind": "dns#resourceRecordSet", "type": "NS", "name": "example.com.", "ttl": 21600}, {"rrdatas": ["ns-cloud-d1.googledomains.com. dns-admin.google.com. 0 21600 3600 1209600 300"], "kind": "dns#resourceRecordSet", "type": "SOA", "name": "example.com.", "ttl": 21600}, {"rrdatas": ["1.2.3.4"], "kind": "dns#resourceRecordSet", "type": "A", "name": "foo.example.com.", "ttl": 3600}], "kind": "dns#resourceRecordSetsListResponse"} diff --git a/libcloud/test/dns/fixtures/google/zone_create.json b/libcloud/test/dns/fixtures/google/zone_create.json new file mode 100644 index 0000000000..bdaafddc44 --- /dev/null +++ b/libcloud/test/dns/fixtures/google/zone_create.json @@ -0,0 +1 @@ +{"kind": "dns#managedZone", "name": "example-org", "nameServers": ["ns-cloud-b1.googledomains.com.", "ns-cloud-b2.googledomains.com.", "ns-cloud-b3.googledomains.com.", "ns-cloud-b4.googledomains.com."], "creationTime": "2014-03-30T04:44:20.834Z", "dnsName": "example.org.", "id": "3", "description": "new domain for example.org"} diff --git a/libcloud/test/dns/fixtures/google/zone_list.json b/libcloud/test/dns/fixtures/google/zone_list.json new file mode 100644 index 0000000000..b3f0fd7840 --- /dev/null +++ b/libcloud/test/dns/fixtures/google/zone_list.json @@ -0,0 +1 @@ +{"kind": "dns#managedZonesListResponse", "managedZones": [{"kind": "dns#managedZone", "name": "example-com", "nameServers": ["ns-cloud-e1.googledomains.com.", "ns-cloud-e2.googledomains.com.", "ns-cloud-e3.googledomains.com.", "ns-cloud-e4.googledomains.com."], "creationTime": "2014-03-29T22:45:47.618Z", 
"dnsName": "example.com.", "id": "1", "description": ""}, {"kind": "dns#managedZone", "name": "example-net", "nameServers": ["ns-cloud-d1.googledomains.com.", "ns-cloud-d2.googledomains.com.", "ns-cloud-d3.googledomains.com.", "ns-cloud-d4.googledomains.com."], "creationTime": "2014-03-29T22:45:46.990Z", "dnsName": "example.net.", "id": "2", "description": ""}]} diff --git a/libcloud/test/dns/test_google.py b/libcloud/test/dns/test_google.py new file mode 100644 index 0000000000..2a4b8c7d34 --- /dev/null +++ b/libcloud/test/dns/test_google.py @@ -0,0 +1,179 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and + +import sys +import unittest + +from libcloud.utils.py3 import httplib + +from libcloud.dns.types import ZoneDoesNotExistError +from libcloud.dns.types import RecordDoesNotExistError +from libcloud.dns.drivers.google import GoogleDNSDriver +from libcloud.common.google import (GoogleBaseAuthConnection, + GoogleInstalledAppAuthConnection, + GoogleBaseConnection) + +from libcloud.test.common.test_google import GoogleAuthMockHttp +from libcloud.test import MockHttpTestCase, LibcloudTestCase +from libcloud.test.file_fixtures import DNSFileFixtures +from libcloud.test.secrets import DNS_PARAMS_GOOGLE, DNS_KEYWORD_PARAMS_GOOGLE + + +class GoogleTests(LibcloudTestCase): + GoogleBaseConnection._get_token_info_from_file = lambda x: None + GoogleBaseConnection._write_token_info_to_file = lambda x: None + GoogleInstalledAppAuthConnection.get_code = lambda x: '1234' + + def setUp(self): + GoogleDNSMockHttp.test = self + GoogleDNSDriver.connectionCls.conn_classes = (GoogleDNSMockHttp, + GoogleDNSMockHttp) + GoogleBaseAuthConnection.conn_classes = (GoogleAuthMockHttp, + GoogleAuthMockHttp) + GoogleDNSMockHttp.type = None + kwargs = DNS_KEYWORD_PARAMS_GOOGLE.copy() + kwargs['auth_type'] = 'IA' + self.driver = GoogleDNSDriver(*DNS_PARAMS_GOOGLE, **kwargs) + + def test_list_zones(self): + zones = self.driver.list_zones() + self.assertEqual(len(zones), 2) + + def test_list_records(self): + zone = self.driver.list_zones()[0] + records = self.driver.list_records(zone=zone) + self.assertEqual(len(records), 3) + + def test_get_zone(self): + zone = self.driver.get_zone(1) + self.assertEqual(zone.id, '1') + self.assertEqual(zone.domain, 'example.com.') + + def test_get_zone_does_not_exist(self): + GoogleDNSMockHttp.type = 'ZONE_DOES_NOT_EXIST' + + try: + self.driver.get_zone(2) + except ZoneDoesNotExistError: + e = sys.exc_info()[1] + self.assertEqual(e.zone_id, 2) + else: + self.fail('Exception not thrown') + + def 
test_get_record(self): + GoogleDNSMockHttp.type = 'FILTER_ZONES' + zone = self.driver.list_zones()[0] + record = self.driver.get_record(zone.id, "foo.example.com.-A") + self.assertEqual(record.name, 'foo.example.com.') + self.assertEqual(record.type, 'A') + + def test_get_record_zone_does_not_exist(self): + GoogleDNSMockHttp.type = 'ZONE_DOES_NOT_EXIST' + + try: + self.driver.get_record(2, 'a-a') + except ZoneDoesNotExistError: + e = sys.exc_info()[1] + self.assertEqual(e.zone_id, 2) + else: + self.fail('Exception not thrown') + + def test_get_record_record_does_not_exist(self): + GoogleDNSMockHttp.type = 'RECORD_DOES_NOT_EXIST' + try: + self.driver.get_record(1, "foo-A") + except RecordDoesNotExistError: + e = sys.exc_info()[1] + self.assertEqual(e.record_id, 'foo-A') + else: + self.fail('Exception not thrown') + + def test_create_zone(self): + extra = {'description': 'new domain for example.org'} + zone = self.driver.create_zone('example.org.', extra) + self.assertEqual(zone.domain, 'example.org.') + self.assertEqual(zone.extra['description'], extra['description']) + self.assertEqual(len(zone.extra['nameServers']), 4) + + def test_delete_zone(self): + zone = self.driver.get_zone(1) + res = self.driver.delete_zone(zone) + self.assertTrue(res) + + +class GoogleDNSMockHttp(MockHttpTestCase): + fixtures = DNSFileFixtures('google') + + def _dns_v1beta1_projects_project_name_managedZones(self, method, url, + body, headers): + if method == 'POST': + body = self.fixtures.load('zone_create.json') + else: + body = self.fixtures.load('zone_list.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _dns_v1beta1_projects_project_name_managedZones_FILTER_ZONES( + self, method, url, body, headers): + body = self.fixtures.load('zone_list.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _dns_v1beta1_projects_project_name_managedZones_1_rrsets_FILTER_ZONES( + self, method, url, body, headers): + body = 
self.fixtures.load('record.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _dns_v1beta1_projects_project_name_managedZones_1_rrsets( + self, method, url, body, headers): + body = self.fixtures.load('records_list.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _dns_v1beta1_projects_project_name_managedZones_1(self, method, + url, body, + headers): + if method == 'GET': + body = self.fixtures.load('managed_zones_1.json') + elif method == 'DELETE': + body = None + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _dns_v1beta1_projects_project_name_managedZones_2_ZONE_DOES_NOT_EXIST( + self, method, url, body, headers): + body = self.fixtures.load('get_zone_does_not_exists.json') + return (httplib.NOT_FOUND, body, {}, + httplib.responses[httplib.NOT_FOUND]) + + def _dns_v1beta1_projects_project_name_managedZones_3_ZONE_DOES_NOT_EXIST( + self, method, url, body, headers): + body = self.fixtures.load('get_zone_does_not_exists.json') + return (httplib.NOT_FOUND, body, {}, + httplib.responses[httplib.NOT_FOUND]) + + def _dns_v1beta1_projects_project_name_managedZones_1_RECORD_DOES_NOT_EXIST( + self, method, url, body, headers): + body = self.fixtures.load('managed_zones_1.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _dns_v1beta1_projects_project_name_managedZones_1_rrsets_RECORD_DOES_NOT_EXIST( + self, method, url, body, headers): + body = self.fixtures.load('no_record.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _dns_v1beta1_projects_project_name_managedZones_2_rrsets_ZONE_DOES_NOT_EXIST( + self, method, url, body, headers): + body = self.fixtures.load('get_zone_does_not_exists.json') + return (httplib.NOT_FOUND, body, {}, + httplib.responses[httplib.NOT_FOUND]) + +if __name__ == '__main__': + sys.exit(unittest.main()) diff --git a/libcloud/test/secrets.py-dist b/libcloud/test/secrets.py-dist index 773a2c25b1..030176e4c0 100644 
--- a/libcloud/test/secrets.py-dist +++ b/libcloud/test/secrets.py-dist @@ -63,3 +63,5 @@ DNS_PARAMS_RACKSPACE = ('user', 'key') DNS_PARAMS_HOSTVIRTUAL = ('key',) DNS_PARAMS_ROUTE53 = ('access_id', 'secret') DNS_GANDI = ('user', ) +DNS_PARAMS_GOOGLE = ('email_address', 'key') +DNS_KEYWORD_PARAMS_GOOGLE = {'project': 'project_name'} From 644de1dd3de94ef1c40d2ef4071ce8d99b3f4cc6 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Tue, 29 Apr 2014 08:04:23 +0200 Subject: [PATCH 039/315] Re-generate supported providers table. --- docs/dns/_supported_methods.rst | 2 ++ docs/dns/_supported_providers.rst | 2 ++ 2 files changed, 4 insertions(+) diff --git a/docs/dns/_supported_methods.rst b/docs/dns/_supported_methods.rst index a2f903ec60..399358a609 100644 --- a/docs/dns/_supported_methods.rst +++ b/docs/dns/_supported_methods.rst @@ -2,6 +2,7 @@ Provider list zones list records create zone update zone create record update record delete zone delete record ===================== ========== ============ =========== =========== ============= ============= =========== ============= `Gandi DNS`_ yes yes yes yes yes yes yes yes +`Google DNS`_ yes yes yes no yes no yes yes `Host Virtual DNS`_ yes yes yes yes yes yes yes yes `Linode DNS`_ yes yes yes yes yes yes yes yes `Rackspace DNS`_ yes yes yes yes yes yes yes yes @@ -12,6 +13,7 @@ Provider list zones list records create zone update zone create rec ===================== ========== ============ =========== =========== ============= ============= =========== ============= .. _`Gandi DNS`: http://www.gandi.net/domain +.. _`Google DNS`: https://cloud.google.com/ .. _`Host Virtual DNS`: http://www.vr.org/ .. _`Linode DNS`: http://www.linode.com/ .. 
_`Rackspace DNS`: http://www.rackspace.com/ diff --git a/docs/dns/_supported_providers.rst b/docs/dns/_supported_providers.rst index 168e023d0c..a8c497c29b 100644 --- a/docs/dns/_supported_providers.rst +++ b/docs/dns/_supported_providers.rst @@ -2,6 +2,7 @@ Provider Documentation Provider constant Module Class Name ===================== ======================================= ================= ======================================= ============================= `Gandi DNS`_ GANDI :mod:`libcloud.dns.drivers.gandi` :class:`GandiDNSDriver` +`Google DNS`_ GOOGLE :mod:`libcloud.dns.drivers.google` :class:`GoogleDNSDriver` `Host Virtual DNS`_ :doc:`Click ` HOSTVIRTUAL :mod:`libcloud.dns.drivers.hostvirtual` :class:`HostVirtualDNSDriver` `Linode DNS`_ LINODE :mod:`libcloud.dns.drivers.linode` :class:`LinodeDNSDriver` `Rackspace DNS`_ RACKSPACE :mod:`libcloud.dns.drivers.rackspace` :class:`RackspaceDNSDriver` @@ -12,6 +13,7 @@ Provider Documentation Provider constant ===================== ======================================= ================= ======================================= ============================= .. _`Gandi DNS`: http://www.gandi.net/domain +.. _`Google DNS`: https://cloud.google.com/ .. _`Host Virtual DNS`: http://www.vr.org/ .. _`Linode DNS`: http://www.linode.com/ .. 
_`Rackspace DNS`: http://www.rackspace.com/ From 793e85440789aaf1ade81220789d1465a7c0f1ee Mon Sep 17 00:00:00 2001 From: Matthew Lehman Date: Tue, 29 Apr 2014 00:05:32 -0400 Subject: [PATCH 040/315] Change create_image API call params from Reboot to NoReboot and negated set logic Closes #284 Signed-off-by: Tomaz Muraus --- libcloud/compute/drivers/ec2.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libcloud/compute/drivers/ec2.py b/libcloud/compute/drivers/ec2.py index 0a57b0bb1e..8c1b046ba1 100644 --- a/libcloud/compute/drivers/ec2.py +++ b/libcloud/compute/drivers/ec2.py @@ -1811,7 +1811,7 @@ def create_image(self, node, name, description=None, reboot=False, params = {'Action': 'CreateImage', 'InstanceId': node.id, 'Name': name, - 'Reboot': reboot} + 'NoReboot': not reboot} if description is not None: params['Description'] = description From 4f5a7e5f91ed7202799da0a2d78b36a03dfda61e Mon Sep 17 00:00:00 2001 From: Torsten Schlabach Date: Fri, 2 May 2014 14:14:29 +0200 Subject: [PATCH 041/315] Added OpenStack_1_1_NodeDriver to the 'API Docs' section Closes #288 Signed-off-by: Tomaz Muraus --- docs/compute/drivers/openstack.rst | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/docs/compute/drivers/openstack.rst b/docs/compute/drivers/openstack.rst index cd8fc91bda..0d46fb79f0 100644 --- a/docs/compute/drivers/openstack.rst +++ b/docs/compute/drivers/openstack.rst @@ -188,6 +188,21 @@ service are two different services which listen on different ports. API Docs -------- +Please note that there are two API versions of the OpenStack Compute API, which +are supported by two different subclasses of the OpenStackNodeDriver. The +default is the 1.1 API. The 1.0 API is supported to be able to connect to +OpenStack instances which do not yet support the version 1.1 API. + +Compute 1.1 API version (current) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. 
autoclass:: libcloud.compute.drivers.openstack.OpenStack_1_1_NodeDriver + :members: + :inherited-members: + +Compute 1.0 API version (old installations) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + .. autoclass:: libcloud.compute.drivers.openstack.OpenStack_1_0_NodeDriver :members: :inherited-members: From b20c62a97167d71fe1fc3f7ba4bc2938476d7042 Mon Sep 17 00:00:00 2001 From: Jeff Moody Date: Mon, 5 May 2014 14:05:30 -0400 Subject: [PATCH 042/315] LIBCLOUD-550_DisplayName Added support for "displayname" CloudStack deployment parameter and removed automatic setting of "name" parameter. This will allow for deployment of instances without a hostname, but with a displayname, so that you can have multiple tenants using the same displayname with unique hostnames being generated from the instance UUID. Closes #289 Signed-off-by: Tomaz Muraus --- CHANGES.rst | 8 ++++++++ libcloud/compute/drivers/cloudstack.py | 11 ++++++++--- 2 files changed, 16 insertions(+), 3 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index 382c9873a2..3d295d92cc 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -101,6 +101,14 @@ Compute (GITHUB-277) [Matt Lehman] +- Allow user to specify "displayname" attribute when creating a CloudStack node + by passing "ex_displayname" argument to the method. + + Also allow "name" argument to be empty (None). This way CloudStack + automatically uses Node's UUID for the name. 
+ (GITHUB-289) + [Jeff Moody] + Load Balancer ~~~~~~~~~~~~~ diff --git a/libcloud/compute/drivers/cloudstack.py b/libcloud/compute/drivers/cloudstack.py index cd0cc3ef83..0cd48e8e8e 100644 --- a/libcloud/compute/drivers/cloudstack.py +++ b/libcloud/compute/drivers/cloudstack.py @@ -700,6 +700,9 @@ def create_node(self, **kwargs): the node :type ex_security_groups: ``list`` of ``str`` + :keyword ex_displayname: String containing instance display name + :type ex_displayname: ``str`` + :rtype: :class:`.CloudStackNode` """ @@ -712,9 +715,7 @@ def create_node(self, **kwargs): return node def _create_args_to_params(self, node, **kwargs): - server_params = { - 'name': kwargs.get('name'), - } + server_params = {} # TODO: Refactor and use "kwarg_to_server_params" map name = kwargs.get('name', None) @@ -727,10 +728,14 @@ def _create_args_to_params(self, node, **kwargs): ex_key_name = kwargs.get('ex_keyname', None) ex_user_data = kwargs.get('ex_userdata', None) ex_security_groups = kwargs.get('ex_security_groups', None) + ex_displayname = kwargs.get('ex_displayname', None) if name: server_params['name'] = name + if ex_displayname: + server_params['displayname'] = ex_displayname + if size: server_params['serviceofferingid'] = size.id From 6f9b004019675a0c9c612cdaf2e45c3be42a4c74 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Tue, 6 May 2014 20:22:53 +0200 Subject: [PATCH 043/315] Deprecate "key" argument in the SSHClient class in favor of new "key_files" argument. Also add a new "key_material" argument. This argument can contain raw string version of a private key. Note 1: "key_files" and "key_material" arguments are mutually exclusive. Note 2: "key_material" argument is not supported in the ShellOutSSHClient. 
--- CHANGES.rst | 9 +++ libcloud/compute/base.py | 2 +- libcloud/compute/ssh.py | 87 ++++++++++++++++++------ libcloud/test/compute/test_ssh_client.py | 79 +++++++++++++++++++-- 4 files changed, 151 insertions(+), 26 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index 3d295d92cc..e7af0ad6b6 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -109,6 +109,15 @@ Compute (GITHUB-289) [Jeff Moody] +- Deprecate "key" argument in the SSHClient class in favor of new "key_files" + argument. + + Also add a new "key_material" argument. This argument can contain raw string + version of a private key. + + Note 1: "key_files" and "key_material" arguments are mutually exclusive. + Note 2: "key_material" argument is not supported in the ShellOutSSHClient. + Load Balancer ~~~~~~~~~~~~~ diff --git a/libcloud/compute/base.py b/libcloud/compute/base.py index 6c34186f40..06a9249db6 100644 --- a/libcloud/compute/base.py +++ b/libcloud/compute/base.py @@ -1394,7 +1394,7 @@ def _connect_and_run_deployment_script(self, task, node, ssh_hostname, ssh_client = SSHClient(hostname=ssh_hostname, port=ssh_port, username=ssh_username, password=ssh_password, - key=ssh_key_file, + key_files=ssh_key_file, timeout=ssh_timeout) # Connect to the SSH server running on the node diff --git a/libcloud/compute/ssh.py b/libcloud/compute/ssh.py index 7fb1af2149..68a4da487f 100644 --- a/libcloud/compute/ssh.py +++ b/libcloud/compute/ssh.py @@ -14,8 +14,9 @@ # limitations under the License. """ -Wraps multiple ways to communicate over SSH +Wraps multiple ways to communicate over SSH. 
""" + have_paramiko = False try: @@ -32,6 +33,7 @@ import time import subprocess import logging +import warnings from os.path import split as psplit from os.path import join as pjoin @@ -50,7 +52,7 @@ class BaseSSHClient(object): """ def __init__(self, hostname, port=22, username='root', password=None, - key=None, timeout=None): + key=None, key_files=None, timeout=None): """ :type hostname: ``str`` :keyword hostname: Hostname or IP address to connect to. @@ -66,14 +68,24 @@ def __init__(self, hostname, port=22, username='root', password=None, to unlock a private key if a password protected key is used. - :type key: ``str`` or ``list`` - :keyword key: A list of paths to the private key files to use. + :param key: Deprecated in favor of ``key_files`` argument. + + :type key_files: ``str`` or ``list`` + :keyword key_files: A list of paths to the private key files to use. """ + if key is not None: + message = ('You are using deprecated "key" argument which has ' + 'been replaced with "key_files" argument') + warnings.warn(message, DeprecationWarning) + + # key_files has precedent + key_files = key if not key_files else key_files + self.hostname = hostname self.port = port self.username = username self.password = password - self.key = key + self.key_files = key_files self.timeout = timeout def connect(self): @@ -116,8 +128,8 @@ def delete(self, path): :type path: ``str`` :keyword path: File path on the remote node. - :return: True if the file has been successfully deleted, False - otherwise. + :return: True if the file has been successfully deleted, + False otherwise. :rtype: ``bool`` """ raise NotImplementedError( @@ -139,8 +151,8 @@ def close(self): """ Shutdown connection to the remote node. - :return: True if the connection has been successfully closed, False - otherwise. + :return: True if the connection has been successfully closed, + False otherwise. 
:rtype: ``bool`` """ raise NotImplementedError( @@ -165,7 +177,7 @@ class ParamikoSSHClient(BaseSSHClient): A SSH Client powered by Paramiko. """ def __init__(self, hostname, port=22, username='root', password=None, - key=None, timeout=None): + key=None, key_files=None, key_material=None, timeout=None): """ Authentication is always attempted in the following order: @@ -177,8 +189,19 @@ def __init__(self, hostname, port=22, username='root', password=None, - Plain username/password auth, if a password was given (if password is provided) """ - super(ParamikoSSHClient, self).__init__(hostname, port, username, - password, key, timeout) + if key_files and key_material: + raise ValueError(('key_files and key_material arguments are ' + 'mutually exclusive')) + + super(ParamikoSSHClient, self).__init__(hostname=hostname, port=port, + username=username, + password=password, + key=key, + key_files=key_files, + timeout=timeout) + + self.key_material = key_material + self.client = paramiko.SSHClient() self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) self.logger = self._get_and_setup_logger() @@ -193,10 +216,13 @@ def connect(self): if self.password: conninfo['password'] = self.password - if self.key: - conninfo['key_filename'] = self.key + if self.key_files: + conninfo['key_filename'] = self.key_files + + if self.key_material: + conninfo['pkey'] = self._get_pkey_object(key=self.key_material) - if not self.password and not self.key: + if not self.password and not (self.key_files or self.key_material): conninfo['allow_agent'] = True conninfo['look_for_keys'] = True @@ -345,6 +371,23 @@ def close(self): self.client.close() return True + def _get_pkey_object(self, key): + """ + Try to detect private key type and return paramiko.PKey object. 
+ """ + + for cls in [paramiko.RSAKey, paramiko.DSSKey, paramiko.ECDSAKey]: + try: + key = cls.from_private_key(StringIO(key)) + except paramiko.ssh_exception.SSHException: + # Invalid key, try other key type + pass + else: + return key + + msg = 'Invalid or unsupported key type' + raise paramiko.ssh_exception.SSHException(msg) + class ShellOutSSHClient(BaseSSHClient): """ @@ -355,9 +398,13 @@ class ShellOutSSHClient(BaseSSHClient): """ def __init__(self, hostname, port=22, username='root', password=None, - key=None, timeout=None): - super(ShellOutSSHClient, self).__init__(hostname, port, username, - password, key, timeout) + key=None, key_files=None, timeout=None): + super(ShellOutSSHClient, self).__init__(hostname=hostname, + port=port, username=username, + password=password, + key=key, + key_files=key_files, + timeout=timeout) if self.password: raise ValueError('ShellOutSSHClient only supports key auth') @@ -403,8 +450,8 @@ def close(self): def _get_base_ssh_command(self): cmd = ['ssh'] - if self.key: - cmd += ['-i', self.key] + if self.key_files: + cmd += ['-i', self.key_files] if self.timeout: cmd += ['-oConnectTimeout=%s' % (self.timeout)] diff --git a/libcloud/test/compute/test_ssh_client.py b/libcloud/test/compute/test_ssh_client.py index d996119841..857f4e2eea 100644 --- a/libcloud/test/compute/test_ssh_client.py +++ b/libcloud/test/compute/test_ssh_client.py @@ -20,20 +20,25 @@ import os import sys import tempfile -import unittest from libcloud import _init_once +from libcloud.test import LibcloudTestCase +from libcloud.test import unittest from libcloud.compute.ssh import ParamikoSSHClient from libcloud.compute.ssh import ShellOutSSHClient from libcloud.compute.ssh import have_paramiko +from libcloud.utils.py3 import StringIO + from mock import patch, Mock if not have_paramiko: ParamikoSSHClient = None # NOQA +else: + import paramiko -class ParamikoSSHClientTests(unittest.TestCase): +class ParamikoSSHClientTests(LibcloudTestCase): 
@patch('paramiko.SSHClient', Mock) def setUp(self): @@ -68,7 +73,7 @@ def test_create_with_password(self): self.assertLogMsg('Connecting to server') @patch('paramiko.SSHClient', Mock) - def test_create_with_key(self): + def test_deprecated_key_argument(self): conn_params = {'hostname': 'dummy.host.org', 'username': 'ubuntu', 'key': 'id_rsa'} @@ -84,6 +89,70 @@ def test_create_with_key(self): mock.client.connect.assert_called_once_with(**expected_conn) self.assertLogMsg('Connecting to server') + def test_key_files_and_key_material_arguments_are_mutual_exclusive(self): + conn_params = {'hostname': 'dummy.host.org', + 'username': 'ubuntu', + 'key_files': 'id_rsa', + 'key_material': 'key'} + + expected_msg = ('key_files and key_material arguments are mutually ' + 'exclusive') + self.assertRaisesRegexp(ValueError, expected_msg, + ParamikoSSHClient, **conn_params) + + @patch('paramiko.SSHClient', Mock) + def test_key_material_argument(self): + path = os.path.join(os.path.dirname(__file__), + 'fixtures', 'misc', 'dummy_rsa') + + with open(path, 'r') as fp: + private_key = fp.read() + + conn_params = {'hostname': 'dummy.host.org', + 'username': 'ubuntu', + 'key_material': private_key} + mock = ParamikoSSHClient(**conn_params) + mock.connect() + + pkey = paramiko.RSAKey.from_private_key(StringIO(private_key)) + expected_conn = {'username': 'ubuntu', + 'allow_agent': False, + 'hostname': 'dummy.host.org', + 'look_for_keys': False, + 'pkey': pkey, + 'port': 22} + mock.client.connect.assert_called_once_with(**expected_conn) + self.assertLogMsg('Connecting to server') + + @patch('paramiko.SSHClient', Mock) + def test_key_material_argument_invalid_key(self): + conn_params = {'hostname': 'dummy.host.org', + 'username': 'ubuntu', + 'key_material': 'id_rsa'} + + mock = ParamikoSSHClient(**conn_params) + + expected_msg = 'Invalid or unsupported key type' + self.assertRaisesRegexp(paramiko.ssh_exception.SSHException, + expected_msg, mock.connect) + + @patch('paramiko.SSHClient', 
Mock) + def test_create_with_key(self): + conn_params = {'hostname': 'dummy.host.org', + 'username': 'ubuntu', + 'key_files': 'id_rsa'} + mock = ParamikoSSHClient(**conn_params) + mock.connect() + + expected_conn = {'username': 'ubuntu', + 'allow_agent': False, + 'hostname': 'dummy.host.org', + 'look_for_keys': False, + 'key_filename': 'id_rsa', + 'port': 22} + mock.client.connect.assert_called_once_with(**expected_conn) + self.assertLogMsg('Connecting to server') + @patch('paramiko.SSHClient', Mock) def test_create_with_password_and_key(self): conn_params = {'hostname': 'dummy.host.org', @@ -185,11 +254,11 @@ def assertLogMsg(self, expected_msg): if not ParamikoSSHClient: - class ParamikoSSHClientTests(unittest.TestCase): # NOQA + class ParamikoSSHClientTests(LibcloudTestCase): # NOQA pass -class ShellOutSSHClientTests(unittest.TestCase): +class ShellOutSSHClientTests(LibcloudTestCase): def test_password_auth_not_supported(self): try: From 940e10da215ff40d253013bcbf10b7382dc15466 Mon Sep 17 00:00:00 2001 From: earthgecko Date: Tue, 29 Apr 2014 16:50:07 +0100 Subject: [PATCH 044/315] [LIBCLOUD-547] linode.py labels too long with name use linode id instead Closes #287 --- CHANGES.rst | 5 +++++ libcloud/compute/drivers/linode.py | 22 ++++++++++++---------- 2 files changed, 17 insertions(+), 10 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index e7af0ad6b6..8a64df2fc1 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -118,6 +118,11 @@ Compute Note 1: "key_files" and "key_material" arguments are mutually exclusive. Note 2: "key_material" argument is not supported in the ShellOutSSHClient. +- Use node id attribute instead of the name for the "lconfig" label in the + Linode driver. This way the label is never longer than 48 characters. 
+ (GITHUB-287) + [earthgecko] + Load Balancer ~~~~~~~~~~~~~ diff --git a/libcloud/compute/drivers/linode.py b/libcloud/compute/drivers/linode.py index c58f56a1c1..4960f29472 100644 --- a/libcloud/compute/drivers/linode.py +++ b/libcloud/compute/drivers/linode.py @@ -284,16 +284,6 @@ def create_node(self, **kwargs): comments = "Created by Apache libcloud " if\ "ex_comment" not in kwargs else kwargs["ex_comment"] - # Labels - label = { - "lconfig": "[%s] Configuration Profile" % name, - "lroot": "[%s] %s Disk Image" % (name, image.name), - "lswap": "[%s] Swap Space" % name - } - for what in ["lconfig", "lroot", "lswap"]: - if what in kwargs: - label[what] = kwargs[what] - # Step 1: linode.create params = { "api_action": "linode.create", @@ -320,6 +310,18 @@ def create_node(self, **kwargs): } self.connection.request(API_ROOT, params=params) + # Step 1d. Labels + # use the linode id as the name can be up to 63 chars and the labels + # are limited to 48 chars + label = { + "lconfig": "[%s] Configuration Profile" % linode["id"], + "lroot": "[%s] %s Disk Image" % (linode["id"], image.name), + "lswap": "[%s] Swap Space" % linode["id"] + } + for what in ["lconfig", "lroot", "lswap"]: + if what in kwargs: + label[what] = kwargs[what] + # Step 2: linode.disk.createfromdistribution if not root: root = binascii.b2a_base64(os.urandom(8)).decode('ascii').strip() From 0a049c43fe565021879401c5b59333a387f9a240 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Beno=C3=AEt=20Canet?= Date: Tue, 15 Apr 2014 15:50:54 +0200 Subject: [PATCH 045/315] Add support for the Outscale SAS cloud. This commit introduce the Outscale SAS cloud an european cloud provider featuring an EC2 compatible API. Special care have been taken to leave room for Outscale INC an US based cloud using the same infrastructure but governed by US laws. 
Closes #285 Signed-off-by: Tomaz Muraus --- CHANGES.rst | 4 + .../images/provider_logos/outscale.png | Bin 0 -> 8437 bytes .../_supported_methods_block_storage.rst | 2 + .../_supported_methods_image_management.rst | 2 + ..._supported_methods_key_pair_management.rst | 2 + docs/compute/_supported_methods_main.rst | 2 + docs/compute/_supported_providers.rst | 124 ++-- docs/compute/drivers/outscale_sas.rst | 31 + libcloud/compute/drivers/ec2.py | 596 +++++++++++++++++- libcloud/compute/providers.py | 2 + libcloud/compute/types.py | 2 + libcloud/data/pricing.json | 69 ++ libcloud/test/compute/test_ec2.py | 93 +++ 13 files changed, 867 insertions(+), 62 deletions(-) create mode 100644 docs/_static/images/provider_logos/outscale.png create mode 100644 docs/compute/drivers/outscale_sas.rst diff --git a/CHANGES.rst b/CHANGES.rst index 8a64df2fc1..0e4ba16198 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -123,6 +123,10 @@ Compute (GITHUB-287) [earthgecko] +- Add a new driver for Outscale cloud (http://www.outscale.com). + (GITHUB-285) + [Benoit Canet] + Load Balancer ~~~~~~~~~~~~~ diff --git a/docs/_static/images/provider_logos/outscale.png b/docs/_static/images/provider_logos/outscale.png new file mode 100644 index 0000000000000000000000000000000000000000..c11c92a8f1c9ded5d593a04721f0da7ca875c0b5 GIT binary patch literal 8437 zcmaKSWmH_v(k?o9aF^ijGB|^426qdCyACc1o&<*^xVsN-!Gl{8EVx6E;1c{!&UfDP z-5>YdUVH87-K*-U>grn6)m`&mQ(XxMlL8X~0RcxvSza3f0g)5lS3?KD-=p{K4DcUv zsDdF>$ITY%3-Po;kg;~Nw4qgTf!Nt-+d!=SJw|LK5D<_V9dr$$hTvCXR&FlbkiRtC zelG5CZUh7gke@rm%E<;wYiVQW;3`Rf($P&%>tHQOZy*Hb1-r}H*gGf(c-rU$sOwq< zI9Z8W(}SdFCH%zT04_FA2(6!sv#XbwpCtXiaK+&Lzr#HAwErT3I!V(1k5YzUOTk!g+D+pK0z@dpcua}?Y|p6T$-n~t(dmF z;=g6Vza;7Hp-^`*9v)v`Uv6IkZZ}Um9zIb~(Z3k{{9JGfE-!yqD8!G;)r;XD1bG`T zD^CY^sDqm;?O#NQrJFZYk{+(<|4hNf{Xej-UjH@|++aL@5O*FvZr;CB`iBq<{{NG@ zxcrCO3#x7Nzxw_^fxUG7-EDZZZM@vPJ+0siXUp(cl)IRmrws(^=BexE=KPNpHSOJ? 
zZeI3o?zD0`Kw4HX#LB_-?*QAsIKW^r6<04P#MR11MP8B~j=}BVU@ayfA|#?Hq9`vb zFT&5qryw9AD8nbu%PRs@P>_=o;^qH`SKiIa+r`Ee`VX)5|MDvQSKhzQ;NlLKEN|oK z;A3O0=;`J{`>#ohIsA8BWdAGPzj>|yyDoD7m6r#u4A0-C{lAv_pCP#a{O$h7ap52T zaeW(CxW9YC9h)cZ>J|Zk+D=7YM%Qog$PCR;*I-d<8p|arzyDiD80$5G&}UXHLbaJR zvN+0VAaiCr+dYfzv-9;DSh$m1h)+?I58PW;R%t1tC3E*Jihmq?{9DOd`g4aNNg9@^ zdm{eSy~GWq^=8>?O7uqjB)D6L9GHm%v9PdM#08uI7 zl8KpQ*n^3TK#TLf1#D6e(>*U>q56XR-OWmbmRoNvA#(ri`f$ADb#=0X%EQ+`!$Tkm z>GR_3SKA#`wW6>qN@of(vY$*)%vRJPMy({hAt486iI7iZWNEN0CH-^BzQFcD{QIa) zgZH6)4<2K!5jEB4$cVDL49tDZzxrCS<$ns+ubowX?IC-zz%G(;E~8j!p&nUUr@Ov9 zD)@p|-vx7g8nQ{{aX35t+T$*JG8`Ir+%f};BD}xOf^O97Whb0}@!I1xg3mKj$R0E1 zgqjiZM~MZGgpi+@3M6~eHJoO6{Qc-6DSN8=_5ItRHJ<%y=oa3fde-1JmgKlM6KU*d z28A;c8CgqwABAq8rMn+?%^Pc!B~TDH$l98JT9`g*gA!ODH-OdtnRF7D2khDvbUrh| zDx&hrGdbk9(%$#kRmHA}f+qG#OwCTt`6|{4Ap_FgG$+gpXWK&JOme`Wi@aeNG3E1s za@rZIWt~E2q={qK!_`eK==#?LYpY&u*Zrz_%loM-tuI{7^%wWpOXdOS@5W*$aFF}` z(Va9GVQDn0G_gesQ#T6wjK^6*fo(klpM-x#4w9l9UFeY16vzx6#}03Q$}009gq1@q zL`VUVu)KPW2vAj5pNCA@d$T_@G5Vn90GNQ7gZ`+gudpyk<3%SYR*}b6oj=?^01*SY zG#+T#?&cD}ee*?<8si&@1Rk;(nlb;em&O_uI?_K<^H&1`qy&9?#SNf z9)S!I&Jx9OJ3{EO1L-?@)MHChXL>TS{xsO1(Cm?%P>-dEFP9*1Ed7V(u|{aBzG(?G`@p1pym!hHQa;7^1(SKWyvhn1yNC5i`*N zgS^a~vh>BBo?-8XT(>;aaHN?rX&$n9{f$}UmYgLYO1{P}c*)VZ3gseCA@n7qi%vkN zhfXmDDy1zfu!&K&R zLj&4ZdqTj!p{$ja)&qMmr`d`qAEnZvviW@#?W5C|E(Sz9v3R(GbUk3FM#Eq*NoC7> zm}(V}vV&Y%ty;J+$~Jf`G&Xn%OF$}+@*krv<#9i6R2y`!-hUqyC*?K?+Uw+)SGFWP zMyYrFP0InS%x|Fdi>pF0)sHLaQa|~1+1T7nHqzSB(KA0>xh0$>OsKDA;&Qvs;IPys zaekj8>B43FnD?n&Ll@;7gF0^cfdM~8fnA%M3LtMdqikXlPfyy4#pwU=_oLvd8+Hwf|v|vVxBca!{YgUc3?R&lSgL}E#cYfrxPRnnu$+2oO!Q}JP z3#CF5snCLwDn*IZZ&I>LiTTwipR$)jJ*Pp4n+``pmn*f$igbmkP|?zA9tDsTHJHPN zV{J+{=Lfmjq~6Z4Qg?Uuk27(!jwLo`oTd?aCDn4zXhB%mZuXj+)O4A42A~Onx%x>! 
z)%fy-B`xBL8`5USU|y1!a6xN2rRKGpMu8frWc5WdNJ{EybtHx5%^Imc6u^q@R%Wfy zSGU_2iKVf(z3p6*Pj9Hn2Kua8oKQ$}jhXjH6Dv3fru-Lwnz^@ttyKGTC&Y>JW>+GNSe$@N8sBcFx~&r56qR$s5lY*@V} zJALGgM**8X5T1W#&Xaq-%iVB}O-f1 zv59wfcBZ}%tTs0{k4fb;B4}}2>sol!QtbJVb{ z7t55`_rW$1+o@&hzE)(blyu`#t1t|88MngVS4T{f=D4r*-T4_@%9um zB}{ET{7VbD7Wm1BQM?UJXoiTPpc=ASp0ZB`GxzlIw5c5rVU?BH%^WdQirIwZ!777yLEZQjMc0*x7fh7xT zI(^E>%&0X#YEDiL{<`r5n((Oy=0Kn7t5-qz=wtC8A08eGHo8g!9I-e!IPM!OD%cx} zizAONVQua0fVWs8FJ`8U|G2b-MMp;u>ArKLC^{j-@oqvnC;M(fM%&n(Bkp^whSjrB zVG9ONn6o~Ia4EUMGrs;z*n`#33ZBuUK?*GC^%0b|%0(_Y;aB+>n@XvIH_7AKljn@) z+zOXo-dzE~$K0g&Fr_zdZQYcWf?8lT)l^7th{UZBN0)L#bS?^NSQXJ$-&718_7;}v z5X{|PYG`@80AMbM&f|NOw=3Jc_MQ+Ol@{aDc2E5z zNLmfd78OHHKWjD_mFrf-%0<7Zws&%J+7B|XoSl$I{dX zy(`{g{rCy>_w!9Kk@YO`Y#ZtKd~gh&o-a&erg4MyDqnW#3Lv{9qN1Rmov+ItGnET^ zCjFr1nrAE9o=fyzUKa?F3b_2Wcx*kZup2t21bi2cS=kbpm}!zLnVv^`LVy`r$VT*T zdR_vzAfrUL8s9q*D`a<65K8|fCx?6wAr zshOFXtA>_lR>NWN%szBSIvyujGMefTwZ?K<`W$KW=>74Y^ds@^&W=l?T3+R0QC?o& zcl3S_Zn~)zoplm2e8?6$a@E0TLqpR z&bdmb`_bB9gEk|#qj@=#Sw+V4ZZ2$mqE|SmkO3~^MteJT^+^+HRe#A9tkE(l1_rAa z1GnsAC#bqFe%~N@=!a2Q`!aknADJ8*I2&-V#H30V6f@3S7yRejSnb+s9_SN+zTXyMW%TrBkB5hc_u2_RBqU6o zfBg7yN17dzHi|$~#2cPf7SA6NpLW!jm$PY9>yt|FNcjo;NKa31%j>?0CF8S14)6V? 
zJ?{+6?1I7E1@@ypr>4FEaBq!e*P@}JfzXc+Df1oiHle26o0tL1Z>9}qH$k_j2NA7?(Mb2 zs*BKCC0nCK!qSjS!f1MSJlSd^BO_OEd0dgnw*3-^y$0hbZ~Q@v)PNiiE~w-Cj*5y( zZEtEIO*%=rZo3a${Wk^USP5ff5%vAQ5KlJ@?Rd#l0W*o~haHY{RYoju`5_{-v=$#5 zw%Pdj&a~WbBs%8z;rn#3-|v^2@2*sf&s&afq9P(Bb`K8N2S4+ne0uTl#2xqTlt4l5 zlLN5h@Y+hc{wHe|LR?&&ci{CA`D=asYecqXv~xOdU*BBOH%|{ot*x#8>UlLNi7z_x zvGW*lece)!Z@wKPt2aA^ioai~iwcAFkWK;0?T;_@_8s`Et$fXZl$=DwTlOrss>3_y z{XdDakj1iV$Un|`m6w-qFflQ?9bDawz1t0o$zCacPssALzPZU?^pf8F;j)W-f}N$Inp_2QG2j~qvO4{ z@SCO>UrqNLRV#*Z@XsIsE*BTqsMS!sOGGXuJe6uGEwxL|%ruG6WW`1h9*H)^8SL+8 zNN!}2uR~)lBgAO6(y+0!d#+`eMy;_!O4LlX1;FTteU935$r75bQ^?llwx{m}Y#%gJJ$8QRd(@Sm?bBfhnq z&41Lfc`2b>F5dM714-Gr5MvMQw{K;`!ox|uLAW&=K=>wzePD`;RzCb&SG4slfhL_3 zJh-nS*~}I{3eeKNU4B23A0gy@FhjkaVU#@;D*G%ZAkYwVNJ2sa#6b6<|70KlkIV^o z7q2{N%LVXA+RRD8eV*KrHqNt+#BTnNkuoY;mGuPZP7!jOj&2nm@`5&~se(1-Q-+0=Ea>umQ%FK!<+J++~)r{20tXo7XE?yFYN2YKMhb2{=^G@s*4 z**uKISFul9Z&!qq+uLR}aYrsmV^(oU+Sm1GjHT`Ma`=EX-?uW7UV;Mi|9I!v$PVMe-k z;t>b-MPO(kZgAE);lm7%qzoOKG90r=yzVad9pK05y3W?>V*$I#WhB?qQfg!$lO5=C zt4D1j&T~8`!^@YsHi<*6OWCk2w4?p_r^dG<$KS8hu=@^&u}>~H&LY8Mb;jYr z!4D)PB${8YxqN(llne|ExuJ)Fy6k%DUS17-mEzS>NbXB$QuH{Kw;B|p#o?WZr{85tR` z)pMmDW!xaUliBywGI;GzUjsM#BA>%S>HVp&8DgQb=5;h8vhKSl413Ji42oD{O^@WG921yKqf5^qR*xs~Yq6MK#V)Neu@{Th znK!6k&~7I?Ol|+1ZuDcVV}{Pr%fQ?74v<3SgQg&jvm6) z{8I4A2@m@$XmuR4@Vyn93G{W{ElAma&2S9i!xAgMqAfG=FcQJdAy$n~Zk(A|2gc}j zs12v4CLC&}qpYncFty`P2eb$uU0%Csc~xVBga3$sC62njfAX17fc}||PJFOjla#-I zzLLneWD%tZCLMMM)aF*%%S0McYR&CEld)S>)znDcFS&Jtv-qqA_1yPIpf zg~m)f-S6~e=bE`*R1o=hHF_(2Zgj$CO5PJu7 zFZamto?GAt7N3;fk<}siMg>lT0m!=$Wsb3qO@zF5!f{zp)xy!uhe9H%jOn8F^cZTZ zViMjT2jAc~*8UD1y!O=f58+{#_(NR5!Aj#89x=+LrlyvO`RUDzvQOUYITH}G zg^oI5r0?Of;((f&&jkeqX3$hpK=Bn*ga%7#x4gW375%*ZNpY3)sRu`OS=k@*oQrDN zd>99ZrwUmGWlT(r@J$O<&nzbjsxP`}$3pg0xEdLjA^QH)l@DW;%t~tFfUQ7s>8hyB zyQ5PeJ-*;$j5=amP=4!BuzvB!+_xT48ORV_ki&yv2iz%uKAeM2Gl; z2*!fWlVHPROO2;xCfwCGH;;$rr^fhy0~Dywhsi8%4osxrz)r%O7= z&IOZ7malOEF|b)>-p13UIWkTy@b?(ddS~me+|A(JX9X|&rPQC>yu48klqCJa^hQor 
zZH`M5Wd)sTnPO~X5e5keucgGVJB{du`sI0$*IiYwXRo}OFr7k zoD?SGe5m@cTlN;k_5RbU4X`|FSmtwQ{Bt_|JmH$Jsob7!oiHM2(&CD9EF{UU<}Q8$8@H8n z3HdK#&qDivlo;_NvdXa$vv#lVYMjM7ep7)$9w1G|*@#1-@SIjsc~2A+bm0J4ptNUp zHU|e_RSRtmP}!<^d8;r1@v&n!jtG(`&A{Is7{QqP99eVL5JQ(?ROPXUi}(|JE=@&O zjMe$(^_8TgoPg~oPc-Lj+moG)kJRqJi z`{LP`7J_;^&uE!wfG97BJ2MDg;MlJOX+Q?R9ntti;-6XKs5M&s6-?OB`CW!^*H)^= z`x=__$`hvQIZy!g=gsy6zslbP82%3R3|Z!e>zvL+F6%S^d{O zh_k7?1yMh!I*_=!Aj@Ab-#ZW2Ga1^*dno!n~GA(2C8FPsh*M#kF>H6v z=&?S1_uh+yM&i!N5fO+3U)4C1C>Y8JTfz~Wu+%GWFePv$Li2#)L5z<#N;s`WRd&Hy zJ)!-_;yr+v>~E#em3z@~EWi}Cud0Tvzm@q|I#=xP9}!d( L)a7erEyDf}`k%+0 literal 0 HcmV?d00001 diff --git a/docs/compute/_supported_methods_block_storage.rst b/docs/compute/_supported_methods_block_storage.rst index 23860dba34..ffbd0bc48f 100644 --- a/docs/compute/_supported_methods_block_storage.rst +++ b/docs/compute/_supported_methods_block_storage.rst @@ -47,6 +47,7 @@ Provider list volumes create volume destroy volume `OpenNebula (v3.8)`_ yes yes yes yes yes no no `OpenStack`_ yes yes yes yes yes no no `Opsource`_ no no no no no no no +`Outscale SAS`_ yes yes yes yes yes yes yes `Rackspace Cloud (Next Gen)`_ yes yes yes yes yes no no `Rackspace Cloud (First Gen)`_ yes yes yes yes yes no no `RimuHosting`_ no no no no no no no @@ -106,6 +107,7 @@ Provider list volumes create volume destroy volume .. _`OpenNebula (v3.8)`: http://opennebula.org/ .. _`OpenStack`: http://openstack.org/ .. _`Opsource`: http://www.opsource.net/ +.. _`Outscale SAS`: http://www.outscale.com .. _`Rackspace Cloud (Next Gen)`: http://www.rackspace.com .. _`Rackspace Cloud (First Gen)`: http://www.rackspace.com .. 
_`RimuHosting`: http://rimuhosting.com/ diff --git a/docs/compute/_supported_methods_image_management.rst b/docs/compute/_supported_methods_image_management.rst index 787d8586e0..9dcb206834 100644 --- a/docs/compute/_supported_methods_image_management.rst +++ b/docs/compute/_supported_methods_image_management.rst @@ -47,6 +47,7 @@ Provider list images get image create image delete `OpenNebula (v3.8)`_ yes no no no no `OpenStack`_ yes yes no no no `Opsource`_ yes no no no no +`Outscale SAS`_ yes yes yes yes yes `Rackspace Cloud (Next Gen)`_ yes yes yes yes no `Rackspace Cloud (First Gen)`_ yes yes yes yes no `RimuHosting`_ yes no no no no @@ -106,6 +107,7 @@ Provider list images get image create image delete .. _`OpenNebula (v3.8)`: http://opennebula.org/ .. _`OpenStack`: http://openstack.org/ .. _`Opsource`: http://www.opsource.net/ +.. _`Outscale SAS`: http://www.outscale.com .. _`Rackspace Cloud (Next Gen)`: http://www.rackspace.com .. _`Rackspace Cloud (First Gen)`: http://www.rackspace.com .. _`RimuHosting`: http://rimuhosting.com/ diff --git a/docs/compute/_supported_methods_key_pair_management.rst b/docs/compute/_supported_methods_key_pair_management.rst index 2bc8fedd08..d37d9875d8 100644 --- a/docs/compute/_supported_methods_key_pair_management.rst +++ b/docs/compute/_supported_methods_key_pair_management.rst @@ -47,6 +47,7 @@ Provider list key pairs get key pair create key pai `OpenNebula (v3.8)`_ no no no no no no `OpenStack`_ no no no no no no `Opsource`_ no no no no no no +`Outscale SAS`_ yes yes yes yes no yes `Rackspace Cloud (Next Gen)`_ yes yes yes yes no yes `Rackspace Cloud (First Gen)`_ no no no no no no `RimuHosting`_ no no no no no no @@ -106,6 +107,7 @@ Provider list key pairs get key pair create key pai .. _`OpenNebula (v3.8)`: http://opennebula.org/ .. _`OpenStack`: http://openstack.org/ .. _`Opsource`: http://www.opsource.net/ +.. _`Outscale SAS`: http://www.outscale.com .. _`Rackspace Cloud (Next Gen)`: http://www.rackspace.com .. 
_`Rackspace Cloud (First Gen)`: http://www.rackspace.com .. _`RimuHosting`: http://rimuhosting.com/ diff --git a/docs/compute/_supported_methods_main.rst b/docs/compute/_supported_methods_main.rst index 84ffffdce2..c2f728eec1 100644 --- a/docs/compute/_supported_methods_main.rst +++ b/docs/compute/_supported_methods_main.rst @@ -47,6 +47,7 @@ Provider list nodes create node reboot node destroy `OpenNebula (v3.8)`_ yes yes yes yes yes yes no `OpenStack`_ yes no yes yes yes yes no `Opsource`_ yes yes yes yes yes yes yes +`Outscale SAS`_ yes yes yes yes yes yes yes `Rackspace Cloud (Next Gen)`_ yes yes yes yes yes yes yes `Rackspace Cloud (First Gen)`_ yes yes yes yes yes yes yes `RimuHosting`_ yes yes yes yes yes yes yes @@ -106,6 +107,7 @@ Provider list nodes create node reboot node destroy .. _`OpenNebula (v3.8)`: http://opennebula.org/ .. _`OpenStack`: http://openstack.org/ .. _`Opsource`: http://www.opsource.net/ +.. _`Outscale SAS`: http://www.outscale.com .. _`Rackspace Cloud (Next Gen)`: http://www.rackspace.com .. _`Rackspace Cloud (First Gen)`: http://www.rackspace.com .. 
_`RimuHosting`: http://rimuhosting.com/ diff --git a/docs/compute/_supported_providers.rst b/docs/compute/_supported_providers.rst index 0a2c888018..5960b8cccf 100644 --- a/docs/compute/_supported_providers.rst +++ b/docs/compute/_supported_providers.rst @@ -1,64 +1,65 @@ -===================================== =========================================== =================== ============================================== ==================================== -Provider Documentation Provider constant Module Class Name -===================================== =========================================== =================== ============================================== ==================================== -`Abiquo`_ ABIQUO :mod:`libcloud.compute.drivers.abiquo` :class:`AbiquoNodeDriver` -`Bluebox Blocks`_ BLUEBOX :mod:`libcloud.compute.drivers.bluebox` :class:`BlueboxNodeDriver` -`Brightbox`_ BRIGHTBOX :mod:`libcloud.compute.drivers.brightbox` :class:`BrightboxNodeDriver` -`CloudFrames`_ :doc:`Click ` CLOUDFRAMES :mod:`libcloud.compute.drivers.cloudframes` :class:`CloudFramesNodeDriver` -`CloudSigma (API v2.0)`_ :doc:`Click ` CLOUDSIGMA :mod:`libcloud.compute.drivers.cloudsigma` :class:`CloudSigmaNodeDriver` -`CloudStack`_ :doc:`Click ` CLOUDSTACK :mod:`libcloud.compute.drivers.cloudstack` :class:`CloudStackNodeDriver` -`Digital Ocean`_ DIGITAL_OCEAN :mod:`libcloud.compute.drivers.digitalocean` :class:`DigitalOceanNodeDriver` -`Dreamhost`_ DREAMHOST :mod:`libcloud.compute.drivers.dreamhost` :class:`DreamhostNodeDriver` -`Amazon EC2`_ :doc:`Click ` EC2 :mod:`libcloud.compute.drivers.ec2` :class:`EC2NodeDriver` -`Amazon EC2 (ap-northeast-1)`_ EC2_AP_NORTHEAST :mod:`libcloud.compute.drivers.ec2` :class:`EC2APNENodeDriver` -`Amazon EC2 (ap-southeast-1)`_ EC2_AP_SOUTHEAST :mod:`libcloud.compute.drivers.ec2` :class:`EC2APSENodeDriver` -`Amazon EC2 (ap-southeast-2)`_ EC2_AP_SOUTHEAST2 :mod:`libcloud.compute.drivers.ec2` :class:`EC2APSESydneyNodeDriver` -`Amazon EC2 (eu-west-1)`_ 
EC2_EU :mod:`libcloud.compute.drivers.ec2` :class:`EC2EUNodeDriver` -`Amazon EC2 (eu-west-1)`_ EC2_EU_WEST :mod:`libcloud.compute.drivers.ec2` :class:`EC2EUNodeDriver` -`Amazon EC2 (sa-east-1)`_ EC2_SA_EAST :mod:`libcloud.compute.drivers.ec2` :class:`EC2SAEastNodeDriver` -`Amazon EC2`_ EC2_US_EAST :mod:`libcloud.compute.drivers.ec2` :class:`EC2NodeDriver` -`Amazon EC2 (us-west-1)`_ EC2_US_WEST :mod:`libcloud.compute.drivers.ec2` :class:`EC2USWestNodeDriver` -`Amazon EC2 (us-west-2)`_ EC2_US_WEST_OREGON :mod:`libcloud.compute.drivers.ec2` :class:`EC2USWestOregonNodeDriver` -`Enomaly Elastic Computing Platform`_ ECP :mod:`libcloud.compute.drivers.ecp` :class:`ECPNodeDriver` -`ElasticHosts`_ ELASTICHOSTS :mod:`libcloud.compute.drivers.elastichosts` :class:`ElasticHostsNodeDriver` -`ElasticHosts (syd-y)`_ ELASTICHOSTS_AU1 :mod:`libcloud.compute.drivers.elastichosts` :class:`ElasticHostsAU1NodeDriver` -`ElasticHosts (tor-p)`_ ELASTICHOSTS_CA1 :mod:`libcloud.compute.drivers.elastichosts` :class:`ElasticHostsCA1NodeDriver` -`ElasticHosts (cn-1)`_ ELASTICHOSTS_CN1 :mod:`libcloud.compute.drivers.elastichosts` :class:`ElasticHostsCN1NodeDriver` -`ElasticHosts (lon-p)`_ ELASTICHOSTS_UK1 :mod:`libcloud.compute.drivers.elastichosts` :class:`ElasticHostsUK1NodeDriver` -`ElasticHosts (lon-b)`_ ELASTICHOSTS_UK2 :mod:`libcloud.compute.drivers.elastichosts` :class:`ElasticHostsUK2NodeDriver` -`ElasticHosts (sat-p)`_ ELASTICHOSTS_US1 :mod:`libcloud.compute.drivers.elastichosts` :class:`ElasticHostsUS1NodeDriver` -`ElasticHosts (lax-p)`_ ELASTICHOSTS_US2 :mod:`libcloud.compute.drivers.elastichosts` :class:`ElasticHostsUS2NodeDriver` -`ElasticHosts (sjc-c)`_ ELASTICHOSTS_US3 :mod:`libcloud.compute.drivers.elastichosts` :class:`ElasticHostsUS3NodeDriver` -`Eucalyptus`_ EUCALYPTUS :mod:`libcloud.compute.drivers.ec2` :class:`EucNodeDriver` -`Exoscale`_ :doc:`Click ` EXOSCALE :mod:`libcloud.compute.drivers.exoscale` :class:`ExoscaleNodeDriver` -`Gandi`_ GANDI 
:mod:`libcloud.compute.drivers.gandi` :class:`GandiNodeDriver` -`Google Compute Engine`_ :doc:`Click ` GCE :mod:`libcloud.compute.drivers.gce` :class:`GCENodeDriver` -`GoGrid`_ GOGRID :mod:`libcloud.compute.drivers.gogrid` :class:`GoGridNodeDriver` -`HostVirtual`_ HOSTVIRTUAL :mod:`libcloud.compute.drivers.hostvirtual` :class:`HostVirtualNodeDriver` -`IBM SmartCloud Enterprise`_ IBM :mod:`libcloud.compute.drivers.ibm_sce` :class:`IBMNodeDriver` -`Ikoula`_ :doc:`Click ` IKOULA :mod:`libcloud.compute.drivers.ikoula` :class:`IkoulaNodeDriver` -`Joyent`_ JOYENT :mod:`libcloud.compute.drivers.joyent` :class:`JoyentNodeDriver` -`KTUCloud`_ KTUCLOUD :mod:`libcloud.compute.drivers.ktucloud` :class:`KTUCloudNodeDriver` -`Libvirt`_ :doc:`Click ` LIBVIRT :mod:`libcloud.compute.drivers.libvirt_driver` :class:`LibvirtNodeDriver` -`Linode`_ LINODE :mod:`libcloud.compute.drivers.linode` :class:`LinodeNodeDriver` -`NephoScale`_ NEPHOSCALE :mod:`libcloud.compute.drivers.nephoscale` :class:`NephoscaleNodeDriver` -`Nimbus`_ :doc:`Click ` NIMBUS :mod:`libcloud.compute.drivers.ec2` :class:`NimbusNodeDriver` -`Ninefold`_ NINEFOLD :mod:`libcloud.compute.drivers.ninefold` :class:`NinefoldNodeDriver` -`OpenNebula (v3.8)`_ OPENNEBULA :mod:`libcloud.compute.drivers.opennebula` :class:`OpenNebulaNodeDriver` -`OpenStack`_ :doc:`Click ` OPENSTACK :mod:`libcloud.compute.drivers.openstack` :class:`OpenStackNodeDriver` -`Opsource`_ OPSOURCE :mod:`libcloud.compute.drivers.opsource` :class:`OpsourceNodeDriver` -`Rackspace Cloud (Next Gen)`_ :doc:`Click ` RACKSPACE :mod:`libcloud.compute.drivers.rackspace` :class:`RackspaceNodeDriver` -`Rackspace Cloud (First Gen)`_ RACKSPACE_FIRST_GEN :mod:`libcloud.compute.drivers.rackspace` :class:`RackspaceFirstGenNodeDriver` -`RimuHosting`_ RIMUHOSTING :mod:`libcloud.compute.drivers.rimuhosting` :class:`RimuHostingNodeDriver` -`ServerLove`_ SERVERLOVE :mod:`libcloud.compute.drivers.serverlove` :class:`ServerLoveNodeDriver` -`skalicloud`_ SKALICLOUD 
:mod:`libcloud.compute.drivers.skalicloud` :class:`SkaliCloudNodeDriver` -`SoftLayer`_ SOFTLAYER :mod:`libcloud.compute.drivers.softlayer` :class:`SoftLayerNodeDriver` -`vCloud`_ TERREMARK :mod:`libcloud.compute.drivers.vcloud` :class:`TerremarkDriver` -`VCL`_ VCL :mod:`libcloud.compute.drivers.vcl` :class:`VCLNodeDriver` -`vCloud`_ :doc:`Click ` VCLOUD :mod:`libcloud.compute.drivers.vcloud` :class:`VCloudNodeDriver` -`Voxel VoxCLOUD`_ VOXEL :mod:`libcloud.compute.drivers.voxel` :class:`VoxelNodeDriver` -`vps.net`_ VPSNET :mod:`libcloud.compute.drivers.vpsnet` :class:`VPSNetNodeDriver` -===================================== =========================================== =================== ============================================== ==================================== +===================================== ============================================ =================== ============================================== ==================================== +Provider Documentation Provider constant Module Class Name +===================================== ============================================ =================== ============================================== ==================================== +`Abiquo`_ ABIQUO :mod:`libcloud.compute.drivers.abiquo` :class:`AbiquoNodeDriver` +`Bluebox Blocks`_ BLUEBOX :mod:`libcloud.compute.drivers.bluebox` :class:`BlueboxNodeDriver` +`Brightbox`_ BRIGHTBOX :mod:`libcloud.compute.drivers.brightbox` :class:`BrightboxNodeDriver` +`CloudFrames`_ :doc:`Click ` CLOUDFRAMES :mod:`libcloud.compute.drivers.cloudframes` :class:`CloudFramesNodeDriver` +`CloudSigma (API v2.0)`_ :doc:`Click ` CLOUDSIGMA :mod:`libcloud.compute.drivers.cloudsigma` :class:`CloudSigmaNodeDriver` +`CloudStack`_ :doc:`Click ` CLOUDSTACK :mod:`libcloud.compute.drivers.cloudstack` :class:`CloudStackNodeDriver` +`Digital Ocean`_ DIGITAL_OCEAN :mod:`libcloud.compute.drivers.digitalocean` :class:`DigitalOceanNodeDriver` +`Dreamhost`_ DREAMHOST 
:mod:`libcloud.compute.drivers.dreamhost` :class:`DreamhostNodeDriver` +`Amazon EC2`_ :doc:`Click ` EC2 :mod:`libcloud.compute.drivers.ec2` :class:`EC2NodeDriver` +`Amazon EC2 (ap-northeast-1)`_ EC2_AP_NORTHEAST :mod:`libcloud.compute.drivers.ec2` :class:`EC2APNENodeDriver` +`Amazon EC2 (ap-southeast-1)`_ EC2_AP_SOUTHEAST :mod:`libcloud.compute.drivers.ec2` :class:`EC2APSENodeDriver` +`Amazon EC2 (ap-southeast-2)`_ EC2_AP_SOUTHEAST2 :mod:`libcloud.compute.drivers.ec2` :class:`EC2APSESydneyNodeDriver` +`Amazon EC2 (eu-west-1)`_ EC2_EU :mod:`libcloud.compute.drivers.ec2` :class:`EC2EUNodeDriver` +`Amazon EC2 (eu-west-1)`_ EC2_EU_WEST :mod:`libcloud.compute.drivers.ec2` :class:`EC2EUNodeDriver` +`Amazon EC2 (sa-east-1)`_ EC2_SA_EAST :mod:`libcloud.compute.drivers.ec2` :class:`EC2SAEastNodeDriver` +`Amazon EC2`_ EC2_US_EAST :mod:`libcloud.compute.drivers.ec2` :class:`EC2NodeDriver` +`Amazon EC2 (us-west-1)`_ EC2_US_WEST :mod:`libcloud.compute.drivers.ec2` :class:`EC2USWestNodeDriver` +`Amazon EC2 (us-west-2)`_ EC2_US_WEST_OREGON :mod:`libcloud.compute.drivers.ec2` :class:`EC2USWestOregonNodeDriver` +`Enomaly Elastic Computing Platform`_ ECP :mod:`libcloud.compute.drivers.ecp` :class:`ECPNodeDriver` +`ElasticHosts`_ ELASTICHOSTS :mod:`libcloud.compute.drivers.elastichosts` :class:`ElasticHostsNodeDriver` +`ElasticHosts (syd-y)`_ ELASTICHOSTS_AU1 :mod:`libcloud.compute.drivers.elastichosts` :class:`ElasticHostsAU1NodeDriver` +`ElasticHosts (tor-p)`_ ELASTICHOSTS_CA1 :mod:`libcloud.compute.drivers.elastichosts` :class:`ElasticHostsCA1NodeDriver` +`ElasticHosts (cn-1)`_ ELASTICHOSTS_CN1 :mod:`libcloud.compute.drivers.elastichosts` :class:`ElasticHostsCN1NodeDriver` +`ElasticHosts (lon-p)`_ ELASTICHOSTS_UK1 :mod:`libcloud.compute.drivers.elastichosts` :class:`ElasticHostsUK1NodeDriver` +`ElasticHosts (lon-b)`_ ELASTICHOSTS_UK2 :mod:`libcloud.compute.drivers.elastichosts` :class:`ElasticHostsUK2NodeDriver` +`ElasticHosts (sat-p)`_ ELASTICHOSTS_US1 
:mod:`libcloud.compute.drivers.elastichosts` :class:`ElasticHostsUS1NodeDriver` +`ElasticHosts (lax-p)`_ ELASTICHOSTS_US2 :mod:`libcloud.compute.drivers.elastichosts` :class:`ElasticHostsUS2NodeDriver` +`ElasticHosts (sjc-c)`_ ELASTICHOSTS_US3 :mod:`libcloud.compute.drivers.elastichosts` :class:`ElasticHostsUS3NodeDriver` +`Eucalyptus`_ EUCALYPTUS :mod:`libcloud.compute.drivers.ec2` :class:`EucNodeDriver` +`Exoscale`_ :doc:`Click ` EXOSCALE :mod:`libcloud.compute.drivers.exoscale` :class:`ExoscaleNodeDriver` +`Gandi`_ GANDI :mod:`libcloud.compute.drivers.gandi` :class:`GandiNodeDriver` +`Google Compute Engine`_ :doc:`Click ` GCE :mod:`libcloud.compute.drivers.gce` :class:`GCENodeDriver` +`GoGrid`_ GOGRID :mod:`libcloud.compute.drivers.gogrid` :class:`GoGridNodeDriver` +`HostVirtual`_ HOSTVIRTUAL :mod:`libcloud.compute.drivers.hostvirtual` :class:`HostVirtualNodeDriver` +`IBM SmartCloud Enterprise`_ IBM :mod:`libcloud.compute.drivers.ibm_sce` :class:`IBMNodeDriver` +`Ikoula`_ :doc:`Click ` IKOULA :mod:`libcloud.compute.drivers.ikoula` :class:`IkoulaNodeDriver` +`Joyent`_ JOYENT :mod:`libcloud.compute.drivers.joyent` :class:`JoyentNodeDriver` +`KTUCloud`_ KTUCLOUD :mod:`libcloud.compute.drivers.ktucloud` :class:`KTUCloudNodeDriver` +`Libvirt`_ :doc:`Click ` LIBVIRT :mod:`libcloud.compute.drivers.libvirt_driver` :class:`LibvirtNodeDriver` +`Linode`_ LINODE :mod:`libcloud.compute.drivers.linode` :class:`LinodeNodeDriver` +`NephoScale`_ NEPHOSCALE :mod:`libcloud.compute.drivers.nephoscale` :class:`NephoscaleNodeDriver` +`Nimbus`_ :doc:`Click ` NIMBUS :mod:`libcloud.compute.drivers.ec2` :class:`NimbusNodeDriver` +`Ninefold`_ NINEFOLD :mod:`libcloud.compute.drivers.ninefold` :class:`NinefoldNodeDriver` +`OpenNebula (v3.8)`_ OPENNEBULA :mod:`libcloud.compute.drivers.opennebula` :class:`OpenNebulaNodeDriver` +`OpenStack`_ :doc:`Click ` OPENSTACK :mod:`libcloud.compute.drivers.openstack` :class:`OpenStackNodeDriver` +`Opsource`_ OPSOURCE 
:mod:`libcloud.compute.drivers.opsource` :class:`OpsourceNodeDriver` +`Outscale SAS`_ :doc:`Click ` OUTSCALE_SAS :mod:`libcloud.compute.drivers.ec2` :class:`OutscaleSASNodeDriver` +`Rackspace Cloud (Next Gen)`_ :doc:`Click ` RACKSPACE :mod:`libcloud.compute.drivers.rackspace` :class:`RackspaceNodeDriver` +`Rackspace Cloud (First Gen)`_ RACKSPACE_FIRST_GEN :mod:`libcloud.compute.drivers.rackspace` :class:`RackspaceFirstGenNodeDriver` +`RimuHosting`_ RIMUHOSTING :mod:`libcloud.compute.drivers.rimuhosting` :class:`RimuHostingNodeDriver` +`ServerLove`_ SERVERLOVE :mod:`libcloud.compute.drivers.serverlove` :class:`ServerLoveNodeDriver` +`skalicloud`_ SKALICLOUD :mod:`libcloud.compute.drivers.skalicloud` :class:`SkaliCloudNodeDriver` +`SoftLayer`_ SOFTLAYER :mod:`libcloud.compute.drivers.softlayer` :class:`SoftLayerNodeDriver` +`vCloud`_ TERREMARK :mod:`libcloud.compute.drivers.vcloud` :class:`TerremarkDriver` +`VCL`_ VCL :mod:`libcloud.compute.drivers.vcl` :class:`VCLNodeDriver` +`vCloud`_ :doc:`Click ` VCLOUD :mod:`libcloud.compute.drivers.vcloud` :class:`VCloudNodeDriver` +`Voxel VoxCLOUD`_ VOXEL :mod:`libcloud.compute.drivers.voxel` :class:`VoxelNodeDriver` +`vps.net`_ VPSNET :mod:`libcloud.compute.drivers.vpsnet` :class:`VPSNetNodeDriver` +===================================== ============================================ =================== ============================================== ==================================== .. _`Abiquo`: http://www.abiquo.com/ .. _`Bluebox Blocks`: http://bluebox.net @@ -106,6 +107,7 @@ Provider Documentation .. _`OpenNebula (v3.8)`: http://opennebula.org/ .. _`OpenStack`: http://openstack.org/ .. _`Opsource`: http://www.opsource.net/ +.. _`Outscale SAS`: http://www.outscale.com .. _`Rackspace Cloud (Next Gen)`: http://www.rackspace.com .. _`Rackspace Cloud (First Gen)`: http://www.rackspace.com .. 
_`RimuHosting`: http://rimuhosting.com/ diff --git a/docs/compute/drivers/outscale_sas.rst b/docs/compute/drivers/outscale_sas.rst new file mode 100644 index 0000000000..03e7d4e3c2 --- /dev/null +++ b/docs/compute/drivers/outscale_sas.rst @@ -0,0 +1,31 @@ +Outscale Driver Documentation +============================= + +`Outscale SAS`_ provides an IaaS platform allowing +developers to benefit from all the flexibility of the Cloud. +This IaaS platform relies on TINA OS, its Cloud manager which purpose is to +provide great performances on the Cloud. +TINA OS is an own developed software with APIs compatible with AWS EC2 (TM). + +.. figure:: /_static/images/provider_logos/outscale.png + :align: center + :width: 300 + :target: https://www.outscale.com/ + +Outscale users can start virtual machines in the following regions: + +* EU West (Paris France) Region +* US East (Boston US) Region +* (Soon) US East (New Jersey) Region +* (Soon) Asia (Hong Kong) Region + +Outscale SAS is an european company: prices are in €. + +API Docs +-------- + +.. autoclass:: libcloud.compute.drivers.ec2.OutscaleNodeDriver + :members: + :inherited-members: + +.. _`Outscale SAS`: https://www.outscale.com/ diff --git a/libcloud/compute/drivers/ec2.py b/libcloud/compute/drivers/ec2.py index 8c1b046ba1..494336936d 100644 --- a/libcloud/compute/drivers/ec2.py +++ b/libcloud/compute/drivers/ec2.py @@ -14,7 +14,7 @@ # limitations under the License. """ -Amazon EC2, Eucalyptus and Nimbus drivers. +Amazon EC2, Eucalyptus, Nimbus and Outscale drivers. """ import re @@ -47,6 +47,8 @@ 'API_VERSION', 'NAMESPACE', 'INSTANCE_TYPES', + 'OUTSCALE_INSTANCE_TYPES', + 'OUTSCALE_SAS_REGION_DETAILS', 'DEFAULT_EUCA_API_VERSION', 'EUCA_NAMESPACE', @@ -56,6 +58,8 @@ 'NimbusNodeDriver', 'EucNodeDriver', + 'OutscaleSASNodeDriver', + 'EC2NodeLocation', 'EC2ReservedNode', 'EC2Network', @@ -622,6 +626,340 @@ } } + +""" +Sizes must be hardcoded because Outscale doesn't provide an API to fetch them. 
+Outscale cloud instances share some names with EC2 but have differents +specifications so declare them in another constant. +""" +OUTSCALE_INSTANCE_TYPES = { + 't1.micro': { + 'id': 't1.micro', + 'name': 'Micro Instance', + 'ram': 615, + 'disk': 0, + 'bandwidth': None + }, + 'm1.small': { + 'id': 'm1.small', + 'name': 'Standard Small Instance', + 'ram': 1740, + 'disk': 150, + 'bandwidth': None + }, + 'm1.medium': { + 'id': 'm1.medium', + 'name': 'Standard Medium Instance', + 'ram': 3840, + 'disk': 420, + 'bandwidth': None + }, + 'm1.large': { + 'id': 'm1.large', + 'name': 'Standard Large Instance', + 'ram': 7680, + 'disk': 840, + 'bandwidth': None + }, + 'm1.xlarge': { + 'id': 'm1.xlarge', + 'name': 'Standard Extra Large Instance', + 'ram': 15360, + 'disk': 1680, + 'bandwidth': None + }, + 'c1.medium': { + 'id': 'c1.medium', + 'name': 'Compute Optimized Medium Instance', + 'ram': 1740, + 'disk': 340, + 'bandwidth': None + }, + 'c1.xlarge': { + 'id': 'c1.xlarge', + 'name': 'Compute Optimized Extra Large Instance', + 'ram': 7168, + 'disk': 1680, + 'bandwidth': None + }, + 'c3.large': { + 'id': 'c3.large', + 'name': 'Compute Optimized Large Instance', + 'ram': 3840, + 'disk': 32, + 'bandwidth': None + }, + 'c3.xlarge': { + 'id': 'c3.xlarge', + 'name': 'Compute Optimized Extra Large Instance', + 'ram': 7168, + 'disk': 80, + 'bandwidth': None + }, + 'c3.2xlarge': { + 'id': 'c3.2xlarge', + 'name': 'Compute Optimized Double Extra Large Instance', + 'ram': 15359, + 'disk': 160, + 'bandwidth': None + }, + 'c3.4xlarge': { + 'id': 'c3.4xlarge', + 'name': 'Compute Optimized Quadruple Extra Large Instance', + 'ram': 30720, + 'disk': 320, + 'bandwidth': None + }, + 'c3.8xlarge': { + 'id': 'c3.8xlarge', + 'name': 'Compute Optimized Eight Extra Large Instance', + 'ram': 61440, + 'disk': 640, + 'bandwidth': None + }, + 'm2.xlarge': { + 'id': 'm2.xlarge', + 'name': 'High Memory Extra Large Instance', + 'ram': 17510, + 'disk': 420, + 'bandwidth': None + }, + 'm2.2xlarge': { + 'id': 
'm2.2xlarge', + 'name': 'High Memory Double Extra Large Instance', + 'ram': 35020, + 'disk': 840, + 'bandwidth': None + }, + 'm2.4xlarge': { + 'id': 'm2.4xlarge', + 'name': 'High Memory Quadruple Extra Large Instance', + 'ram': 70042, + 'disk': 1680, + 'bandwidth': None + }, + 'nv1.small': { + 'id': 'nv1.small', + 'name': 'GPU Small Instance', + 'ram': 1739, + 'disk': 150, + 'bandwidth': None + }, + 'nv1.medium': { + 'id': 'nv1.medium', + 'name': 'GPU Medium Instance', + 'ram': 3839, + 'disk': 420, + 'bandwidth': None + }, + 'nv1.large': { + 'id': 'nv1.large', + 'name': 'GPU Large Instance', + 'ram': 7679, + 'disk': 840, + 'bandwidth': None + }, + 'nv1.xlarge': { + 'id': 'nv1.xlarge', + 'name': 'GPU Extra Large Instance', + 'ram': 15358, + 'disk': 1680, + 'bandwidth': None + }, + 'g2.2xlarge': { + 'id': 'g2.2xlarge', + 'name': 'GPU Double Extra Large Instance', + 'ram': 15360, + 'disk': 60, + 'bandwidth': None + }, + 'cc1.4xlarge': { + 'id': 'cc1.4xlarge', + 'name': 'Cluster Compute Quadruple Extra Large Instance', + 'ram': 24576, + 'disk': 1680, + 'bandwidth': None + }, + 'cc2.8xlarge': { + 'id': 'cc2.8xlarge', + 'name': 'Cluster Compute Eight Extra Large Instance', + 'ram': 65536, + 'disk': 3360, + 'bandwidth': None + }, + 'hi1.xlarge': { + 'id': 'hi1.xlarge', + 'name': 'High Storage Extra Large Instance', + 'ram': 15361, + 'disk': 1680, + 'bandwidth': None + }, + 'm3.xlarge': { + 'id': 'm3.xlarge', + 'name': 'High Storage Optimized Extra Large Instance', + 'ram': 15357, + 'disk': 0, + 'bandwidth': None + }, + 'm3.2xlarge': { + 'id': 'm3.2xlarge', + 'name': 'High Storage Optimized Double Extra Large Instance', + 'ram': 30720, + 'disk': 0, + 'bandwidth': None + }, + 'm3s.xlarge': { + 'id': 'm3s.xlarge', + 'name': 'High Storage Optimized Extra Large Instance', + 'ram': 15359, + 'disk': 0, + 'bandwidth': None + }, + 'm3s.2xlarge': { + 'id': 'm3s.2xlarge', + 'name': 'High Storage Optimized Double Extra Large Instance', + 'ram': 30719, + 'disk': 0, + 'bandwidth': None 
+ }, + 'cr1.8xlarge': { + 'id': 'cr1.8xlarge', + 'name': 'Memory Optimized Eight Extra Large Instance', + 'ram': 249855, + 'disk': 240, + 'bandwidth': None + }, + 'os1.2xlarge': { + 'id': 'os1.2xlarge', + 'name': 'Memory Optimized, High Storage, Passthrough NIC Double Extra ' + 'Large Instance', + 'ram': 65536, + 'disk': 60, + 'bandwidth': None + }, + 'os1.4xlarge': { + 'id': 'os1.4xlarge', + 'name': 'Memory Optimized, High Storage, Passthrough NIC Quadruple Ext' + 'ra Large Instance', + 'ram': 131072, + 'disk': 120, + 'bandwidth': None + }, + 'os1.8xlarge': { + 'id': 'os1.8xlarge', + 'name': 'Memory Optimized, High Storage, Passthrough NIC Eight Extra L' + 'arge Instance', + 'ram': 249856, + 'disk': 500, + 'bandwidth': None + }, + 'oc1.4xlarge': { + 'id': 'oc1.4xlarge', + 'name': 'Outscale Quadruple Extra Large Instance', + 'ram': 24575, + 'disk': 1680, + 'bandwidth': None + }, + 'oc2.8xlarge': { + 'id': 'oc2.8xlarge', + 'name': 'Outscale Eight Extra Large Instance', + 'ram': 65535, + 'disk': 3360, + 'bandwidth': None + } +} + + +""" +The function manipulating Outscale cloud regions will be overriden because +Outscale instances types are in a separate dict so also declare Outscale cloud +regions in another constant. 
+""" +OUTSCALE_SAS_REGION_DETAILS = { + 'eu-west-3': { + 'endpoint': 'api-ppd.outscale.com', + 'api_name': 'osc_sas_eu_west_3', + 'country': 'FRANCE', + 'instance_types': [ + 't1.micro', + 'm1.small', + 'm1.medium', + 'm1.large', + 'm1.xlarge', + 'c1.medium', + 'c1.xlarge', + 'm2.xlarge', + 'm2.2xlarge', + 'm2.4xlarge', + 'nv1.small', + 'nv1.medium', + 'nv1.large', + 'nv1.xlarge', + 'cc1.4xlarge', + 'cc2.8xlarge', + 'm3.xlarge', + 'm3.2xlarge', + 'cr1.8xlarge', + 'os1.8xlarge' + ] + }, + 'eu-west-1': { + 'endpoint': 'api.eu-west-1.outscale.com', + 'api_name': 'osc_sas_eu_west_1', + 'country': 'FRANCE', + 'instance_types': [ + 't1.micro', + 'm1.small', + 'm1.medium', + 'm1.large', + 'm1.xlarge', + 'c1.medium', + 'c1.xlarge', + 'm2.xlarge', + 'm2.2xlarge', + 'm2.4xlarge', + 'nv1.small', + 'nv1.medium', + 'nv1.large', + 'nv1.xlarge', + 'cc1.4xlarge', + 'cc2.8xlarge', + 'm3.xlarge', + 'm3.2xlarge', + 'cr1.8xlarge', + 'os1.8xlarge' + ] + }, + 'us-east-1': { + 'endpoint': 'api.us-east-1.outscale.com', + 'api_name': 'osc_sas_us_east_1', + 'country': 'USA', + 'instance_types': [ + 't1.micro', + 'm1.small', + 'm1.medium', + 'm1.large', + 'm1.xlarge', + 'c1.medium', + 'c1.xlarge', + 'm2.xlarge', + 'm2.2xlarge', + 'm2.4xlarge', + 'nv1.small', + 'nv1.medium', + 'nv1.large', + 'nv1.xlarge', + 'cc1.4xlarge', + 'cc2.8xlarge', + 'm3.xlarge', + 'm3.2xlarge', + 'cr1.8xlarge', + 'os1.8xlarge' + ] + } +} + + """ Define the extra dictionary for specific resources """ @@ -4261,3 +4599,259 @@ def ex_create_tags(self, resource, tags): @inherits: :class:`EC2NodeDriver.ex_create_tags` """ pass + + +class OutscaleConnection(EC2Connection): + """ + Connection class for Outscale + """ + + host = None + + +class OutscaleNodeDriver(BaseEC2NodeDriver): + """ + Base Outscale FCU node driver. + + Outscale per provider driver classes inherit from it. 
+ """ + + connectionCls = OutscaleConnection + name = 'Outscale' + website = 'http://www.outscale.com' + path = '/' + + NODE_STATE_MAP = { + 'pending': NodeState.PENDING, + 'running': NodeState.RUNNING, + 'shutting-down': NodeState.UNKNOWN, + 'terminated': NodeState.TERMINATED, + 'stopped': NodeState.STOPPED + } + + def create_node(self, **kwargs): + """ + Create a new Outscale node. The ex_iamprofile keyword is not supported. + + @inherits: :class:`BaseEC2NodeDriver.create_node` + + :keyword ex_keyname: The name of the key pair + :type ex_keyname: ``str`` + + :keyword ex_userdata: User data + :type ex_userdata: ``str`` + + :keyword ex_security_groups: A list of names of security groups to + assign to the node. + :type ex_security_groups: ``list`` + + :keyword ex_metadata: Key/Value metadata to associate with a node + :type ex_metadata: ``dict`` + + :keyword ex_mincount: Minimum number of instances to launch + :type ex_mincount: ``int`` + + :keyword ex_maxcount: Maximum number of instances to launch + :type ex_maxcount: ``int`` + + :keyword ex_clienttoken: Unique identifier to ensure idempotency + :type ex_clienttoken: ``str`` + + :keyword ex_blockdevicemappings: ``list`` of ``dict`` block device + mappings. + :type ex_blockdevicemappings: ``list`` of ``dict`` + + :keyword ex_ebs_optimized: EBS-Optimized if True + :type ex_ebs_optimized: ``bool`` + """ + if 'ex_iamprofile' in kwargs: + raise NotImplementedError("ex_iamprofile not implemented") + return super(OutscaleNodeDriver, self).create_node(**kwargs) + + def ex_create_network(self, cidr_block, name=None): + """ + Create a network/VPC. Outscale does not support instance_tenancy. 
+ + :param cidr_block: The CIDR block assigned to the network + :type cidr_block: ``str`` + + :param name: An optional name for the network + :type name: ``str`` + + :return: Dictionary of network properties + :rtype: ``dict`` + """ + return super(OutscaleNodeDriver, self).ex_create_network(cidr_block, + name=name) + + def ex_modify_instance_attribute(self, node, disable_api_termination=None, + ebs_optimized=None, group_id=None, + source_dest_check=None, user_data=None, + instance_type=None): + """ + Modify node attributes. + Ouscale support the following attributes: + 'DisableApiTermination.Value', 'EbsOptimized', 'GroupId.n', + 'SourceDestCheck.Value', 'UserData.Value', + 'InstanceType.Value' + + :param node: Node instance + :type node: :class:`Node` + + :param attributes: Dictionary with node attributes + :type attributes: ``dict`` + + :return: True on success, False otherwise. + :rtype: ``bool`` + """ + attributes = {} + + if disable_api_termination is not None: + attributes['DisableApiTermination.Value'] = disable_api_termination + if ebs_optimized is not None: + attributes['EbsOptimized'] = ebs_optimized + if group_id is not None: + attributes['GroupId.n'] = group_id + if source_dest_check is not None: + attributes['SourceDestCheck.Value'] = source_dest_check + if user_data is not None: + attributes['UserData.Value'] = user_data + if instance_type is not None: + attributes['InstanceType.Value'] = instance_type + + return super(OutscaleNodeDriver, self).ex_modify_instance_attribute( + node, attributes) + + def ex_register_image(self, name, description=None, architecture=None, + root_device_name=None, block_device_mapping=None): + """ + Registers a Machine Image based off of an EBS-backed instance. + Can also be used to create images from snapshots. + + Outscale does not support image_location, kernel_id and ramdisk_id. 
+ + :param name: The name for the AMI being registered + :type name: ``str`` + + :param description: The description of the AMI (optional) + :type description: ``str`` + + :param architecture: The architecture of the AMI (i386/x86_64) + (optional) + :type architecture: ``str`` + + :param root_device_name: The device name for the root device + Required if registering a EBS-backed AMI + :type root_device_name: ``str`` + + :param block_device_mapping: A dictionary of the disk layout + (optional) + :type block_device_mapping: ``dict`` + + :rtype: :class:`NodeImage` + """ + return super(OutscaleNodeDriver, self).ex_register_image( + name, description=description, architecture=architecture, + root_device_name=root_device_name, + block_device_mapping=block_device_mapping) + + def ex_copy_image(self, source_region, image, name=None, description=None): + """ + Outscale does not support copying images. + + @inherits: :class:`EC2NodeDriver.ex_copy_image` + """ + raise NotImplementedError(self._not_implemented_msg) + + def ex_get_limits(self): + """ + Outscale does not support getting limits. + + @inherits: :class:`EC2NodeDriver.ex_get_limits` + """ + raise NotImplementedError(self._not_implemented_msg) + + def ex_create_network_interface(self, subnet, name=None, + description=None, + private_ip_address=None): + """ + Outscale does not support creating a network interface within a VPC. + + @inherits: :class:`EC2NodeDriver.ex_create_network_interface` + """ + raise NotImplementedError(self._not_implemented_msg) + + def ex_delete_network_interface(self, network_interface): + """ + Outscale does not support deleting a network interface within a VPC. + + @inherits: :class:`EC2NodeDriver.ex_delete_network_interface` + """ + raise NotImplementedError(self._not_implemented_msg) + + def ex_attach_network_interface_to_node(self, network_interface, + node, device_index): + """ + Outscale does not support attaching a network interface. 
+ + @inherits: :class:`EC2NodeDriver.ex_attach_network_interface_to_node` + """ + raise NotImplementedError(self._not_implemented_msg) + + def ex_detach_network_interface(self, attachment_id, force=False): + """ + Outscale does not support detaching a network interface + + @inherits: :class:`EC2NodeDriver.ex_detach_network_interface` + """ + raise NotImplementedError(self._not_implemented_msg) + + +class OutscaleSASNodeDriver(OutscaleNodeDriver): + """ + Outscale SAS node driver + """ + name = 'Outscale SAS' + type = Provider.OUTSCALE_SAS + + def __init__(self, key, secret=None, secure=True, host=None, port=None, + region='us-east-1', **kwargs): + if hasattr(self, '_region'): + region = self._region + + if region not in OUTSCALE_SAS_REGION_DETAILS.keys(): + raise ValueError('Invalid region: %s' % (region)) + + details = OUTSCALE_SAS_REGION_DETAILS[region] + self.region_name = region + self.api_name = details['api_name'] + self.country = details['country'] + + self.connectionCls.host = details['endpoint'] + + self._not_implemented_msg =\ + 'This method is not supported in the Outscale driver' + + super(OutscaleNodeDriver, self).__init__(key=key, secret=secret, + secure=secure, host=host, + port=port, **kwargs) + + def list_sizes(self, location=None): + """ + List available instance flavors/sizes + + This override the EC2 default method in order to use Outscale infos. 
+ + :rtype: ``list`` of :class:`NodeSize` + """ + available_types =\ + OUTSCALE_SAS_REGION_DETAILS[self.region_name]['instance_types'] + sizes = [] + + for instance_type in available_types: + attributes = OUTSCALE_INSTANCE_TYPES[instance_type] + attributes = copy.deepcopy(attributes) + price = self._get_size_price(size_id=instance_type) + attributes.update({'price': price}) + sizes.append(NodeSize(driver=self, **attributes)) + return sizes diff --git a/libcloud/compute/providers.py b/libcloud/compute/providers.py index 8cc5a3c1c5..87a46b7214 100644 --- a/libcloud/compute/providers.py +++ b/libcloud/compute/providers.py @@ -139,6 +139,8 @@ ('libcloud.compute.drivers.exoscale', 'ExoscaleNodeDriver'), Provider.IKOULA: ('libcloud.compute.drivers.ikoula', 'IkoulaNodeDriver'), + Provider.OUTSCALE_SAS: + ('libcloud.compute.drivers.ec2', 'OutscaleSASNodeDriver'), # Deprecated Provider.CLOUDSIGMA_US: diff --git a/libcloud/compute/types.py b/libcloud/compute/types.py index b736b2f7cf..561474e0bc 100644 --- a/libcloud/compute/types.py +++ b/libcloud/compute/types.py @@ -74,6 +74,7 @@ class Provider(object): :cvar NEPHOSCALE: NephoScale driver :cvar EXOSCALE: Exoscale driver. :cvar IKOULA: Ikoula driver. + :cvar OUTSCALE_SAS: Outscale SAS driver. 
""" DUMMY = 'dummy' EC2 = 'ec2_us_east' @@ -117,6 +118,7 @@ class Provider(object): CLOUDFRAMES = 'cloudframes' EXOSCALE = 'exoscale' IKOULA = 'ikoula' + OUTSCALE_SAS = 'outscale_sas' # Deprecated constants which are still supported EC2_US_EAST = 'ec2_us_east' diff --git a/libcloud/data/pricing.json b/libcloud/data/pricing.json index 9e08d3027f..6034ec49e4 100644 --- a/libcloud/data/pricing.json +++ b/libcloud/data/pricing.json @@ -421,6 +421,75 @@ "minimum": 15, "maximum": 200, "low": 50 + }, + "osc_sas_eu_west_3": { + "t1.micro": "0.040", + "m1.small": "0.090", + "m1.medium": "0.130", + "m1.large": "0.360", + "m1.xlarge": "0.730", + "c1.medium": "0.230", + "c1.xlarge": "0.900", + "m2.xlarge": "0.460", + "m2.2xlarge": "0.920", + "m2.4xlarge": "1.840", + "nv1.small": "5.220", + "nv1.medium": "5.310", + "nv1.large": "5.490", + "nv1.xlarge": "5.860", + "cc1.4xlarge": "1.460", + "cc2.8xlarge": "2.700", + "m3.xlarge": "0.780", + "m3.2xlarge": "1.560", + "cr1.8xlarge": "3.750", + "os1.8xlarge": "6.400", + "os1.8xlarge": "6.400" + }, + "osc_sas_eu_west_1": { + "t1.micro": "0.040", + "m1.small": "0.090", + "m1.medium": "0.130", + "m1.large": "0.360", + "m1.xlarge": "0.730", + "c1.medium": "0.230", + "c1.xlarge": "0.900", + "m2.xlarge": "0.460", + "m2.2xlarge": "0.920", + "m2.4xlarge": "1.840", + "nv1.small": "5.220", + "nv1.medium": "5.310", + "nv1.large": "5.490", + "nv1.xlarge": "5.860", + "cc1.4xlarge": "1.460", + "cc2.8xlarge": "2.700", + "m3.xlarge": "0.780", + "m3.2xlarge": "1.560", + "cr1.8xlarge": "3.750", + "os1.8xlarge": "6.400", + "os1.8xlarge": "6.400" + }, + "osc_sas_us_east_1": { + "t1.micro": "0.020", + "m1.small": "0.070", + "m1.medium": "0.180", + "m1.large": "0.260", + "m1.xlarge": "0.730", + "c1.medium": "0.170", + "c1.xlarge": "0.660", + "m2.xlarge": "0.460", + "m2.2xlarge": "1.020", + "m2.4xlarge": "2.040", + "nv1.small": "5.220", + "nv1.medium": "5.310", + "nv1.large": "5.490", + "nv1.xlarge": "5.860", + "cc1.4xlarge": "1.610", + "cc2.8xlarge": 
"2.700", + "m3.xlarge": "0.550", + "m3.2xlarge": "1.560", + "cr1.8xlarge": "3.750", + "os1.8xlarge": "6.400", + "os1.8xlarge": "6.400" } }, "storage": {}, diff --git a/libcloud/test/compute/test_ec2.py b/libcloud/test/compute/test_ec2.py index 64010f40d9..0a2cf1eb28 100644 --- a/libcloud/test/compute/test_ec2.py +++ b/libcloud/test/compute/test_ec2.py @@ -30,6 +30,7 @@ from libcloud.compute.drivers.ec2 import EC2APSESydneyNodeDriver from libcloud.compute.drivers.ec2 import EC2SAEastNodeDriver from libcloud.compute.drivers.ec2 import NimbusNodeDriver, EucNodeDriver +from libcloud.compute.drivers.ec2 import OutscaleSASNodeDriver from libcloud.compute.drivers.ec2 import IdempotentParamError from libcloud.compute.drivers.ec2 import REGION_DETAILS from libcloud.compute.drivers.ec2 import ExEC2AvailabilityZone @@ -1537,5 +1538,97 @@ def test_list_sizes(self): self.assertTrue('m3.xlarge' in ids) +class OutscaleTests(EC2Tests): + + def setUp(self): + OutscaleSASNodeDriver.connectionCls.conn_classes = (None, EC2MockHttp) + EC2MockHttp.use_param = 'Action' + EC2MockHttp.type = None + self.driver = OutscaleSASNodeDriver(key=EC2_PARAMS[0], + secret=EC2_PARAMS[1], + host='some.outscalecloud.com') + + def test_ex_create_network(self): + # overridden from EC2Tests -- Outscale don't support instance_tenancy + vpc = self.driver.ex_create_network('192.168.55.0/24', + name='Test VPC') + + self.assertEqual('vpc-ad3527cf', vpc.id) + self.assertEqual('192.168.55.0/24', vpc.cidr_block) + self.assertEqual('pending', vpc.extra['state']) + + def test_ex_copy_image(self): + # overridden from EC2Tests -- Outscale does not support copying images + image = self.driver.list_images()[0] + try: + self.driver.ex_copy_image('us-east-1', image, + name='Faux Image', + description='Test Image Copy') + except NotImplementedError: + pass + else: + self.fail('Exception was not thrown') + + def test_ex_get_limits(self): + # overridden from EC2Tests -- Outscale does not support getting limits + try: + 
self.driver.ex_get_limits() + except NotImplementedError: + pass + else: + self.fail('Exception was not thrown') + + def test_ex_create_network_interface(self): + # overridden from EC2Tests -- Outscale don't allow creating interfaces + subnet = self.driver.ex_list_subnets()[0] + try: + self.driver.ex_create_network_interface( + subnet, + name='Test Interface', + description='My Test') + except NotImplementedError: + pass + else: + self.fail('Exception was not thrown') + + def test_ex_delete_network_interface(self): + # overridden from EC2Tests -- Outscale don't allow deleting interfaces + interface = self.driver.ex_list_network_interfaces()[0] + try: + self.driver.ex_delete_network_interface(interface) + except NotImplementedError: + pass + else: + self.fail('Exception was not thrown') + + def test_ex_attach_network_interface_to_node(self): + # overridden from EC2Tests -- Outscale don't allow attaching interfaces + node = self.driver.list_nodes()[0] + interface = self.driver.ex_list_network_interfaces()[0] + try: + self.driver.ex_attach_network_interface_to_node(interface, node, 1) + except NotImplementedError: + pass + else: + self.fail('Exception was not thrown') + + def test_ex_detach_network_interface(self): + # overridden from EC2Tests -- Outscale don't allow detaching interfaces + try: + self.driver.ex_detach_network_interface('eni-attach-2b588b47') + except NotImplementedError: + pass + else: + self.fail('Exception was not thrown') + + def test_list_sizes(self): + sizes = self.driver.list_sizes() + + ids = [s.id for s in sizes] + self.assertTrue('m1.small' in ids) + self.assertTrue('m1.large' in ids) + self.assertTrue('m1.xlarge' in ids) + + if __name__ == '__main__': sys.exit(unittest.main()) From 08f6c2c45e9e58e173d00662efcd5e06cd21d3cf Mon Sep 17 00:00:00 2001 From: Eric Johnson Date: Thu, 8 May 2014 19:35:19 +0000 Subject: [PATCH 046/315] Fix Google DNS to use valid record split character Closes #290 Signed-off-by: Tomaz Muraus --- 
libcloud/dns/drivers/google.py | 10 ++-- .../google/get_zone_does_not_exists.json | 4 +- libcloud/test/dns/fixtures/google/zone.json | 1 + libcloud/test/dns/test_google.py | 50 +++++++++---------- 4 files changed, 34 insertions(+), 31 deletions(-) create mode 100644 libcloud/test/dns/fixtures/google/zone.json diff --git a/libcloud/dns/drivers/google.py b/libcloud/dns/drivers/google.py index aabc783098..44a1231db5 100644 --- a/libcloud/dns/drivers/google.py +++ b/libcloud/dns/drivers/google.py @@ -124,7 +124,7 @@ def get_record(self, zone_id, record_id): :rtype: :class:`Record` """ - (record_name, record_type) = record_id.split('-') + (record_type, record_name) = record_id.split(':', 1) params = { 'name': record_name, @@ -142,7 +142,8 @@ def get_record(self, zone_id, record_id): zone_id=zone_id) if len(response['rrsets']) > 0: - return self._to_record(response['rrsets'][0], zone_id) + zone = self.get_zone(zone_id) + return self._to_record(response['rrsets'][0], zone) raise RecordDoesNotExistError(value='', driver=self.connection.driver, record_id=record_id) @@ -319,8 +320,9 @@ def _to_zone(self, r): extra['creationTime'] = r.get('creationTime') extra['nameServers'] = r.get('nameServers') + extra['id'] = r.get('id') - return Zone(id=r['id'], domain=r['dnsName'], + return Zone(id=r['name'], domain=r['dnsName'], type='master', ttl=0, driver=self, extra=extra) def _to_records(self, response, zone): @@ -330,7 +332,7 @@ def _to_records(self, response, zone): return records def _to_record(self, r, zone): - record_id = '%s-%s' % (r['name'], r['type']) + record_id = '%s:%s' % (r['type'], r['name']) return Record(id=record_id, name=r['name'], type=r['type'], data=r, zone=zone, driver=self, extra={}) diff --git a/libcloud/test/dns/fixtures/google/get_zone_does_not_exists.json b/libcloud/test/dns/fixtures/google/get_zone_does_not_exists.json index ddd959d4bc..a38eb6aac1 100644 --- a/libcloud/test/dns/fixtures/google/get_zone_does_not_exists.json +++ 
b/libcloud/test/dns/fixtures/google/get_zone_does_not_exists.json @@ -4,10 +4,10 @@ { "domain": "global", "reason": "notFound", - "message": "The 'parameters.managedZone' resource named '2' does not exist." + "message": "The 'parameters.managedZone' resource named 'example-com' does not exist." } ], "code": 404, - "message": "The 'parameters.managedZone' resource named '2' does not exist." + "message": "The 'parameters.managedZone' resource named 'example-com' does not exist." } } diff --git a/libcloud/test/dns/fixtures/google/zone.json b/libcloud/test/dns/fixtures/google/zone.json new file mode 100644 index 0000000000..a380c6ee38 --- /dev/null +++ b/libcloud/test/dns/fixtures/google/zone.json @@ -0,0 +1 @@ +{"kind": "dns#managedZone", "name": "example-com", "nameServers": ["ns-cloud-e1.googledomains.com.", "ns-cloud-e2.googledomains.com.", "ns-cloud-e3.googledomains.com.", "ns-cloud-e4.googledomains.com."], "creationTime": "2014-03-29T22:45:47.618Z", "dnsName": "example.com.", "id": "1", "description": ""} diff --git a/libcloud/test/dns/test_google.py b/libcloud/test/dns/test_google.py index 2a4b8c7d34..e6d0595bac 100644 --- a/libcloud/test/dns/test_google.py +++ b/libcloud/test/dns/test_google.py @@ -56,46 +56,48 @@ def test_list_records(self): self.assertEqual(len(records), 3) def test_get_zone(self): - zone = self.driver.get_zone(1) - self.assertEqual(zone.id, '1') + zone = self.driver.get_zone('example-com') + self.assertEqual(zone.id, 'example-com') self.assertEqual(zone.domain, 'example.com.') def test_get_zone_does_not_exist(self): GoogleDNSMockHttp.type = 'ZONE_DOES_NOT_EXIST' try: - self.driver.get_zone(2) + self.driver.get_zone('example-com') except ZoneDoesNotExistError: e = sys.exc_info()[1] - self.assertEqual(e.zone_id, 2) + self.assertEqual(e.zone_id, 'example-com') else: self.fail('Exception not thrown') def test_get_record(self): GoogleDNSMockHttp.type = 'FILTER_ZONES' zone = self.driver.list_zones()[0] - record = self.driver.get_record(zone.id, 
"foo.example.com.-A") + record = self.driver.get_record(zone.id, "A:foo.example.com.") + self.assertEqual(record.id, 'A:foo.example.com.') self.assertEqual(record.name, 'foo.example.com.') self.assertEqual(record.type, 'A') + self.assertEqual(record.zone.id, 'example-com') def test_get_record_zone_does_not_exist(self): GoogleDNSMockHttp.type = 'ZONE_DOES_NOT_EXIST' try: - self.driver.get_record(2, 'a-a') + self.driver.get_record('example-com', 'a:a') except ZoneDoesNotExistError: e = sys.exc_info()[1] - self.assertEqual(e.zone_id, 2) + self.assertEqual(e.zone_id, 'example-com') else: self.fail('Exception not thrown') def test_get_record_record_does_not_exist(self): GoogleDNSMockHttp.type = 'RECORD_DOES_NOT_EXIST' try: - self.driver.get_record(1, "foo-A") + self.driver.get_record('example-com', "A:foo") except RecordDoesNotExistError: e = sys.exc_info()[1] - self.assertEqual(e.record_id, 'foo-A') + self.assertEqual(e.record_id, 'A:foo') else: self.fail('Exception not thrown') @@ -107,7 +109,7 @@ def test_create_zone(self): self.assertEqual(len(zone.extra['nameServers']), 4) def test_delete_zone(self): - zone = self.driver.get_zone(1) + zone = self.driver.get_zone('example-com') res = self.driver.delete_zone(zone) self.assertTrue(res) @@ -128,52 +130,50 @@ def _dns_v1beta1_projects_project_name_managedZones_FILTER_ZONES( body = self.fixtures.load('zone_list.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - def _dns_v1beta1_projects_project_name_managedZones_1_rrsets_FILTER_ZONES( + def _dns_v1beta1_projects_project_name_managedZones_example_com_rrsets_FILTER_ZONES( self, method, url, body, headers): body = self.fixtures.load('record.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - def _dns_v1beta1_projects_project_name_managedZones_1_rrsets( + def _dns_v1beta1_projects_project_name_managedZones_example_com_rrsets( self, method, url, body, headers): body = self.fixtures.load('records_list.json') return (httplib.OK, body, {}, 
httplib.responses[httplib.OK]) - def _dns_v1beta1_projects_project_name_managedZones_1(self, method, - url, body, - headers): + def _dns_v1beta1_projects_project_name_managedZones_example_com( + self, method, url, body, headers): if method == 'GET': body = self.fixtures.load('managed_zones_1.json') elif method == 'DELETE': body = None return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - def _dns_v1beta1_projects_project_name_managedZones_2_ZONE_DOES_NOT_EXIST( + def _dns_v1beta1_projects_project_name_managedZones_example_com_ZONE_DOES_NOT_EXIST( self, method, url, body, headers): body = self.fixtures.load('get_zone_does_not_exists.json') return (httplib.NOT_FOUND, body, {}, httplib.responses[httplib.NOT_FOUND]) - def _dns_v1beta1_projects_project_name_managedZones_3_ZONE_DOES_NOT_EXIST( - self, method, url, body, headers): - body = self.fixtures.load('get_zone_does_not_exists.json') - return (httplib.NOT_FOUND, body, {}, - httplib.responses[httplib.NOT_FOUND]) - - def _dns_v1beta1_projects_project_name_managedZones_1_RECORD_DOES_NOT_EXIST( + def _dns_v1beta1_projects_project_name_managedZones_example_com_RECORD_DOES_NOT_EXIST( self, method, url, body, headers): body = self.fixtures.load('managed_zones_1.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - def _dns_v1beta1_projects_project_name_managedZones_1_rrsets_RECORD_DOES_NOT_EXIST( + def _dns_v1beta1_projects_project_name_managedZones_example_com_rrsets_RECORD_DOES_NOT_EXIST( self, method, url, body, headers): body = self.fixtures.load('no_record.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) - def _dns_v1beta1_projects_project_name_managedZones_2_rrsets_ZONE_DOES_NOT_EXIST( + def _dns_v1beta1_projects_project_name_managedZones_example_com_rrsets_ZONE_DOES_NOT_EXIST( self, method, url, body, headers): body = self.fixtures.load('get_zone_does_not_exists.json') return (httplib.NOT_FOUND, body, {}, httplib.responses[httplib.NOT_FOUND]) + def 
_dns_v1beta1_projects_project_name_managedZones_example_com_FILTER_ZONES( + self, method, url, body, headers): + body = self.fixtures.load('zone.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + if __name__ == '__main__': sys.exit(unittest.main()) From c452e7dbc17efefdbfadd57820dc3f2ae0d126b4 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Wed, 14 May 2014 08:55:12 +0200 Subject: [PATCH 047/315] Use --head flag instead of -X HEAD when logging curl lines for HEAD requests in debug mode. Reported by Brian Metzler, part of LIBCLOUD-552. --- CHANGES.rst | 7 +++++++ libcloud/common/base.py | 6 +++++- libcloud/test/test_connection.py | 21 +++++++++++++++++++++ 3 files changed, 33 insertions(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index 0e4ba16198..6eb473c741 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -12,6 +12,13 @@ General all the available images in the EC2 driver). [Andrew Mann] +- Use --head flag instead of -X HEAD when logging curl lines for HEAD requests + in debug mode. + + Reported by Brian Metzler. 
+ (LIBCLOUD-552) + [Tomaz Muraus] + Compute ~~~~~~~ diff --git a/libcloud/common/base.py b/libcloud/common/base.py index 5848c59c88..a42f228243 100644 --- a/libcloud/common/base.py +++ b/libcloud/common/base.py @@ -331,7 +331,11 @@ def makefile(self, *args, **kwargs): def _log_curl(self, method, url, body, headers): cmd = ["curl", "-i"] - cmd.extend(["-X", pquote(method)]) + if method.lower() == 'head': + # HEAD method need special handling + cmd.extend(["--head"]) + else: + cmd.extend(["-X", pquote(method)]) for h in headers: cmd.extend(["-H", pquote("%s: %s" % (h, headers[h]))]) diff --git a/libcloud/test/test_connection.py b/libcloud/test/test_connection.py index ed8999db1a..5df79191a0 100644 --- a/libcloud/test/test_connection.py +++ b/libcloud/test/test_connection.py @@ -21,6 +21,7 @@ from libcloud.test import unittest from libcloud.common.base import Connection +from libcloud.common.base import LoggingConnection class ConnectionClassTestCase(unittest.TestCase): @@ -182,5 +183,25 @@ def responseCls(connection, response): self.assertEqual(con.context, {}) + def test_log_curl(self): + url = '/test/path' + body = None + headers = {} + + con = LoggingConnection() + con.protocol = 'http' + con.host = 'example.com' + con.port = 80 + + for method in ['GET', 'POST', 'PUT', 'DELETE']: + cmd = con._log_curl(method=method, url=url, body=body, + headers=headers) + self.assertEqual(cmd, 'curl -i -X %s --compress http://example.com:80/test/path' % + (method)) + + # Should use --head for head requests + cmd = con._log_curl(method='HEAD', url=url, body=body, headers=headers) + self.assertEqual(cmd, 'curl -i --head --compress http://example.com:80/test/path') + if __name__ == '__main__': sys.exit(unittest.main()) From a91147f2b5d820bf2e4c2717b8fb4352d1491502 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Wed, 14 May 2014 12:24:42 +0200 Subject: [PATCH 048/315] Add new native driver for HP Public cloud (Helion). 
--- CHANGES.rst | 4 + .../images/misc/hp_cloud_console_projects.jpg | Bin 0 -> 70787 bytes .../_static/images/provider_logos/hpcloud.png | Bin 0 -> 13309 bytes docs/compute/drivers/hpcloud.rst | 44 ++++++++ .../compute/openstack/hpcloud_native.py | 13 +++ libcloud/compute/drivers/hpcloud.py | 99 ++++++++++++++++++ libcloud/compute/providers.py | 2 + libcloud/compute/types.py | 3 + 8 files changed, 165 insertions(+) create mode 100644 docs/_static/images/misc/hp_cloud_console_projects.jpg create mode 100644 docs/_static/images/provider_logos/hpcloud.png create mode 100644 docs/compute/drivers/hpcloud.rst create mode 100644 docs/examples/compute/openstack/hpcloud_native.py create mode 100644 libcloud/compute/drivers/hpcloud.py diff --git a/CHANGES.rst b/CHANGES.rst index 6eb473c741..ca519adbb8 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -134,6 +134,10 @@ Compute (GITHUB-285) [Benoit Canet] +- Add new driver for HP Public Cloud (Helion) available via Provider.HPCLOUD + constant. + [Tomaz Muraus] + Load Balancer ~~~~~~~~~~~~~ diff --git a/docs/_static/images/misc/hp_cloud_console_projects.jpg b/docs/_static/images/misc/hp_cloud_console_projects.jpg new file mode 100644 index 0000000000000000000000000000000000000000..70d0a4a487dc156ad2b4c771275581817b757a77 GIT binary patch literal 70787 zcmeFa2Urwawm(`(5=nyO)G8n-Ny)h}kVHgKvVe*b1te!0L_m-rARw_tqLL-YCLebyrp{Pmh6 zCw~15UVHcgNB;F7jsuqg(j!L>f4~2MGdQt%!!aw>9iDkfTLTBd*bMQjEbj*`45 z*CHi34IE)0A!Q&TwgV85R&tUbuOGx8(tZS_k%ICl)iG-Dg39B-5fW0;BV?q9RD);z z!Seta13BZV3l}L)=-#C~?aU+|6q|XJ_flC4v)(5ppTs?v$5h8ySlQS)_|FIko;@ci zB`qT>Cx7{hs+zin=GB|G^bHJ+j7==>Kd`d4v9)t`bNBG{^7aXS5)vBrG&~|M{zXD! z(#zzO*IC&)xq10--o7iZsI024sjX{mYwzgndf)wFaA^4R$mo}^W3zMf3ya^DmRD9$ zTiZLk=snE-!69EH0O>Dmf$zU?_B*~9K)#NUk&%*79`Z$U#Pg7G1~T$f7bqAn>Qdfy zK5<$+=qS^r*vzsPDqaaaB=bF&PsdpJBxm_ihpheJ?7zm?;j=HHTOtq~T^$jae}O=LoTecH%O*si8(Ttw>Fx9Qq&J(s7e8WZN(h~*fFt7gN zPLAf)c)t~>>JYBK5jvY3zz!RzBm(h! 
z7C4tA0(D;SAJjs7Q6Mg6;=gv#7XR)6$^!kR{fUGXsHT&l=6^R*Qby9zCLs)oMG(J@1>}YlaOhH9{)LMH@6F$iqe}#6dXfEvkP;mHP=CDDKG8G=*-<^2@Ym@MkAOe~8 zL_pv>0<-AD3g5}49AJO}XJI~2b#?0w~s z00H{l2R1$RL$2a~l`9Oh;pUMzV8`<4_Cy=k*2l*Nw$J6r1C_A8`u^rH`eo@J*Edbc zd8SDW;l&?y&ssia6$y$T59)MQ+Yp_(cc`MDhx(xh#4nx*Cn%U4BxcSQvCNH35&Tbg z`sR*1F@JpCxy?00MsraycECE1Daj-z^r5?~WKU|;nQ6T=D#>|eQ5AKYVa@r7~#|qX)TI?hD6%h4^@4NG(X*W*C z9BM)AhchjK&h+VbELFPfb?_V;5)uBsDdlivWCW#Us-x6S#WiE;tfnYqK6#G1pTmn@ z$HrjI3^`erB9(YSX|j%0josje#`}|8JQumCvK~A#{mSqsJeWcpkh6%&ciR=2-kz** z2dBEz+v<^~#pSBR5Txa3PQH{(j9?WWGNcWsAb8ed@BNRgxK9#w-5N3>0}ML4hdUzcszdv}vP*>c9| z?C@CuU_O3eTgy^3=*^V5@tAb7yXXB&6D=fB3xOFy!}l{~Whsm4BqFrx2+yZNA%7HE z5As(lsI5?piQ-$s*`6Up6V@0{NCrsW& zZ0nfG9}HU5Iy)@(a2{-&liOMKW+DmtojU(up>9Ors{wR-$$xRv%G5rO@S%(dz!jXZTZmb8w0;b3e;hV_C{Ezp?@0XJNdFqUoMO=0w1;t`oMoaTs+9Z4E@O9&B$ME$0*E)qn1IaqH}|7t|+E4$KprLZLe% zMBo=SW&c4HQ^S2>ug9cDvhb;vc7;s+?u%EF?*Z(5bUN342Q}PeE6IkYvGPinG(Ps& zwO$Z9ohCuPYB*t)n26YY1+(xD`7_fpA8b&c+3?w4ujtjfH0*SPaRgo7BK#~m`)yjZ zza3UOv@T^O3ZEjy%ka_|E_~IJj27#YuBXUNBqg7uF+*88)MW!FtVnbv~eG~Hm> z8Ta-}E@3gcri*|GAygFuU3@YU3MC5|NmvZpVWZ@rQWe{ZqAh-F0)KR?LvvbJ>3&xU z&}oJ^$K32<@0!)Z$KR)ymTq>3)S9bvZBgz^lJxut@&6>Wm_dKGC+M#xdV})9pULdE z)*<+_m8x)c{fSJ5h`>({l=ZV)03pHjo+Tp**&hr&w0^l$IA)x&(a3i?$z*%zqoKg(#JJG0QFR1f` zu~YAK3vA~jW|xTo7j$X94Tj+j!wHh275-EVVC)Uc!aV}Bh8S8RFcS?y7N2N`R$F(8 zG1%!%Y0dYfiZ4Bt#2SgbzPQYMJfLBK^1C5ULhNv%+5}(OlRF>+h2P355y22LoCKfq z8q0xW0&k$cQ)TU4VWr?};zCHV4k#Dl6lM@Jxn?dkDwcPWK7*jI4_K01JcbA?79|3@ z^`AvEzM)s`>yxnCI;T9H-YBL3_#IRbKGBcdp1ZSgPK;({<6b% zY_SZ1m44lf2wV;YV|Fm-G~pbA)>Rb<*!1%3f|cz>RLZ*H6*`)@;=WsrbGfb=9(Qv5 zKocj(hi30yr}bqHTi--x=~qOr_FM_w8qt0Et$Mhao6Dnx@i~mtqL-jkkt6(CyAb?+61F1-T~SZpJ%E{qDewGOR`B!e z{aiCVn5AeXiG7~^mLfRQ6XIg$ABXWm^Y*cio7tK|>D9AbF2qdAnUj{~!|dfKxxQkGTnR?;>0| zoGSrdc)Q#`k>Pp#2YqxJcP2Lit*gT2gp+`_o%9CFA%AxvRp**xlu_l;Mfg&JF_7AO z+5pL2wbX7Hu29gv@*H8AVZ(Sk4&bEfyY9v0Ug}n>{%78`N|9_Cl*(zY=5opcnf>of zdy?)^XeZ!g@EoSs_T>pDfFF(yA7 z9&`)73AviuMg)$Z9wcyqVY72^$`o<}ld_oguF^O*SEJh7dDbVYJ}prF%I#6@IWa&q 
z71`4gK$diCa3}Syu%XY~oM< z{kL0hTyk*<_%9{(H(y|4@d-2ip-q%1hm7D{jBBTiXY90TSKi~dr-3RqM$aR;!qFYg zC3y`u=SByWA!|IR-x)3XYKxj;3{) zTFdT(Wt`e#1RDF1Rj%`dTuHH?=Iqkv0Ek@%PgMf1Za(JCSTTAa+?VwCWmW}u;PRmOjx^f)U-5ZJO`i1|eK zdLF&S0bk!Nu}{&jsl+5?E~j|pxmv!p9qh1`P0`TPysOueAvT3>U`>6;ggRlkw=Jt8%^41!V0`hAO@_wAH&3r zb_DPu7v$42=qr!`(JAtM$ftK|LTCAEY>$*$+c01M%)7)#2gpw&J2K9V_T}Lli)}`T zfCI+cVfM(FXWEq6=Y~>6y}K{0B+w7~PetqWs6bRPoahK7xM@KjI@TXm=(b_4Gg#5% zEovqerL=KBk?&bAV<%Nxf!L+&2&4l27;Ij=m~d?cr3KtoGHS= zzIDW$6?LR9iT|#a#}nbiY_Yyh1+Q&{4PTRdR8;_fKzo4PNTmgrF~+)`010cIIx{4h zDyNpm)wyBba_Y*Bn}Tnz84GJFZj5&x06Va;%jnOQ7}xyX*?#g)cbyZqV?|LJ1yX(K z!ig;5akR5S;-j;s{p|=n2fM9~>WY$gRp$vYPQB#?jUe7LuwPgs5NN#u- zl~}%&e@$D6(M98E4Y&RJrovR9q#p>3Jg~v?hD{&EO&{RXVfF!z*0 z*j7wM1YSjK#f*hr8_dHvxchq2h`kwaI5KE_bRwwu^wbTW1sWDo*)xrXs<9BQN>z6; zuB%T-`&2p%M*X;7VKQg^t*;DKdWM~rMwCapB(GIJlI^3^xhido_QVE){Lf1sJm?B< z@D$DAujptHqDNngJO0&4s_keuL1e*yN)1>s8X=q>R+N~h^}PW~g<8K!`{uP!Q!_mY z(N`_=s%lmJ)TN7ckhzs$i>ooDm`AF2j}J(;a0j!N>y&mqvl8Izl*WZAqN443M?>9w zH%z8))eaUGODWuaD)yjKEk$hac4WznP1Ng6yyIEX2Y3&~`0JGelF%_BeJ!iX^6DDo zyp~J1Ak9`~4?Fu)g=aq719V=+ysF6Dp!wy>waH`t(3UCIV6~b-$-TIT+HQr|?Alc4 ziPIluoa9cXjSkQwM_U=qE9WscRqd;ue;lQ}{^b4bEu*T5rK$8hKRIM>%p>bkWM|BS zN4l@Y$885X7uoJ6ox0e<#W0~)mFS;x?qJf>B6B-BN%Yz4Rb{c%jvLFPQ#_S}l3>+z zye+P_vbI+J3^|RolVw!9{{+uqkDDFleF0!#x=Ka*N9Ry^-odd35AWM=GA0UNtzKp8$7cqSkCW z`@;qI*o{v3@u+my%7~2?UIEG~ZRIGK>9e=6k*>Zbh5<>udMgJ{CS@d82%}l$xbXRO zehuSC z7d?Tj5>8BsLjzJL%$-|#q;pecNVL!aIzKgHFFPd&o6tLt2rMXhy)tNSV%8W^kz`== zY5Y3!jTdt$yKY$m8O?F|A)TFj)vMn05fvMhm%xxa!}(0Iyu2=1jPUvNyRf-QW>T)S zyK2ln?p-DwsKNu$B-G_r`P@J^e=7!0moi&#XuAgdHvG*WNEry>k_ptZ>$j-kr`VW(|n< z3Piw6cZP5rlQpOH6-Sjn;T1YQpBsCxZ^IPLkb~)s#!ovN3Yw#$ zu|rsG;o~(PY(24|PVT}&0<9LucKk`3O{_yI1w|TE9UjzdA}ZAlW7E@`hHAtF*!pbu z5cxdTc{nYUd815-n{T7bc$noVA$RI{JUaD(>YfDp=j}_p3d&b^>-#B}<)f3G^2}{g+(+O}!L->U z&JDVjxx>7qNanN1k>^eiOQ7!_HjUoPFe@Yi);i*o&QWDm;#pjG?mbx_I$;7`dy}Oq zHmg3XaZgtZql>w5JGwiDfa#biYgE5b6M`1^{7s;=}_AY-Sm`|5DUER@Xjm{#K zePvK<-pTH;KBxJeI 
zSN4d!FHJ?twIlVte7wV*v_;ZhQdGLa<>Z6A%NN&-$J9e4wJD~&Bzrm9u0Ni~yBQ?D zVeDyvoWDUb6Wd=TX&6zc$aWOYhU zob+?K2Pb)ISJF#jQV_V4@|&TG4s2sps43g>;}Z2BpL8?Av%2+|6s;^U}p&{2IWw&&nUpX0Q6x0>Q#mE%wYjrTtCAvFw4ZLFulMITsE zeVy1hR736@(7nmRiKk!hy@$Oz!XLg*%h77G?3<3H3EEs1Dz3m(&Qs$((BIhkqlKJ~ zODE%#1v@~nGoK~s31a^ZPZtcjX68yiGf#%;bxVGZw1jf;4XK7 zhTk~tRWn&zq)ERcs_pBU%Z(6{VBrWI0h&Sz|D;(O2i7)v>d*%JZ}vxxuC4~gJeQt+ zPO^oR*o&6y?tR!YgQpllZ*9?@xE}g_*Vr*Hx=$+Z(X^KWNg`#FYf5d7EsH#Dgt2X+ z^nrVd|G2M4qTpy6@-4uXHWVE(r2$!;UNCu`gv>%_|IUQ}V$lCCOa6Z|vHBlnW&g&T z?|MyZTQFciWAx_WYtpqF6yqm-UW=131k=kR*m(xEj{*9LXZYw^f}kG)p=ml#14u@i zSiQ!LktI(>c)E-Ri+RT81lSgJX!7bx+s<-dBzt6^(!E=<>@**?4o6m_V~mOwAI#P` zRkCGm&3s;8uo3;_-MSx}R?03*$F8QsjDmu@r!$eqHQ#tQQDGv`Xs})@T8EEGJ_j2& ziUDLsF;LmKqR=Am(&368>>RD5P@bYL}%?L8?1%9>FhEZ*)vN77%`shSJE_3 zR8&Pb&AdzC9sNc*zyV-*BEL0(MSacwm}JPZIb@0Ox&XBOPegz&4YL=6*&2W%?GQL^ zz|SE?!otPtCV`3;m1H#BF5j6si=h7+lvL~6RpazJ+qRnsc;)YHn3dqQ?SG!;9xFYP?7;j!VH{z)z&-s$~y zjj2g-!LLDEc{s*RO=^@@2Wy*dWrTvmabuAu@pE&QeD6Oj!`t&uo{HhTmJfY8x*Qha zAZ4E}75&V|LCkVL!9?<|7*@aqwP7J?3qfjchGjs8i`=;-Is&AJHaATDSPjNPDqPei zwhF77gS!`eMn7YCvbJ8)$ux)0Jh)9n?-FI|jpDo>Gn=itTs&7)bN1v0bAba& zpU(QdA{v<5Lp~SMM8=T%n_R z6nDQz9P+aGk@zE%1DF|U7NL$KfE*JtM|*F(wad^wg$bnR(_pl1YZh1SZuO>DZPAruQs=;*k2PCYT>`{uxaq>LpMjkpfu+A${EZvAZ*uMT*n)A~sx$|nc(33%d zJ+Uh-%sa16?(%LI%HgPH3yKV}dN8^bS3Cap(C=D;V|w#hPC{uhw+E}Lxp&Iu2<3=d37vuP?FLg*fzAn4KLMhwnKct6r3@V&bnk0nePMawPXz94xavip!Jt`wh zK(E~X21mwdJnIZNHCo9gTSUTSAw|_>&*Mh(BduTzI3tuoUgYkT(ITAu z6nFIJSDzkKhc^x1SM}ItZ*kOO<~)k)@bOP-)#mdWZ8typ`P93S`=9x%GT21dW253P z^9+9YX!L!r`E;Gb<<^ulMQ<+VPuq;JraWO|Fq<&Rj_Zm}y&V~8U6#9xjTOqAt=kK4 zu$W80q){NGWAbdq?%zBEr_Da2J@|D$GW1eT>CuN^fwx;Bka>0bsHII4o7Zirc?$_6 zl~(IpWPU+axIp@(Ab4YQzg%&+o08NSM1mVuKamL7)H@&fUJE$rp6coY_-K}b!GfGG zH$}JbP7kNQe6Pg2sX_h-sqXI9e89C=VP}WhVG(TB{gLMDT@cK{3E~7ve)~%(71h|( zR0XjcaH6?+McLkx5t^oS>)kF-`WkpSDdowTgSkLCI9b~qMJd*x(dT}m?v|;yMw8L* zy@KHbkD~NDbG&gFUd69~1t96{99gJUE~Def!Fuqba$Wbs1eqK+(G&IkG=-Xd#1 z%gRXI?ZBFiMwP?0K+1fl+UXjfSk2z8&^1o;E15_lse}kRw?vD5u1p 
z;?(k`9$4&{^51D4KS|1afVd-1GQ5D=YznxrF*H_w1+xr8ty(~*cbT`re(&4vh9<>J zV~fcTXo??aUbOwZC#gK3(kc6D;y$3}oaXB@0)uZJM1YGW*svkV8VAKpMwVn#fRIA} z9$u9PhFBFORo~!NdPCcyq~2(a&zj!NIy?i|>=Od@{9i4}ek;d;_m0GX6EcxxqT@G* zxNZFo`nK`lF0=aK&|o2Od3){W@%pp+3pWq1_a+d5e6SR#9HU&`a7cmfp)SBm;(_9y zjj+L-et2z6g)gCkgufI1zytfODnflNNyQGjt}R3Gy90ItY7v;cr!RsVtpsNkO7xjj zut0SlHk|{Ow4r5^)BC7k0$vQGy97%+JZ_NSpXGvGgOQNE6AlNF$7!cq{r@nt$`p7 z|2;a)erD!xbgcX-9VuXiIf+f)60f&|djs;@dJ)USP6}B~!kzWo+>X#u7~gqCPH6v? zLbT{l0#XDoIXr-$C4~?FNk9s?xft*Wr2kn`KMSaj2*_p~zP%#Fp7=>tkS{5}if9|r zCwO=OzoF!3RsDTRepc1rqlDRmB6{+OS|*bVz^wIy60pTo_R!V}%*IWp0|i3(0ZY2f zur+n$4?gX-xbHdOEG^Y6a6n73T{*!Pjue{`eKKAfr1wD7k?%K zpsEkwK+yhy0RI=9woQF>GBUWO_r?##f4l+yoY!B(p>Ie)<}SQ}W(fXd_?jRL>Fxgk zXijMN1+-DX*5ZB(G%40N%*7u-a~5>c{{5{VaQ5T=4A1Hn73$x(>(5?#h-E+UjGI+k zjtE=~^ts3X3n)&2ed2!sKu~jk*T1s}xBrH%e}g02SFpd{_je}(aiO;V%bk9_%VAeW z`w(=gl^%;m%xWES>^(J|1Bjt{l0kev{#Sf<`{(#vXgogtC1%Hq_bJc$2+FOugueo? z&Hj$}J{Vs9W?W$Rel}K}CwS~IP=TJlO*o8RU@y8lyi>uz6At1sc&KnF2iP2jsYB5E zD^#TN6`V_W#Ku#*Bz5`M6TtGAGYGAM(9N;KKJ}l_?Cc>{S^NPM!&HZSzcl-}NBO#K z95VbP+8lx1TWMUsvq1!eN2{+7BYut>e;)<@%=dJEqwl{5icJ6Kz_U_OdQm*<6@89|*fHs1f+40^@2Ol5-r3Aj+pooF1!;Pn4`yFEG1hDpW4ERLos{{kD4Q%sE z7FK?g;GYRb`g_@R^^aS}OrIrZJwE!pn{Tl6mOgto-*b6-U40REyeL-FL2q2IqGsqw zdFuN-wI}D61ibiw^WOcIT8QVSPU5ijHH&?87y%yvEiZyCjX|*JvvpPl4~o9>e@pdt zf>|wu)KtFEhzbr4C5x`*w)?DKAF+3m(EnFTvVSG2fJu+t;#b0A{x))%Dq#5QWk1pW zcFkPyQ-f@2x~XB=V0#xsYES+woo|S!+I7ln@Eo(sy3m73QIXteFFvz1;p^PS-U|%l zkPjhm>nL-B|3ww;CeRdEAZX zdjA1=g}N%tMBuMt`SW?sbuf|qYXbO}XYfk5iZYj`i{hF$?ekOvGy2cHG6 zIr=RgEa6!h!)D}RAvR@mO}-WQQa15fiKEoZV^&$Z@bleJC*9i4`m z=j;qtM7}&sUH8qy@%rfCNrl0ker{;%b1&D!SZF2X07V8F)(c;emVlD zo?nv61GmARPc2IeanWqMdnqGrzRph>pHm^m;YHfc!{1a#zk@Ya$T1LSUYUQiJO`yyu)@ z_(PGT+2YDPuK-MSLUDb0D`0T4`g`k4Y-B<8nKyn{@TC<_WL{q|=A)MyKZ|A7YiT8C zC)^2Pi`~1%duH;DvtD zLlyfKFbca2@tRss!!T{bb`q77wYOX(GGsSySGR7C#cQI_$#b^u+NamrOv1um`4)_y zO}lzUGrgVb?8LF>xAHsHn^<9|#;;&A8h|6eh&JL z9G0v?TgX@d)v%idt=*{F;Lz<9m-ib8N00rxkMcI}jyowQ4B>9$Gj}&bOWMGG 
zrNNEU##SS|))8OTCO=+2ntpw1pO-D~T|MLHuRMsuNfY4v6vklP7qkRHf=VVQmJeCZWo*9CT9x39#c zq7WTZ6vL9slJHOtM1;uq!mPTux!#;3R8$I?cS@lj0j@atogA3E@}j9hUDoHW(492c zd(j>uKr)0ARQ{pR2Y)f3X&QuWj}sS883lc|D?;JV*mmim7MBdf&i4GP=ZMoZk5#%5 zx_USke2zna6eiDLu+U8*N_^_W#PuW@iBRu=J2_X$TVvNho?Y{Guk5a?JTUJng0;$J z&l4{G;&dv=Z*j11?0=bW7Qz6v^LL#^*XLh$Mz)#8Uo5ZLqO0WXW)c_Os2|wpKPtc( z5a1#-SP6~HfD9@#cg=rYOppwBVEHEazWtp9U~nGVpu_VLT#qK;ryN5buDeeoqgE_- zsNem?U@y-eI{Yu*Vs40}M1y_o{(T6tCLSmH?rd*`*zX?~_mU~)qNHONca$sp)?k$c zxHZb8r{%!>$w5)}L8OW4>bK1gk=Oc|ag|$r$eciIn?N2L|JVlquwmu2YW zIAY3c^U)vzFMqLCgQrqp?$=&27Kd%II3cRYoQ2`o?l6hec|jT+8|fzS7+jb-eD*DpUNS`Tyvg>mzLC~OQMJv^Q2I#2LQu$Ib^P`` z{Mya(MnuUedwaMBtW_q*74)|r=%K&mfq!N%5iTbFfJk$G6a@XY8sm;K>2Ez@WAU&u zZQQ%=W#gCA#yV45xbhm{BDPKGttD1Zg$VFaw!N(26^7YKhv~C6a;Z0lB`l(yk~Teh zskR-KFr=T9KTL8oyhf_>5U#UH4PR)<)H=ccb$EC9-Ry1E77|$28i^q zktstZ2nhE=p!R?8qM<04V+}F}mOM)ip$w(N7Vvo_z=9bUe0S?GUa&1NaME>4LyA6;A|Iei3fbIYB|N+WK>N`r{RcL8|f4W`9t9i15G0 zn2P*YG}9Qh9r(dtp$dF5WCi*wJ`cVQlz@S#0$cv89UbZuY!(*>6IsyRg|0>b|DoVN zGWd@j{Kpsmg9rb?#Q#}bFj2=JDg2SGf`N(t1O{y5qL zZVcfR9FT($nI4o%Sz~2_rhvk8}Z->e>0 zWZ2N>MWPdEJJL`qiMC2<=BgD1jzO=Uv3&_+*YCR9!JmZ;kMR8x&BNJDM(3V4TQ{%V`R}o$TJS3k(@&?GW?>KG60pfAzVpaGU~V`(3&+ z)~BK2Xtu&))rz~8;)#q35814^w=O+yq`bNs!0A=qVHj{B=K5zxa&)p%cO>s|EI ziQ_V-P35N9-xE$$%zrD6Z-yP)z(GW7T|JeWa$F0J4!T8jI}|0#j#6>7ivvIgRg02+ z&L=;S*BFOf2VRZR!Lf(lp&!1U5{pi)3W9gOvBi5c;y2d@wqOzR?uA6)1aH&3QAsxU zFFWNlDLUaBU&eYI`>rnroDV?G^I5Ll&P_ro=vE+09#_C=Y!S#9(~eHw9A#8Yq+e^y=T)QH-=6R6<4Hk zrYoP$RnRPoox3D|_h>WjtzMb-1d6&poJZza8;hV6wUHDhN|M85ilw5~I;4$JK*KD7 zjqASXpxSv#sZVPG(jx&3UZL+X1-Jd3RO(ApS=9ywkC}BEqjABQp+x}9tw(bNLfniB z9Ucy;mue{(t9CTjjlGIyr&L$%WDd*!mNz0PBGWFt=Ja+2UH9T>%ZSi>7JV8D5|SYH z7R4c-0+=Zk_=O4cKClR!scoU3T)c6*qp16fVW%?{SYeg+*T<2qMWy1i&S@?#7yHgZ zj`&?2-Mu&)WwPBN&018X<;vB>gFQ1EW3?`PMpE?X7}d50+e2%qjrbK78N;ficw^IE zPum*P2~Ea|=ik{JBGn9egQF|W`&$o0)8A<4kJ+GE>Yg-*JI+>KNoZNCZSL-`&0OWC zCqw3S#BkX=+j4Z*gwHh~j`vtPE5m6^5^o1{IL2BtlDzNJ;%Rpusl&v~nele^9pqM> 
zxa?rD{+wH!Q>*7P&&F#x;6a9*lBOb$$6RA5gmPK>p!V~)awndk<9Q2FYuN_;coDRugN69>^ zKrq>vSdU3fo);{y$6hH|bhy{HNW)xfxfisyp=rx6+6tpK`?NjXzHw>14O&$l?5=%p zRftx0rH0i|@95m^u17V(Ur6KEI{nwW*A28yn=LAMvhv_i9r%sPSpKkYnuZ(E?U5d1 zUpL!)49CrtF*>ti5ywq0&pGgj+%S#1dvB8|c8Q(jQ$pz@afxVhtyx5kYAxhZ|dabSfn`@imX?O4@Eex}GV+i(j@M^W(%qaZn^( zom3Qk+oP{`cAO8|dWAa`cfU4%Ii(nVq2r;~(yUXcGM{p?z|)=n4c4Liwho##iDGA? zC*ZM3P&fDft6#77iItxo-_MG`XA!{Xb6XRT2^Axc)ZL3#MF)(!F0|X-da&ShqgFSl zTUe-{K!Fn-aKfFPH%+(iG`Ze52S4#u!`<6$?TZ!51(j*MCn0|z;NT`;3$iNC7^m#t*hQW3v;6RWJ=4nK|)dE@1zimVRa2smX!5FY}K@3{c?8cF(uP9Yd{~2UeSAwy3xrT zxi?;^mHyb6Fz(FeP*53gEB4kMl7XAcN$G6`0cw;%6)}`JNeqXizgU<2gPercUI!vT zbTf#an+&Wv(gOB2f7V4_IG(at#w8GoU=^M4mPAvBRo`vn^>Vk1U!qQ_8p5f>Mza z*`$^1Cz08ft0TXW2)nSDkR&(Wq-I6^9Q^*Hoa~AEg5#rgv8bi?Nzw6{xCyVYW((Gl z1H$&LZGle}*!(YCn+}7gH9pK~u&I8xkJj z?!>TSV*S;<=vKM;MED|1#?=wZdqIB*{^HoD`rh{;5Z9!E{Wf%G!_e-e=+owXC|j&XH1JM+6*3v?0yT+`))L* zfqcV$k1gcn1$LSM*M6V9bbGW7#(oZR{PvnBfhBZax8_B^B-|!lPhCLRMCwr<-@=0H zRO6YxPFm$bYEv8D$5TvHOu8WTq$IcK0Mf`~FALO{Tr%Emm7yqG=$X87mn<+-W6&_i zqTQlPRNoUN9X2gpziBYiIB>%EN~-fN@W{;v5A%)Um<3uV&uli!9B8BC-!u+Vhn?2G z=b6ZIl3v&W`xM^s*`L-QH5(H;#fh{kFS4ssPDLwSHc8!J-S!@YreTxIL+`OBIWo~T zwI+2~&^E}-N8MB%cJGzhsICJaF0v_2?Tdq+(f;tYwUqXQ7^RX@uk7+1)`+Ki8!TFS z!=f83P;)&U)t|g{T2iFIFIjkzSyj`JOjdYI)RWZZ$=zqLqbuh2`SuHJ; zQ%*(QDJjC1?}a~<#NB1BWX`V=#o1ilq=1>7gVT3W-?l?Hu~*yhLcIF9Sk{?*N}Tvw6vBexOVWI##qdjuzJO-kM26iZ1^-iZ`BeCR(yI_q1xK6)L;qQ zJfxG5+jI{r9yZ~pixM*(R@6tq=5{BOLW>nO9jnA`9i-}?*ZEwrXzVPF{xhqGe%>2rR!)p?C zDjcWNLM^NDmiP&Gbm2?z>6Dl-6h;Xr*aYbe+=Dww!@%bsn#Nc?HYFa-(8*YiWr}%B z!IO375W8ht{q37a+vcYseq2a=TZQf_4|8Q$@6A2>q*DgCqZB##1TiU7A}}VuFV(E} z#yYz@ufHa)y=C!oA8ZXS&?I4D+>3;@>a7JpK96hmWxg`aDNdEGO*((&Rmg3}wmUpU zb-6lHawDCVt%ng-*A_P}}QDTiQ%i|;K0zE$XzJM|LIxWtH8$5}Xk z)%&E%tMWQR5cpqskoG{UE2a;Qn!u(PiNIVwVQL7re99JV1xq3V5d}3_Oe9)ZUv03A z8j^=6SP;&|Y;4F79<#vr_FOESLV5(QO_74N=@UP$m ztrEO@KVj=oZk<93>+&&p zGVu9mGZ?rx@skjNfw8k55q&>NBI~D110^3R363d||KrKo|0!*^44i(J;cxVjqiTi! 
z2g3Z%ZGW(@|A^#2BKZrD{l{_sfbIVg$-hG+`lcF75#jk?X)Z?V0!jleHB6;+zL!>DKNXy-b-`zh5HZV#o+n zf*xzdk68W~1JWst8&k)yEn%>|s{>g^o@A=u*GD3*M$>a0yO%^khBP ze52peJl0=rTmw@wJ2E2Wuh*j$-T9rqa!T>tn|S=aa`*e40eVLP_oAq)_+1CPZLPt| zM!h}t_~v)Fn;Fqo;ve`=LZ|p+I9;RDteF&62L=tn>Zb|#weSyoo$|*zT*RSbh z;0dM*T@_l&k)+cN)W-O_Cdy@wqRw>x`5L|NQwE{>&vfdO^;=Fxtx%a~6+4?(&y;X4 zLm4Eg9h};HDvQgqCt@Zi$CMZO+{xSxC?DsM@f>B!DlWLIBHm>utIh*dng#wL_tB;HzHEgr?fDc%KoT&U5@=SDJa2L#{^#o z{{Md1D&8#Mal?8PC{@H=ecnOi$a2=0O1Mw{4J8J8b47)d$-om$TaDWVTk)D#hybPz zTIzsUD%vDaf?0XT07VpYIzAYzCn`C?kn%8sXv{jN)Bc(-5eNcnp(rfDi@Zgq`B zV-9yKfKjEH4caGc9Wg1or|O4}4hr7Yoa zMz9$PP#7Pi?cv*~yWnC7+-mL;BsGtC|Fsy_-w{S21v%x)vZ@9DzYP`$hUUl3Qp^A6 z-W+3Wvo7|n=HfSFRz@=D*;c1{!Vy)$%7)m>nr|l5Pq7P;IXh5}yPVih0Y+ZeM%n9C zA{XP$R|$d+qTRCgsa@5f=l#Dvr_U={()EJf9%?YPMEBBnto%Rhy?0nsLANgqqGAOL zAV>)c2uhRQiKsLY5EPIa1!*E8(rXk%nsh;>MWjiM^xmXIK!|kdy-Nut1W4kYVD~-m z_nvd_^PPLoz2}cSPqMT3?3p#I{MMQ^YclojQ0L!Nx01fKb^kKk<|j&B`8$MwaiaH$ zs*O4b{(hkTtE{E)Zp)VU&5X)SnXko~ny74Rw&$OLed?X)d9_SN=Z0*Kc$e(+oZ-U3 zXX9BDZob`qVlHKe>m2@!9UyM44iiW``bd;3GvrI4&F1V+K84@Whoe=#^XTituH0;@>WeJf0vXtLKsO( z_`gZXBQU%F%RQLNkD<@DW>GdTH(Q-DAe7!-$qdfGdwL=N9?aElxpwOdV?RVNO*qCE zUQo*(#I3J7>3d&!&n3H%jU3#yz42rVD)Kbs(=w|ITdo?dWYTNW+x8j5pL-CiD3H+y z8CS%LOyfa}6XlEhwC8Ab^_Ir$7jdkJ+xE#-BFLm0ufjOr-a^A+tWo%i`q_Nd1iWFwdotfwN?t~Dh; z>~|w`$&L`Qx9IT}e*^?0AGqwl8v3;7~CCSbrHx5fi*PfGCp}-(V6{?myE@2Q2 z8ko}u^%Lw&-RQz2dMoNumYe$B-=w=2HCCGx^@w!3)8k%^qrKocJ^tmYqaJFi42#k* zYb)yq-ECLt8O>G;GvAK0XI@dG?HoJSJVER#;`~Q zMY*9ALEEIV{1;;e3+Hle?hK<#-{5Sd1dfS(omW$+Q`!C44DpP zCflZd`X<^9x?6P)r!HcQWWiEg)dJfng4`KC)|!|-v*qjU5TvFeF6?be)_m$d)ram2 znHtrnFAnpoDR{ntQi`DcVDHQjBW$Lkh%pIwseAtBo=+L*oCB0{#2*|u*5)JisKKS) z-NbDK7UU_6lNg10QH-7`GY54!TU-cvb~fyeD<=fqGrwdUA#34mWtXT%UD6(|;(&0w z!me|Z?l$iF1C;pI$I=M)YO@^w#{9v`4C;|UW==+Ae5 zRw2|~Sap}bFBRqLgqnx>812F+`r;|E&c{@ij^O+gl;{)#TMwYmmA?7XxA#bt8S)GP zBm~Cl*9Nxi6v7X1vC7 z3toVbp%^^4NE!WZlP8PG#rAz-hR-=hZ0 z=6N*SuV9HH6K2+#S;!;lt;$wQclo9^i?MeJ*`Z9S^$4skE^u27Z_>G@esXADPo$@| 
zKLh)8s=>|qw8?=I<&zLTfm8k7LFh)wS2nMq1Q5;`!0IJesR$m;g^pLEJ74jCf`oNg{b+C!TL<;^fGf&d)p~2 zbZH+y$(FA?{<2jax}cMWzeh-u6#)Tg=36+E1YFsLW$W%ZQybc$)HIHK#|^PF-4do@ z$?>8Gl4E@iWZ_agRPu^5^RuK7^g1U<^+`VJnq;gZ$Yo zoz%T8?qK`z?oFKKIQ3HqMM?)AW@VdVV0ibTVV`A>;zrbRuEP0d{O;1Sd#sQc@!io| z?k2SP8*MKeu$>+>8k!crr%e3s_)hz39|-&A_*WQv3V-a{W1%oTz>@2Dw>e4K%ugpRV5VsQh!7tZO-_pHZDt&(|?2*7Q0Xf6N)FhCz5!2j0g^NR(}hVMSA-gNOOy zaefmawr1MVE>WY&Ax#50AKBk$Y#yDU|4jJ8;yf-4@$yFL@kW(6nBKN}%S(LM zriaUr+rDFMW9z}5lh?0=k$!lK?d*qYvpp`OLkq5VISb>^m!ORH{WDpJi|r1G(jBXwg?TDMK#N!ZwV4SK|%ZoaeSLNBksBDTO}uTJ7QaSw9&~iPqhT==084)%?+2aHAH$B z5bRVi#GH`@lk3*bawdCI&C^tN$qNK1hKmMkoJ^-XOn)j;=Tzw(yH{fn?~_jx#<(l2 zu3+{Q1mR29D@zUxS!UVNm`U;HZklR4H&bjB?GpIp-CYXZi(^^3)Lmc**X=r?*RxI= z%y9oy;kNXVODtod^>owc1Q+Y_ayUxIY53Qs2E73 zCboG}<8)H-U`^x6OXs{MVpI3iN^6q{-a?OAtWN*LNZ##`4_d{80b98_4%7!Mwkj)< ztaPFsDq6FWT!wF#hmlA1v>1lplTnzYm+d5%EvNhpmROJ(HUyUEp=8qX&Scfw>`d_L z$yBQ<%p`>1p%0%~OSu#_#*N-u^IhHqYj98>vIjzjy{hA|BCHyg{hQ zMz&-r_$;&o|7b#ZDWFH&6Y?qM2Jxe+;C{aQe<{*LdwHqiE+i$U|6`!olpA`912 zS!H~)(feD^hXl{Rd-C+h8pTs#lyy>7Gz`IYNxP_3IW(48?n*bXU0s=7d66oR0SA zQ^md(I$q*0o{USIPcaoH^06dDsH@w_0ffUckX~QL@$VR&I-B2pMFH;)dwRS;3pQ4sU&|qHe~yi zOkIotY}ZA@;-HLGBd=Y}gDP3w`McatIgB5sxb(^4k{zh-yQtG{j z**#KlXPKKlGcuX|vh5|ac2Hu_-4%EcWS3$C1RSA;Ig24{M|-V97bx!X?rvD!C|tS6 zJ4MY;P#74kB-)}_nPJuNK8g#3gz{4^V^FHzIc-tVBIGU1Tp0VqrJLg(HJXZy0_Rqg zCRn869X#3b@|fYEa+BL5et2Gh7>w72Q8q_7K1RR{{JNEI(7(ET?u|TSzz219URPwF zf^+pWG{_~TVp&nO%Uv~nzSPFQTzyn3=$0ZOnc8eJeGq17T>@)kUwlB2UX|VX*V}^UdH+JoB zTAp5bJc6XLj9%z^8Fr?fcdR_BPgVVDZ7rKM)tM7dceAc5pHT#5gISpiablxsS>?9Y z4%p^-D1EwOT8+a4da_WSb8NRLrtj|M(T(Ah$n>~0vgc_*!kM^q9J;t1%xv{1d%UyO;M8+EdS+G3+bwD3 z*k&2~7j_qH-s#9jBH2{@7p8QzS8e)kM+n@%^DxGL zu$WHVw%#5>ElZ>4E+!f_OXR?C5F<_QcSoI6CCpVM$Fh6pXp6TcvoYRiYQ1rXPv}Pc z5F$8fyKjqlLOP-WQQ#C%E&+;^Z5@t8`5>txvO&$qcm^w%R%DvI;AJ$sTHeAQ=>|@L z@q?W@Nr=A0+F|n^z7Su$0#AONf-3z0A)NQci+S-U`uZVa_Tf@Tnyass3^d76WPt4B zHDKMWhPb*i%}b-^>zvZu93v!idHj!Elzx{HY52C{;X_rs2>jvYF0{7@?66EjB34!L 
z<_nlgK^)y!A7*+rO6eviBl~RRJ*u*Fx<*-l_hHSf43GZpfCwVUknU@ZOl7nJ+s@_= zW(PsEIz$4wCZhXA!wt^zGdI+0U*3Hs==_}Mbc(kBwhdmSyL(H{!PeT^Iu)Ikl*OAr zER$9z+&o-bf3)Rd`}Vf>p6=xC0>X8};=>;9W9=#y<4U7G z#-Hh~!pX?YDLEM(A$i@8twya7hYaV599?+6E&FKG%0Q2|oV}R~!wmb537a{PI|`>L z7$)vEi&jIU@u8LNhQjzp2SlH?2bZ%gWi66c(II30+6)L?cODz+N))_krcB#+ovB1# zn)?>5sj=^Yp%(08BLV(utAqWAB^lB5&PAG49R=bdR;pL#w)UU#x@lt3byOO`VREh| z^mHR9kBXg9G8W-lajXb2b6`Vf!3Oov`q(%fFB)GI1N3c$i8FGHH`ROEORD+s#XAVU zslp&bhN@!0kwu8wU5_*4bcXQB%&IMc^p9Cg+wPsZqVF7r$ZM&dxCMj-@u3h69*!PZ9G=>9)K}alX3qyKigr!5t7T8;M)E4z zl6-!F_bVG)M`ZGudYQMYly4J1vb+fiwc{IGFcP1Y(}LD2Pf=@-UP8A*WjbKMpG*@E z-sa-nsbRx9Wr#9+*5O_=2fFq)apJ^(|NOt68hL!DiUU?uGHxF)&&j+Rp>*xGt{>|o zc%_@Gb_ie+#-45x^Gv~G(&6aH7sS0{AWWFp+NF*!(WTyYw;_t?G5V&r6}x89tuXi4 zNqJ_(5}r0g*Kx!6yCGs)(3NqpSNw4hII|=}@gb;os!;+j=Xz)8!a%&A4xMZQyVxlh zK~j*WODPh+2?{v?jfqKb8Z6W75@yv|2suO%Ypo z(yOIjY`+g`CXC8^%_Lys^oj0ZFR}nw^XP(Dc2!Ncl3Is=*0l(rG0yu@#H(0bekpwm%-zV?@18bOtKJ z_hZ>}XdWMYMgYXg-Q@t)XNVt*L2gvb5&=W`m5ecq`n+p^G?4G2(2^H+&}~5ws~7tg zs}K3o9I|x?tn&;Xfn!QPeb*d;E`Vm{ATi7U{>f~Ei1Xfg`72VNV?dv&iDG)BvHVaU zU>W6)W|GgPStSO4H*9Ht7`hDm?P05#gzj6XAF@Lo3 z&xd5kHz~e0tZpWCL5e09a|D zGGnJ_il?PHGU7{<6Ho*RD0Gu9e@$kus;G9nTS&e^?!rJ?j+XOGiL7^878;CMb)KjS zRIbCoL;*GRoQ9!N_vtL28)~Gjy3Z9Yr+b8On%=)SyfAYw}fV!UZg+s-t z^<*#?Z-(Sr%VHI=oPN;hZoq=OhQUOD+CgJf>b;o{-3Zz0L=nTHmz8f`01|^qlU%fg ze6D0gw1P@FhxMTXU5<;?s7wYwG6GYy=MTy zJ>N?5Rl4?lp6*$&K@eyS3o&!~_hDM8BoonA7k3;c(URrgI88XaWdd1kZV&kaJ7Px+ zyxrLNl}sf9f%o~@mB@1MI4ALtFYqJgP_uVvWFM*DnsDQJP}OPSHi{q%*2fz%ioVI6 z~jDb!7_X0IyL%O$+bZ_%qzWxVr^8-gBq$kUGrM@EY z@M5o+n(V4M(Zwz7I^5XVo9J;5zRXL~29Etb_-N_+b(*XRrumY(uVlAz-~K&I`qw_1 zD!NZAh9`GFXC%H+m$JF+E|TT5hMtk}R)AQx9f3rl?D5ql6^;97VWP!n%hYtPHhG#u zXESSPa=z?Oy>7Pq{b#07g>$BEQo9C5>>8?)UodJDd~ib5z_C4F@b=$Z`hE$(Q6gX!~xn-Jhj> z>bSJI?{?3m_^fW8v#6AZWc$$4mD1oLlt!0HPF~saL}_8%gh-o!w+#!kV0tnrrr|`U zl-8;9d5aU#ye{5Ci*|>KQP$`Q4$KbsitXys%2dhQvLV|~vC-o*gkBTBqMuKrI_9wZk}6xC3Kawi%Ffe{JR1GMpOI{RNbZLCKs_*58l1~pUWeMusPzECV 
zV>GDrKTWK4g5nJ!%cZjHfH)^8>tWwsI}fcRf&wT}RUV0Rwth=rCRS3kr0-WlyK`Hr z)VQQUsx7Q#+kYhj{ zar|mK+Uw9Mdg2+7WE|wG_(YZxhAXSlbWX!7juI%9RmM|oy7}(-z4D@@m<;kAv@z-m zx+{L===C((HtT`zC~iih`*9LQcZLX5PQ(-Hbs<|^vggo*r;BiqCIM)rgoYj|zYb2x zEJG0a5SW9b4H?Vmx{$=Vq04zM!^ao{CEP!A`futU8Rt;%kY>^mW~B_5Td0|^+w`pL zdQdwuSAXTGp(J0~ffRnmD{9ALtB!+d^Xb5F5(6j$s+OJXKn6xT*c&&43k%r!`+lUi~2SkIZW!90(>IDIrS?UC25{CI`ELR zxl&PNdT*cfmScEW!P<7@i{@RXd>iZr?f29*~a8V<*kaK+W{!mw+#==`a9C}N%VwoaT)y_lp>xl z#9tibM|zJT@XR~hH|(5&)xq%lbm;mxAgJe0riAP(t!THOM_Cmo^wBrwvnPmp+2uHHL#sveppd37t9wiOYUWHK59$11Le~ zZEIlRcV!b4i5|#mXn_E96+@UNO=-rMI)Mg43m;jWCMDwhLt6&Zps!%4kZT}dEZZ{z z>3fOT#1U7)FP|+y&hEehvprul!G$ z12KTVn(L7ubr{g9lM4{207r>U;Us!O%fX$a4(`CDzwsVJew%Z9_}|UV$CA1X$Tdd; zYWCqNwyz%t5p2kiJ^b(Kfd1W>Lo2DrdD}M#7k%;DazAHQ0FyB$$4TU)4k(7MF_x@B z7TQCA=1|9X{-j>^ZQhzFHE{qxas!r#5ciXvvZpQju*ki6i8G~U0k=;$NtUl%PUkSY zQFHS%i-H7-U7Pm7XFUPCSgpuX50om8nP7gGqyuwbCD=U%A)ZjI3l#!H1w>L`AZZK0 zhY%7U{**T(obPh#b)3VQsErqTo~gIf&sf4?79dycWT*?Uh7~i`W{WFboC9c!5Vtr2D<>X=qCGkp$9Z=d~*#PAN0OiYj=Fb z3DuABQuI-Ij_c+mZc7{kwdj87i=DS{67Nz5z`rT3E?&x-K12XVrRuph!I%uI^H1cW9-)w46FJRD=n+S=qj}pZC~eds}wXkQA%KA)55itW~DqVwSPo zNG6)&)(Xw=OE-;kx=ubbJu}q52z*4K6TdMX6ekJtX201kB^3TSKdt#ULj%2mvrC() zJkU|DYR7)i)Wbz2K(2Tb(|BZaW7lhP9CpFrQ=#~f1SMsPUF@=q`n&1i$ZNH?&vCy$ zyVPbsaJ}9nO-fC3d-%i5j#Xs#Wc^7{pa(qv)B{h*mR)04gJpbi*LWW_*v@WLzQp45 z=Jj}3Xj3g3nm@ISWBlhOU{m(czaJ%;kv{~$p_S1#{QLt6pdGqk8VPv6=511;h<`7` zgWS&uAgM%>c6j}ts*vFSuW1#H^#jn=1yAyyYURHT#&725hrxKUqx18Bw>3Ke=cldt zBrD>sJfMi@fzaARRvAk`4&Hr|k-IKmwM2hw4f3&lD~Ng%`!r8_$KZ6p-_QPOZ)T|h z5G2`nP57dS9e_+-;Jm!50}c@Xt>9!UQ+TRnVCv1v1UuVwC5%GYDZ`dX1!#Tz%j$VG zoX_$LZJe7;ijkqAWocnVS|X#2j#A0-{;N0ea}ctpl+TVxJ#MvanjJCG`COc|HaqYo z$o>5dLVASlx=~&*MRlH>6B+`4?|r|dLGc}$eYL-1 zc1!y!*^OG8rpn2jmUX=80-C>W1~licOi8@*3-}?PfdUqa_)cot@oyTGC7PAqr~AsDz+vn*A0lw$VylN&;@f!@(ArAau~l`xTb|@nD(Ml=79=R*UnRZ6DfDURx$yI^ z%6r(Ie3RV5(RR1x6b3P-pyqkX`7M0lJ_(&9R0_K*0av5^hfyjSwJv5_kx`VbjCFGW zsLB`FJWuo>R#?e<*UHeGNDhnnD4?P9d=eJ}3a|nk1_v-q{TmE};S8b2pum>?uy_E| 
zZMA#E=kMUMXI1%_pAac^Op!oC{ToEu0rf0D;nqJwB$9 zN^JZ$WEC^6UKhy#kl5p~Z1NrV2>1@Y1M z*}n(_0G98#;x~v6-8f0|4`(1JW!b|>z8r`V?O@F8Z0g=s4MXlLWu18SQ5))kyN+Ji zZj?^v$^FrElqM1H-YnMimprxoa$!sax_7Qr%{OKKgyU0QF-@27))A%5Jo;DK7mKIK z-c8fbH)bncufE})6X4oE<)~J9TkV|Y^MW3DU2)Y(Pxj;9n1l5oH1%%B|JiXBz~I7t zeh}2*QmyRKPP}PWpT!n$T2*KE%S_SHE|t)OEH@vC?_NR0XqG&CA;tbO68xp;wY{Q!+I2O7Dc@@ zV(ak076eNQBDj5d*J0$+; zKzwTgN{T~-z;@7bVY$QdB^b_SRGrG%`-YnYk4XOTfqtp{&}06mGhe?waHaCW{#gT~ zSBV36H@dq;ZAIz_JJWUxaOya+^2rD)F;{H&NORqqQ;ZkTE`!;&Noz||N2%%LdN`W( zki1T|-(YJyY3e}H+F!_AG1K|nbS<_g@mhIqVQ1#jusT9XP{<&IOA5hC^LgJnC1bl! zPMQ4dWJ+WX6o{*?1lOr-`gOS`q)(hCmr4NQnYjfHnV zEc!U}53B*ar@#0Pcuz7A^DIx&yd^>xzS&TqyZ*lK-{#NXoi^(Kd=4dtTaNwki~my8 zKn48CvVZ%<{|ibV`kQb27dihmnGXIA*Z+fp{2$QyzY8+Z@4w;)f6)o}79cJFjNUK4 z%XR_|LZxDNDliz+eLo!eE!}NucNGj}9n!n#1Pfb*^_30P&dHAOMOd^h+s!v&SA+0Z z4<*|TkjLA>NKfzZn5>LTDSGc%N3SLR`FSngE~!F%vltEbt+DUV%lp?Nf^C)y{6J&ioL^wRi zRk!le_4DcJ@3nJ_#$0qMX*HW3tR!k49vog4^_1RIfQYz^+r?+ zmB$Ju6r&Zwbu91P*BzKZqzTuOH;z?C9QLv2dLPjf$+5vUhIzS^WqemVIJTv#;wARe zTl$%~_+&=?0`JD< z0eCa;=063gP8=B&Z?)AOdmzwo@qAPhdQqg->{yNV^U*R@kLgNhlkwIphY^1NzKN}D z1UMGYwBvUB!mS%}G?v^M9ObIv7ZaIckCo-sWFyrv*^lfqDB;H}GL~1M2N?Cyhs$#a zZ&W@WvE3`S8G|hZYczH(?gbNcHn6+${#{*|@YDw!525E)&rMv5+ceDnN_II5$CAau zn!%zao~G-t{E}e6Hc=So%Z*fOFy0dc#ZZc?>hzAf<{GFYonw-*9GKSa2n=h;AHY1Yq6D_ zH6z5hwShC`Sc-qT`-L|Caycg}Q;~A4Q%)^S-SO!+ACA%~AG#7yx0g3^cdQP@a;jcO zKH3|8{0raGg0&5QaFlh{)GMv2-UliRnsU~!S06o)?0LoLd4czJ#rUv-bm~cXa=rD1wGqi^sA4H|J4PXhy7y zgU!?eQ32LVsJbHkm?&>ah;75#8Kve*!|s{6P~Zc8T~y4a{@;H|rI)f$V5I0{o^2b7 z=vA-?-z#mB0+yzz>u^igm8vSri0Ey;h>Y`RoeH%FpHO9PKcsGcc4Ys&5%h4?QucXC zCFmM$T1yFQ=waKj^H#Kqy~^wAi%tY;b?TQYl)75M*rK6Kx1L&Pa8dNrhACx(i8xQQYngV-E zs`N3Uh~%4hovJKz+D*>)UdT~Ac`=otTHYY1x)Ca{o+(l&vzM&ZnYM+=zj-K46j4!R zHdGaydJh-e+W9&wv8N3-;lVwk&TO?MTQzpj<|tnA)BSbsr9*>qrY=_SW0^W+A?zv- zkKQOgbMckgIWhYd7euxCaegnPyW3mkbh*?y6S-o2iOIMdjSra*>}C&Yy|TPlJqIbG zfA8%3RW&*nrdzhbGBVt`64E9Noc#XmYhPSe(RU{ICucHRO`FWe4AVG6>cgEWH1aUo 
zF{6!q{EO>uZ{tRta!bOGR*D{bdUN%ZF+)#KtiIh@j>Yid8_<>Pnc2Mg4jBVbGiZ8T!tGy@F>$g5|g$K7S40ul?WKst(o zE46=r_l>T#s;1$zb?6gWF@}4dCzR9Xx2_e(j}iL~Ez6&n<{x@bKly>hzk|IlG z1kT{BqrB|oYrSTzqnaDgG>5y}ecVbLdwyUsEN8l5S!v_w%jLw|A9f$IMKxU*81O79P@J3U-?RQ`tV&T{WxQyK9}}Tlk(teKY?5R z#R~?Z``?eVKT?044mHPS$&p`wjQVtqH`oQ4?6Zd^u?-?{{WzMFZ|jxk1<*nWD?Cik zQxQbeMLa*C`WfPXUWA(qd&IV526t-g{ah^VBdiv(Fi*1K4ex5Wo;>JQ-NnzpsxwNs^z zW@K=?I2=qW@=tk~UM0k2*MrP*wRt41ID+q5fl`5u$FFtaRcY(I&II{B-#FjTW4m%mLEm<_$t3#t zUC(pvtcZrhS{BKUEO!%+y)o=3E0Z1W#T->%%BpXF9efegE9Ph*A1tiY60K2j7;tO% zh^6SDoHYnMrF_T#juSayRdq z8dABZ)P|Z&+xL--@2%iNISHgjmjimC3{*nZOZ6$2OYRp6I-)m+c7XJ@c`X=ou&2j* zy^g(|^Fg`?1v8^>6=aQD6VeZ>D3@q7w`Y*wY+U@L~jekT$m&kGF>J5 zF6wU}6L(>GZd&vG@HO35NjCV*nUocobeR|BEpaxs(D{Tl^V*6FFpE-J-(Zm{Ur+Ys z=X43w*yrQhnoYx$N99+N9yf^7ON>wq6_v}3c^ro=PuFta#JgE0&X$UFzhPI|_LE*e z$QH;vD{e~v%=#GZDP{Vau}bZ2@x|9(cF!~q+StH|74B;*91>Ryx^0?OqY3Is$#pEP zN4tD$|BRAs=!)~wO~_JLpvzg~8bL$;-)P1v5! zQ2Es1dTyf+_f>f8^coo)B3O-YqSeRh-79a=S5FYQuj)T?z18lwPoiOu8@+VdTzTK{ zs{stAH?d*G=*CG~{p(8=p>8H8Zwjz8IhX+F zB6@h)%*QgzxlB2ylP@9XUG$uANl9^}iPD<^v2fJmmBl&v>+!?K`fa-}fxAW2u_i^& zFdcolkyI8<+*_Y!gFMxg;ra+Q_lvlo5vac&GE`KtcnjsyC_wz z728A~4XqN8*lnljj6`i{R~@&{GD4w-m_fQ5m5fnc@2q2P!v@{5^$m}=Ig36QUr~59 z*03(Na!-Oxyx520%syO?%onGh+0D)xQDYCCx|0GzaO9kukV|~0<5WuIV9j0qtFnn0 z^&odC9v!wiN$d+`pIF_E&S>%b<7I=Xn`D>kuZ5e)pP=f1roBb{BH|i?lpT0hVAQSAkH$d~`a=qtl*bd+^%yl(xLd2M!Sx6>1pV(>v{sCb3|F~-qfHHm}gAfoI9*IAjH!9|Tf|$YpUyJIld@JLo(+pcy zPwrhVtIL=j+Yem}@2*Uiv0BdTziO>Tn1Ww>uszjmQMLFch(^OmcWtj@GSYHOzCP(h z5f&RUm=ip7_RfKocMPX!R_dr8kXO&E6MOSO*@0RjP6VT~VB)~hvk^UZz2(R~HrzNyh1_{|LMxO{JwWr3OkPC=%H?Ej6&I(1wNk_sc{4Q91Zof%Qa!=|}i0%B{%fz#wg-(NfUQ zA!Dv}{ra_<)B>e1-1gZRIQn4RRhea zn@5YS+J)F_s_FCq?y|WGdMl5)45##kE}MenczWQMkcNXOBhjImM29s-mWF3;kk*j( z7Xn1njpM|mN$3aS>gouw4o0K=eVpK3Avj}b02|I-0;D#bt-{rNdRt?Ki?Pr+$h@ct z{sXi#Gj$U(Q=_OllE0!5FZSS!YJJPqYJ!Zxu3)rywXd!NUmC(3PhpznlA8I4!nL*LGUsD1`4+($_EKd^;nSm608hGsy{59e{sZ6cHcv7?i^8|nTwbJ42Ai-KYtO1c8j_& 
zC?B5L8?s6yCnlTju+sQn{rQV9+-neP`oD-5FZz`%2Lck@M5nEsEkj<4Q7=d_}Um^=|G&rHN@Y{e*8VP=t zT!_)%CJ!(hap{LlAU|1GqE?fEECRivYX_MEfc*P#SD+-OFZwaffUqHBAsfGBz-!@m z8Gy~HeVau1s2>qbH;EBi#o()vyJp2=1xIB`us-WKRN~|45NXQSZ_z1fRJ0P{Wd_kA3_dhX6HG; z!#hI#56?j?*8XkQ`CTXeE7p1V|10Y-TKuev|<@oIuC_CR;bS zw~x82iRF%#3mcp^h31>hMlDS^)gV6&c-&1gku3Ny&2kyGMe}?|CwcCHr@4^SAMQAj zK5#6={)Z9)3LQx3+K-<=jfG^Pb z`RAXujqi7hxYP8Gt=KWaJ4(cl2>EUy|1tx=YmjLvcPYg7{vQUB?Kc<#d;J+JZ1VYg z2lMV%9{*j_{C^ub&i{K2+@#0$QM@{x z4o82jMmCD0HBq4NyqgOxKgl{menGeGCGGJm71Glm;Dp^>?`6MPJQ8=kQq3Zvr}-WB z`erFeCYbXdpkF^gi~``ZV`4+Aewp$==8uHe*uH}X68t31J2dW$Owc#f1^WRTfW@i& zhJ3ym91v3aVdK676ax}&x!zX0hIaz|Og zJE*V3{5ByZ`0BCY4sE{&oi70y_BJ{s3=Y6s%rZAYrKySpErBIIQGfIo)CTea4S@av z9b5ZbWoCgu?m#}yANCsn(i6Y42|#m1F7LP<^hN^gRdP9BEKoV$tiDIh4>LUdt#dno zjnp|{h7mN>%ebG8!fqKwXciRYa&)`@z+?YzqZ%Nf@*f4JxWvE7`Y$0I%{0SQ+%I7I z6W0F$lY#2{uLey2r_7en z*)4=bg+B6U#z| zEZ$>`uwxe8CkvjWSaHv2I^Sv7ug$6*B=-dn33dY@E%3QTnbRJ{KB4l+%QP~pkUoxvfr~u24@GW8`=D& zg+^r@p$ziuH4D;mEE#UnJ#90`_n+}8U*b{m?AVv|BtwCg_Ga3FT|r4cS^2BfFt%X_ z<}1wS-cMoYU}w7fM5A=~v7h5=6lYr7y^-2y#Jf-~^nOYy>Dt`q1~~>w49nnrHLp8I z3Jfn<-a+ZLu8zAJbD_jrqwIxxI7j?UofyNJdmJ6o2*IOMM_e|~Rr$Hgq{_qv4f;OK zW)Bm79x!~R)1A{!q{P9T{bHMsn}kY4sRAe&d+2d=)^U@eVWHVW5$!Kx4$pF(t|ts_xDrr1Lnz-!M!5pVTk+HI`&X75q_J!B&=nXvHx{@- z4u2k;(=L>MQyOlnlGfAc#rppI_O8OwN>MaTB;8=Pn7>BZB%lr3%(GSHR@a^l?Bd4StxZ-IyPK{e~4-efY=$t1H2GavR}D`pL?yH(ckmF&oRoGiL} zv>@)fT0v)iV|MnU{b31yWYR0mV=tf7@|~8N;F)ItTkpA~3*@Tir&Jo`*!$oeIg76( z63>7d~mW(jECA)dC(sj-Db<6P!;ms?k2f~$`b`6&c>y)?MA3aq0G%`6OTYlKTiV{E7lozd5N%JoMxAPlnEJh$>7 z{XJ3V>GDdQ;Q1HQj-8gc>2>}PeL|R1)T|+8&G3=C(M`_L+!e;^M`sI9#^_o0eFTN= z244qBSGyZ@-8xY;sEwc!4(PqHz* z-3DYP7X$QTsa><4?g+)KdVD1_Bt=j_i6rVeqqE=?Le?&Kv9$hK6&tuk(Zh=3XkC-= zIr{RXLkF@)*-ZUDRFm0-7FjwSZ!=hj)m^viiCzZBnhIb{(SGHQ@es_xnQP8Mmwd0Q zm=<{5=;XVjJFzltso@uR?*Pp zZ?{2UEdNo zO|D5Q#AWJRZHS6yiKI6RCBNV8qj{!Dv6Q#z0c?tol4p6tuN;QN+0_vhgO;X0t$EnK z_N~L}35&T#u18-;x$%_#wMAU_NV%tRb+zB;Jp*DCWweV61Sc8XbPkPqU!`guu6E2& 
zyQc=$H*_9D7nWkEZ}kxU7H22-bPGG8KUm9*>@(I^dA!?;E-XnaNc7#QLhExNHq&N@K}fWp%MkvMjer7 zxu*esc=m+nxzf(#Iu#{iC+}_Da4glT2z{#KCuiSA89qW2@^Js14}zj~8r6eEa~$c8 zGj3^HpQz82bJkQbIPkiSA^rLJ3+OY6EOH5Pm(;0GatYK+p?Le1z3O2mwA1(N6{>k` zQ-WjiNuLwYYXA2p)~C-SUTvHjLzP;x znA~cG65$V_JHwE<&J3-Kr|wE9ZXf;YHJZkCUp_Wt|_GvAI?TJ2D`D?DN3jMfTOB0-Vv%t}7JzxA$M9U=c(P-&&IqI}>bG zzaG+e7WHU7nc4sb5w}S`ZXe|g0qH& zwG0FGu?Gh~sKqFklsz^LZy#yU6I9H!IX$vYZ}Ldt!9$A0vIFz#LT}y#oPGYUsf6q@iaD}WWK;Y_t^Fl|K@`e4t%lLLO?isFnUK+BO zi8T4w$Uy>SsSfTiILvA+E-9_gwarrwWw^>ByigD#{X#VyW^VLwsnK3sDMb58X_I3 zO78*LG--kg(os4Hi1ZpPfHVULA}GB}??_cTB7|P0h7N(yLJRRNcexw)J==ZGJ#W8v z?m7DpejzJabI!~)=Nw~vV~lT{HKD#2RDJh@`HJ@F*i~bNL#_mr{G}z8TUP)b?Uy2d zC*pUp-b-IVJDx(8I&_x1sw%1<`1cH&w^ub&u-zoA2`FE2kI#aLCDz#2W_ z-WaYcJ9-v{(ylMuh98_7flZ0S{4K3wPC2Vd`S2jYuo22R+JzuV{VeSL{!$<5^=8Ph ztE;&4p&&)C_p=0D-ivY)hS|}NNJGz*_~o_C;EAVy<}sTz9fcli4>Ift)O!aAlE47H zgVTm33V(+Cj?G}X@kkxGvUFhX$?8xj|}fs~>GAcFJU{xSmdbRgfCX@%H)Zn+Lp^F+0*n_D;X%v@!hf-x zY?|!9aW4FFPMqSR`~p($hyJXZV1jc1(+wgXc;p5-;NhGDSpwuXcJa1>Fm^z4ok|}mw+Qy;ZSN!_0w!P1*I>)BB!EO($T%D zqd@E~WVaFK?R`w!DP}L=ZB1#YhiHjqHtwJIT-^{G1PK*&8pSCmPCk)wufvFD+Qca*1mB z$lOhY=`AfKQ+jfMJxPp8mqE!HZ^jCNh|jO)Wj@J_n_rCz(ojP#=K99@nAFiFw}?qa z;(0i0f24X26Sk}3YS?Q7Y_d~y*V@3u5^Q4?7-9mjw-#G+^dE}f%T{jQ;`y|2N{g3& zBth-1H+5Zq8R1+^(}j${TtmHO@wzv0j+2f~=UPajFURa^)<@8onMYKLI#%T)(2a(J zlJc2|+`SAb6}i4L=C^ux!bIhr=Dl%x%A(;ptqfbG+*axX9rW}EmM=+UF1r$KQ~C~C zKyIf!zxko=JP>20zSZQ$Z*){{XL;*|rrTz)he?V;#1S$+hc@3Gk?vFu5Sg5W5T~FU zmuHtrjlt^RTl#+T!_^`xjG-0GpYBs4J_)oT7~&tv+z@xXiQqk|$Ja0?u7#1e3z0mH zI~od<)VyEET4?CiG@Fs0)t)9H!?;PrB+5ty%IIn1VF_g2JR^A#*j3&d76+%3NrIir zHcm&I^S-Qr4j0o|+Zi-nD}~y8W+6T!(d0*8$kkhG+Cj5481^Q(z?K=BqlK&6YMtF< z&$FJOgg1(_hpN2{5{^BJ^*FWmhF5pjVTFBan8L= zhbOaoc^W)J_gTmrIyVQ-R z*PX$fNR@8;d%c(r+j)he+!BajP&_3CampoiW$@M2F$Jeb2~l=h<4a!FA%*M@R?X%yxxhzepXKC8L;oyq|&n<634|b zz-QP>dxm*FarC0ROooW;=0+rg-hFC339h;6QZRC~?%+bmBVYtqK?mfynZvTJAtN}m zb)G_howmv?P>_NCdul;@U#(l&Y_;K8?K5FD!OA1Stx?VBzLs#TPR&LjmNo87d1a}g z$SQtg$=Qn|7Lm7(mTEtQ(-ZfKs{h$@Z6q)Kf23WM{E#8Z2ukfZfZ^ 
zuCqWkwpb#^JLyCDrR6S;C*J}kox@0_7E;)qbef2c&IQ|h17yaB{D#BsI!!O*T&q*8 z^ajg_YeOsar~;)M&B3`^0?O3o!BbZ}D%x1WuhS6;qei)$^m}SlrXVNu$0ytu8gaB0 z-E0cMh7ygDjT!0rlU(fyBl+X8crGUGEMC{2C(&Zmg7rn`YxFUnUg`OMsuS~#t?-Ag z=RAu!7^Dt<9!=A4Y%a3Pl~Atn`1#$~yM<1!x7p8BJ6J38NN=_Tp%a^>se4i!h}{nc z2@k7ZcO0qPc2XGXPN+S^-HjC0Hgm@~%dp<9SSOGi3=G7b8FP2>rPm)_F0O??iA3HP zXrL)zizKItyK~_}{{DH87|EkhosWry3#fa7t;Kr|NUjeU5hflgt@!$Zt4sF*p!O{8 zgeN2>*}!MN&}sdSiDT48>1{oj&`3fnIeJjQzvQfmqs@wYM_Ou6!GJT&W6qa*&k+e2 zh87_+UXY7cuEKia2;%ZY$Lamdh}1Uml6nd~bzlz6v+xVXBhU{k z1s#s8EoH(12mFgjg+!zNH2E+y?lTej%4bf4LW$ohC<>(};X3Qc`-IjhjBJ9S!GfLl z$H+6>BbSl40?5gL6zy!Y^ZTG2%Fjn~B8A6F%c3vX12MQ9KjoA8K zq)62LtVX%ZF_^*fgD5xtb`CPNLZp{q=*M~{zoa+dL?T?rfntE9!R|)$^oTQ}eB7AV zTZjOYBiM3HbA6On^$Y zzIOR@nbdPX*s(mN$4h)QJG=2ld;f+bN=Bwu^zHK1eogr+e^|FJlM3sf4_kQ(c=Dqk zW&@53M%%=LX)2sQE9k}sHpzmu#O66s4ldcSLELf>>t=w!iyVfw+@QJ!1 zmS65DbZTL4OKiL%QE$qXV%+cfn;WI%LpK+4pdxheAc)~3ykmKG(xMIlOsAELmn;ximr3Px{ z&*U+Gbm)=BZNi!V9#0~K!FKV!Q`|MbH#6xhr5D!#ZQQA04Qc_tiuH0TXaz8m)gq&b z4>0G^!~m0;eoCZLPvpe1;!*#7CeQ@TneRK*ntHaK?bq%Y_;Oct_NvF@GiR4|a#=-$yaZX~XI7}05O#)S%>B0cC+d@AJFce4b7 z0nR6^r=O@;-MKxcWv`(0UbgVA3XXer3X9ET2EzM;qS(#|0u$<|gb1c87|EiZo4?%} zYP&71{PFynZ99`!5Q`>vsL{1YBacsf`gHqBQl#Sxy?XRBo3CDt5`c%79Z)9wC-;7E zfghX&EM{`p#xm&b7T^k-AzwO~ctCbjD2%h!IA8*N4I(Yd%UBcURy=bjO(zUzRB28C zK7ZvQCj!e@Rgl1n>noeMkm1`h_Q3HANX^F|$gn>19FWAa5`5Sb4mc?p`iI^u==;Jj zvk(Opcgb{rUxW*{8nu2vrD15nJ8IR{>8OB{JMVK2`21?bk_==WO*2uI0dygbftOgQCiY$aWv2l<03ajc|SLEO}@ zqXGDA-BeMyFlxXYk~e9c247bs6b3kP)gf(V91Dvog^*~MmzX8nlMh;wwS>;4N}U3! 
zeb0)Nfb8x6Vnph2#>27R%aENPU7b;QiC=G=gr>7ebpl9LVo>UaVCjt$zHIQO-X58= zdvQ>50Lo&YvBIkT6Cc~qO>pO*nOZeH_=c2IrGfc%{T>0X(~#9E;lhf`kFk{_IjKqK zJX^6qslAmafEmD+fBQH}rkglg6}X%C1+?A)+qebL!kfTsmLU@rK+y{xfPYByC?^Z% zIf=sp7bs=uwzUZ``N>Bsu*1zs>}|+5x2C7Vx7~jBg5|u*e@HksX2w}8`*>~N3a-ss zU%Rda0Y%f-;ml9Q*>>ovYt_3oUaY zS=R^nonHdfFIo6rc67D`hdMWKJM~BV4meKu@$Z5rQYoMODZ~0Fe$k(H>+!`K-6iMY zMgDPx<>be)VGqUp6UcQ~iPk?d@x9t4$DZLg&kwKO=+vRhXAej*z?c*xx@FW3-Lxk^ z;u@+y{%mrZVE1{LjW^am^iAjSo>^PpF;wrJ;7ARNNfdBoc`15>BGL<__Q15y{rX!j zor`tbKE`K1UO=FQI7?G$3+-wnLtohKb3tB`rYbYNJ<-*4>Kwl^D)@;0CjxVXsvMzoKce5;Z?~ z@y-R=50o44)X7N$TCT?krA68~Tkr21`)Dj_TyVmsN=|>Io-|q3%ob}ABERmuK$o_! zH=xGuUIFGA?DX29^b=A+OaHVydB0hnM<;PIB1H-pc;P>A&@{c^!f4h4Hci{V{i~v270ATb`Gsw4}!C=-#@5CtyE2W>k z^IQ=p_20w^hNIc!TM_dc={1~c_x&%7S^Yr^-;;m;gy{VKt+Z~F8(W!QRce5WY9e1p zOlWSkfV?yupWH!nj-iqZUW*%0J}$9j=$fj`zu zbotNP2ZF1|Au5C1JThH2DmbIoyzliB1fMnZ29k&lzOn?FR?1r}_^C<1=A;ViH)S!6 zQ(s2vzsWTw6CClTO5J-U>w4csna4ChDrCMOk*Vc%z$y_H%?Ig`{DKTAb#)Tt_R_Y{ zwCC-NOs6TnGkY|pcIseLwfbWHcNwKFrOzd-dljNBFDXn9LZi4Yc7SdX4+k_hZY?DU z^Dof~?uzVE?eeh>rY3hfcv#mcA0kkvq;G9JR$vxmp|E2_GweLw(y!bvxFch zpjlw3F?|ke>T@%NHY4V69tW6$nEN&4xqMHAtOh?W^BB0>FOS`LJk;1XT07o&sWtw= zW?|Kf2Noo>Z|2_7`AKQOYsfQ~bQbRmve!g{g_z0@R;~rZ?y19rG{WDaAurCz95%=h?)>StQQ*(02P*5B1IgWjnOdz) zTm8FEl%qD<4NX2lawTGHy)Hy&C7wPEV2^x{&#Bg2(c5TyD<*N?hs!Hl&pL zHeM#o75bdua_}i1L8un~R>w)8`LO6!`+R9Bu}TXDs0W+JQpZ^Kxbb@NP)bEzpw{El z31?YFUf$m=x!&r`O%?g9mFTT2nc(%M%$`!7^3^(wkCF@F?K%~{(uRrx(s;Ywf>)+Q z6qN>{VNJX~?-4ddV^Srd7$Ao&%)2#9-+kS<-y51mm`EXKIzz(*WQrHZO*5A~HdN^6 zW=U>REq_$u+`6s-D$1RtHgJz=ta5lZ-Cy8semXky&Pr#h!HB^`X^Qz9IuEwkJ0+Lk zQ6BAZx<_IsQpD3?(Dw`nb}oB~8Y`Zd*y-12`F4C1`mPs;W_i6Hdm1d%-thR73=NA* zi>K3jf$`;wao&zn1O3A7EICAWg2BnNrezn*HdVYUPrm45PxlaM2{rNtB@l-3egWwX z*q43)$LTG-m{h~W`1uV7fuo=Cid-Q0^w49$c*bQueZ~QQv2V}YmXxkmy_5BVqNC-* zauf#{fhu=Vww9Ycq5t-Zy3vZQkIwi5cYJ+lnRg$g<&CPUZnQG=)^Bd-W^j9W(BH;%OS+uv z@U1Z7w=JUIHPWoExf=UIcZ_V`W2|B|y-wB6?1T9|yo{lm$kFw+YeUg>Rle-~rJC}L z{=Zij>^Ij53g*8F4!cC}*q2UmgCGvqQ%?e&w)Cdm;0>WSvfCZ$9C(G#v(UVi#DIF7 
zs35czh$T;)xwr^`*C9b-T0;6l>=mS+2yoX#cWn+W>P1}qU=|u<+y;*@cBEx^Pg+jge1eUkE4c$$(CS z-nT$WuJb=U?}3lMqywhXUz zt1C#2J&uaT$+AUX`i;CA-N-JvjPXMHeTSH5i6$Rf2W5@zzJQKBA*3(AH!B>d#13$& zy5Ik-!ZI{y&(ri;IB00$Sz^=!@U5ZvS9ft`{_(td*2`sz!{`8~qelsM9Z{oKqtkIT zUd4wm>fb7oAM$tu8O8oD&~lA&p~d?Zm?D_v1ldFH;Ro-eSF0g70;3}1a{$4mqzT5C ze}m6&eT}X3(9GDrJY}AofU`fgj@yk7qRfUH|{1Htac7Ex_~i zBfHPn*y*nfKwn)^|LPa;_9HgB28~BjF96Lf&>8Re3Qh2aB6+|ss-7qOxUu^1Az?L7TE?5dmpfk=25fyfj1A%w?t~4VLsF=PLFE|-q_UoE|p;((6d#&;;yb) zTP2%#w&`A7#ck$_b$|I>!L$sWs->|wTVD$hOfo~UnG;+ggg&D{t$mXM;D{#)&W zajLe8*$X)Op&Ex7)WyaoZRoIel{sc)s*iW-b=RQ)eU4B=-N3z9qMNU2TAcJnjLB=n zOg(b+I(^%PqdvwYF7{Cdy-NCFyyVoYwnsO}Ium0=FFlCMYyi96LYqkwR~MrvveQR@ z01Ca&Cl#=czhR-&3g7qg9xzOGkAA4baz45QhHli195NQ(z4&<9$A0Ke(L`tAGv*39 zHi?HKg51(KhKVv?G)t`5C!!m$U8(AOK}pMzdX1-Ma~z#q2Q|k&>!}f?mDRc=Rz`B1L%88Ts_)=Ff5)+O;iQ?F*5IPJ^^uLId~35Y(Fc06kfpVhmjis>9bAMV zQO^lZ=0*xSM$m@m%lgz@UOg|~Hm7?-(oFn~NseXkh9eqg(CaKE-m0M{x_oieQ!k_9 zbh=BXK_jnWka-Y|bE*UM=hQU>9USedEo_!g83D%d)hAZ zIOD3xfxzvSUqWsE*6TlP!k!4`NcuDW&b!U9KjHCZawj)J56qvc%eLgnekf2BtE9uQ zJbZVJYYMi^8@fbk^MNxkCNiT@&>}%|LE)KmrasXY~fZCzg96Jl&!3hUo z>lFLRzR`?t54RmYvM3G?f=zY|EtpdX)(lpGk9wf3q+v+hlW5G?7FJUwp6xN5`LXm zwzknY5V?}__>iVe^Tg-+-9eTX`T^kM5@&)Nf0_0o~M$TAM z+qXXX&R*)ZenB5~Jf}?Kv36TWLVAL*{v4v`u_uSvcpo?4h^HEWHTtr5o6(eRN)D04 z%Ha&#Mdr&R_ghxxy-{C4CxLpw5kTB!!TYvS+aR~eJ^Sh0J6AiX=@yithiCeB%-s@IT%O6P>?DP4P^M->JrjQGms4BR;%gdd+h=n{aJjefTD(IL z7c6GVc|$<}|CQyPHx_zvsaggfm%G+=t@|cwYjS~|>;~cfTl@K6{o2<|5fB~OqtP|_ruK;1Q%FFA$1h6RWlCI)(orO7$DQFpTj z+j5n;kUwFN)p9in(E9kEj}flUNoed(Ygd;}F@wRWaM`|dA@sRC3N0B!T=H(K)DRH! 
zxRN`|>rQ{ubFwqC)R-CJtO3{Dv;1%Xu0@U-O^#U!SB0%sW#sd$vGX%ZT+-1N_~?37 zj8n|WBfqxpO04%C!Drmgdo$LIN&(5)Au83SF53KKO+C@kr;)uf$_muT*X8vDtAZW+ zqJo{acY03>Usx(pw&uRRd!;pzTSNT zo^>MesrWeTJrrZJxbo&#cp3DszTf|Ujqm4kh63a!eWaKNcH9TV7aIY+^I={8yB4V` zV9ozMBm0-pj{Y=3`6p>}za1}$=%IQJ5I-A&INSmPSLjkv;sr;i`Jn5cAXxhtc)s`s zD!~dXPFYQ)2_!0k_@`BpSp%>SBCAWCWXh1>GBm zSP#G!uX+H1UqCDi(mBZtme_|*1wu`~z3!8F8^{oCUO3tZf#P{e`|_g@PS`rcLapOZY9() z6b^&g7&v2+&WALB?dzu?heFOSUG8#uLs)Zp_TKeAb*jv@}BB)>W1T&v9B^@uN~ z=%$HDZAZD-NsxPIp8ibPT%4^q6yP@E4ao|l^-=$oIsx3XvKSAo3NqS5YUH(7EBB)=eSol#l+bz zxuV`0$yaeqa=N}@LPpLezAnhSx#F-Qqdke(5m?yUHBZuYcTj&kNL5|a%UJI=u;X|x zg`enhY($*0#EVi#1h|yDPEvk`4obovxUJp4<73iwu}i-!SX44`CpPK?HO$%1jYO{5 zsMnWNK``;WflAi0IMuWJ&RNd;Yum9ss&C)&hk|u=Wxw_q9VHZOi@XhNa z7&~1)kqJ`v$x~_CpXvG>z(p!55^kq_q|WPT1GGj#JCi5IblrW`_ztyaoU4objX#FJ ziV9SXf^vMUbZJ?|doNmGQUHaV5)|bwJ(I^L#C3MtE0#B@_Oba^TBOEKfP#yOn~5bn z-Sd?6OI+32<@Fds3fetc21@}%EY zNb!(?WPb_BN_Z3rBRZH{$1hM}F)#V%e%~ z2WX1v=ca&ga-bDd8WRGV_e%mr~QjwLQUk8p5XT*8`~vBNt!@l>#hr3=l1y zL^#5b5emO~3s!syNREy%-lO9K(i;>pKR~v6SH6Jo0g{H9tP>c;?4O47?5Ak}qHKU^ z_+LL9O9IIQ298yY3^B~^Tj((Q$9W<8Ixm@uX&>lv0^e~5GQ0HW3ak-6 z4>i< znz#{^fjyYP27==_3Uory93ft zm+gTK<0c2%Wh1Iy_{i(V3K%+kY(%JOP)B&hh3-O66<4&gYPOD+zLbcQMt*#I$$%%0 zpkC96Z-yk4fi0w2Z!jdyx=8&%(vc-M?)Lb?Xr>v_ZST~>^i@K8seP0K?xI7e5aQwz zEki`w(t+7f6_d@2<~)`Z6&-#QhYY8FsHwkHm-x-HRYHxGJ}knpZQ&%nsU&<;e0$+@ zj}fumhQQ74`z|6qrD1FmIdHnV=RK$hGL!f7Iuh;YTXReys{`ZuNS$X`R%@h$;k`!H zszE{RQhm!3GS-?ztHbKXA`=R;S)X!msXmk}B9D>d0(8 zGt%!0)%FU$d_W}UjOv0_`S`&5q-?t{7cr;)&TdKjLwh*oS1z5uX)phqevbdiMNN28 zuP?v!unoJS|0<3qN~_^Xsr9m|Qv z2O2yGwkqji)RH{tAwQN4oi@rE5H)%|ghyKO<&fECZn!{^1s8`Ml4YBCX-S2z!Qq_B zG^Pilbkk&)X(Og9>U!Uk2*w!K&G8ERF0Q;3KQUs{X+D3Kv4YYG>*sx?gbpl4hKRE- zg{TR62aT_o!V`sZET?Lz)CcW$Yz?J&*6niZOOhjKqD?EhP>=A&h7w|1Da#bSy9s!G zsrRfO5vU`uYOwNRd)ApAKCtSo$di$1$(ENZM}M&S=*QtKXK?8 z7eKW8!Bz))hQxkF7XL1U@^}aTDgOh>tJ4%flx0FVY*GzJ0c*JI1}GqaGEw&$3z%>u zd-6c&??M`6w-4qIK?egUJwge(o&ivga3ta5NLA|hmk5rwzcTDF#lW_DNe-9l8-N1< 
zfbq4~ah`xO<0G=C2_)mDPhz*9;0_#sAXO0buO7cMtHkkiD!5NUA0BLqbxw@*aM$4O79_X?2<|R{03o;pC%C%@ceilo+vn_a z?vK0g`mv^0KULM$-PPUIRquRORhGp-eS-=B05Ig`py~hsPzP2vM}~vF6}vm#VL!xf z(mHM$j+SnoCN35L33EqN3y8eEiIs)Ag^9Vh^N@uw002*8qp9PjqogQg=4j7m@(&H0 zm%S5=8vqa%^KvpVv$Jr6m|9rbIEYZ4wRTcLY|KTd-t#JPC^<=4Slh_?xL9cTC~KPe z*qI5MQ;CT}guR4d0_-i^OdwwNA01qUyhNz}C07Vm{->Iq3i2-!H#-rk|58dvNfjdH z=wbokW#eEqc27@FHcxIgM;9x0PC-FIb`CCfE-qFW1*@yKgPVyLtAi`` zza>B|T+Li;oZM_29U%WmG%iho`?5gSQWWla(;p*t_Vg{QyOX`1`auSkqu`qFSbkTHl z{P^!tRJC?=b9A+Kbb?4}@Un42luXQQ9R8{K7l)FPki3Jdn~8&&g*;S*3MPZi#>QL- z$}PYNg~|wU2na}Wa!PYcN=ZvYr3Ik;G7>WUQasRq^FkfX-0dwK-2TmL{=d9}|B?3} zW3YFEwG6dzvGK4lmvM2lhy1J4LN@|G60+txvP~eb%=*`P8l5 z{>k#}c)P*3)t265D?3-JXnJkA$#MO8J@C(HrUYY@B-@*dt(`T#_+043#y4Qnn`DTJ zmb_C^Qk3ewDw_-~0~JAjBqRhG=dDBpF|r)$=HwRqNzUTgd`L^+E4KUJPBV1WFdZIN z>GaAJWw}(s-k~ozYFUaj_~H^^i2xSyi4*!aSzl*m6?qvNxzuLt{6IGglvhWkERmpC zOiD`qAk6JA>TDSaCAwc~a?CWXj%XMaKFcuS7?wgp^kc*~x`*EQZj>!H)%U$PSG#`F zRd==n&i5IJQhA!iGJhnxW&%OrP$m+Rt8~8RcZjOIe;Z-?0qSaEWFJhSwSp^|=n?uOwqZ*KI6YCIh-=}Hln_5b z>XWtRcdQ+lCPwA7P}z0B65%AsZ2~to?XDV){zmo7KrjSXS_m=C+{S}tWarT#6qn>C zpgKNM+A4?)tf|oUobhq=rEr^`hWXPxOC;o(>a&*A!0!$*va_Y-!ym028EeeFiGeAf zCo*OCy*e<_d$5kY74L;t3_`w=2D@9#*(=rXBD5IAswis7e@bJGgh&k{uMj=-e6I1V zBxU6jQvw!z*h4{eo@S+9niux~(0FoB%n z6{JS}wXz%*k@nH#zy)EObHwEJlk42LANk_*Ee)*O!GmtPtT(o^Y@6!(O>VFU&-ZB+ ze0c&oI$zQvgq}gPZ2=M>VhEU&&->f@Qd?*DXSv6c7E_=^*)`Ag6p>(cTk+p zwO00as)4;FJDVb$1n2+>OnP{})>wbiFci(u=dZO2@Ya*aQttOo^Bh}zvns&#yzC>v zvY@v8)8Es#RkjPp1KBT>1~AlCR`;w>;6WH;m4#LZ3HN+!e?=pNPqG;s2jd~I{IqlQ z%?v^aK^C8=)LevXI-CbFyP>X&=7Wr>vi4 zzefXM2LJE5!Bt&rT0=9-t!w8+rM+-{8T?BQ zb-2|pP{5XeE&`Iq1Fm{&wY>i4;3%Ey)`cBkiJM7l#pv5UW`phvVWEMOgj3RE+1%n5 zPFr}0euenNW*>icCxm95SnL# z@`l3v^|y=sKf%9yc&E;kV*W#|&s9EL_wjOMS`}B zCNL{WnB72W?=5M907t8Ks1G{j$bbU_Fwo!5|G34L7H?89XgT zCS*D2{`NL51!<;JPPc4(HgYr9JKW^ua+>|4dX4uxcD}-`I+_j_ucd2@zi}kkC}o^} zygm0WL?d=mGKo@{mHRd_+h6PI89Nm?yTiomU5E#zDO&+Om`nYR$^gPW`$$fmYOACX zic!dfP3h&g>aE`49lVYtQpc=@5 
z&dkgj>qnaxb&#G;jfSQXD-$M(tzG*ydiucc@=k^Htq?$Os%;el9KYt3av9VR%o`3)QGN*Ue-b!u= z-TC?Q$FsBD>O{9(^U(|x^5@SVq`9^EwK=->pqX>F4IqD8=YcS|;U{MX{JTkDVUQwS zi)b3Vd^rjAszBIx906iz<1zjgYbxZ9#>Xw{kutbtUq=J0S4;CReN&p{W|)?!%t5gf>O>SUY`f-y2z+(LPjl@8%%x`4JBKR z=J2AUZg{l5UJcQctQi;?7+?fNBhum&ZpQB4Jo?V9J=M3KVTMvekc4%K-bgy3v?1nP z$fAO-Z9kd(6^BeM-TDa2#*qqQfvVY_RWp&Qwt(xDhIAiNM{Ni}$cGod{y1jnCn3*% z%58z)IisVkW}SK`fcI;Q+DT}N*kS=;8$C*zIBOVX-Dk^s?-r3>YEK^3+u3rMS1`-y z8eBz$h@!i8!tZ_lrCX4COk30MWpJ(;7DoHawyI|$#Mh=(x>Y3{LM`x;p@OdPAe(OZqjnFG^8A|H7{yV{WNi$QG&HRkP;uD#9zz{lt#3rdk zJJ-gc73BnKx^I0E-0l)FORaVFtHJqt5p{WB7JH!7*rrq4eOhOaQD9s0Z)+YpnqK$; zL}hZiGUJb>#ij8Kq?|LJfqN%SO^&Om{eeaox-1gxLvM6=zyfeP9eiiTV{nJ~TUy?J z;^$+6zF*8R1RAO09bX+~lb?8=G~uKzj*8H{3%-in>si^Wg-~<0{7L%q#L2JyHmDi; z*&T>FWGq;^V)~$~AbfYyp^5t}ChG9ew#jk%wlK$B?6ps-udfeaWm&&y z&h>$1?m8ys?P;6Y%Ke3u&-#=`DWBt`c)5C=JBjMh(M=2SmW#>)EwfVr zb-C}#e8KZ<0A1+Tq$zRVe!7F$#=8!-)I@1K`C+YLUj9`c&o$w!u2r_X5)2Y7ashzH zN3X4@UMdJetLJH(!^0)T8I4}sj)Ih}f`L-(r~{G^1pA!~Y(<%DftpRN3;9%X1U?uT zq*@VmzxNx(KPfeJZgR~~OACMPI~)|CzB)4_{Cm3R>@HW-{|zEM za$C1CQQN4nD8tJms<4+VQjBct$N{OEPMLOPF#k{iz;O4JQ)}TmYXz&lQhii?n zurQ9XMCl+P9!_!+qT`!|k9-@RO0ls)EXFD37FLy7Oy$QBpH;$~gRB%4715RGIR^j= za5P$&*S=s6iE!}FPv4wJUt8!=G9w3L5Z9`;Oe*`U(JoO;uZ-ktG!=+bxBT{nj=Iy<+)~Lf0Zr4NN zLLW!5-s;OAzMaU#>2tkb{N9M{ulvznZmzI5*9!6q!kcta|Nh-JHXmznO;t^W-+?20 zBM<-EE@cU|uidacnOVD9|GV2k5XDs`dnXG!Gg_s-b*sbJinm%5x)bSgz3=#5ZUCbH zIS7Hksof9@Do;UyC2TsR^*)x3?ewN(Rol+Srd*NDD*oXt=l*5NV#M?>U-$W9)7`R2 zt0z2YA^@C*i)PH#tXMf2MVoY-m4(mzUSHIX;4L}I#OWyinrgXbVou@p^)*SrC2k)A z#LmI?@vg-^C>$F;Oyw(&&1O%S8hln<8tLm+{W!^+LskO=x4blkl!OFu>HIbb8()!@ zNInIaNX~(&IvD#PgmBlf%|Cj|XBBiZF`|?dAGfz+QHwN)!(B;76*gE@z+8VXzE^3P z))!X)5^biDLcCHwouRMud1zpTec$_NT3c+q!|XRvhK}UzN;fo=89$)g$m`E_y8F@J zle>qWW#c@0<6xC+Q4vN)#_OR|gI0cr1@Hl7DE{BxV_c0=mF-19L?3SI#Ii$2V`C#e z9kG4>gtMOc!@)G|z&lKGf%}!Kz&8t0Cr=*hsaRs@_8SqR7e7Vtt(Y#+E@NfP;_n=q zUN7?_fX6HZXx9=fsE_q(crBp%bQ)+B+v1(hGlF=(Yz&baT@P{GH;g9bF!*B{NM z8KJN)Z3gsYc7B{GW7H@O7X?~g53d_3#~}|CnFaGXECudE%%nhbF`dG%&$lPb-3!+~ 
z!ui0W5P}4S>Tsh%I3)j|CQLl{F1N@6Z35U$yuD-i)R>UHYp}AOR~R!Slq~0D^w<>j z3+#mD4oS{~FS9RqmQk~ed0u2_Pb5JGR3By&QLy4rTC!t$M3_cim77~y*fut7z6n18 zr#*@SuxBWLUJw`>1Cdv1Kgw{RZ9UU^6=|VYSTrqq!TD~+v8BNrpe`!XXWS5s%D$G} z!g%%Aex!iE8vybsbeS7zIH2Fe?5xGnX&ead=MB8w+1uJcv1b-E1)dIZBw5Fo=V-&= zE^{tiY)|xW4bvq`(cUrVO}=-cw-%tg_pKY!UuFK5b;eQrtj zAi9DB>e^umz)Kktz+B5q$aG_r}#|2~EGYItMM=m_5C@OVkz9g*+oXScnY6dwKTh338NdA_*%!K*; zgqIITLH^)lkq$tk@cYbPWYtc;k42=@bPuFeo1b#>Q$JzrEQjrEpc=Sdl)+;Sf$24zkIxxf<|PH)2QIPqrUa-koMY@zdPqB}It zH&G;F@>htJuM@geSFl3y&{ijB$N_1dqA1Xv`<*QggsiXGNrca>zCJPj>EvYEYPJk6z} z8Wsnv5e3Ms^^a8nRCJ{|)bn?PyhTx|3W&7fjKv8f)EI-5I1@GZ_xB%M|IpCVrNW++ z6rB6JI9hS4_+|g=LAAgx_Ov0hfV=E;Bo2o6V?0%s2ShmDviT$y^VwU_Jm@-G~cr$-c$a z)Qm9N*%}{~y;AKP!<5qKG*?O8@;)Q8E9#gNln{k?>M`;OS1YI15ovLzr>t$dJRoeV zJez7(R^x;2!%ZiFHNWlAC~kbzuq=u2h`rvLf$a|sUKf?$Uf}aRXAJJveJL2Mc5X?) zYPE;sUvhHkQ`7HGFYzYxl%%#)N)ktEl#&TryA13VQ%K{5-z9 zT3c9<=T84^bg7Im_ojX6#c1~9P4j4M@dTdA1qLm+d@bR?pKEV)7(W%sJFzG()~?i= zuMjLU%2zsc~Qk0+ewDZzmzgUi!2r(<%8} z&&LxI5Xcy&=j5z4ZIZ>wZN}Z+Dkpyqo(af3OVxA4@Ycgg#!368156lRz;ST+*i)-R z-WU{Cbh#oZ(*Re+Q@=MxEaogp^FxLo1FGbw6nP09uv3c}ZyR6_>`%;lTVO&tvlGj;XfETTG^iYIj8ywHT+!9?mle*rCF zRY=!hiuaNtMybKMQuGtg@;P0(e*3ZR>8h7+&~qN0t^inmO}lJQkna3L`>&n;^Z^iu zi$Y73Rr2@6uoDLlYO#GvTzS=>e~1S&^c?tz(0{GhnM-jkASA}sO#Ce@tr(Q^G-NUT&i0o1pfqbv)>B;{K5RB#O;zAXRfQK)=8)})%7@iHRa&9sBXvbjf ze=Yh83Hmt{`1;<`){EfQQBS1WL%WNL<@n;Vd?jP?pii`58}5TgFckFj*RLDVGH19* zj@&ewy`oBQ_4riX=hkIJ=VKQJ1T>KGL;KZft3&5i97PelLNO{h>X3L9bWBEY_;tOq zu8tCn+&h6nohs+%?2MO*h!o0V=1#cpYWI%Y%!oiH=EvFNMvn2B1Lo8e$f3u}2=FaP z!T%x!y^L8vkXlM4>MdLCh%D*BIRrgATT&DCLjCloxYwr_d&6N_j?Y38qeGgd41%po z$GyRu)G;f2pyK%C@h!IQ$mE$dZ7hQjwTsY2gnY1>m)_QfC>OE5%EtT7b*wM<9Z2Ll z-oKa@!9Vw(X85^ui_`NL2i+FP{1>s0Tf4Zvrl%0oV!=h}vJl+T?RHheN2OeaXm?WN zIhBkYRMk{z*7l?qkaxsjgN=8#(B0WQ4HH(Yb3!dCrr~APZJ35eR|b-^h#tndSJj?9 z1l-s6y~vv0UtY;rwPtE{kd>CuGJ7dA&kNRm%P{aSg`WZE0e;UvTE$J@f zDM)L3Xj^K}E!3Gs55x?OjdeiB`U|@`7IgR`3#P0Q(~$9DZtegWnXP~mItdOn6V4bo zIUTvWr9ehzqBImuyZfT{dGW{{0UY+}0;ivj(Q69GFHnbr%ghQV-)G}1Sl!k>xqG`M 
z0{%2s9jC~_z_YRpv}%1R?PlBOCYz9hCVdquIDl_fKS+7c+H7)TTMH+7LC=-Wf*0r^ zUp&A;gsz@S%q5qTm7OeAtM68ZTwPHW6|+IUW1wlvE1g96i>PNfH!*Q?Y6|DU`rK6u zUznZpbQC>NQAO?Av5tDshEXBsN^+OrHI9z(=$-?Io^V}6VO*&$K}lb^yRv=ZOLEaoQj{q% zI#9CsO)P0?J|ZTDyu3W6l@(1cIQAYBewZ1jsr@92NPe@3Jp zAvX4*Y^cM^qm!8o$dV`-iORd0))LP6;Bb-6SI>;CisCYv#4`5M36Qt1_#u0MB3=D^ z`jX{XkHp~S*v%4;mr9v$wyU{y@w8j);QibmV+|nwX*Z(3o|@N&ce!qLE3VP6N6f)?Jl$1&3S4D3fz1>90L^A8k z!>$Gfn(dRv=)i8H>u2%j3(`OYB1JPtAoh!CSkS~rw@`v0>QA2Ls-JKNq}8#7m?YNb z;d((SX^_s9LHU_gqUOrk(AqeX5Y+0TX&)!ig>FohN)oHL*ZsxivM#MJWWY<+%(M^L zpOt6X46}x;+DN&5voWhEkmc03aduG%CDL*E0>jhi?HdDHt=I{W1xPWY`%F&`-?TZl z<>v!+qj%EtN$-99g~tmJxv`L&*Vj8W`P0)s2xZ4fh;JB{DrYeX`tZ2fU4~rOjlyOj zz25S>GbqF7p*Rf|2(;nL%*I4|J~}*vv$w%O4fK)o5=#3;d~4V94S`ywRtcV{p{1n- zjVP8()Gd-s0y(#+q~x*|-nyuvs;aUc*9V#GABQ)P$W&in-`SayT9Krr`@(EMlK9%h z>20eJFu!06&3|9`J-x{Cz!wlmknsa2Z8|P8)5qXyCADGNp}vCLxSeRMz{a-B7)wHy zkvVf4fnZ~+=qDO~!$=cCh@kUS%F2)?tLRp2nW4NPNKrddnlN~gEl~j~LJVp9*t&|D z{IpP4cz#t^IPjZJGrb8WqL(zcO{%Y=r8CMJ`E?OEQ`v_O$A zx@f)YoP>mAef_7+W`Q*9lIlPHMwh2Wpi0;TQdUzQaJslxBkHYQJ*TfKCbT$)k47k| za!>LpXZ$PYRW<)>UoGaw-s$P7oI3WIxZAitC6;wibf%a6ZvpNnmq(K4 z1DHLbY$uELR!4Kyc`k*~(IyDtV>>%LZXZ|@D>xjQ}|t)!!-$QU?3DoRTupwq>ZNaHI~hTeeSymX>xAG_M^ z2-ysGZoGhh73q?xmLzOGYsSPWB{+U_za>`}SECB^s!5u>HT^!NM){BAe{W9su6)CHghW7`@$JnRIu_h zx&V*qH*2*viFYJ#QGnq$&!at~Paag}@WW9E1UKc$r4FqR1lA`r4cr|Ro3}W=-MDO`gy*xZJ z9Lx?|4_$J@)Gh^+4L!TMwO(rXq!9|$V`F1AH8qxP0(o-lelNW8Y7-DK)K5XMz?pNt zCIfntoQ=&vBDJh!x(l&{>ADdeZvJ*Mo}7_~sI#qNn;$m+E}!=WYhxS(stn~F-bFP! 
zN#zNf$Awmw?w5SFEoECB3oKY#u2!(kkh0_RQv;W>I>o$TVnA3nwq`&Qm6X34UK1mx zHikL3&QfS~2yt<^AVxI(Yf*l|3$ZSMN6_sYqC+hDEe*R7(+!~0|2^`PH6$EQDiRy| z1O0@CjvklBvu`+M;;Kvn zb>cf6L#<>_UhwzeB%Rjkd?(|s>Cee@KrvIW&3($8w-BKY%II63jK24VD1@>diK>cf zdV0&J*{#o_6x3r#gWi@hf3g-`dyd4eDLaj!MS7He>=x(QlJBKxm+#vV!>DDIgG5_J zK5~Ql!7=P!GPl4hv`YGrnDE9H_7Uo+7=8wCxSOSw`?OKEA4tk{67R46ysW71p4X2* zYRqi0xC=EslAT?rxIHWc15i+hcvpDS{6LNQ!)u)x$}|Xb8e4s8NcgD`NCQ=Bpn$Nu zu}pYv&_Gr=8XQnePejtXK)rGSh0i#{P}FWN*3Kd3ne;x;)D&k-NEla9izSXFZV8D{ z=<(h$0BxsWevoRvNOEyLBW+&algv06cAQ7M2A_IBQ4b0V3h<;9X3(({x5ZrNcE#FX zJT@d^^nz}=WVo(90B^_xt`oCXvd;H{9HI%>;Y53QRr8z5XAke+_3Z5Jt-D<*9%~z#O!MFZ2F1K-hJ&GBx+UWz9O%1M5~ z1X9@y96RVH{kP?VDlboKRNVmCf;L&pu~x0{NJ4FjLgJJ{<&pHdl-A(_ z2?5;zvP>wU8%a?qnBirAYs>NO80Gy2dNGX!ZjYhGjs>-m|4XA)uFvE7=C9iG@GrQj z7RKAtB`Qx3Qdq#z8lm?e^bB0@Tc!5A6XxxjmZ{o}?p6cERM**?&KLwe(5o!!pg0Qg z_zxQb!M`qfoiBH^#F|ZZhf^4_4Nq%VocFQ~*?dz zm~ycsia*MqP#S=d8+L#yN1~zMK`}D{+xGR5UaSuYE#h?J5Ava;w$&+;}H)D?WTxv2^9^ z^JY0cE-vnx{}b6rSPZ%o26#x%@#)}2EO5eF5dqov^0Sy*LsLY5aE&+0Pq3}ERPoE4 z{b&~NB%_!7AoVuRE8nlKcFMiXM+w$xd=<(Ig^R^mZ{b}PI>&OcwMpDihK!&#uKD|D z&Wo&8^wL&}#|i{s!Q_>Ilu1Ξ`%Qfw*PWx4QdB+_^Z74?c@X(bGsDLPWkwfBR)y z$5j%b>!bV4;-Nsn*Y{~LsvZ^r=T8?B*AstNs+`VOVXbMZZYt-ktEbz-UX|tKsq-@^ zj_XH8wlX~f$2HSejP9wK8N432ulVZeJa3t;E_a5?`)4!@6RpH>YOC@)Ixh8#7eCfi zR*sId%PA|5jgE|rjVQ!}>h9N{IP zGlKAdpi!bM&qvhrknwc!#<^|!BkZgy7xt_vDG}m1IXG59waDPCF7tA;v_w1!mXib5 z5$w3`F&!NpZs_HWAN2E>`S>_Eusw@`nPtY6=rj!M4cxV~EOwg{oeq5_)R3Q?c;M`o~A)|Hb7q7 zX`|1kQCH`ay1ItsH~2^uSJ(RSJ*;2Q@01V1;4dGZxkYWxL6Q4Ht~sU4jR{sApf zBj*A4c?YBF%&wpNhX&~AU!85a2;aZ9EV>_!@zs}F8A@fbC}=zAeC9c(ZYMZ^(&O|;Uwq>t&tb(=6CG?n{X8{hz?${ z`rh6#lLO}ntbwVmUqR>Lm`2h<%VtwtZd^%WH!qx0%{ZG|hEHAlf+sWkDoUlQhW!(# z{l@A<(wGL{2y+LL6k~UmNJo+I`Je+3>_eoT0gMvdip{@?F6<Xj+=na_90G?8 zO?b6^2*}UOsUWr2oZM_-`J4;{TQ{#p&XlTQog42QJ`3as!(|?kmM+fD$vuAU!nZm6 zDt359&!R0hIsO7?{#`zC{{4I8(@&Fj`1QptlQet@ zysbqW1@el3+SMZyFQkx&jFLBB4l;=53myt48=I$;Z!5Wg^5vRI2*gY#smLxUKp-;M z#USeowkm+&Z@aXggMIClzd*Dss4NrBU3gL!c0)y}k0*08R+Tr?GoARHc&52@wzewc 
zNN{b31y;<&>x)Hj?qR{+=`{3VX>*;@WOivh`=XneqpV3N<6!TkK56gnrqRe+VJAV4 zPjPT=YL0i7rA)V^eNj4Euf3gifJb>}mzjvTF!pmAd7|S@#x# zaGKxhj>}`D$vzA;`nc^D${`49E(!*K2!17^xO^g-yFPyNdEeudMF!8{W+gZ>V$D5* z>fQIod2_o@LzIKXn4mD9#X?-11_e$@3_^mu!ITe&0;ki%RxE$1Eyp?Bddrg9_~@m- z62nk-;v=htTZ9%B5!Dnp*ez7??A}BDZ+wk)h=#AG(S;`yLXcf-fWSdL*Vwe+U=Eg> z%&FO=-Dba!etLy=gJ|;Lnd-0>UTU|V1~3fic19~f8Q||xL7qC09}b7pOldUYYPx@g z_`aQtWJz>m2NZf4XjwAcCkaY~q3Eu^zyh_MnP273;MAp-qhat*@9VQ^#E48tV*a#h zjN-`2jD7=LjttxNOm;1Ndc}Bjf)eit`mUfDfs;Fj*IQE3NKQEY8}Z!ecBr8F zAeb)aQQAz8^ZMHrY8P%vTSU`9P)c-~rHz*<;j6};+aroOLj^qU7o8YU5fNerEG92R z7}_xJy}WAV`qxaKU)CZx%L}3*ZFG8-do@ai_E?%aeuJvVgkZdcZ2A&9F-}f-Iviv1 zgm=1+btXA-X2IB+m)rGxG}@)ka62c|?mooyW_R_bLZ`9)#9O1G5#c_tF{fV&e!c zgCHT-B*+QI8PhjtQ?LmmRQa&~h!;Aknx&sqF29Sp{Zz7OFnJQsNd`%zNx6RgLt>VR zBoCHgL!kgl8Gd!}vg_c~V$R>jMEdbf_B|MuZIo7 z@QFs2meDXb`EjWVuNX?x<_=rARq;WT`A&r}BVteefYs0wI}EI%|mRgT!< z1ANjIcJKudD`J>#FbbqsB^xAH`a3ANWatWLFu-uc@F2<$U(0h+N_0JcJdL70*(4)r z8$4VI5*(6@gyYPSzUKNpJ?M>KdKj8o3KIlD<$N@D z?rB_H!m=eLu_LvxCtp%G)?~)iJ7y3PGsx~GlXa__){}WJawZ1uQiA`Y&!nqL)Kntg z#G;4GP6NS*CYZp7B$(S2$E(Oa%Lkt(3k{TF)KJH=31uR)94F@cf>P2M_5?S#smrK? z-~<-qg^Te{+{B#}D!@i!LZ^?UCn4xs-xG|WV2MY%-z|uoUp%k^+NPTtyC0mhD@O?2 zp5#ML;n|&Cou9+|4)l$LKVGse&waev@V~@^n*h5J&u-!EUh1#*pldmY2&+-n0wh2W zn?w>tL|RF(#INhLCRVJHz|7|A+RoDK@sDMASctBO&mVH+rIn#I IlExwb2c@Ohp8x;= literal 0 HcmV?d00001 diff --git a/docs/compute/drivers/hpcloud.rst b/docs/compute/drivers/hpcloud.rst new file mode 100644 index 0000000000..26450f3ad3 --- /dev/null +++ b/docs/compute/drivers/hpcloud.rst @@ -0,0 +1,44 @@ +HP Cloud Computer Driver Documentation +====================================== + +`HP Cloud`_ is a public cloud computing service offered by HP. + +.. figure:: /_static/images/provider_logos/hpcloud.png + :align: center + :width: 300 + :target: https://www.hpcloud.com + +HP Cloud driver is based on the OpenStack one. 
For more information +information and OpenStack specific documentation, please refer to +:doc:`OpenStack Compute Driver Documentation ` page. + +Examples +-------- + +1. Instantiating the driver +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Unlike other OpenStack based providers, HP cloud also requires you to specify +tenant name when connecting to their cloud. You can do that by passing +``tenant_name`` argument to the driver constructor as shown in the code example +bellow. + +This attribute represents a project name and can be obtained in the `HP Cloud +console`_ as shown in the picture bellow. + +.. figure:: /_static/images/misc/hp_cloud_console_projects.jpg + :align: center + :width: 800 + +.. literalinclude:: /examples/compute/openstack/hpcloud_native.py + :language: python + +API Docs +-------- + +.. autoclass:: libcloud.compute.drivers.hpcloud.HPCloudNodeDriver + :members: + :inherited-members: + +.. _`HP Cloud`: https://www.hpcloud.com +.. _`HP Cloud console`: https://horizon.hpcloud.com diff --git a/docs/examples/compute/openstack/hpcloud_native.py b/docs/examples/compute/openstack/hpcloud_native.py new file mode 100644 index 0000000000..592d18914a --- /dev/null +++ b/docs/examples/compute/openstack/hpcloud_native.py @@ -0,0 +1,13 @@ +from libcloud.compute.types import Provider +from libcloud.compute.providers import get_driver + + +USERNAME = 'your account username' +PASSWORD = 'your account password' +TENANT_NAME = 'project name' +REGION = 'region-b.geo-1' + +cls = get_driver(Provider.HPCLOUD) +driver = cls(USERNAME, PASSWORD, tenant_name=TENANT_NAME, + region=REGION) +print(driver.list_nodes()) diff --git a/libcloud/compute/drivers/hpcloud.py b/libcloud/compute/drivers/hpcloud.py new file mode 100644 index 0000000000..97de03e925 --- /dev/null +++ b/libcloud/compute/drivers/hpcloud.py @@ -0,0 +1,99 @@ +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +HP Public cloud driver which is esentially just a small wrapper around +OpenStack driver. +""" + +from libcloud.compute.types import Provider, LibcloudError +from libcloud.compute.drivers.openstack import OpenStack_1_1_Connection +from libcloud.compute.drivers.openstack import OpenStack_1_1_NodeDriver + + +__all__ = [ + 'HPCloudNodeDriver' +] + +ENDPOINT_ARGS_MAP = { + 'region-a.geo-1': { + 'service_type': 'compute', + 'name': 'Compute', + 'region': 'region-a.geo-1' + }, + 'region-b.geo-1': { + 'service_type': 'compute', + 'name': 'Compute', + 'region': 'region-b.geo-1' + }, +} + +AUTH_URL_TEMPLATE = 'https://%s.identity.hpcloudsvc.com:35357/v2.0/tokens' + + +class HPCloudConnection(OpenStack_1_1_Connection): + _auth_version = '2.0_password' + + def __init__(self, *args, **kwargs): + self.region = kwargs.pop('region', None) + self.get_endpoint_args = kwargs.pop('get_endpoint_args', None) + super(HPCloudConnection, self).__init__(*args, **kwargs) + + def get_endpoint(self): + if not self.get_endpoint_args: + raise LibcloudError( + 'HPCloudConnection must have get_endpoint_args set') + + if '2.0_password' in self._auth_version: + ep = self.service_catalog.get_endpoint(**self.get_endpoint_args) + else: + raise LibcloudError( + 'Auth version "%s" not supported' % (self._auth_version)) + + public_url = ep.get('publicURL', None) + + if not public_url: + raise LibcloudError('Could not find specified 
endpoint') + + return public_url + + +class HPCloudNodeDriver(OpenStack_1_1_NodeDriver): + name = 'HP Public Cloud (Helion)' + website = 'http://www.hpcloud.com/' + connectionCls = HPCloudConnection + type = Provider.HPCLOUD + + def __init__(self, key, secret, tenant_name, secure=True, + host=None, port=None, region='region-b.geo-1', **kwargs): + """ + Note: tenant_name argument is required for HP cloud. + """ + self.tenant_name = tenant_name + super(HPCloudNodeDriver, self).__init__(key=key, secret=secret, + secure=secure, host=host, + port=port, + region=region, + **kwargs) + + def _ex_connection_class_kwargs(self): + endpoint_args = ENDPOINT_ARGS_MAP[self.region] + + kwargs = self.openstack_connection_kwargs() + kwargs['region'] = self.region + kwargs['get_endpoint_args'] = endpoint_args + kwargs['ex_force_auth_url'] = AUTH_URL_TEMPLATE % (self.region) + kwargs['ex_tenant_name'] = self.tenant_name + + return kwargs diff --git a/libcloud/compute/providers.py b/libcloud/compute/providers.py index 87a46b7214..2fc280bd55 100644 --- a/libcloud/compute/providers.py +++ b/libcloud/compute/providers.py @@ -79,6 +79,8 @@ ('libcloud.compute.drivers.rackspace', 'RackspaceNodeDriver'), Provider.RACKSPACE_FIRST_GEN: ('libcloud.compute.drivers.rackspace', 'RackspaceFirstGenNodeDriver'), + Provider.HPCLOUD: + ('libcloud.compute.drivers.hpcloud', 'HPCloudNodeDriver'), Provider.VPSNET: ('libcloud.compute.drivers.vpsnet', 'VPSNetNodeDriver'), Provider.LINODE: diff --git a/libcloud/compute/types.py b/libcloud/compute/types.py index 561474e0bc..290f38e82f 100644 --- a/libcloud/compute/types.py +++ b/libcloud/compute/types.py @@ -120,6 +120,9 @@ class Provider(object): IKOULA = 'ikoula' OUTSCALE_SAS = 'outscale_sas' + # OpenStack based providers + HPCLOUD = 'hpcloud' + # Deprecated constants which are still supported EC2_US_EAST = 'ec2_us_east' EC2_EU = 'ec2_eu_west' # deprecated name From c12b808bca6e9bcf12ba8087920f2040511448c8 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: 
Wed, 14 May 2014 14:02:03 +0200 Subject: [PATCH 049/315] Also test it on OS X. --- .travis.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.travis.yml b/.travis.yml index d302529826..302c91d0f9 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,5 +1,9 @@ + language: python python: 2.7 +os: + - linux + - osx env: - TOX_ENV=lint - TOX_ENV=py26 From 2018fa0a4cfac5fcbf538d68662db4a5c398118e Mon Sep 17 00:00:00 2001 From: Roel Van Nyen Date: Wed, 14 May 2014 17:32:44 +0200 Subject: [PATCH 050/315] add m3 instances for ec2 Oregon Closes #292 Signed-off-by: Tomaz Muraus --- libcloud/compute/drivers/ec2.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/libcloud/compute/drivers/ec2.py b/libcloud/compute/drivers/ec2.py index 494336936d..f39deea2f0 100644 --- a/libcloud/compute/drivers/ec2.py +++ b/libcloud/compute/drivers/ec2.py @@ -425,6 +425,10 @@ 'm2.xlarge', 'm2.2xlarge', 'm2.4xlarge', + 'm3.medium', + 'm3.large', + 'm3.xlarge', + 'm3.2xlarge', 'c1.medium', 'c1.xlarge', 'g2.2xlarge', From f3792b2dca835c548bdbce0da2eb71bfc9463b72 Mon Sep 17 00:00:00 2001 From: Lior Goikhburg Date: Mon, 28 Apr 2014 18:18:27 +0400 Subject: [PATCH 051/315] GCE: fix adding node metadata at node creation Closes #283 Signed-off-by: Tomaz Muraus --- libcloud/compute/drivers/gce.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/libcloud/compute/drivers/gce.py b/libcloud/compute/drivers/gce.py index 0b340b318e..fe4dee4bcf 100644 --- a/libcloud/compute/drivers/gce.py +++ b/libcloud/compute/drivers/gce.py @@ -1196,6 +1196,10 @@ def create_node(self, name, size, image, location=None, image=image, use_existing=use_existing_disk) + if ex_metadata is not None: + ex_metadata = {"items": [{"key": k, "value": v} + for k, v in ex_metadata.items()]} + request, node_data = self._create_node_req(name, size, image, location, ex_network, ex_tags, ex_metadata, From 87878a9225ea5b94dba1de6512d21c2b16808bdb Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Fri, 16 May 2014 21:25:31 +0200 
Subject: [PATCH 052/315] Fix container name encoding in iterate_container_objects and get_container_cdn_url method in the CloudFiles driver. Reported by Brian Metzler part of LIBCLOUD-552. --- CHANGES.rst | 12 ++++++++++++ libcloud/storage/drivers/cloudfiles.py | 11 +++++++---- libcloud/test/storage/test_cloudfiles.py | 14 ++++++++++++++ 3 files changed, 33 insertions(+), 4 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index ca519adbb8..d5c4120bff 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -138,6 +138,18 @@ Compute constant. [Tomaz Muraus] +Storage +~~~~~~~ + +- Fix container name encoding in the iterate_container_objects and + get_container_cdn_url method in the CloudFiles driver. Previously, those + methods would throw an exception if user passed in a container name which + contained a whitespace. + + Reported by Brian Metzler. + (LIBCLOUD-552) + [Tomaz MUraus] + Load Balancer ~~~~~~~~~~~~~ diff --git a/libcloud/storage/drivers/cloudfiles.py b/libcloud/storage/drivers/cloudfiles.py index a828490934..52b999fc7f 100644 --- a/libcloud/storage/drivers/cloudfiles.py +++ b/libcloud/storage/drivers/cloudfiles.py @@ -280,8 +280,8 @@ def get_object(self, container_name, object_name): raise LibcloudError('Unexpected status code: %s' % (response.status)) def get_container_cdn_url(self, container): - container_name = container.name - response = self.connection.request('/%s' % (container_name), + container_name_encoded = self._encode_container_name(container.name) + response = self.connection.request('/%s' % (container_name_encoded), method='HEAD', cdn_request=True) @@ -290,7 +290,7 @@ def get_container_cdn_url(self, container): return cdn_url elif response.status == httplib.NOT_FOUND: raise ContainerDoesNotExistError(value='', - container_name=container_name, + container_name=container.name, driver=self) raise LibcloudError('Unexpected status code: %s' % (response.status)) @@ -691,7 +691,10 @@ def iterate_container_objects(self, container, ex_prefix=None): 
params['prefix'] = ex_prefix while True: - response = self.connection.request('/%s' % (container.name), + container_name_encoded = \ + self._encode_container_name(container.name) + response = self.connection.request('/%s' % + (container_name_encoded), params=params) if response.status == httplib.NO_CONTENT: diff --git a/libcloud/test/storage/test_cloudfiles.py b/libcloud/test/storage/test_cloudfiles.py index 2878c1ee1c..84ec615931 100644 --- a/libcloud/test/storage/test_cloudfiles.py +++ b/libcloud/test/storage/test_cloudfiles.py @@ -177,6 +177,13 @@ def test_list_container_objects(self): self.assertEqual(obj.size, 1160520) self.assertEqual(obj.container.name, 'test_container') + def test_list_container_object_name_encoding(self): + CloudFilesMockHttp.type = 'EMPTY' + container = Container(name='test container 1', extra={}, + driver=self.driver) + objects = self.driver.list_container_objects(container=container) + self.assertEqual(len(objects), 0) + def test_list_container_objects_with_prefix(self): CloudFilesMockHttp.type = 'EMPTY' container = Container( @@ -841,6 +848,13 @@ def _v1_MossoCloudFS_test_container_EMPTY(self, method, url, body, headers): self.base_headers, httplib.responses[httplib.OK]) + def _v1_MossoCloudFS_test_20container_201_EMPTY(self, method, url, body, headers): + body = self.fixtures.load('list_container_objects_empty.json') + return (httplib.OK, + body, + self.base_headers, + httplib.responses[httplib.OK]) + def _v1_MossoCloudFS_test_container(self, method, url, body, headers): headers = copy.deepcopy(self.base_headers) if method == 'GET': From 68c12def610e97c35154118ba8f177bbac4baf13 Mon Sep 17 00:00:00 2001 From: Rashit Azizbaev Date: Fri, 16 May 2014 00:34:27 +0400 Subject: [PATCH 053/315] Added support for openstack compute availability zone. 
Closes #295 Signed-off-by: Tomaz Muraus --- CHANGES.rst | 7 +++++++ libcloud/compute/drivers/openstack.py | 8 ++++++++ ...ervers_26f7fbee_8ce1_4c28_887a_bfe8e4bb10fe.json | 3 ++- libcloud/test/compute/test_openstack.py | 13 +++++++++++++ 4 files changed, 30 insertions(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index d5c4120bff..3132f36bec 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -138,6 +138,13 @@ Compute constant. [Tomaz Muraus] +- Allow user to specify availability zone when creating an OpenStack node by + passing "ex_availability_zone" argument to the create_node method. + Note: This will only work if the OpenStack installation is running + availability zones extension. + (GITHUB-295) + [syndicut] + Storage ~~~~~~~ diff --git a/libcloud/compute/drivers/openstack.py b/libcloud/compute/drivers/openstack.py index 2aca45c024..27f1acec5d 100644 --- a/libcloud/compute/drivers/openstack.py +++ b/libcloud/compute/drivers/openstack.py @@ -1247,6 +1247,9 @@ def create_node(self, **kwargs): :keyword ex_disk_config: Name of the disk configuration. Can be either ``AUTO`` or ``MANUAL``. 
:type ex_disk_config: ``str`` + + :keyword ex_availability_zone: Nova availability zone for the node + :type ex_availability_zone: ``str`` """ server_params = self._create_args_to_params(None, **kwargs) @@ -1318,6 +1321,9 @@ def _create_args_to_params(self, node, **kwargs): {})) } + if 'ex_availability_zone' in kwargs: + server_params['availability_zone'] = kwargs['ex_availability_zone'] + if 'ex_keyname' in kwargs: server_params['key_name'] = kwargs['ex_keyname'] @@ -2048,6 +2054,8 @@ def _to_node(self, api_node): updated=api_node['updated'], key_name=api_node.get('key_name', None), disk_config=api_node.get('OS-DCF:diskConfig', None), + availability_zone=api_node.get('OS-EXT-AZ:availability_zone', + None), ), ) diff --git a/libcloud/test/compute/fixtures/openstack_v1.1/_servers_26f7fbee_8ce1_4c28_887a_bfe8e4bb10fe.json b/libcloud/test/compute/fixtures/openstack_v1.1/_servers_26f7fbee_8ce1_4c28_887a_bfe8e4bb10fe.json index f857f5214c..d972cfd66c 100644 --- a/libcloud/test/compute/fixtures/openstack_v1.1/_servers_26f7fbee_8ce1_4c28_887a_bfe8e4bb10fe.json +++ b/libcloud/test/compute/fixtures/openstack_v1.1/_servers_26f7fbee_8ce1_4c28_887a_bfe8e4bb10fe.json @@ -45,7 +45,8 @@ "metadata": { "My Server Name" : "Apache1" }, - "OS-DCF:diskConfig": "AUTO" + "OS-DCF:diskConfig": "AUTO", + "OS-EXT-AZ:availability_zone": "testaz" } } diff --git a/libcloud/test/compute/test_openstack.py b/libcloud/test/compute/test_openstack.py index ef5dfc7824..6f5af3c3cd 100644 --- a/libcloud/test/compute/test_openstack.py +++ b/libcloud/test/compute/test_openstack.py @@ -1076,6 +1076,19 @@ def test_create_node_with_ex_keyname_and_ex_userdata(self): self.assertEqual(node.extra['metadata']['My Server Name'], 'Apache1') self.assertEqual(node.extra['key_name'], 'devstack') + def test_create_node_with_availability_zone(self): + image = NodeImage( + id=11, name='Ubuntu 8.10 (intrepid)', driver=self.driver) + size = NodeSize( + 1, '256 slice', None, None, None, None, driver=self.driver) + node = 
self.driver.create_node(name='racktest', image=image, size=size, + availability_zone='testaz') + self.assertEqual(node.id, '26f7fbee-8ce1-4c28-887a-bfe8e4bb10fe') + self.assertEqual(node.name, 'racktest') + self.assertEqual(node.extra['password'], 'racktestvJq7d3') + self.assertEqual(node.extra['metadata']['My Server Name'], 'Apache1') + self.assertEqual(node.extra['availability_zone'], 'testaz') + def test_create_node_with_ex_disk_config(self): OpenStack_1_1_MockHttp.type = 'EX_DISK_CONFIG' image = NodeImage( From 771c8fb0d7d03b61461deacecabfa3efec9934a4 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Fri, 16 May 2014 23:32:02 +0200 Subject: [PATCH 054/315] Update changelog. --- CHANGES.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index 3132f36bec..602781518f 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -142,7 +142,7 @@ Compute passing "ex_availability_zone" argument to the create_node method. Note: This will only work if the OpenStack installation is running availability zones extension. - (GITHUB-295) + (GITHUB-295, LIBCLOUD-555) [syndicut] Storage From ca6ad2ad08c1db37acc98e4a62a401336771c3b5 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Fri, 16 May 2014 23:35:24 +0200 Subject: [PATCH 055/315] LIBCLOUD-553: Add support for the Outscale INC cloud. This commits add the support for Outscale INC a parent company of Outscale SAS working under the American laws. 
Closes #293 Signed-off-by: Tomaz Muraus --- CHANGES.rst | 5 +- .../_supported_methods_block_storage.rst | 2 + .../_supported_methods_image_management.rst | 2 + ..._supported_methods_key_pair_management.rst | 2 + docs/compute/_supported_methods_main.rst | 2 + docs/compute/_supported_providers.rst | 2 + docs/compute/drivers/outscale_inc.rst | 31 +++ docs/compute/drivers/outscale_sas.rst | 6 +- libcloud/compute/drivers/ec2.py | 177 ++++++++++++++---- libcloud/compute/providers.py | 2 + libcloud/compute/types.py | 2 + libcloud/data/pricing.json | 69 +++++++ 12 files changed, 265 insertions(+), 37 deletions(-) create mode 100644 docs/compute/drivers/outscale_inc.rst diff --git a/CHANGES.rst b/CHANGES.rst index 602781518f..73bf6b6790 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -130,8 +130,9 @@ Compute (GITHUB-287) [earthgecko] -- Add a new driver for Outscale cloud (http://www.outscale.com). - (GITHUB-285) +- Add a new driver for Outscale SAS and Outscale INC cloud + (http://www.outscale.com). + (GITHUB-285, GITHUB-293, LIBCLOUD-536, LIBCLOUD-553) [Benoit Canet] - Add new driver for HP Public Cloud (Helion) available via Provider.HPCLOUD diff --git a/docs/compute/_supported_methods_block_storage.rst b/docs/compute/_supported_methods_block_storage.rst index ffbd0bc48f..b371fdeacb 100644 --- a/docs/compute/_supported_methods_block_storage.rst +++ b/docs/compute/_supported_methods_block_storage.rst @@ -47,6 +47,7 @@ Provider list volumes create volume destroy volume `OpenNebula (v3.8)`_ yes yes yes yes yes no no `OpenStack`_ yes yes yes yes yes no no `Opsource`_ no no no no no no no +`Outscale INC`_ yes yes yes yes yes yes yes `Outscale SAS`_ yes yes yes yes yes yes yes `Rackspace Cloud (Next Gen)`_ yes yes yes yes yes no no `Rackspace Cloud (First Gen)`_ yes yes yes yes yes no no @@ -107,6 +108,7 @@ Provider list volumes create volume destroy volume .. _`OpenNebula (v3.8)`: http://opennebula.org/ .. _`OpenStack`: http://openstack.org/ .. 
_`Opsource`: http://www.opsource.net/ +.. _`Outscale INC`: http://www.outscale.com .. _`Outscale SAS`: http://www.outscale.com .. _`Rackspace Cloud (Next Gen)`: http://www.rackspace.com .. _`Rackspace Cloud (First Gen)`: http://www.rackspace.com diff --git a/docs/compute/_supported_methods_image_management.rst b/docs/compute/_supported_methods_image_management.rst index 9dcb206834..b1ec17b965 100644 --- a/docs/compute/_supported_methods_image_management.rst +++ b/docs/compute/_supported_methods_image_management.rst @@ -47,6 +47,7 @@ Provider list images get image create image delete `OpenNebula (v3.8)`_ yes no no no no `OpenStack`_ yes yes no no no `Opsource`_ yes no no no no +`Outscale INC`_ yes yes yes yes yes `Outscale SAS`_ yes yes yes yes yes `Rackspace Cloud (Next Gen)`_ yes yes yes yes no `Rackspace Cloud (First Gen)`_ yes yes yes yes no @@ -107,6 +108,7 @@ Provider list images get image create image delete .. _`OpenNebula (v3.8)`: http://opennebula.org/ .. _`OpenStack`: http://openstack.org/ .. _`Opsource`: http://www.opsource.net/ +.. _`Outscale INC`: http://www.outscale.com .. _`Outscale SAS`: http://www.outscale.com .. _`Rackspace Cloud (Next Gen)`: http://www.rackspace.com .. _`Rackspace Cloud (First Gen)`: http://www.rackspace.com diff --git a/docs/compute/_supported_methods_key_pair_management.rst b/docs/compute/_supported_methods_key_pair_management.rst index d37d9875d8..7467c500c3 100644 --- a/docs/compute/_supported_methods_key_pair_management.rst +++ b/docs/compute/_supported_methods_key_pair_management.rst @@ -47,6 +47,7 @@ Provider list key pairs get key pair create key pai `OpenNebula (v3.8)`_ no no no no no no `OpenStack`_ no no no no no no `Opsource`_ no no no no no no +`Outscale INC`_ yes yes yes yes no yes `Outscale SAS`_ yes yes yes yes no yes `Rackspace Cloud (Next Gen)`_ yes yes yes yes no yes `Rackspace Cloud (First Gen)`_ no no no no no no @@ -107,6 +108,7 @@ Provider list key pairs get key pair create key pai .. 
_`OpenNebula (v3.8)`: http://opennebula.org/ .. _`OpenStack`: http://openstack.org/ .. _`Opsource`: http://www.opsource.net/ +.. _`Outscale INC`: http://www.outscale.com .. _`Outscale SAS`: http://www.outscale.com .. _`Rackspace Cloud (Next Gen)`: http://www.rackspace.com .. _`Rackspace Cloud (First Gen)`: http://www.rackspace.com diff --git a/docs/compute/_supported_methods_main.rst b/docs/compute/_supported_methods_main.rst index c2f728eec1..5c3bd08348 100644 --- a/docs/compute/_supported_methods_main.rst +++ b/docs/compute/_supported_methods_main.rst @@ -47,6 +47,7 @@ Provider list nodes create node reboot node destroy `OpenNebula (v3.8)`_ yes yes yes yes yes yes no `OpenStack`_ yes no yes yes yes yes no `Opsource`_ yes yes yes yes yes yes yes +`Outscale INC`_ yes yes yes yes yes yes yes `Outscale SAS`_ yes yes yes yes yes yes yes `Rackspace Cloud (Next Gen)`_ yes yes yes yes yes yes yes `Rackspace Cloud (First Gen)`_ yes yes yes yes yes yes yes @@ -107,6 +108,7 @@ Provider list nodes create node reboot node destroy .. _`OpenNebula (v3.8)`: http://opennebula.org/ .. _`OpenStack`: http://openstack.org/ .. _`Opsource`: http://www.opsource.net/ +.. _`Outscale INC`: http://www.outscale.com .. _`Outscale SAS`: http://www.outscale.com .. _`Rackspace Cloud (Next Gen)`: http://www.rackspace.com .. 
_`Rackspace Cloud (First Gen)`: http://www.rackspace.com diff --git a/docs/compute/_supported_providers.rst b/docs/compute/_supported_providers.rst index 5960b8cccf..5ca15348b7 100644 --- a/docs/compute/_supported_providers.rst +++ b/docs/compute/_supported_providers.rst @@ -47,6 +47,7 @@ Provider Documentation `OpenNebula (v3.8)`_ OPENNEBULA :mod:`libcloud.compute.drivers.opennebula` :class:`OpenNebulaNodeDriver` `OpenStack`_ :doc:`Click ` OPENSTACK :mod:`libcloud.compute.drivers.openstack` :class:`OpenStackNodeDriver` `Opsource`_ OPSOURCE :mod:`libcloud.compute.drivers.opsource` :class:`OpsourceNodeDriver` +`Outscale INC`_ :doc:`Click ` OUTSCALE_INC :mod:`libcloud.compute.drivers.ec2` :class:`OutscaleINCNodeDriver` `Outscale SAS`_ :doc:`Click ` OUTSCALE_SAS :mod:`libcloud.compute.drivers.ec2` :class:`OutscaleSASNodeDriver` `Rackspace Cloud (Next Gen)`_ :doc:`Click ` RACKSPACE :mod:`libcloud.compute.drivers.rackspace` :class:`RackspaceNodeDriver` `Rackspace Cloud (First Gen)`_ RACKSPACE_FIRST_GEN :mod:`libcloud.compute.drivers.rackspace` :class:`RackspaceFirstGenNodeDriver` @@ -107,6 +108,7 @@ Provider Documentation .. _`OpenNebula (v3.8)`: http://opennebula.org/ .. _`OpenStack`: http://openstack.org/ .. _`Opsource`: http://www.opsource.net/ +.. _`Outscale INC`: http://www.outscale.com .. _`Outscale SAS`: http://www.outscale.com .. _`Rackspace Cloud (Next Gen)`: http://www.rackspace.com .. _`Rackspace Cloud (First Gen)`: http://www.rackspace.com diff --git a/docs/compute/drivers/outscale_inc.rst b/docs/compute/drivers/outscale_inc.rst new file mode 100644 index 0000000000..b765ad264e --- /dev/null +++ b/docs/compute/drivers/outscale_inc.rst @@ -0,0 +1,31 @@ +Outscale INC Driver Documentation +================================= + +`Outscale INC`_ provides an IaaS platform allowing +developers to benefit from all the flexibility of the Cloud. +This IaaS platform relies on TINA OS, its Cloud manager which purpose is to +provide great performances on the Cloud. 
+TINA OS is an own developed software with APIs compatible with AWS EC2 (TM). + +.. figure:: /_static/images/provider_logos/outscale.png + :align: center + :width: 300 + :target: https://www.outscale.com/ + +Outscale users can start virtual machines in the following regions: + +* EU West (Paris France) Region +* US East (Boston US) Region +* (Soon) US East (New Jersey) Region +* (Soon) Asia (Hong Kong) Region + +Outscale INC is an american company: prices are in $. + +API Docs +-------- + +.. autoclass:: libcloud.compute.drivers.ec2.OutscaleINCNodeDriver + :members: + :inherited-members: + +.. _`Outscale INC`: https://www.outscale.com/ diff --git a/docs/compute/drivers/outscale_sas.rst b/docs/compute/drivers/outscale_sas.rst index 03e7d4e3c2..80a1fcd9e0 100644 --- a/docs/compute/drivers/outscale_sas.rst +++ b/docs/compute/drivers/outscale_sas.rst @@ -1,5 +1,5 @@ -Outscale Driver Documentation -============================= +Outscale SAS Driver Documentation +================================= `Outscale SAS`_ provides an IaaS platform allowing developers to benefit from all the flexibility of the Cloud. @@ -24,7 +24,7 @@ Outscale SAS is an european company: prices are in €. API Docs -------- -.. autoclass:: libcloud.compute.drivers.ec2.OutscaleNodeDriver +.. 
autoclass:: libcloud.compute.drivers.ec2.OutscaleSASNodeDriver :members: :inherited-members: diff --git a/libcloud/compute/drivers/ec2.py b/libcloud/compute/drivers/ec2.py index f39deea2f0..9962b2d73f 100644 --- a/libcloud/compute/drivers/ec2.py +++ b/libcloud/compute/drivers/ec2.py @@ -49,6 +49,7 @@ 'INSTANCE_TYPES', 'OUTSCALE_INSTANCE_TYPES', 'OUTSCALE_SAS_REGION_DETAILS', + 'OUTSCALE_INC_REGION_DETAILS', 'DEFAULT_EUCA_API_VERSION', 'EUCA_NAMESPACE', @@ -59,6 +60,7 @@ 'EucNodeDriver', 'OutscaleSASNodeDriver', + 'OutscaleINCNodeDriver', 'EC2NodeLocation', 'EC2ReservedNode', @@ -877,7 +879,7 @@ """ The function manipulating Outscale cloud regions will be overriden because Outscale instances types are in a separate dict so also declare Outscale cloud -regions in another constant. +regions in some other constants. """ OUTSCALE_SAS_REGION_DETAILS = { 'eu-west-3': { @@ -964,6 +966,91 @@ } +OUTSCALE_INC_REGION_DETAILS = { + 'eu-west-1': { + 'endpoint': 'api.eu-west-1.outscale.com', + 'api_name': 'osc_inc_eu_west_1', + 'country': 'FRANCE', + 'instance_types': [ + 't1.micro', + 'm1.small', + 'm1.medium', + 'm1.large', + 'm1.xlarge', + 'c1.medium', + 'c1.xlarge', + 'm2.xlarge', + 'm2.2xlarge', + 'm2.4xlarge', + 'nv1.small', + 'nv1.medium', + 'nv1.large', + 'nv1.xlarge', + 'cc1.4xlarge', + 'cc2.8xlarge', + 'm3.xlarge', + 'm3.2xlarge', + 'cr1.8xlarge', + 'os1.8xlarge' + ] + }, + 'eu-west-3': { + 'endpoint': 'api-ppd.outscale.com', + 'api_name': 'osc_inc_eu_west_3', + 'country': 'FRANCE', + 'instance_types': [ + 't1.micro', + 'm1.small', + 'm1.medium', + 'm1.large', + 'm1.xlarge', + 'c1.medium', + 'c1.xlarge', + 'm2.xlarge', + 'm2.2xlarge', + 'm2.4xlarge', + 'nv1.small', + 'nv1.medium', + 'nv1.large', + 'nv1.xlarge', + 'cc1.4xlarge', + 'cc2.8xlarge', + 'm3.xlarge', + 'm3.2xlarge', + 'cr1.8xlarge', + 'os1.8xlarge' + ] + }, + 'us-east-1': { + 'endpoint': 'api.us-east-1.outscale.com', + 'api_name': 'osc_inc_us_east_1', + 'country': 'USA', + 'instance_types': [ + 't1.micro', + 
'm1.small', + 'm1.medium', + 'm1.large', + 'm1.xlarge', + 'c1.medium', + 'c1.xlarge', + 'm2.xlarge', + 'm2.2xlarge', + 'm2.4xlarge', + 'nv1.small', + 'nv1.medium', + 'nv1.large', + 'nv1.xlarge', + 'cc1.4xlarge', + 'cc2.8xlarge', + 'm3.xlarge', + 'm3.2xlarge', + 'cr1.8xlarge', + 'os1.8xlarge' + ] + } +} + + """ Define the extra dictionary for specific resources """ @@ -4633,6 +4720,32 @@ class OutscaleNodeDriver(BaseEC2NodeDriver): 'stopped': NodeState.STOPPED } + def __init__(self, key, secret=None, secure=True, host=None, port=None, + region='us-east-1', region_details=None, **kwargs): + if hasattr(self, '_region'): + region = self._region + + if region_details is None: + raise ValueError('Invalid region_details argument') + + if region not in region_details.keys(): + raise ValueError('Invalid region: %s' % (region)) + + self.region_name = region + self.region_details = region_details + details = self.region_details[region] + self.api_name = details['api_name'] + self.country = details['country'] + + self.connectionCls.host = details['endpoint'] + + self._not_implemented_msg =\ + 'This method is not supported in the Outscale driver' + + super(BaseEC2NodeDriver, self).__init__(key=key, secret=secret, + secure=secure, host=host, + port=port, **kwargs) + def create_node(self, **kwargs): """ Create a new Outscale node. The ex_iamprofile keyword is not supported. 
@@ -4810,36 +4923,6 @@ def ex_detach_network_interface(self, attachment_id, force=False): """ raise NotImplementedError(self._not_implemented_msg) - -class OutscaleSASNodeDriver(OutscaleNodeDriver): - """ - Outscale SAS node driver - """ - name = 'Outscale SAS' - type = Provider.OUTSCALE_SAS - - def __init__(self, key, secret=None, secure=True, host=None, port=None, - region='us-east-1', **kwargs): - if hasattr(self, '_region'): - region = self._region - - if region not in OUTSCALE_SAS_REGION_DETAILS.keys(): - raise ValueError('Invalid region: %s' % (region)) - - details = OUTSCALE_SAS_REGION_DETAILS[region] - self.region_name = region - self.api_name = details['api_name'] - self.country = details['country'] - - self.connectionCls.host = details['endpoint'] - - self._not_implemented_msg =\ - 'This method is not supported in the Outscale driver' - - super(OutscaleNodeDriver, self).__init__(key=key, secret=secret, - secure=secure, host=host, - port=port, **kwargs) - def list_sizes(self, location=None): """ List available instance flavors/sizes @@ -4849,7 +4932,7 @@ def list_sizes(self, location=None): :rtype: ``list`` of :class:`NodeSize` """ available_types =\ - OUTSCALE_SAS_REGION_DETAILS[self.region_name]['instance_types'] + self.region_details[self.region_name]['instance_types'] sizes = [] for instance_type in available_types: @@ -4859,3 +4942,33 @@ def list_sizes(self, location=None): attributes.update({'price': price}) sizes.append(NodeSize(driver=self, **attributes)) return sizes + + +class OutscaleSASNodeDriver(OutscaleNodeDriver): + """ + Outscale SAS node driver + """ + name = 'Outscale SAS' + type = Provider.OUTSCALE_SAS + + def __init__(self, key, secret=None, secure=True, host=None, port=None, + region='us-east-1', region_details=None, **kwargs): + super(OutscaleSASNodeDriver, self).__init__( + key=key, secret=secret, secure=secure, host=host, port=port, + region=region, region_details=OUTSCALE_SAS_REGION_DETAILS, + **kwargs) + + +class 
OutscaleINCNodeDriver(OutscaleNodeDriver): + """ + Outscale INC node driver + """ + name = 'Outscale INC' + type = Provider.OUTSCALE_INC + + def __init__(self, key, secret=None, secure=True, host=None, port=None, + region='us-east-1', region_details=None, **kwargs): + super(OutscaleINCNodeDriver, self).__init__( + key=key, secret=secret, secure=secure, host=host, port=port, + region=region, region_details=OUTSCALE_INC_REGION_DETAILS, + **kwargs) diff --git a/libcloud/compute/providers.py b/libcloud/compute/providers.py index 2fc280bd55..3de579c0b9 100644 --- a/libcloud/compute/providers.py +++ b/libcloud/compute/providers.py @@ -143,6 +143,8 @@ ('libcloud.compute.drivers.ikoula', 'IkoulaNodeDriver'), Provider.OUTSCALE_SAS: ('libcloud.compute.drivers.ec2', 'OutscaleSASNodeDriver'), + Provider.OUTSCALE_INC: + ('libcloud.compute.drivers.ec2', 'OutscaleINCNodeDriver'), # Deprecated Provider.CLOUDSIGMA_US: diff --git a/libcloud/compute/types.py b/libcloud/compute/types.py index 290f38e82f..b2edf2d460 100644 --- a/libcloud/compute/types.py +++ b/libcloud/compute/types.py @@ -75,6 +75,7 @@ class Provider(object): :cvar EXOSCALE: Exoscale driver. :cvar IKOULA: Ikoula driver. :cvar OUTSCALE_SAS: Outscale SAS driver. + :cvar OUTSCALE_INC: Outscale INC driver. 
""" DUMMY = 'dummy' EC2 = 'ec2_us_east' @@ -119,6 +120,7 @@ class Provider(object): EXOSCALE = 'exoscale' IKOULA = 'ikoula' OUTSCALE_SAS = 'outscale_sas' + OUTSCALE_INC = 'outscale_inc' # OpenStack based providers HPCLOUD = 'hpcloud' diff --git a/libcloud/data/pricing.json b/libcloud/data/pricing.json index 6034ec49e4..e8aa7bf3c2 100644 --- a/libcloud/data/pricing.json +++ b/libcloud/data/pricing.json @@ -490,6 +490,75 @@ "cr1.8xlarge": "3.750", "os1.8xlarge": "6.400", "os1.8xlarge": "6.400" + }, + "osc_inc_eu_west_1": { + "t1.micro": "0.040", + "m1.small": "0.090", + "m1.medium": "0.120", + "m1.large": "0.360", + "m1.xlarge": "0.730", + "c1.medium": "0.230", + "c1.xlarge": "0.900", + "m2.xlarge": "0.410", + "m2.2xlarge": "0.820", + "m2.4xlarge": "1.640", + "nv1.small": "5.220", + "nv1.medium": "5.250", + "nv1.large": "5.490", + "nv1.xlarge": "5.610", + "cc1.4xlarge": "1.300", + "cc2.8xlarge": "2.400", + "m3.xlarge": "0.780", + "m3.2xlarge": "1.560", + "cr1.8xlarge": "3.500", + "os1.8xlarge": "4.310", + "os1.8xlarge": "4.310" + }, + "osc_inc_eu_west_3": { + "t1.micro": "0.040", + "m1.small": "0.090", + "m1.medium": "0.120", + "m1.large": "0.360", + "m1.xlarge": "0.730", + "c1.medium": "0.230", + "c1.xlarge": "0.900", + "m2.xlarge": "0.410", + "m2.2xlarge": "0.820", + "m2.4xlarge": "1.640", + "nv1.small": "5.220", + "nv1.medium": "5.250", + "nv1.large": "5.490", + "nv1.xlarge": "5.610", + "cc1.4xlarge": "1.300", + "cc2.8xlarge": "2.400", + "m3.xlarge": "0.780", + "m3.2xlarge": "1.560", + "cr1.8xlarge": "3.500", + "os1.8xlarge": "4.310", + "os1.8xlarge": "4.310" + }, + "osc_inc_us_east_1": { + "t1.micro": "0.020", + "m1.small": "0.060", + "m1.medium": "0.180", + "m1.large": "0.240", + "m1.xlarge": "0.730", + "c1.medium": "0.150", + "c1.xlarge": "0.580", + "m2.xlarge": "0.410", + "m2.2xlarge": "1.020", + "m2.4xlarge": "2.040", + "nv1.small": "5.190", + "nv1.medium": "5.250", + "nv1.large": "5.490", + "nv1.xlarge": "5.610", + "cc1.4xlarge": "1.610", + "cc2.8xlarge": 
"2.400", + "m3.xlarge": "0.500", + "m3.2xlarge": "1.560", + "cr1.8xlarge": "3.500", + "os1.8xlarge": "6.400", + "os1.8xlarge": "6.400" } }, "storage": {}, From 02854546c46488d6b5f8e05b20e64314368752af Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Fri, 16 May 2014 23:58:39 +0200 Subject: [PATCH 056/315] Re-generate supported providers list. --- docs/compute/_supported_methods_block_storage.rst | 2 ++ docs/compute/_supported_methods_image_management.rst | 2 ++ docs/compute/_supported_methods_key_pair_management.rst | 2 ++ docs/compute/_supported_methods_main.rst | 2 ++ docs/compute/_supported_providers.rst | 2 ++ 5 files changed, 10 insertions(+) diff --git a/docs/compute/_supported_methods_block_storage.rst b/docs/compute/_supported_methods_block_storage.rst index b371fdeacb..36aa971aa8 100644 --- a/docs/compute/_supported_methods_block_storage.rst +++ b/docs/compute/_supported_methods_block_storage.rst @@ -35,6 +35,7 @@ Provider list volumes create volume destroy volume `Google Compute Engine`_ yes yes yes yes yes yes yes `GoGrid`_ no no no no no no no `HostVirtual`_ no no no no no no no +`HP Public Cloud (Helion)`_ yes yes yes yes yes no no `IBM SmartCloud Enterprise`_ yes yes yes yes yes no no `Ikoula`_ yes yes yes yes yes no no `Joyent`_ no no no no no no no @@ -96,6 +97,7 @@ Provider list volumes create volume destroy volume .. _`Google Compute Engine`: https://cloud.google.com/ .. _`GoGrid`: http://www.gogrid.com/ .. _`HostVirtual`: http://www.vr.org +.. _`HP Public Cloud (Helion)`: http://www.hpcloud.com/ .. _`IBM SmartCloud Enterprise`: http://ibm.com/services/us/en/cloud-enterprise/ .. _`Ikoula`: http://express.ikoula.co.uk/cloudstack .. 
_`Joyent`: http://www.joyentcloud.com diff --git a/docs/compute/_supported_methods_image_management.rst b/docs/compute/_supported_methods_image_management.rst index b1ec17b965..5516774240 100644 --- a/docs/compute/_supported_methods_image_management.rst +++ b/docs/compute/_supported_methods_image_management.rst @@ -35,6 +35,7 @@ Provider list images get image create image delete `Google Compute Engine`_ yes no no no no `GoGrid`_ yes no no no no `HostVirtual`_ yes no no no no +`HP Public Cloud (Helion)`_ yes yes yes yes no `IBM SmartCloud Enterprise`_ yes no yes no no `Ikoula`_ yes no no no no `Joyent`_ yes no no no no @@ -96,6 +97,7 @@ Provider list images get image create image delete .. _`Google Compute Engine`: https://cloud.google.com/ .. _`GoGrid`: http://www.gogrid.com/ .. _`HostVirtual`: http://www.vr.org +.. _`HP Public Cloud (Helion)`: http://www.hpcloud.com/ .. _`IBM SmartCloud Enterprise`: http://ibm.com/services/us/en/cloud-enterprise/ .. _`Ikoula`: http://express.ikoula.co.uk/cloudstack .. _`Joyent`: http://www.joyentcloud.com diff --git a/docs/compute/_supported_methods_key_pair_management.rst b/docs/compute/_supported_methods_key_pair_management.rst index 7467c500c3..938ceeb8c6 100644 --- a/docs/compute/_supported_methods_key_pair_management.rst +++ b/docs/compute/_supported_methods_key_pair_management.rst @@ -35,6 +35,7 @@ Provider list key pairs get key pair create key pai `Google Compute Engine`_ no no no no no no `GoGrid`_ no no no no no no `HostVirtual`_ no no no no no no +`HP Public Cloud (Helion)`_ yes yes yes yes no yes `IBM SmartCloud Enterprise`_ no no no no no no `Ikoula`_ yes yes yes yes no yes `Joyent`_ no no no no no no @@ -96,6 +97,7 @@ Provider list key pairs get key pair create key pai .. _`Google Compute Engine`: https://cloud.google.com/ .. _`GoGrid`: http://www.gogrid.com/ .. _`HostVirtual`: http://www.vr.org +.. _`HP Public Cloud (Helion)`: http://www.hpcloud.com/ .. 
_`IBM SmartCloud Enterprise`: http://ibm.com/services/us/en/cloud-enterprise/ .. _`Ikoula`: http://express.ikoula.co.uk/cloudstack .. _`Joyent`: http://www.joyentcloud.com diff --git a/docs/compute/_supported_methods_main.rst b/docs/compute/_supported_methods_main.rst index 5c3bd08348..3510369672 100644 --- a/docs/compute/_supported_methods_main.rst +++ b/docs/compute/_supported_methods_main.rst @@ -35,6 +35,7 @@ Provider list nodes create node reboot node destroy `Google Compute Engine`_ yes yes yes yes yes yes no `GoGrid`_ yes yes yes yes yes yes yes `HostVirtual`_ yes yes yes yes yes yes yes +`HP Public Cloud (Helion)`_ yes yes yes yes yes yes yes `IBM SmartCloud Enterprise`_ yes yes yes yes yes yes no `Ikoula`_ yes yes yes yes yes yes yes `Joyent`_ yes yes yes yes yes yes yes @@ -96,6 +97,7 @@ Provider list nodes create node reboot node destroy .. _`Google Compute Engine`: https://cloud.google.com/ .. _`GoGrid`: http://www.gogrid.com/ .. _`HostVirtual`: http://www.vr.org +.. _`HP Public Cloud (Helion)`: http://www.hpcloud.com/ .. _`IBM SmartCloud Enterprise`: http://ibm.com/services/us/en/cloud-enterprise/ .. _`Ikoula`: http://express.ikoula.co.uk/cloudstack .. 
_`Joyent`: http://www.joyentcloud.com diff --git a/docs/compute/_supported_providers.rst b/docs/compute/_supported_providers.rst index 5ca15348b7..0528390679 100644 --- a/docs/compute/_supported_providers.rst +++ b/docs/compute/_supported_providers.rst @@ -35,6 +35,7 @@ Provider Documentation `Google Compute Engine`_ :doc:`Click ` GCE :mod:`libcloud.compute.drivers.gce` :class:`GCENodeDriver` `GoGrid`_ GOGRID :mod:`libcloud.compute.drivers.gogrid` :class:`GoGridNodeDriver` `HostVirtual`_ HOSTVIRTUAL :mod:`libcloud.compute.drivers.hostvirtual` :class:`HostVirtualNodeDriver` +`HP Public Cloud (Helion)`_ :doc:`Click ` HPCLOUD :mod:`libcloud.compute.drivers.hpcloud` :class:`HPCloudNodeDriver` `IBM SmartCloud Enterprise`_ IBM :mod:`libcloud.compute.drivers.ibm_sce` :class:`IBMNodeDriver` `Ikoula`_ :doc:`Click ` IKOULA :mod:`libcloud.compute.drivers.ikoula` :class:`IkoulaNodeDriver` `Joyent`_ JOYENT :mod:`libcloud.compute.drivers.joyent` :class:`JoyentNodeDriver` @@ -96,6 +97,7 @@ Provider Documentation .. _`Google Compute Engine`: https://cloud.google.com/ .. _`GoGrid`: http://www.gogrid.com/ .. _`HostVirtual`: http://www.vr.org +.. _`HP Public Cloud (Helion)`: http://www.hpcloud.com/ .. _`IBM SmartCloud Enterprise`: http://ibm.com/services/us/en/cloud-enterprise/ .. _`Ikoula`: http://express.ikoula.co.uk/cloudstack .. _`Joyent`: http://www.joyentcloud.com From 819f1671ff7aeccc8996b2d77a98be4b72ed20c9 Mon Sep 17 00:00:00 2001 From: Lior Goikhburg Date: Thu, 15 May 2014 21:12:01 +0400 Subject: [PATCH 057/315] Allow user to pass filters to ex_list_networks method in the EC2 driver. Tomaz: Fix tests and lint. 
Closes #294 Signed-off-by: Tomaz Muraus --- CHANGES.rst | 4 +++ libcloud/compute/drivers/ec2.py | 35 +++++++++++++++++++++- libcloud/test/compute/test_ec2.py | 48 +++++++++++++++++++++++++++++++ 3 files changed, 86 insertions(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index 73bf6b6790..ff51f6167e 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -146,6 +146,10 @@ Compute (GITHUB-295, LIBCLOUD-555) [syndicut] +- Allow user to pass filters to ex_list_networks method in the EC2 driver. + (GITHUB-294) + [zerthimon] + Storage ~~~~~~~ diff --git a/libcloud/compute/drivers/ec2.py b/libcloud/compute/drivers/ec2.py index 9962b2d73f..1f94d79ed2 100644 --- a/libcloud/compute/drivers/ec2.py +++ b/libcloud/compute/drivers/ec2.py @@ -2342,15 +2342,48 @@ def ex_register_image(self, name, description=None, architecture=None, ) return image - def ex_list_networks(self): + def ex_list_networks(self, network_ids=None, filters=None): """ Return a list of :class:`EC2Network` objects for the current region. + :param network_ids: Return only networks matching the provided + network IDs. If not specified, a list of all + the networks in the corresponding region + is returned. + :type network_ids: ``list`` + + :param filters: The filters so that the response includes + information for only certain networks. 
+ :type filters: ``dict`` + :rtype: ``list`` of :class:`EC2Network` """ params = {'Action': 'DescribeVpcs'} + if network_ids: + for network_idx, network_id in enumerate(network_ids): + network_idx += 1 # We want 1-based indexes + network_key = 'VpcId.%s' % network_idx + params[network_key] = network_id + + if filters: + for filter_idx, filter_data in enumerate(filters.items()): + filter_idx += 1 # We want 1-based indexes + filter_name, filter_values = filter_data + filter_key = 'Filter.%s.Name' % filter_idx + params[filter_key] = filter_name + + if isinstance(filter_values, (list, tuple)): + for value_idx, value in enumerate(filter_values): + value_idx += 1 # We want 1-based indexes + value_key = 'Filter.%s.Value.%s' % (filter_idx, + value_idx) + params[value_key] = value + else: + value_key = 'Filter.%s.Value.1' % filter_idx + params[value_key] = filter_values + return self._to_networks( self.connection.request(self.path, params=params).object ) diff --git a/libcloud/test/compute/test_ec2.py b/libcloud/test/compute/test_ec2.py index 0a2cf1eb28..3f531cde29 100644 --- a/libcloud/test/compute/test_ec2.py +++ b/libcloud/test/compute/test_ec2.py @@ -919,6 +919,21 @@ def test_ex_list_networks(self): self.assertEqual('available', vpcs[1].extra['state']) self.assertEqual('dopt-7eded312', vpcs[1].extra['dhcp_options_id']) + def test_ex_list_networks_network_ids(self): + EC2MockHttp.type = 'network_ids' + network_ids = ['vpc-532335e1'] + + # We assert in the mock http method + self.driver.ex_list_networks(network_ids=network_ids) + + def test_ex_list_networks_filters(self): + EC2MockHttp.type = 'filters' + filters = {'dhcp-options-id': 'dopt-7eded312', # matches two networks + 'cidr': '192.168.51.0/24'} # matches one network + + # We assert in the mock http method + self.driver.ex_list_networks(filters=filters) + def test_ex_create_network(self): vpc = self.driver.ex_create_network('192.168.55.0/24', name='Test VPC', @@ -1349,6 +1364,39 @@ def _DescribeVpcs(self, method, 
url, body, headers): body = self.fixtures.load('describe_vpcs.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + def _network_ids_DescribeVpcs(self, method, url, body, headers): + expected_params = { + 'VpcId.1': 'vpc-532335e1' + } + self.assertUrlContainsQueryParams(url, expected_params) + + body = self.fixtures.load('describe_vpcs.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _filters_DescribeVpcs(self, method, url, body, headers): + expected_params_1 = { + 'Filter.1.Name': 'dhcp-options-id', + 'Filter.1.Value.1': 'dopt-7eded312', + 'Filter.2.Name': 'cidr', + 'Filter.2.Value.1': '192.168.51.0/24' + } + + expected_params_2 = { + 'Filter.1.Name': 'cidr', + 'Filter.1.Value.1': '192.168.51.0/24', + 'Filter.2.Name': 'dhcp-options-id', + 'Filter.2.Value.1': 'dopt-7eded312' + } + + try: + self.assertUrlContainsQueryParams(url, expected_params_1) + except AssertionError: + # dict ordering is not guaranteed + self.assertUrlContainsQueryParams(url, expected_params_2) + + body = self.fixtures.load('describe_vpcs.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + def _CreateVpc(self, method, url, body, headers): body = self.fixtures.load('create_vpc.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) From 7c9674863a0aaf3a52895505b4b38c892042d9de Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Wed, 21 May 2014 14:00:38 +0200 Subject: [PATCH 058/315] Allow user to force auth version by passing "ex_force_auth_version" kwarg to OpenStack based driver constructor even if "_auth_version" class attribute is set. 
--- libcloud/common/openstack.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/libcloud/common/openstack.py b/libcloud/common/openstack.py index 158a9f5f8c..26bf6920b5 100644 --- a/libcloud/common/openstack.py +++ b/libcloud/common/openstack.py @@ -487,9 +487,11 @@ def __init__(self, user_id, key, secure=True, super(OpenStackBaseConnection, self).__init__( user_id, key, secure=secure, timeout=timeout) + if ex_force_auth_version: + self._auth_version = ex_force_auth_version + self._ex_force_base_url = ex_force_base_url self._ex_force_auth_url = ex_force_auth_url - self._auth_version = self._auth_version or ex_force_auth_version self._ex_force_auth_token = ex_force_auth_token self._ex_tenant_name = ex_tenant_name self._ex_force_service_type = ex_force_service_type From be258dc9d8100697f27078639dc24b4f256d436d Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Wed, 21 May 2014 14:07:41 +0200 Subject: [PATCH 059/315] Fix OpenStack Swift driver so it works correctly. --- libcloud/storage/drivers/cloudfiles.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/libcloud/storage/drivers/cloudfiles.py b/libcloud/storage/drivers/cloudfiles.py index 52b999fc7f..6591c7dab5 100644 --- a/libcloud/storage/drivers/cloudfiles.py +++ b/libcloud/storage/drivers/cloudfiles.py @@ -186,11 +186,17 @@ class OpenStackSwiftConnection(CloudFilesConnection): Connection class for the OpenStack Swift endpoint. 
""" + # TODO: Reverse the relationship - Swift -> CloudFiles + def __init__(self, *args, **kwargs): super(OpenStackSwiftConnection, self).__init__(*args, **kwargs) self._service_type = self._ex_force_service_type or 'object-store' self._service_name = self._ex_force_service_name or 'swift' - self._service_region = self._ex_force_service_region.upper() + + if self._ex_force_service_region: + self._service_region = self._ex_force_service_region.upper() + else: + self._service_region = None def get_endpoint(self, *args, **kwargs): if '2.0' in self._auth_version: @@ -200,10 +206,10 @@ def get_endpoint(self, *args, **kwargs): region=self._service_region) elif ('1.1' in self._auth_version) or ('1.0' in self._auth_version): endpoint = self.service_catalog.get_endpoint( - name=self._service_name, region=self._region_name) + name=self._service_name, region=self._service_region) - if self.endpoint_url in endpoint: - return endpoint[self.endpoint_url] + if PUBLIC_ENDPOINT_KEY in endpoint: + return endpoint[PUBLIC_ENDPOINT_KEY] else: raise LibcloudError('Could not find specified endpoint') From 0a08bc6e1eea68407cc6abaee25bce87351c4050 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Wed, 21 May 2014 15:03:39 +0200 Subject: [PATCH 060/315] Reverse the relationship between CloudFiles and OpenStackSwift connection class. --- libcloud/storage/drivers/cloudfiles.py | 112 +++++++++++++++++-------- 1 file changed, 76 insertions(+), 36 deletions(-) diff --git a/libcloud/storage/drivers/cloudfiles.py b/libcloud/storage/drivers/cloudfiles.py index 6591c7dab5..212d46ce7b 100644 --- a/libcloud/storage/drivers/cloudfiles.py +++ b/libcloud/storage/drivers/cloudfiles.py @@ -99,7 +99,71 @@ class CloudFilesRawResponse(CloudFilesResponse, RawResponse): pass -class CloudFilesConnection(OpenStackBaseConnection): +class OpenStackSwiftConnection(OpenStackBaseConnection): + """ + Connection class for the OpenStack Swift endpoint. 
+ """ + + responseCls = CloudFilesResponse + rawResponseCls = CloudFilesRawResponse + + auth_url = AUTH_URL + _auth_version = '1.0' + + # TODO: Reverse the relationship - Swift -> CloudFiles + def __init__(self, user_id, key, secure=True, **kwargs): + # Ignore this for now + kwargs.pop('use_internal_url', None) + super(OpenStackSwiftConnection, self).__init__(user_id, key, + secure=secure, + **kwargs) + self.api_version = API_VERSION + self.accept_format = 'application/json' + + self._service_type = self._ex_force_service_type or 'object-store' + self._service_name = self._ex_force_service_name or 'swift' + + if self._ex_force_service_region: + self._service_region = self._ex_force_service_region.upper() + else: + self._service_region = None + + def get_endpoint(self, *args, **kwargs): + if '2.0' in self._auth_version: + endpoint = self.service_catalog.get_endpoint( + service_type=self._service_type, + name=self._service_name, + region=self._service_region) + elif ('1.1' in self._auth_version) or ('1.0' in self._auth_version): + endpoint = self.service_catalog.get_endpoint( + name=self._service_name, region=self._service_region) + + if PUBLIC_ENDPOINT_KEY in endpoint: + return endpoint[PUBLIC_ENDPOINT_KEY] + else: + raise LibcloudError('Could not find specified endpoint') + + def request(self, action, params=None, data='', headers=None, method='GET', + raw=False, cdn_request=False): + if not headers: + headers = {} + if not params: + params = {} + + self.cdn_request = cdn_request + params['format'] = 'json' + + if method in ['POST', 'PUT'] and 'Content-Type' not in headers: + headers.update({'Content-Type': 'application/json; charset=UTF-8'}) + + return super(OpenStackSwiftConnection, self).request( + action=action, + params=params, data=data, + method=method, headers=headers, + raw=raw) + + +class CloudFilesConnection(OpenStackSwiftConnection): """ Base connection class for the Cloudfiles driver. 
""" @@ -181,39 +245,6 @@ def request(self, action, params=None, data='', headers=None, method='GET', raw=raw) -class OpenStackSwiftConnection(CloudFilesConnection): - """ - Connection class for the OpenStack Swift endpoint. - """ - - # TODO: Reverse the relationship - Swift -> CloudFiles - - def __init__(self, *args, **kwargs): - super(OpenStackSwiftConnection, self).__init__(*args, **kwargs) - self._service_type = self._ex_force_service_type or 'object-store' - self._service_name = self._ex_force_service_name or 'swift' - - if self._ex_force_service_region: - self._service_region = self._ex_force_service_region.upper() - else: - self._service_region = None - - def get_endpoint(self, *args, **kwargs): - if '2.0' in self._auth_version: - endpoint = self.service_catalog.get_endpoint( - service_type=self._service_type, - name=self._service_name, - region=self._service_region) - elif ('1.1' in self._auth_version) or ('1.0' in self._auth_version): - endpoint = self.service_catalog.get_endpoint( - name=self._service_name, region=self._service_region) - - if PUBLIC_ENDPOINT_KEY in endpoint: - return endpoint[PUBLIC_ENDPOINT_KEY] - else: - raise LibcloudError('Could not find specified endpoint') - - class CloudFilesStorageDriver(StorageDriver, OpenStackDriverMixin): """ CloudFiles driver. 
@@ -875,8 +906,17 @@ class OpenStackSwiftStorageDriver(CloudFilesStorageDriver): name = 'OpenStack Swift' connectionCls = OpenStackSwiftConnection - def __init__(self, *args, **kwargs): - super(OpenStackSwiftStorageDriver, self).__init__(*args, **kwargs) + # TODO: Reverse the relationship - Swift -> CloudFiles + + def __init__(self, key, secret=None, secure=True, host=None, port=None, + region=None, **kwargs): + super(OpenStackSwiftStorageDriver, self).__init__(key=key, + secret=secret, + secure=secure, + host=host, + port=port, + region=region, + **kwargs) class CloudFilesUKStorageDriver(CloudFilesStorageDriver): From de69f0428ddc8b981623c464d23d5a65435562d9 Mon Sep 17 00:00:00 2001 From: Chris DeRamus Date: Thu, 22 May 2014 08:01:49 -0400 Subject: [PATCH 061/315] LIBCLOUD-560: Add support for new resourcetypes in the 4.3 version of Cloudstack's listResourceLimits response Closes #298 Signed-off-by: Tomaz Muraus --- libcloud/compute/drivers/cloudstack.py | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/libcloud/compute/drivers/cloudstack.py b/libcloud/compute/drivers/cloudstack.py index 0cd48e8e8e..9af9995c97 100644 --- a/libcloud/compute/drivers/cloudstack.py +++ b/libcloud/compute/drivers/cloudstack.py @@ -1809,6 +1809,9 @@ def ex_limits(self): CloudStack uses integers as the resource type so we will convert them to a more human readable string using the resource map + A list of the resource type mappings can be found at + http://goo.gl/17C6Gk + :return: dict :rtype: ``dict`` """ @@ -1825,11 +1828,18 @@ def ex_limits(self): 4: 'max_images', 5: 'max_projects', 6: 'max_networks', - 7: 'max_vpc' + 7: 'max_vpc', + 8: 'max_cpu', + 9: 'max_memory', + 10: 'max_primary_storage', + 11: 'max_secondary_storage' } for limit in result.get('resourcelimit', []): - resource = resource_map[int(limit['resourcetype'])] + # We will ignore unknown types + resource = resource_map.get(int(limit['resourcetype']), None) + if not resource: + continue 
limits[resource] = int(limit['max']) return limits From 3aca6ae03b6a7e7e202ddb5869983343ad9cb24a Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Fri, 23 May 2014 11:15:08 +0200 Subject: [PATCH 062/315] Fix for new versions of CloudStack which have fixed the delete security group rule response. --- libcloud/common/cloudstack.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/libcloud/common/cloudstack.py b/libcloud/common/cloudstack.py index efd31c8277..6c0c30ee67 100644 --- a/libcloud/common/cloudstack.py +++ b/libcloud/common/cloudstack.py @@ -153,7 +153,12 @@ def _sync_request(self, command, action=None, params=None, data=None, command = command.lower() - if command not in ['revokesecuritygroupingress']: + # Work around for older verions which don't return "response" suffix + # in delete ingress rule response command name + if command == 'revokesecuritygroupingress' and \ + not 'revokesecuritygroupingressresponse' in result.object: + command = command + else: command = command + 'response' if command not in result.object: From af535b119f2927649aa5664d0bc00f6dbac36c67 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Fri, 23 May 2014 12:44:17 +0200 Subject: [PATCH 063/315] Fix lint. 
--- libcloud/common/cloudstack.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/libcloud/common/cloudstack.py b/libcloud/common/cloudstack.py index 6c0c30ee67..b13ca09dcd 100644 --- a/libcloud/common/cloudstack.py +++ b/libcloud/common/cloudstack.py @@ -155,9 +155,9 @@ def _sync_request(self, command, action=None, params=None, data=None, # Work around for older verions which don't return "response" suffix # in delete ingress rule response command name - if command == 'revokesecuritygroupingress' and \ - not 'revokesecuritygroupingressresponse' in result.object: - command = command + if (command == 'revokesecuritygroupingress' and not + 'revokesecuritygroupingressresponse' in result.object): + command = command else: command = command + 'response' From 0283f3c3bda0ce181c9060bd34926fcdb24db090 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Fri, 23 May 2014 13:04:21 +0200 Subject: [PATCH 064/315] Fix lint. --- libcloud/common/cloudstack.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/libcloud/common/cloudstack.py b/libcloud/common/cloudstack.py index b13ca09dcd..b23d4539ee 100644 --- a/libcloud/common/cloudstack.py +++ b/libcloud/common/cloudstack.py @@ -155,8 +155,8 @@ def _sync_request(self, command, action=None, params=None, data=None, # Work around for older verions which don't return "response" suffix # in delete ingress rule response command name - if (command == 'revokesecuritygroupingress' and not - 'revokesecuritygroupingressresponse' in result.object): + if (command == 'revokesecuritygroupingress' and + 'revokesecuritygroupingressresponse' not in result.object): command = command else: command = command + 'response' From 074e86ccf898b7083d7de0932fca7492af255456 Mon Sep 17 00:00:00 2001 From: Lior Goikhburg Date: Wed, 21 May 2014 18:54:44 +0400 Subject: [PATCH 065/315] LIBCLOUD-559 Add ex_get_security_groups method to the EC2 driver. 
Closes #297 Signed-off-by: Tomaz Muraus --- libcloud/compute/drivers/ec2.py | 226 +++++++++++++++++++++++++++++--- 1 file changed, 211 insertions(+), 15 deletions(-) diff --git a/libcloud/compute/drivers/ec2.py b/libcloud/compute/drivers/ec2.py index 1f94d79ed2..621a6df6d2 100644 --- a/libcloud/compute/drivers/ec2.py +++ b/libcloud/compute/drivers/ec2.py @@ -64,6 +64,7 @@ 'EC2NodeLocation', 'EC2ReservedNode', + 'EC2SecurityGroup', 'EC2Network', 'EC2NetworkSubnet', 'EC2NetworkInterface', @@ -1403,6 +1404,20 @@ 'transform_func': str } }, + 'security_group': { + 'vpc_id': { + 'xpath': 'vpcId', + 'transform_func': str + }, + 'description': { + 'xpath': 'groupDescription', + 'transform_func': str + }, + 'owner_id': { + 'xpath': 'ownerId', + 'transform_func': str + } + }, 'snapshot': { 'volume_id': { 'xpath': 'volumeId', @@ -1591,6 +1606,25 @@ def __repr__(self): return (('') % (self.id)) +class EC2SecurityGroup(object): + """ + Represents information about a Security group + + Note: This class is EC2 specific. + """ + + def __init__(self, id, name, ingress_rules, egress_rules, extra=None): + self.id = id + self.name = name + self.ingress_rules = ingress_rules + self.egress_rules = egress_rules + self.extra = extra or {} + + def __repr__(self): + return ((' Date: Sun, 25 May 2014 20:32:08 +0200 Subject: [PATCH 066/315] Support for Google-Containers Recently Google have released a google VM Image that comes with docker preinstalled and supports docker provisioning via metadata. Toady in libcloud both debian and centos images are supported by matching against the VM name and then injecting the appropiate project name (`debian-cloud` and `centos-cloud`). This adds support for `google-containers`. 
Closes #299 Signed-off-by: Tomaz Muraus --- CHANGES.rst | 5 +++++ libcloud/compute/drivers/gce.py | 2 ++ 2 files changed, 7 insertions(+) diff --git a/CHANGES.rst b/CHANGES.rst index ff51f6167e..47ff16aa4f 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -150,6 +150,11 @@ Compute (GITHUB-294) [zerthimon] +- Allow user to retrieve container images using ex_get_image method in the + Google Compute Engine driver. + (GITHUB-299, LIBCLOUD-562) + [Magnus Andersson] + Storage ~~~~~~~ diff --git a/libcloud/compute/drivers/gce.py b/libcloud/compute/drivers/gce.py index fe4dee4bcf..b2b76639a5 100644 --- a/libcloud/compute/drivers/gce.py +++ b/libcloud/compute/drivers/gce.py @@ -2305,6 +2305,8 @@ def ex_get_image(self, partial_name): image = self._match_images('debian-cloud', partial_name) elif partial_name.startswith('centos'): image = self._match_images('centos-cloud', partial_name) + elif partial_name.startswith('container-vm'): + image = self._match_images('google-containers', partial_name) return image From 60b8176814d75ee7db9e31f10ccc9dad191bfed7 Mon Sep 17 00:00:00 2001 From: Sebastien Goasguen Date: Mon, 26 May 2014 18:19:12 +0200 Subject: [PATCH 067/315] LIBCLOUD-566: Fix casing in defining keypair private key --- libcloud/compute/drivers/cloudstack.py | 4 ++-- .../compute/fixtures/cloudstack/createSSHKeyPair_default.json | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/libcloud/compute/drivers/cloudstack.py b/libcloud/compute/drivers/cloudstack.py index 9af9995c97..2b69d84e44 100644 --- a/libcloud/compute/drivers/cloudstack.py +++ b/libcloud/compute/drivers/cloudstack.py @@ -1968,8 +1968,8 @@ def _to_key_pairs(self, data): def _to_key_pair(self, data): key_pair = KeyPair(name=data['name'], fingerprint=data['fingerprint'], - public_key=data.get('publicKey', None), - private_key=data.get('privateKey', None), + public_key=data.get('publickey', None), + private_key=data.get('privatekey', None), driver=self) return key_pair diff --git 
a/libcloud/test/compute/fixtures/cloudstack/createSSHKeyPair_default.json b/libcloud/test/compute/fixtures/cloudstack/createSSHKeyPair_default.json index 2d0740d151..a4892a70a1 100644 --- a/libcloud/test/compute/fixtures/cloudstack/createSSHKeyPair_default.json +++ b/libcloud/test/compute/fixtures/cloudstack/createSSHKeyPair_default.json @@ -1 +1 @@ -{ "createsshkeypairresponse" : { "keypair" : {"name":"test-keypair","fingerprint":"51:9f:81:30:ec:82:0c:e5:8c:81:ac:14:27:d0:e5:e2","privateKey":"-----BEGIN RSA PRIVATE KEY-----\nMIICXQIBAAKBgQDMaSZY4v228AWWcXYLoojgaZ+K8SbuI8YoPDEi9UWcww5mWSTx\nVl6Ksb8YPFxL6+3/unlfr4zK1LksxgN8XRuZr+YBFGphUB6a5EcyshkXi3mfAE7d\n6a26ah6ySXFK9GmZoXcJqQ1xLC9rKGPL7tWgHmbX1lCbN6QinV0mZVEHNwIDAQAB\nAoGACXQngN7mqwpIx99xfTJEMFTSOyPSEBt5c6zs/NfpI0nmJZej3MGI19NGqkFI\nZ35+4F/ocyN0WIEkG00BJkRMHWdPNd+YnVSuVgEyGCD8hDvBbUEQrmdZ0VfQt+2q\nd52g573s6D6Skk/SZHGi3yHca4H52c3EpLJzThxUmJSSqmECQQD0loEIiQzQaap3\n/Gce7nZeLCSNXf0Q5aKFQv/X22srw6YvJ9/25cLahiFtQUadId9VUXSYTgEKX0ST\nB2CZ4UJxAkEA1fK/PT+YIHaiQIiCK/xTnoIuTvdXmH0IozolRxGAKpQZNvaMpKgn\nvXU84/yztekEPG0pKmCm7CZUZoGdfiJoJwJALwUsAy8NtpdJvU1ZqbmgKdSEpmS2\nPORYjRPnSWEWRlCThyc8SCO9hPMaQ/2zjIuxep5xMsJ0MsFD1pwpdwu2EQJBAMrG\nEZ7ZQTOzfMAxIT7THeWjeIR7RNhP2PnrSB19Zr30M5m2P0Jn5ZJZJWbnwOPuf4dN\n5rA1fr9e7KtiuYQs1A0CQQCT06qHdHaQr78A6YTEbDVr0M57qVrdsm5xyXzCmpMy\n9LxXAACghjHbjF//FEOjNG21IutbCg6cNIRz5EM8+MD+\n-----END RSA PRIVATE KEY-----\n"} } } +{ "createsshkeypairresponse" : { "keypair" : {"name":"test-keypair","fingerprint":"51:9f:81:30:ec:82:0c:e5:8c:81:ac:14:27:d0:e5:e2","privatekey":"-----BEGIN RSA PRIVATE 
KEY-----\nMIICXQIBAAKBgQDMaSZY4v228AWWcXYLoojgaZ+K8SbuI8YoPDEi9UWcww5mWSTx\nVl6Ksb8YPFxL6+3/unlfr4zK1LksxgN8XRuZr+YBFGphUB6a5EcyshkXi3mfAE7d\n6a26ah6ySXFK9GmZoXcJqQ1xLC9rKGPL7tWgHmbX1lCbN6QinV0mZVEHNwIDAQAB\nAoGACXQngN7mqwpIx99xfTJEMFTSOyPSEBt5c6zs/NfpI0nmJZej3MGI19NGqkFI\nZ35+4F/ocyN0WIEkG00BJkRMHWdPNd+YnVSuVgEyGCD8hDvBbUEQrmdZ0VfQt+2q\nd52g573s6D6Skk/SZHGi3yHca4H52c3EpLJzThxUmJSSqmECQQD0loEIiQzQaap3\n/Gce7nZeLCSNXf0Q5aKFQv/X22srw6YvJ9/25cLahiFtQUadId9VUXSYTgEKX0ST\nB2CZ4UJxAkEA1fK/PT+YIHaiQIiCK/xTnoIuTvdXmH0IozolRxGAKpQZNvaMpKgn\nvXU84/yztekEPG0pKmCm7CZUZoGdfiJoJwJALwUsAy8NtpdJvU1ZqbmgKdSEpmS2\nPORYjRPnSWEWRlCThyc8SCO9hPMaQ/2zjIuxep5xMsJ0MsFD1pwpdwu2EQJBAMrG\nEZ7ZQTOzfMAxIT7THeWjeIR7RNhP2PnrSB19Zr30M5m2P0Jn5ZJZJWbnwOPuf4dN\n5rA1fr9e7KtiuYQs1A0CQQCT06qHdHaQr78A6YTEbDVr0M57qVrdsm5xyXzCmpMy\n9LxXAACghjHbjF//FEOjNG21IutbCg6cNIRz5EM8+MD+\n-----END RSA PRIVATE KEY-----\n"} } } From 1f34dd14404d05208120cb1b18dc522129a7496f Mon Sep 17 00:00:00 2001 From: Sebastien Goasguen Date: Mon, 26 May 2014 18:22:23 +0200 Subject: [PATCH 068/315] Update CHANGES file to reflect fix of LIBCLOUD-566 --- CHANGES.rst | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGES.rst b/CHANGES.rst index 47ff16aa4f..8be428f94f 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -22,6 +22,10 @@ General Compute ~~~~~~~ +- Fix create_key_pair method which was not returning private key. 
+ (LIBCLOUD-566) + [Sebastien Goasguen] + - Map "Stopped" node state in the CloudStack driver to NodeState.STOPPED instead of NodeState.TERMINATED, "Stopping" to NodeState.PENDING instead of NodeState.TERMINATED and "Expunging" to NodeState.PENDING instead of From 0452d98fd858c277a4c0289b697defbdb50b3fab Mon Sep 17 00:00:00 2001 From: Sebastien Goasguen Date: Mon, 26 May 2014 18:41:02 +0200 Subject: [PATCH 069/315] Fixing flake8 error --- libcloud/compute/drivers/ec2.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libcloud/compute/drivers/ec2.py b/libcloud/compute/drivers/ec2.py index 621a6df6d2..f50cca698f 100644 --- a/libcloud/compute/drivers/ec2.py +++ b/libcloud/compute/drivers/ec2.py @@ -4144,7 +4144,7 @@ def _to_security_group_rule(self, element): xpath='toPort', namespace=NAMESPACE) - # get security groups + # get security groups elements = element.findall(fixxpath( xpath='groups/item', namespace=NAMESPACE From 4ffa70380ee76ecf0b4b3947d90b6e086085a44b Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Tue, 27 May 2014 14:21:55 +0200 Subject: [PATCH 070/315] Add new driver for Kili public cloud. Kili public cloud is a public cloud provider in Africa powered by OpenStack. 
--- CHANGES.rst | 3 + .../_supported_methods_block_storage.rst | 2 + .../_supported_methods_image_management.rst | 2 + ..._supported_methods_key_pair_management.rst | 2 + docs/compute/_supported_methods_main.rst | 2 + docs/compute/_supported_providers.rst | 2 + docs/compute/drivers/kili.rst | 35 ++++++++ .../compute/openstack/kilicloud_native.py | 10 +++ libcloud/compute/drivers/kili.py | 87 +++++++++++++++++++ libcloud/compute/providers.py | 2 + libcloud/compute/types.py | 1 + 11 files changed, 148 insertions(+) create mode 100644 docs/compute/drivers/kili.rst create mode 100644 docs/examples/compute/openstack/kilicloud_native.py create mode 100644 libcloud/compute/drivers/kili.py diff --git a/CHANGES.rst b/CHANGES.rst index 8be428f94f..e1e0a39ddc 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -159,6 +159,9 @@ Compute (GITHUB-299, LIBCLOUD-562) [Magnus Andersson] +- Add new driver for Kili public cloud (http://kili.io/) + [Tomaz Muraus] + Storage ~~~~~~~ diff --git a/docs/compute/_supported_methods_block_storage.rst b/docs/compute/_supported_methods_block_storage.rst index 36aa971aa8..1776d0aa5d 100644 --- a/docs/compute/_supported_methods_block_storage.rst +++ b/docs/compute/_supported_methods_block_storage.rst @@ -39,6 +39,7 @@ Provider list volumes create volume destroy volume `IBM SmartCloud Enterprise`_ yes yes yes yes yes no no `Ikoula`_ yes yes yes yes yes no no `Joyent`_ no no no no no no no +`Kili Public Cloud`_ yes yes yes yes yes no no `KTUCloud`_ yes yes yes yes yes no no `Libvirt`_ no no no no no no no `Linode`_ no no no no no no no @@ -101,6 +102,7 @@ Provider list volumes create volume destroy volume .. _`IBM SmartCloud Enterprise`: http://ibm.com/services/us/en/cloud-enterprise/ .. _`Ikoula`: http://express.ikoula.co.uk/cloudstack .. _`Joyent`: http://www.joyentcloud.com +.. _`Kili Public Cloud`: http://kili.io/ .. _`KTUCloud`: https://ucloudbiz.olleh.com/ .. _`Libvirt`: http://libvirt.org/ .. 
_`Linode`: http://www.linode.com/ diff --git a/docs/compute/_supported_methods_image_management.rst b/docs/compute/_supported_methods_image_management.rst index 5516774240..07343740cb 100644 --- a/docs/compute/_supported_methods_image_management.rst +++ b/docs/compute/_supported_methods_image_management.rst @@ -39,6 +39,7 @@ Provider list images get image create image delete `IBM SmartCloud Enterprise`_ yes no yes no no `Ikoula`_ yes no no no no `Joyent`_ yes no no no no +`Kili Public Cloud`_ yes yes yes yes no `KTUCloud`_ yes no no no no `Libvirt`_ no no no no no `Linode`_ yes no no no no @@ -101,6 +102,7 @@ Provider list images get image create image delete .. _`IBM SmartCloud Enterprise`: http://ibm.com/services/us/en/cloud-enterprise/ .. _`Ikoula`: http://express.ikoula.co.uk/cloudstack .. _`Joyent`: http://www.joyentcloud.com +.. _`Kili Public Cloud`: http://kili.io/ .. _`KTUCloud`: https://ucloudbiz.olleh.com/ .. _`Libvirt`: http://libvirt.org/ .. _`Linode`: http://www.linode.com/ diff --git a/docs/compute/_supported_methods_key_pair_management.rst b/docs/compute/_supported_methods_key_pair_management.rst index 938ceeb8c6..f474959c7e 100644 --- a/docs/compute/_supported_methods_key_pair_management.rst +++ b/docs/compute/_supported_methods_key_pair_management.rst @@ -39,6 +39,7 @@ Provider list key pairs get key pair create key pai `IBM SmartCloud Enterprise`_ no no no no no no `Ikoula`_ yes yes yes yes no yes `Joyent`_ no no no no no no +`Kili Public Cloud`_ yes yes yes yes no yes `KTUCloud`_ yes yes yes yes no yes `Libvirt`_ no no no no no no `Linode`_ no no no no no no @@ -101,6 +102,7 @@ Provider list key pairs get key pair create key pai .. _`IBM SmartCloud Enterprise`: http://ibm.com/services/us/en/cloud-enterprise/ .. _`Ikoula`: http://express.ikoula.co.uk/cloudstack .. _`Joyent`: http://www.joyentcloud.com +.. _`Kili Public Cloud`: http://kili.io/ .. _`KTUCloud`: https://ucloudbiz.olleh.com/ .. _`Libvirt`: http://libvirt.org/ .. 
_`Linode`: http://www.linode.com/ diff --git a/docs/compute/_supported_methods_main.rst b/docs/compute/_supported_methods_main.rst index 3510369672..5fab3493b0 100644 --- a/docs/compute/_supported_methods_main.rst +++ b/docs/compute/_supported_methods_main.rst @@ -39,6 +39,7 @@ Provider list nodes create node reboot node destroy `IBM SmartCloud Enterprise`_ yes yes yes yes yes yes no `Ikoula`_ yes yes yes yes yes yes yes `Joyent`_ yes yes yes yes yes yes yes +`Kili Public Cloud`_ yes yes yes yes yes yes yes `KTUCloud`_ yes yes yes yes yes yes yes `Libvirt`_ yes no yes yes no no no `Linode`_ yes yes yes yes yes yes yes @@ -101,6 +102,7 @@ Provider list nodes create node reboot node destroy .. _`IBM SmartCloud Enterprise`: http://ibm.com/services/us/en/cloud-enterprise/ .. _`Ikoula`: http://express.ikoula.co.uk/cloudstack .. _`Joyent`: http://www.joyentcloud.com +.. _`Kili Public Cloud`: http://kili.io/ .. _`KTUCloud`: https://ucloudbiz.olleh.com/ .. _`Libvirt`: http://libvirt.org/ .. _`Linode`: http://www.linode.com/ diff --git a/docs/compute/_supported_providers.rst b/docs/compute/_supported_providers.rst index 0528390679..53c7099dc7 100644 --- a/docs/compute/_supported_providers.rst +++ b/docs/compute/_supported_providers.rst @@ -39,6 +39,7 @@ Provider Documentation `IBM SmartCloud Enterprise`_ IBM :mod:`libcloud.compute.drivers.ibm_sce` :class:`IBMNodeDriver` `Ikoula`_ :doc:`Click ` IKOULA :mod:`libcloud.compute.drivers.ikoula` :class:`IkoulaNodeDriver` `Joyent`_ JOYENT :mod:`libcloud.compute.drivers.joyent` :class:`JoyentNodeDriver` +`Kili Public Cloud`_ :doc:`Click ` KILI :mod:`libcloud.compute.drivers.kili` :class:`KiliCloudNodeDriver` `KTUCloud`_ KTUCLOUD :mod:`libcloud.compute.drivers.ktucloud` :class:`KTUCloudNodeDriver` `Libvirt`_ :doc:`Click ` LIBVIRT :mod:`libcloud.compute.drivers.libvirt_driver` :class:`LibvirtNodeDriver` `Linode`_ LINODE :mod:`libcloud.compute.drivers.linode` :class:`LinodeNodeDriver` @@ -101,6 +102,7 @@ Provider Documentation .. 
_`IBM SmartCloud Enterprise`: http://ibm.com/services/us/en/cloud-enterprise/ .. _`Ikoula`: http://express.ikoula.co.uk/cloudstack .. _`Joyent`: http://www.joyentcloud.com +.. _`Kili Public Cloud`: http://kili.io/ .. _`KTUCloud`: https://ucloudbiz.olleh.com/ .. _`Libvirt`: http://libvirt.org/ .. _`Linode`: http://www.linode.com/ diff --git a/docs/compute/drivers/kili.rst b/docs/compute/drivers/kili.rst new file mode 100644 index 0000000000..71a93e24a0 --- /dev/null +++ b/docs/compute/drivers/kili.rst @@ -0,0 +1,35 @@ +Kili Cloud Computer Driver Documentation +======================================== + +`Kili Cloud`_ is a public cloud provider based in Africa. + +Kili Cloud driver is based on the OpenStack one. For more information +information and OpenStack specific documentation, please refer to +:doc:`OpenStack Compute Driver Documentation ` page. + +Examples +-------- + +1. Instantiating the driver +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Unlike other OpenStack based providers, Kili cloud also requires you to specify +tenant name when connecting to their cloud. You can do that by passing +``tenant_name`` argument to the driver constructor as shown in the code example +bellow. + +This attribute represents a company name you have entered while signing up for +Kili. + +.. literalinclude:: /examples/compute/openstack/kilicloud_native.py + :language: python + +API Docs +-------- + +.. autoclass:: libcloud.compute.drivers.kili.KiliCloudNodeDriver + :members: + :inherited-members: + +.. _`Kili Cloud`: http://kili.io/ +.. 
_`Kili Cloud dashboard`: https://dash.kili.io diff --git a/docs/examples/compute/openstack/kilicloud_native.py b/docs/examples/compute/openstack/kilicloud_native.py new file mode 100644 index 0000000000..2c83e12fb6 --- /dev/null +++ b/docs/examples/compute/openstack/kilicloud_native.py @@ -0,0 +1,10 @@ +from libcloud.compute.types import Provider +from libcloud.compute.providers import get_driver + +USERNAME = 'your account username' +PASSWORD = 'your account password' +TENANT_NAME = 'project name' + +cls = get_driver(Provider.KILI) +driver = cls(USERNAME, PASSWORD, tenant_name=TENANT_NAME) +print(driver.list_nodes()) diff --git a/libcloud/compute/drivers/kili.py b/libcloud/compute/drivers/kili.py new file mode 100644 index 0000000000..de610676e6 --- /dev/null +++ b/libcloud/compute/drivers/kili.py @@ -0,0 +1,87 @@ +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +HP Public cloud driver which is esentially just a small wrapper around +OpenStack driver. 
+""" + +from libcloud.compute.types import Provider, LibcloudError +from libcloud.compute.drivers.openstack import OpenStack_1_1_Connection +from libcloud.compute.drivers.openstack import OpenStack_1_1_NodeDriver + +__all__ = [ + 'KiliCloudNodeDriver' +] + +ENDPOINT_ARGS = { + 'service_type': 'compute', + 'name': 'nova', + 'region': 'RegionOne' +} + +AUTH_URL = 'https://api.kili.io/keystone/v2.0/tokens' + + +class KiliCloudConnection(OpenStack_1_1_Connection): + _auth_version = '2.0_password' + + def __init__(self, *args, **kwargs): + self.region = kwargs.pop('region', None) + self.get_endpoint_args = kwargs.pop('get_endpoint_args', None) + super(KiliCloudConnection, self).__init__(*args, **kwargs) + + def get_endpoint(self): + if not self.get_endpoint_args: + raise LibcloudError( + 'KiliCloudConnection must have get_endpoint_args set') + + if '2.0_password' in self._auth_version: + ep = self.service_catalog.get_endpoint(**self.get_endpoint_args) + else: + raise LibcloudError( + 'Auth version "%s" not supported' % (self._auth_version)) + + public_url = ep.get('publicURL', None) + + if not public_url: + raise LibcloudError('Could not find specified endpoint') + + return public_url + + +class KiliCloudNodeDriver(OpenStack_1_1_NodeDriver): + name = 'Kili Public Cloud' + website = 'http://kili.io/' + connectionCls = KiliCloudConnection + type = Provider.HPCLOUD + + def __init__(self, key, secret, tenant_name, secure=True, + host=None, port=None, **kwargs): + """ + Note: tenant_name argument is required for Kili cloud. 
+ """ + self.tenant_name = tenant_name + super(KiliCloudNodeDriver, self).__init__(key=key, secret=secret, + secure=secure, host=host, + port=port, + **kwargs) + + def _ex_connection_class_kwargs(self): + kwargs = self.openstack_connection_kwargs() + kwargs['get_endpoint_args'] = ENDPOINT_ARGS + kwargs['ex_force_auth_url'] = AUTH_URL + kwargs['ex_tenant_name'] = self.tenant_name + + return kwargs diff --git a/libcloud/compute/providers.py b/libcloud/compute/providers.py index 3de579c0b9..87b3f321d0 100644 --- a/libcloud/compute/providers.py +++ b/libcloud/compute/providers.py @@ -81,6 +81,8 @@ ('libcloud.compute.drivers.rackspace', 'RackspaceFirstGenNodeDriver'), Provider.HPCLOUD: ('libcloud.compute.drivers.hpcloud', 'HPCloudNodeDriver'), + Provider.KILI: + ('libcloud.compute.drivers.kili', 'KiliCloudNodeDriver'), Provider.VPSNET: ('libcloud.compute.drivers.vpsnet', 'VPSNetNodeDriver'), Provider.LINODE: diff --git a/libcloud/compute/types.py b/libcloud/compute/types.py index b2edf2d460..9f7a308cce 100644 --- a/libcloud/compute/types.py +++ b/libcloud/compute/types.py @@ -124,6 +124,7 @@ class Provider(object): # OpenStack based providers HPCLOUD = 'hpcloud' + KILI = 'kili' # Deprecated constants which are still supported EC2_US_EAST = 'ec2_us_east' From 1ad8e03967e15e93c7fe72ee75f0fdce29f73999 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Wed, 28 May 2014 16:29:17 +0200 Subject: [PATCH 071/315] Add "timeout" argument to ParamikoSSHClient.run method. If this argument is specified, run method will throw if the command doesn't finish in the provded time period. 
--- CHANGES.rst | 12 +++++++++++ libcloud/compute/ssh.py | 47 +++++++++++++++++++++++++++++++++++------ 2 files changed, 52 insertions(+), 7 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index e1e0a39ddc..3d97feea42 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -162,6 +162,18 @@ Compute - Add new driver for Kili public cloud (http://kili.io/) [Tomaz Muraus] +- Add "timeout" argument to the ParamikoSSHClient.run method. If this argument + is specified and the command passed to run method doesn't finish in the + defined timeout, `SSHCommandTimeoutError` is throw and the connection to the + remote server is closed. + + Note #1: If timed out happens, this functionality doesn't guarantee that the + underlying command will be stopped / killed. The way it works it simply + closes a connect to the remote server. + [Tomaz Muraus] + + Note #2: "timeout" argument is only available in the Paramiko SSH client. + Storage ~~~~~~~ diff --git a/libcloud/compute/ssh.py b/libcloud/compute/ssh.py index 68a4da487f..fe9d856001 100644 --- a/libcloud/compute/ssh.py +++ b/libcloud/compute/ssh.py @@ -41,11 +41,30 @@ from libcloud.utils.logging import ExtraLogFormatter from libcloud.utils.py3 import StringIO +__all__ = [ + 'BaseSSHClient', + 'ParamikoSSHClient', + 'ShellOutSSHClient', + + 'SSHCommandTimeoutError' +] + # Maximum number of bytes to read at once from a socket CHUNK_SIZE = 1024 +class SSHCommandTimeoutError(Exception): + """ + Exception which is raised when an SSH command times out. + """ + def __init__(self, cmd, timeout): + self.cmd = cmd + self.timeout = timeout + message = 'Command didn\'t finish in %s seconds' % (timeout) + super(SSHCommandTimeoutError, self).__init__(message) + + class BaseSSHClient(object): """ Base class representing a connection over SSH/SCP to a remote node. @@ -92,8 +111,8 @@ def connect(self): """ Connect to the remote node over SSH. - :return: True if the connection has been successfully established, - False otherwise. 
+ :return: True if the connection has been successfuly established, False + otherwise. :rtype: ``bool`` """ raise NotImplementedError( @@ -128,8 +147,8 @@ def delete(self, path): :type path: ``str`` :keyword path: File path on the remote node. - :return: True if the file has been successfully deleted, - False otherwise. + :return: True if the file has been successfuly deleted, False + otherwise. :rtype: ``bool`` """ raise NotImplementedError( @@ -151,8 +170,8 @@ def close(self): """ Shutdown connection to the remote node. - :return: True if the connection has been successfully closed, - False otherwise. + :return: True if the connection has been successfuly closed, False + otherwise. :rtype: ``bool`` """ raise NotImplementedError( @@ -285,10 +304,14 @@ def delete(self, path): sftp.close() return True - def run(self, cmd): + def run(self, cmd, timeout=None): """ Note: This function is based on paramiko's exec_command() method. + + :param timeout: How long to wait (in seconds) for the command to + finish (optional). + :type timeout: ``float`` """ extra = {'_cmd': cmd} self.logger.debug('Executing command', extra=extra) @@ -299,6 +322,7 @@ def run(self, cmd): transport = self.client.get_transport() chan = transport.open_session() + start_time = time.time() chan.exec_command(cmd) stdout = StringIO() @@ -320,6 +344,15 @@ def run(self, cmd): exit_status_ready = chan.exit_status_ready() while not exit_status_ready: + current_time = time.time() + elapsed_time = (current_time - start_time) + + if timeout and (elapsed_time > timeout): + # TODO: Is this the right way to clean up? + chan.close() + + raise SSHCommandTimeoutError(cmd=cmd, timeout=timeout) + if chan.recv_ready(): data = chan.recv(CHUNK_SIZE) From 91f8df844a1dc0d288b969d89465b707a541efd6 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Wed, 28 May 2014 17:16:32 +0200 Subject: [PATCH 072/315] Add __repr__ and __str__ methods. 
--- libcloud/compute/ssh.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/libcloud/compute/ssh.py b/libcloud/compute/ssh.py index fe9d856001..af9c43a529 100644 --- a/libcloud/compute/ssh.py +++ b/libcloud/compute/ssh.py @@ -64,6 +64,13 @@ def __init__(self, cmd, timeout): message = 'Command didn\'t finish in %s seconds' % (timeout) super(SSHCommandTimeoutError, self).__init__(message) + def __repr__(self): + return ('' % + (self.cmd, self.timeout)) + + def __str__(self): + return self.message + class BaseSSHClient(object): """ From 21a0d069d0a138ab42ce4403ab553c7fc3e4bbb6 Mon Sep 17 00:00:00 2001 From: Csaba Hoch Date: Thu, 10 Apr 2014 11:33:33 +0200 Subject: [PATCH 073/315] Fix various bugs and add various improvements: - Fix typos in docstrings and comments - Add import_key_pair_from_string to dummy compute driver. - Fix ec2.import_key_pair_from_string for Python 3 - Fix publickey._to_md5_fingerprint for Python 3 - Fix publickey.get_pubkey_ssh2_fingerprint for Python 3 - ec2 driver: Make cidr_ips argument mandatory - Add "floating IP" functions to the OpenStack provider Closes #301 Signed-off-by: Tomaz Muraus --- CHANGES.rst | 18 +++++++ libcloud/compute/base.py | 4 +- libcloud/compute/drivers/dummy.py | 9 ++++ libcloud/compute/drivers/ec2.py | 16 +++--- libcloud/compute/drivers/openstack.py | 74 ++++++++++++++++++++++++--- libcloud/test/test_utils.py | 16 ++++++ libcloud/utils/publickey.py | 7 ++- libcloud/utils/py3.py | 29 ++++++++++- 8 files changed, 153 insertions(+), 20 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index 3d97feea42..51cb59fca3 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -19,6 +19,14 @@ General (LIBCLOUD-552) [Tomaz Muraus] +- Fix Python 3 compatibility bugs in the following functions: + + * import_key_pair_from_string in the EC2 driver + * publickey._to_md5_fingerprint + * publickey.get_pubkey_ssh2_fingerprint + (GITHUB-301) + [Csaba Hoch] + Compute ~~~~~~~ @@ -174,6 +182,16 @@ Compute Note #2: "timeout" argument is only 
available in the Paramiko SSH client. +- Make "cidrs_ips" argument in the ex_authorize_security_group_egress method in + the EC2 driver mandatory. + (GITHUB-301) + [Csaba Hoch] + +- Add extension methods for manging floating IPs (ex_get_floating_ip, + ex_create_floating_ip, ex_delete_floating_ip) to the Openstack 1.1 driver. + (GITHUB-301) + [Csaba Hoch] + Storage ~~~~~~~ diff --git a/libcloud/compute/base.py b/libcloud/compute/base.py index 06a9249db6..d971afa35a 100644 --- a/libcloud/compute/base.py +++ b/libcloud/compute/base.py @@ -135,7 +135,7 @@ class Node(UuidMixin): >>> node.name 'dummy-1' - the node keeps a reference to its own driver which means that we + The node keeps a reference to its own driver which means that we can work on nodes from different providers without having to know which is which. @@ -146,7 +146,7 @@ class Node(UuidMixin): >>> node2.driver.creds 72 - Althrough Node objects can be subclassed, this isn't normally + Although Node objects can be subclassed, this isn't normally done. Instead, any driver specific information is stored in the "extra" attribute of the node. 
diff --git a/libcloud/compute/drivers/dummy.py b/libcloud/compute/drivers/dummy.py index f8e6fe16ca..982433525b 100644 --- a/libcloud/compute/drivers/dummy.py +++ b/libcloud/compute/drivers/dummy.py @@ -24,6 +24,7 @@ from libcloud.common.base import ConnectionKey from libcloud.compute.base import NodeImage, NodeSize, Node from libcloud.compute.base import NodeDriver, NodeLocation +from libcloud.compute.base import KeyPair from libcloud.compute.types import Provider, NodeState @@ -326,6 +327,14 @@ def create_node(self, **kwargs): self.nl.append(n) return n + def import_key_pair_from_string(self, name, key_material): + key_pair = KeyPair(name=name, + public_key=key_material, + fingerprint='fingerprint', + private_key='private_key', + driver=self) + return key_pair + def _ip_to_int(ip): return socket.htonl(struct.unpack('I', socket.inet_aton(ip))[0]) diff --git a/libcloud/compute/drivers/ec2.py b/libcloud/compute/drivers/ec2.py index f50cca698f..5f9fb9bcca 100644 --- a/libcloud/compute/drivers/ec2.py +++ b/libcloud/compute/drivers/ec2.py @@ -28,7 +28,7 @@ except ImportError: from xml.etree import ElementTree as ET -from libcloud.utils.py3 import b, basestring +from libcloud.utils.py3 import b, basestring, ensure_string from libcloud.utils.xml import fixxpath, findtext, findattr, findall from libcloud.utils.publickey import get_pubkey_ssh2_fingerprint @@ -1962,8 +1962,8 @@ def create_node(self, **kwargs): if 'auth' in kwargs: auth = self._get_and_check_auth(kwargs['auth']) - params['KeyName'] = \ - self.ex_find_or_import_keypair_by_key_material(auth.pubkey) + key = self.ex_find_or_import_keypair_by_key_material(auth.pubkey) + params['KeyName'] = key['keyName'] if 'ex_keyname' in kwargs: params['KeyName'] = kwargs['ex_keyname'] @@ -2173,7 +2173,7 @@ def create_key_pair(self, name): return key_pair def import_key_pair_from_string(self, name, key_material): - base64key = base64.b64encode(b(key_material)) + base64key = ensure_string(base64.b64encode(b(key_material))) 
params = { 'Action': 'ImportKeyPair', @@ -2600,8 +2600,8 @@ def ex_create_security_group(self, name, description, vpc_id=None): Group. :type description: ``str`` - :param description: Optional identifier for VPC networks - :type description: ``str`` + :param vpc_id: Optional identifier for VPC networks + :type vpc_id: ``str`` :rtype: ``dict`` """ @@ -2760,7 +2760,7 @@ def ex_authorize_security_group_ingress(self, id, from_port, to_port, return element == 'true' def ex_authorize_security_group_egress(self, id, from_port, to_port, - cidr_ips=None, group_pairs=None, + cidr_ips, group_pairs=None, protocol='tcp'): """ Edit a Security Group to allow specific egress traffic using @@ -4590,7 +4590,7 @@ def _get_common_security_group_params(self, group_id, protocol, :rtype: ``dict`` """ - params = {'GroupId': id, + params = {'GroupId': group_id, 'IpPermissions.1.IpProtocol': protocol, 'IpPermissions.1.FromPort': from_port, 'IpPermissions.1.ToPort': to_port} diff --git a/libcloud/compute/drivers/openstack.py b/libcloud/compute/drivers/openstack.py index 27f1acec5d..5a85201a60 100644 --- a/libcloud/compute/drivers/openstack.py +++ b/libcloud/compute/drivers/openstack.py @@ -1758,7 +1758,7 @@ def ex_delete_security_group(self, security_group): resp = self.connection.request('/os-security-groups/%s' % (security_group.id), method='DELETE') - return resp.status == httplib.NO_CONTENT + return resp.status in (httplib.NO_CONTENT, httplib.ACCEPTED) def ex_create_security_group_rule(self, security_group, ip_protocol, from_port, to_port, cidr=None, @@ -2168,6 +2168,63 @@ def ex_list_floating_ip_pools(self): return self._to_floating_ip_pools( self.connection.request('/os-floating-ip-pools').object) + def _to_floating_ips(self, obj): + ip_elements = obj['floating_ips'] + return [self._to_floating_ip(ip) for ip in ip_elements] + + def _to_floating_ip(self, obj): + return OpenStack_1_1_FloatingIpAddress(obj['id'], obj['ip'], self, + obj['instance_id']) + + def ex_list_floating_ips(self): + 
""" + List floating IPs + + :rtype: ``list`` of :class:`OpenStack_1_1_FloatingIpAddress` + """ + return self._to_floating_ips( + self.connection.request('/os-floating-ips').object) + + def ex_get_floating_ip(self, ip): + """ + Get specified floating IP + + :param ip: floating IP to get + :type ip: ``str`` + + :rtype: :class:`OpenStack_1_1_FloatingIpAddress` + """ + floating_ips = self.ex_list_floating_ips() + ip_obj, = [x for x in floating_ips if x.ip_address == ip] + return ip_obj + + def ex_create_floating_ip(self): + """ + Create new floating IP + + :rtype: :class:`OpenStack_1_1_FloatingIpAddress` + """ + resp = self.connection.request('/os-floating-ips', + method='POST', + data={}) + data = resp.object['floating_ip'] + id = data['id'] + ip_address = data['ip'] + return OpenStack_1_1_FloatingIpAddress(id, ip_address, self) + + def ex_delete_floating_ip(self, ip): + """ + Delete specified floating IP + + :param ip: floating IP to remove + :type ip::class:`OpenStack_1_1_FloatingIpAddress` + + :rtype: ``bool`` + """ + resp = self.connection.request('/os-floating-ips/%s' % ip.id, + method='DELETE') + return resp.status in (httplib.NO_CONTENT, httplib.ACCEPTED) + def ex_attach_floating_ip_to_node(self, node, ip): """ Attach the floating IP to the node @@ -2276,7 +2333,7 @@ def get_floating_ip(self, ip): """ Get specified floating IP from the pool - :param ip: floating IP to remove + :param ip: floating IP to get :type ip: ``str`` :rtype: :class:`OpenStack_1_1_FloatingIpAddress` @@ -2320,11 +2377,12 @@ class OpenStack_1_1_FloatingIpAddress(object): Floating IP info. 
""" - def __init__(self, id, ip_address, pool, node_id=None): + def __init__(self, id, ip_address, pool, node_id=None, driver=None): self.id = str(id) self.ip_address = ip_address self.pool = pool self.node_id = node_id + self.driver = driver def delete(self): """ @@ -2332,8 +2390,12 @@ def delete(self): :rtype: ``bool`` """ - return self.pool.delete_floating_ip(self) + if self.pool is not None: + return self.pool.delete_floating_ip(self) + elif self.driver is not None: + return self.driver.ex_delete_floating_ip(self) def __repr__(self): - return ('' - % (self.id, self.ip_address, self.pool)) + return ('' + % (self.id, self.ip_address, self.pool, self.driver)) diff --git a/libcloud/test/test_utils.py b/libcloud/test/test_utils.py index 1d900d4b0e..58b350662e 100644 --- a/libcloud/test/test_utils.py +++ b/libcloud/test/test_utils.py @@ -33,6 +33,8 @@ from libcloud.utils.py3 import PY3 from libcloud.utils.py3 import StringIO from libcloud.utils.py3 import b +from libcloud.utils.py3 import bchr +from libcloud.utils.py3 import hexadigits from libcloud.utils.py3 import urlquote from libcloud.compute.types import Provider from libcloud.compute.providers import DRIVERS @@ -262,6 +264,20 @@ def test_get_secure_random_string(self): value = get_secure_random_string(size=i) self.assertEqual(len(value), i) + def test_hexadigits(self): + self.assertEqual(hexadigits(b('')), []) + self.assertEqual(hexadigits(b('a')), ['61']) + self.assertEqual(hexadigits(b('AZaz09-')), + ['41', '5a', '61', '7a', '30', '39', '2d']) + + def test_bchr(self): + if PY3: + self.assertEqual(bchr(0), b'\x00') + self.assertEqual(bchr(97), b'a') + else: + self.assertEqual(bchr(0), '\x00') + self.assertEqual(bchr(97), 'a') + class NetworkingUtilsTestCase(unittest.TestCase): def test_is_public_and_is_private_subnet(self): diff --git a/libcloud/utils/publickey.py b/libcloud/utils/publickey.py index d9e59b92e4..86c6ec30c1 100644 --- a/libcloud/utils/publickey.py +++ b/libcloud/utils/publickey.py @@ -16,6 +16,9 
@@ import base64 import hashlib +from libcloud.utils.py3 import hexadigits +from libcloud.utils.py3 import bchr + __all__ = [ 'get_pubkey_openssh_fingerprint', 'get_pubkey_ssh2_fingerprint', @@ -32,7 +35,7 @@ def _to_md5_fingerprint(data): hashed = hashlib.md5(data).digest() - return ":".join(x.encode("hex") for x in hashed) + return ":".join(hexadigits(hashed)) def get_pubkey_openssh_fingerprint(pubkey): @@ -53,7 +56,7 @@ def get_pubkey_ssh2_fingerprint(pubkey): k = importKey(pubkey) derPK = DerSequence([k.n, k.e]) bitmap = DerObject('BIT STRING') - bitmap.payload = chr(0x00) + derPK.encode() + bitmap.payload = bchr(0x00) + derPK.encode() der = DerSequence([algorithmIdentifier, bitmap.encode()]) return _to_md5_fingerprint(der.encode()) diff --git a/libcloud/utils/py3.py b/libcloud/utils/py3.py index 797d317c68..1f7d229ec6 100644 --- a/libcloud/utils/py3.py +++ b/libcloud/utils/py3.py @@ -14,7 +14,7 @@ # limitations under the License. # Libcloud Python 2.x and 3.x compatibility layer -# Some methods bellow are taken from Django PYK3 port which is licensed under 3 +# Some methods below are taken from Django PYK3 port which is licensed under 3 # clause BSD license # https://bitbucket.org/loewis/django-3k @@ -83,16 +83,33 @@ def b(s): else: raise TypeError("Invalid argument %r for b()" % (s,)) + def ensure_string(s): + if isinstance(s, str): + return s + elif isinstance(s, bytes): + return s.decode('utf-8') + else: + raise TypeError("Invalid argument %r for ensure_string()" % (s,)) + def byte(n): # assume n is a Latin-1 string of length 1 return ord(n) u = str + def bchr(s): + """Take an integer and make a 1-character byte string.""" + return bytes([s]) + def dictvalues(d): return list(d.values()) def tostring(node): return ET.tostring(node, encoding='unicode') + + def hexadigits(s): + # s needs to be a byte string. 
+ return [format(x, "x") for x in s] + else: import httplib # NOQA from StringIO import StringIO # NOQA @@ -125,13 +142,17 @@ def tostring(node): method_type = types.MethodType - b = bytes = str + b = bytes = ensure_string = str def byte(n): return n u = unicode + def bchr(s): + """Take an integer and make a 1-character byte string.""" + return chr(s) + def next(i): return i.next() @@ -146,6 +167,10 @@ def urlquote(s, safe='/'): s = s.encode('utf8') return _urlquote(s, safe) + def hexadigits(s): + # s needs to be a string. + return [x.encode("hex") for x in s] + if PY25: import posixpath From 415ae9e47578c2d8ccc5b9e38474362ac91763a5 Mon Sep 17 00:00:00 2001 From: Sebastien Goasguen Date: Fri, 30 May 2014 03:20:27 -0400 Subject: [PATCH 074/315] Add blank line after bullet list to fix build --- CHANGES.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGES.rst b/CHANGES.rst index 51cb59fca3..4ce3855b31 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -24,6 +24,7 @@ General * import_key_pair_from_string in the EC2 driver * publickey._to_md5_fingerprint * publickey.get_pubkey_ssh2_fingerprint + (GITHUB-301) [Csaba Hoch] From e42e878620931d0a921bdbe95ee64b7807b72617 Mon Sep 17 00:00:00 2001 From: Sebastien Goasguen Date: Fri, 30 May 2014 11:05:00 +0200 Subject: [PATCH 075/315] Fix docstrings in ex_delete_floating_ip OpenStack driver --- libcloud/compute/drivers/openstack.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libcloud/compute/drivers/openstack.py b/libcloud/compute/drivers/openstack.py index 5a85201a60..e2c505759a 100644 --- a/libcloud/compute/drivers/openstack.py +++ b/libcloud/compute/drivers/openstack.py @@ -2217,7 +2217,7 @@ def ex_delete_floating_ip(self, ip): Delete specified floating IP :param ip: floating IP to remove - :type ip::class:`OpenStack_1_1_FloatingIpAddress` + :type ip: :class:`OpenStack_1_1_FloatingIpAddress` :rtype: ``bool`` """ From dff5c5314790968fa1bdebf66d8b83dae4e64201 Mon Sep 17 00:00:00 2001 From: amastracci 
Date: Sun, 1 Jun 2014 12:22:28 -0700 Subject: [PATCH 076/315] [LIBCLOUD-569] Removing the overloaded constructor from the rimuhosting base class. Rimu uses gzip'ed responses but the overloaded constructor was not calling the decompress function resuuulting in a MalformedResponseError. All functionality in overloaded constructor already existed in the base clase. Closes #303 Signed-off-by: Tomaz Muraus --- CHANGES.rst | 5 +++++ libcloud/compute/drivers/rimuhosting.py | 13 +++---------- 2 files changed, 8 insertions(+), 10 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index 4ce3855b31..2870bc7127 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -193,6 +193,11 @@ Compute (GITHUB-301) [Csaba Hoch] +- Fix bug in RimuHosting driver which caused driver not to work when the + provider returned compressed (gzip'ed) response. + (LIBCLOUD-569, GITHUB-303) + [amastracci] + Storage ~~~~~~~ diff --git a/libcloud/compute/drivers/rimuhosting.py b/libcloud/compute/drivers/rimuhosting.py index edb344b27e..4c6b0e126c 100644 --- a/libcloud/compute/drivers/rimuhosting.py +++ b/libcloud/compute/drivers/rimuhosting.py @@ -43,16 +43,9 @@ def __repr__(self): class RimuHostingResponse(JsonResponse): - def __init__(self, response, connection): - self.body = response.read() - self.status = response.status - self.headers = dict(response.getheaders()) - self.error = response.reason - self.connection = connection - - if self.success(): - self.object = self.parse_body() - + """ + Response Class for RimuHosting driver + """ def success(self): if self.status == 403: raise InvalidCredsError() From ffb63c2af95d05888cdda8e0923a0f91636f15c8 Mon Sep 17 00:00:00 2001 From: Pedro Romano Date: Wed, 4 Jun 2014 15:46:47 +0100 Subject: [PATCH 077/315] Update CA_CERTS_PATH to look for updated Homebrew location on MacOS X. 
Closes #309 Signed-off-by: Tomaz Muraus --- CHANGES.rst | 7 ++++++- libcloud/security.py | 5 ++++- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index 2870bc7127..c3593592a9 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -28,10 +28,15 @@ General (GITHUB-301) [Csaba Hoch] +- Update CA_CERTS_PATH to also look for CA cert bundle which comes with + openssl Homebrew formula on OS x (/usr/local/etc/openssl/cert.pem). + (GITHUB-309) + [Pedro Romano] + Compute ~~~~~~~ -- Fix create_key_pair method which was not returning private key. +- Fix create_key_pair method which was not returning private key. (LIBCLOUD-566) [Sebastien Goasguen] diff --git a/libcloud/security.py b/libcloud/security.py index 8be810de32..81d7a51f67 100644 --- a/libcloud/security.py +++ b/libcloud/security.py @@ -42,7 +42,10 @@ # macports: curl-ca-bundle '/opt/local/share/curl/curl-ca-bundle.crt', - # homebrew: curl-ca-bundle + # homebrew: openssl + '/usr/local/etc/openssl/cert.pem', + + # homebrew: curl-ca-bundle (backward compatibility) '/usr/local/opt/curl-ca-bundle/share/ca-bundle.crt', ] From a0fa3da6c54ffdd33db26980371af6c6a60eb977 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Thu, 5 Jun 2014 21:49:31 +0200 Subject: [PATCH 078/315] Add "Third Party Drivers" section to the documentation. 
--- docs/index.rst | 1 + docs/third_party_drivers.rst | 43 ++++++++++++++++++++++++++++++++++++ 2 files changed, 44 insertions(+) create mode 100644 docs/third_party_drivers.rst diff --git a/docs/index.rst b/docs/index.rst index 66d4b9aed2..d0263293ae 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -31,6 +31,7 @@ Main getting_started supported_providers + third_party_drivers compute/index storage/index loadbalancer/index diff --git a/docs/third_party_drivers.rst b/docs/third_party_drivers.rst new file mode 100644 index 0000000000..f9191d1f9d --- /dev/null +++ b/docs/third_party_drivers.rst @@ -0,0 +1,43 @@ +Third Party Drivers +=================== + +Libcloud includes most of the drivers in it's core, but some providers and +developers for various reasons decide to release their driver as a separate +PyPi package. + +This page lists those third party drivers. For documentation and usage examples, +please refer to the third party driver documentation (if available). + +Keep in mind that those drivers are not party of the core and such we can't +guarantee for the quality of those drivers. 
+ +Compute +------- + ++-------------------+---------------------------------+--------------------------------------+ +| Provider | PyPi package | Source code | ++===================+=================================+======================================+ +| `StratusLab`_ | | `StratusLab/libcloud-drivers`_ | ++-------------------+---------------------------------+--------------------------------------+ +| `Snooze`_ | `stratuslab-libcloud-drivers`_ | `snooze-libcloud`_ | ++-------------------+---------------------------------+--------------------------------------+ + +DNS +---- + ++-------------------+--------------------------+--------------------------------------+ +| Provider | PyPi package | Source code | ++===================+==========================+======================================+ +| `DNSMadeEasy`_ | `libcloud-dnsmadeeasy`_ | `moses-palmer/libcloud-dnsmadeeasy`_ | ++-------------------+--------------------------+--------------------------------------+ + +.. _`StratusLab`: http://stratuslab.eu/ +.. _`Snooze`: http://snooze.inria.fr +.. _`snooze-libcloud`: https://github.com/msimonin/snooze-libcloud + +.. _`stratuslab-libcloud-drivers`: https://pypi.python.org/pypi/stratuslab-libcloud-drivers +.. _`StratusLab/libcloud-drivers`: https://github.com/StratusLab/libcloud-drivers + +.. _`DNSMadeEasy`: http://www.dnsmadeeasy.com/ +.. _`libcloud-dnsmadeeasy`: https://pypi.python.org/pypi/libcloud-dnsmadeeasy/1.0 +.. _`moses-palmer/libcloud-dnsmadeeasy`: https://github.com/moses-palmer/libcloud-dnsmadeeasy From f58ef3b891d02a0d6d1dbf378297f094cb5b876a Mon Sep 17 00:00:00 2001 From: doberloh Date: Tue, 3 Jun 2014 13:14:35 -0700 Subject: [PATCH 079/315] Removing ex_ from the front of vps_parameters to avoid overwrites of server memory values. 
Closes #308 Signed-off-by: Tomaz Muraus --- CHANGES.rst | 5 +++++ libcloud/compute/drivers/rimuhosting.py | 14 +++++++------- 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index c3593592a9..0935c46a50 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -203,6 +203,11 @@ Compute (LIBCLOUD-569, GITHUB-303) [amastracci] +- Fix issue with overwriting the server memory values in the RimuHosting + driver. + (GUTHUB-308) + [Dustin Oberloh] + Storage ~~~~~~~ diff --git a/libcloud/compute/drivers/rimuhosting.py b/libcloud/compute/drivers/rimuhosting.py index 4c6b0e126c..acde574336 100644 --- a/libcloud/compute/drivers/rimuhosting.py +++ b/libcloud/compute/drivers/rimuhosting.py @@ -267,9 +267,11 @@ def create_node(self, **kwargs): data = { 'instantiation_options': { - 'domain_name': name, 'distro': image.id + 'domain_name': name, + 'distro': image.id }, 'pricing_plan_code': size.id, + 'vps_parameters': {} } if 'ex_control_panel' in kwargs: @@ -302,22 +304,20 @@ def create_node(self, **kwargs): kwargs['ex_extra_ip_reason'] if 'ex_memory_mb' in kwargs: - if 'vps_parameters' not in data: - data['vps_parameters'] = {} data['vps_parameters']['memory_mb'] = kwargs['ex_memory_mb'] if 'ex_disk_space_mb' in kwargs: - if 'ex_vps_parameters' not in data: - data['vps_parameters'] = {} data['vps_parameters']['disk_space_mb'] = \ kwargs['ex_disk_space_mb'] if 'ex_disk_space_2_mb' in kwargs: - if 'vps_parameters' not in data: - data['vps_parameters'] = {} data['vps_parameters']['disk_space_2_mb'] =\ kwargs['ex_disk_space_2_mb'] + # Don't send empty 'vps_parameters' attribute + if not data['vps_parameters']: + del data['vps_parameters'] + res = self.connection.request( '/orders/new-vps', method='POST', From e02fec65d53ab77040fae2460246d2a16409215e Mon Sep 17 00:00:00 2001 From: Eric Johnson Date: Fri, 30 May 2014 12:33:47 +0000 Subject: [PATCH 080/315] update to allow simultaneous authorization for all supported google services Closes #302 
Signed-off-by: Tomaz Muraus --- CHANGES.rst | 5 ++++ libcloud/common/google.py | 37 +++++++++++++++++++---------- libcloud/compute/drivers/gce.py | 11 ++++++--- libcloud/dns/drivers/google.py | 16 ++++++------- libcloud/test/common/test_google.py | 17 +++++++------ libcloud/test/compute/test_gce.py | 3 +++ libcloud/test/dns/test_google.py | 3 +++ 7 files changed, 62 insertions(+), 30 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index 0935c46a50..f090c8b4fc 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -33,6 +33,11 @@ General (GITHUB-309) [Pedro Romano] +- Update Google drivers to allow simultaneous authornization for all the + supported Google Services. + (GITHUB-302) + [Eric Johnson] + Compute ~~~~~~~ diff --git a/libcloud/common/google.py b/libcloud/common/google.py index 9d8000ce34..725c3832aa 100644 --- a/libcloud/common/google.py +++ b/libcloud/common/google.py @@ -253,7 +253,7 @@ class GoogleBaseAuthConnection(ConnectionUserAndKey): host = 'accounts.google.com' auth_path = '/o/oauth2/auth' - def __init__(self, user_id, key, scope, + def __init__(self, user_id, key, scopes=None, redirect_uri='urn:ietf:wg:oauth:2.0:oob', login_hint=None, **kwargs): """ @@ -266,9 +266,9 @@ def __init__(self, user_id, key, scope, authentication. :type key: ``str`` - :param scope: A list of urls defining the scope of authentication + :param scopes: A list of urls defining the scope of authentication to grant. - :type scope: ``list`` + :type scopes: ``list`` :keyword redirect_uri: The Redirect URI for the authentication request. See Google OAUTH2 documentation for @@ -279,8 +279,9 @@ def __init__(self, user_id, key, scope, for Installed Application authentication. 
:type login_hint: ``str`` """ + scopes = scopes or [] - self.scope = " ".join(scope) + self.scopes = " ".join(scopes) self.redirect_uri = redirect_uri self.login_hint = login_hint @@ -329,7 +330,7 @@ def get_code(self): auth_params = {'response_type': 'code', 'client_id': self.user_id, 'redirect_uri': self.redirect_uri, - 'scope': self.scope, + 'scope': self.scopes, 'state': 'Libcloud Request'} if self.login_hint: auth_params['login_hint'] = self.login_hint @@ -426,7 +427,7 @@ def get_new_token(self): # Construct a claim set claim_set = {'iss': self.user_id, - 'scope': self.scope, + 'scope': self.scopes, 'aud': 'https://accounts.google.com/o/oauth2/token', 'exp': int(time.time()) + 3600, 'iat': int(time.time())} @@ -473,7 +474,7 @@ class GoogleBaseConnection(ConnectionUserAndKey, PollingConnection): timeout = 180 def __init__(self, user_id, key, auth_type=None, - credential_file=None, **kwargs): + credential_file=None, scopes=None, **kwargs): """ Determine authentication type, set up appropriate authentication connection and get initial authentication information. @@ -496,6 +497,10 @@ def __init__(self, user_id, key, auth_type=None, :keyword credential_file: Path to file for caching authentication information. :type credential_file: ``str`` + + :keyword scopes: List of OAuth2 scope URLs. The empty default sets + read/write access to Compute, Storage, and DNS. + :type scopes: ``list`` """ self.credential_file = credential_file or '~/.gce_libcloud_auth' @@ -506,16 +511,24 @@ def __init__(self, user_id, key, auth_type=None, auth_type = 'SA' else: auth_type = 'IA' - if 'scope' in kwargs: - self.scope = kwargs['scope'] - kwargs.pop('scope', None) + + # Default scopes to read/write for compute, storage, and dns. 
Can + # override this when calling get_driver() or setting in secrets.py + self.scopes = scopes + if not self.scopes: + self.scopes = [ + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/devstorage.full_control', + 'https://www.googleapis.com/auth/ndev.clouddns.readwrite', + ] self.token_info = self._get_token_info_from_file() + if auth_type == 'SA': self.auth_conn = GoogleServiceAcctAuthConnection( - user_id, key, self.scope, **kwargs) + user_id, key, self.scopes, **kwargs) elif auth_type == 'IA': self.auth_conn = GoogleInstalledAppAuthConnection( - user_id, key, self.scope, **kwargs) + user_id, key, self.scopes, **kwargs) else: raise GoogleAuthError('auth_type should be \'SA\' or \'IA\'') diff --git a/libcloud/compute/drivers/gce.py b/libcloud/compute/drivers/gce.py index b2b76639a5..a1cf75e16b 100644 --- a/libcloud/compute/drivers/gce.py +++ b/libcloud/compute/drivers/gce.py @@ -68,7 +68,6 @@ class GCEConnection(GoogleBaseConnection): def __init__(self, user_id, key, secure, auth_type=None, credential_file=None, project=None, **kwargs): - self.scope = ['https://www.googleapis.com/auth/compute'] super(GCEConnection, self).__init__(user_id, key, secure=secure, auth_type=auth_type, credential_file=credential_file, @@ -536,7 +535,7 @@ class GCENodeDriver(NodeDriver): } def __init__(self, user_id, key, datacenter=None, project=None, - auth_type=None, **kwargs): + auth_type=None, scopes=None, **kwargs): """ :param user_id: The email address (for service accounts) or Client ID (for installed apps) to be used for authentication. @@ -559,9 +558,14 @@ def __init__(self, user_id, key, datacenter=None, project=None, If not supplied, auth_type will be guessed based on value of user_id. :type auth_type: ``str`` + + :keyword scopes: List of authorization URLs. Default is empty and + grants read/write to Compute, Storage, DNS. 
+ :type scopes: ``list`` """ self.auth_type = auth_type self.project = project + self.scopes = scopes if not self.project: raise ValueError('Project name must be specified using ' '"project" keyword.') @@ -2518,7 +2522,8 @@ def ex_copy_image(self, name, url, description=None): def _ex_connection_class_kwargs(self): return {'auth_type': self.auth_type, - 'project': self.project} + 'project': self.project, + 'scopes': self.scopes} def _catch_error(self, ignore_errors=False): """ diff --git a/libcloud/dns/drivers/google.py b/libcloud/dns/drivers/google.py index 44a1231db5..ca3e5f06da 100644 --- a/libcloud/dns/drivers/google.py +++ b/libcloud/dns/drivers/google.py @@ -36,13 +36,10 @@ class GoogleDNSConnection(GoogleBaseConnection): responseCls = GoogleDNSResponse def __init__(self, user_id, key, secure, auth_type=None, - credential_file=None, project=None): - self.scope = [ - 'https://www.googleapis.com/auth/ndev.clouddns.readwrite' - ] + credential_file=None, project=None, **kwargs): super(GoogleDNSConnection, self).\ __init__(user_id, key, secure=secure, auth_type=auth_type, - credential_file=credential_file) + credential_file=credential_file, **kwargs) self.request_path = '/dns/%s/projects/%s' % (API_VERSION, project) @@ -65,13 +62,15 @@ class GoogleDNSDriver(DNSDriver): RecordType.TXT: 'TXT', } - def __init__(self, user_id, key, project=None, auth_type=None): + def __init__(self, user_id, key, project=None, auth_type=None, scopes=None, + **kwargs): self.auth_type = auth_type self.project = project + self.scopes = scopes if not self.project: raise ValueError('Project name must be specified using ' '"project" keyword.') - super(GoogleDNSDriver, self).__init__(user_id, key) + super(GoogleDNSDriver, self).__init__(user_id, key, scopes, **kwargs) def iterate_zones(self): """ @@ -304,7 +303,8 @@ def _get_data(self, rtype, last_key, **kwargs): def _ex_connection_class_kwargs(self): return {'auth_type': self.auth_type, - 'project': self.project} + 'project': self.project, 
+ 'scopes': self.scopes} def _to_zones(self, response): zones = [] diff --git a/libcloud/test/common/test_google.py b/libcloud/test/common/test_google.py index ff082c3579..2e9c701dd8 100644 --- a/libcloud/test/common/test_google.py +++ b/libcloud/test/common/test_google.py @@ -56,11 +56,14 @@ class GoogleBaseAuthConnectionTest(LibcloudTestCase): def setUp(self): GoogleBaseAuthConnection.conn_classes = (GoogleAuthMockHttp, GoogleAuthMockHttp) - self.mock_scope = ['https://www.googleapis.com/auth/foo'] - kwargs = {'scope': self.mock_scope} + self.mock_scopes = ['foo', 'bar'] + kwargs = {'scopes': self.mock_scopes} self.conn = GoogleInstalledAppAuthConnection(*GCE_PARAMS, **kwargs) + def test_scopes(self): + self.assertEqual(self.conn.scopes, 'foo bar') + def test_add_default_headers(self): old_headers = {} expected_headers = { @@ -88,8 +91,8 @@ class GoogleInstalledAppAuthConnectionTest(LibcloudTestCase): def setUp(self): GoogleInstalledAppAuthConnection.conn_classes = (GoogleAuthMockHttp, GoogleAuthMockHttp) - self.mock_scope = ['https://www.googleapis.com/auth/foo'] - kwargs = {'scope': self.mock_scope} + self.mock_scopes = ['https://www.googleapis.com/auth/foo'] + kwargs = {'scopes': self.mock_scopes} self.conn = GoogleInstalledAppAuthConnection(*GCE_PARAMS, **kwargs) @@ -128,15 +131,15 @@ class GoogleBaseConnectionTest(LibcloudTestCase): def setUp(self): GoogleBaseAuthConnection.conn_classes = (GoogleAuthMockHttp, GoogleAuthMockHttp) - self.mock_scope = ['https://www.googleapis.com/auth/foo'] - kwargs = {'scope': self.mock_scope, 'auth_type': 'IA'} + self.mock_scopes = ['https://www.googleapis.com/auth/foo'] + kwargs = {'scopes': self.mock_scopes, 'auth_type': 'IA'} self.conn = GoogleBaseConnection(*GCE_PARAMS, **kwargs) def test_auth_type(self): self.assertRaises(GoogleAuthError, GoogleBaseConnection, *GCE_PARAMS, **{'auth_type': 'XX'}) - kwargs = {'scope': self.mock_scope} + kwargs = {'scopes': self.mock_scopes} if SHA256: kwargs['auth_type'] = 'SA' diff --git 
a/libcloud/test/compute/test_gce.py b/libcloud/test/compute/test_gce.py index 91fd9d7c9b..80110d0176 100644 --- a/libcloud/test/compute/test_gce.py +++ b/libcloud/test/compute/test_gce.py @@ -64,6 +64,9 @@ def setUp(self): kwargs['datacenter'] = self.datacenter self.driver = GCENodeDriver(*GCE_PARAMS, **kwargs) + def test_default_scopes(self): + self.assertEqual(self.driver.scopes, None) + def test_timestamp_to_datetime(self): timestamp1 = '2013-06-26T10:05:19.340-07:00' datetime1 = datetime.datetime(2013, 6, 26, 17, 5, 19) diff --git a/libcloud/test/dns/test_google.py b/libcloud/test/dns/test_google.py index e6d0595bac..a0743005e0 100644 --- a/libcloud/test/dns/test_google.py +++ b/libcloud/test/dns/test_google.py @@ -46,6 +46,9 @@ def setUp(self): kwargs['auth_type'] = 'IA' self.driver = GoogleDNSDriver(*DNS_PARAMS_GOOGLE, **kwargs) + def test_default_scopes(self): + self.assertEqual(self.driver.scopes, None) + def test_list_zones(self): zones = self.driver.list_zones() self.assertEqual(len(zones), 2) From 2a42270d9548e03a2f98f8e178073fed9101edf1 Mon Sep 17 00:00:00 2001 From: Zak Estrada Date: Mon, 9 Jun 2014 15:44:29 -0500 Subject: [PATCH 081/315] [LIBCLOUD-576] SWIFT: removed superfulous call to upper() on service_region Closes #311 Signed-off-by: Tomaz Muraus --- CHANGES.rst | 6 ++++++ libcloud/storage/drivers/cloudfiles.py | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index f090c8b4fc..324590d17a 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -225,6 +225,12 @@ Storage (LIBCLOUD-552) [Tomaz MUraus] +- Fix a bug in the OpenStack Swift driver which prevented the driver to work + with installations where region names in the service catalog werent upper + case. 
+ (LIBCLOUD-576, GITHUB-311) + [Zak Estrada] + Load Balancer ~~~~~~~~~~~~~ diff --git a/libcloud/storage/drivers/cloudfiles.py b/libcloud/storage/drivers/cloudfiles.py index 212d46ce7b..bce89a865c 100644 --- a/libcloud/storage/drivers/cloudfiles.py +++ b/libcloud/storage/drivers/cloudfiles.py @@ -124,7 +124,7 @@ def __init__(self, user_id, key, secure=True, **kwargs): self._service_name = self._ex_force_service_name or 'swift' if self._ex_force_service_region: - self._service_region = self._ex_force_service_region.upper() + self._service_region = self._ex_force_service_region else: self._service_region = None From 93c38981d91c23640a91ece503fb63dc7f331b82 Mon Sep 17 00:00:00 2001 From: Zak Estrada Date: Mon, 9 Jun 2014 15:37:57 -0500 Subject: [PATCH 082/315] [LIBCLOUD-575] ex_all_tenants option for list_nodes() in OpenStack Closes #312 Signed-off-by: Tomaz Muraus --- CHANGES.rst | 4 ++++ libcloud/compute/drivers/openstack.py | 15 +++++++++++++-- 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index 324590d17a..250430def7 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -213,6 +213,10 @@ Compute (GUTHUB-308) [Dustin Oberloh] +- Add ex_all_tenants argument to the list_nodes method in the OpenStack driver. + (GITHUB-312) + [LIBCLOUD-575, Zak Estrada] + Storage ~~~~~~~ diff --git a/libcloud/compute/drivers/openstack.py b/libcloud/compute/drivers/openstack.py index e2c505759a..fd116e8d01 100644 --- a/libcloud/compute/drivers/openstack.py +++ b/libcloud/compute/drivers/openstack.py @@ -218,9 +218,20 @@ def destroy_node(self, node): def reboot_node(self, node): return self._reboot_node(node, reboot_type='HARD') - def list_nodes(self): + def list_nodes(self, ex_all_tenants=False): + """ + List the nodes in a tenant + + :param ex_all_tenants: List nodes for all the tenants. Note: Your user + must have admin privileges for this + functionality to work. 
+ :type ex_all_tenants: ``bool`` + """ + params = {} + if ex_all_tenants: + params = {'all_tenants': 1} return self._to_nodes( - self.connection.request('/servers/detail').object) + self.connection.request('/servers/detail', params=params).object) def create_volume(self, size, name, location=None, snapshot=None): if snapshot: From 33ca3b2e6209aa5347b3f820b35f06327a98ac5d Mon Sep 17 00:00:00 2001 From: Roeland Kuipers Date: Tue, 10 Jun 2014 16:58:31 +0200 Subject: [PATCH 083/315] Add support for network creation on Cloudstack advanced zones Closes #316 Signed-off-by: Tomaz Muraus --- .gitignore | 3 + CHANGES.rst | 6 + libcloud/compute/drivers/cloudstack.py | 175 ++++++++++++++++++ .../cloudstack/createNetwork_default.json | 1 + .../cloudstack/deleteNetwork_default.json | 1 + .../listNetworkOfferings_default.json | 1 + .../queryAsyncJobResult_deleteNetwork.json | 1 + libcloud/test/compute/test_cloudstack.py | 57 ++++++ 8 files changed, 245 insertions(+) create mode 100644 libcloud/test/compute/fixtures/cloudstack/createNetwork_default.json create mode 100644 libcloud/test/compute/fixtures/cloudstack/deleteNetwork_default.json create mode 100644 libcloud/test/compute/fixtures/cloudstack/listNetworkOfferings_default.json create mode 100644 libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_deleteNetwork.json diff --git a/.gitignore b/.gitignore index ce1929343e..dc61a25d46 100644 --- a/.gitignore +++ b/.gitignore @@ -15,3 +15,6 @@ dist/*apache-libcloud* dist/*apache_libcloud* _build/ apache_libcloud.egg-info/ +.project +.pydevproject +.settings diff --git a/CHANGES.rst b/CHANGES.rst index 250430def7..8a4e152b6c 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -217,6 +217,12 @@ Compute (GITHUB-312) [LIBCLOUD-575, Zak Estrada] +- Add support for network management for advanced zones + (ex_list_network_offerings, ex_create_network, ex_delete_network) in the + CloudStack driver. 
+ (GITHUB-316) + [Roeland Kuipers] + Storage ~~~~~~~ diff --git a/libcloud/compute/drivers/cloudstack.py b/libcloud/compute/drivers/cloudstack.py index 2b69d84e44..921b68ef50 100644 --- a/libcloud/compute/drivers/cloudstack.py +++ b/libcloud/compute/drivers/cloudstack.py @@ -99,6 +99,22 @@ 'can_use_for_deploy': { 'key_name': 'canusefordeploy', 'transform_func': str + }, + 'gateway': { + 'key_name': 'gateway', + 'transform_func': str + }, + 'netmask': { + 'key_name': 'netmask', + 'transform_func': str + }, + 'vpc_id': { + 'key_name': 'vpcid', + 'transform_func': str + }, + 'project_id': { + 'key_name': 'projectid', + 'transform_func': str } }, 'node': { @@ -453,6 +469,31 @@ def __repr__(self): self.networkofferingid, self.zoneid, self.driver.name)) +class CloudStackNetworkOffering(object): + """ + Class representing a CloudStack Network Offering. + """ + + def __init__(self, name, display_text, guest_ip_type, id, + service_offering_id, for_vpc, driver, extra=None): + self.display_text = display_text + self.name = name + self.guest_ip_type = guest_ip_type + self.id = id + self.service_offering_id = service_offering_id + self.for_vpc = for_vpc + self.driver = driver + self.extra = extra or {} + + def __repr__(self): + return (('') + % (self.id, self.name, self.display_text, + self.guest_ip_type, self.service_offering_id, self.for_vpc, + self.driver.name)) + + class CloudStackProject(object): """ Class representing a CloudStack Project. 
@@ -880,6 +921,140 @@ def ex_list_networks(self): return networks + def ex_list_network_offerings(self): + """ + List the available network offerings + + :rtype ``list`` of :class:`CloudStackNetworkOffering` + """ + res = self._sync_request(command='listNetworkOfferings', + method='GET') + netoffers = res.get('networkoffering', []) + + networkofferings = [] + + for netoffer in netoffers: + networkofferings.append(CloudStackNetworkOffering( + netoffer['name'], + netoffer['displaytext'], + netoffer['guestiptype'], + netoffer['id'], + netoffer['serviceofferingid'], + netoffer['forvpc'], + self)) + + return networkofferings + + def ex_create_network(self, display_text, name, network_offering, + location, gateway=None, netmask=None, + network_domain=None, vpc_id=None, project_id=None): + """ + + Creates a Network, only available in advanced zones. + + :param display_text: the display text of the network + :type display_text: ``str`` + + :param name: the name of the network + :type name: ``str`` + + :param network_offering: the network offering id + :type network_offering: :class:'CloudStackNetworkOffering` + + :param location: Zone + :type location: :class:`NodeLocation` + + :param gateway: Optional, the Gateway of this network + :type gateway: ``str`` + + :param netmask: Optional, the netmask of this network + :type netmask: ``str`` + + :param network_domain: Optional, the DNS domain of the network + :type network_domain: ``str`` + + :param vpc_id: Optional, the VPC id the network belongs to + :type vpc_id: ``str`` + + :param project_id: Optional, the project id the networks belongs to + :type project_id: ``str`` + + :rtype: :class:`CloudStackNetwork` + + """ + + extra_map = RESOURCE_EXTRA_ATTRIBUTES_MAP['network'] + + args = { + 'displaytext': display_text, + 'name': name, + 'networkofferingid': network_offering.id, + 'zoneid': location.id, + } + + if gateway is not None: + args['gateway'] = gateway + + if netmask is not None: + args['netmask'] = netmask + + if 
network_domain is not None: + args['networkdomain'] = network_domain + + if vpc_id is not None: + args['vpcid'] = vpc_id + + if project_id is not None: + args['projectid'] = project_id + + """ Cloudstack allows for duplicate network names, + this should be handled in the code leveraging libcloud + As there could be use cases for duplicate names. + e.g. management from ROOT level""" + + # for net in self.ex_list_networks(): + # if name == net.name: + # raise LibcloudError('This network name already exists') + + result = self._sync_request(command='createNetwork', + params=args, + method='GET') + + result = result['network'] + extra = self._get_extra_dict(result, extra_map) + + network = CloudStackNetwork(display_text, + name, + network_offering.id, + result['id'], + location.id, + self, + extra=extra) + + return network + + def ex_delete_network(self, network, force=None): + """ + + Deletes a Network, only available in advanced zones. + + :param network: The network + :type network: :class: 'CloudStackNetwork' + + :param force: Force deletion of the network? 
+ :type force: ``bool`` + + :rtype: ``bool`` + + """ + + args = {'id': network.id, 'forced': force} + + self._async_request(command='deleteNetwork', + params=args, + method='GET') + return True + def ex_list_projects(self): """ List the available projects diff --git a/libcloud/test/compute/fixtures/cloudstack/createNetwork_default.json b/libcloud/test/compute/fixtures/cloudstack/createNetwork_default.json new file mode 100644 index 0000000000..9b2d904819 --- /dev/null +++ b/libcloud/test/compute/fixtures/cloudstack/createNetwork_default.json @@ -0,0 +1 @@ +{ "createnetworkresponse" : { "network" : {"id":"a804d341-996e-4d9a-b2b0-226c648dc6e3","name":"test","displaytext":"test","broadcastdomaintype":"Lswitch","traffictype":"Guest","gateway":"10.1.1.1","netmask":"255.255.255.0","cidr":"10.1.1.0/24","zoneid":"2","zonename":"BETA-SBP-DC-1","networkofferingid":"c348cabe-0208-49e0-91ad-32b88c55fd8c","networkofferingname":"SourceNatNiciraNvpNetwork","networkofferingdisplaytext":"Offering for a Nicira Nvp isolated network with SourceNat","networkofferingconservemode":true,"networkofferingavailability":"Optional","issystem":false,"state":"Allocated","related":"a804d341-996e-4d9a-b2b0-226c648dc6e3","dns1":"8.8.8.8","dns2":"8.8.8.4","type":"Isolated","acltype":"Account","account":"rkuipers_admin","projectid":"d5f1209d-3a28-4dfb-8cc1-884e5d5e1d56","domainid":"4b6e626c-9d50-4480-bf77-daae632c7ffd","domain":"rkuipers","service":[{"name":"Firewall","capability":[{"name":"SupportedProtocols","value":"tcp,udp,icmp","canchooseservicecapability":false},{"name":"SupportedTrafficDirection","value":"ingress, egress","canchooseservicecapability":false},{"name":"MultipleIps","value":"true","canchooseservicecapability":false},{"name":"SupportedEgressProtocols","value":"tcp,udp,icmp, all","canchooseservicecapability":false},{"name":"TrafficStatistics","value":"per public 
ip","canchooseservicecapability":false}]},{"name":"StaticNat"},{"name":"Lb","capability":[{"name":"LbSchemes","value":"Public","canchooseservicecapability":false},{"name":"SupportedStickinessMethods","value":"[{\"methodname\":\"LbCookie\",\"paramlist\":[{\"paramname\":\"cookie-name\",\"required\":false,\"isflag\":false,\"description\":\" \"},{\"paramname\":\"mode\",\"required\":false,\"isflag\":false,\"description\":\" \"},{\"paramname\":\"nocache\",\"required\":false,\"isflag\":true,\"description\":\" \"},{\"paramname\":\"indirect\",\"required\":false,\"isflag\":true,\"description\":\" \"},{\"paramname\":\"postonly\",\"required\":false,\"isflag\":true,\"description\":\" \"},{\"paramname\":\"domain\",\"required\":false,\"isflag\":false,\"description\":\" \"}],\"description\":\"This is loadbalancer cookie based stickiness method.\"},{\"methodname\":\"AppCookie\",\"paramlist\":[{\"paramname\":\"cookie-name\",\"required\":false,\"isflag\":false,\"description\":\" \"},{\"paramname\":\"length\",\"required\":false,\"isflag\":false,\"description\":\" \"},{\"paramname\":\"holdtime\",\"required\":false,\"isflag\":false,\"description\":\" \"},{\"paramname\":\"request-learn\",\"required\":false,\"isflag\":true,\"description\":\" \"},{\"paramname\":\"prefix\",\"required\":false,\"isflag\":true,\"description\":\" \"},{\"paramname\":\"mode\",\"required\":false,\"isflag\":false,\"description\":\" \"}],\"description\":\"This is App session based sticky method. Define session stickiness on an existing application cookie. 
It can be used only for a specific http traffic\"},{\"methodname\":\"SourceBased\",\"paramlist\":[{\"paramname\":\"tablesize\",\"required\":false,\"isflag\":false,\"description\":\" \"},{\"paramname\":\"expire\",\"required\":false,\"isflag\":false,\"description\":\" \"}],\"description\":\"This is source based Stickiness method, it can be used for any type of protocol.\"}]","canchooseservicecapability":false},{"name":"SupportedLBIsolation","value":"dedicated","canchooseservicecapability":false},{"name":"SupportedLbAlgorithms","value":"roundrobin,leastconn,source","canchooseservicecapability":false},{"name":"SupportedProtocols","value":"tcp, udp","canchooseservicecapability":false}]},{"name":"SourceNat","capability":[{"name":"RedundantRouter","value":"true","canchooseservicecapability":false},{"name":"SupportedSourceNatTypes","value":"peraccount","canchooseservicecapability":false}]},{"name":"Dns","capability":[{"name":"AllowDnsSuffixModification","value":"true","canchooseservicecapability":false}]},{"name":"Connectivity"},{"name":"Vpn","capability":[{"name":"SupportedVpnTypes","value":"pptp,l2tp,ipsec","canchooseservicecapability":false},{"name":"VpnTypes","value":"removeaccessvpn","canchooseservicecapability":false}]},{"name":"Dhcp","capability":[{"name":"DhcpAccrossMultipleSubnets","value":"true","canchooseservicecapability":false}]},{"name":"UserData"},{"name":"PortForwarding"}],"networkdomain":"rkuipers.local","physicalnetworkid":"e48527a6-6882-4c5f-bce9-c02ecd5ef8c1","restartrequired":false,"specifyipranges":false,"vpcid":"22e8388c-21bf-4b84-8f20-e92a7f550898","canusefordeploy":true,"ispersistent":false,"tags":[],"displaynetwork":true} } } \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/cloudstack/deleteNetwork_default.json b/libcloud/test/compute/fixtures/cloudstack/deleteNetwork_default.json new file mode 100644 index 0000000000..ad161b20ac --- /dev/null +++ b/libcloud/test/compute/fixtures/cloudstack/deleteNetwork_default.json @@ -0,0 
+1 @@ +{ "deletenetworkresponse" : {"jobid":"deleteNetwork"} } \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/cloudstack/listNetworkOfferings_default.json b/libcloud/test/compute/fixtures/cloudstack/listNetworkOfferings_default.json new file mode 100644 index 0000000000..35d8bcc658 --- /dev/null +++ b/libcloud/test/compute/fixtures/cloudstack/listNetworkOfferings_default.json @@ -0,0 +1 @@ +{ "listnetworkofferingsresponse" : { "count":2 ,"networkoffering" : [ {"id":"c348cabe-0208-49e0-91ad-32b88c55fd8c","name":"SourceNatNiciraNvpNetwork","displaytext":"Offering for a Nicira Nvp isolated network with SourceNat","tags":"BETA-SBP-DC-1-pSTT","traffictype":"Guest","isdefault":true,"specifyvlan":false,"conservemode":true,"specifyipranges":false,"availability":"Optional","networkrate":-1,"state":"Enabled","guestiptype":"Isolated","serviceofferingid":"01f93707-3a35-44a6-84e9-ea767287a6b2","service":[{"name":"Firewall","provider":[{"name":"VirtualRouter"}]},{"name":"StaticNat","provider":[{"name":"VirtualRouter"}],"capability":[{"name":"ElasticIp","value":"false","canchooseservicecapability":false},{"name":"AssociatePublicIP","value":"false","canchooseservicecapability":false}]},{"name":"Lb","provider":[{"name":"VirtualRouter"}],"capability":[{"name":"SupportedLBIsolation","value":"dedicated","canchooseservicecapability":false},{"name":"ElasticLb","value":"false","canchooseservicecapability":false},{"name":"InlineMode","value":"false","canchooseservicecapability":false}]},{"name":"SourceNat","provider":[{"name":"VirtualRouter"}],"capability":[{"name":"SupportedSourceNatTypes","value":"peraccount","canchooseservicecapability":false},{"name":"RedundantRouter","value":"false","canchooseservicecapability":false}]},{"name":"Dns","provider":[{"name":"VirtualRouter"}]},{"name":"Connectivity","provider":[{"name":"NiciraNvp"}]},{"name":"Vpn","provider":[{"name":"VirtualRouter"}]},{"name":"Dhcp","provider":[{"name":"VirtualRouter"}]},{"name":"UserData","provid
er":[{"name":"VirtualRouter"}]},{"name":"PortForwarding","provider":[{"name":"VirtualRouter"}]}],"forvpc":false,"ispersistent":false,"egressdefaultpolicy":false}, {"id":"7c09e208-2af5-43d6-9f0b-53868ef788ea","name":"OAT offering for OAT purposes","displaytext":"OAT offering for OAT purposes","tags":"BETA-SBP-DC-1-pSTT","traffictype":"Guest","isdefault":false,"specifyvlan":false,"conservemode":true,"specifyipranges":false,"availability":"Optional","networkrate":-1,"state":"Enabled","guestiptype":"Isolated","serviceofferingid":"01f93707-3a35-44a6-84e9-ea767287a6b2","service":[{"name":"Firewall","provider":[{"name":"VirtualRouter"}]},{"name":"StaticNat","provider":[{"name":"VirtualRouter"}],"capability":[{"name":"ElasticIp","value":"false","canchooseservicecapability":false},{"name":"AssociatePublicIP","value":"false","canchooseservicecapability":false}]},{"name":"Lb","provider":[{"name":"VirtualRouter"}],"capability":[{"name":"SupportedLBIsolation","value":"dedicated","canchooseservicecapability":false},{"name":"ElasticLb","value":"false","canchooseservicecapability":false},{"name":"InlineMode","value":"false","canchooseservicecapability":false}]},{"name":"SourceNat","provider":[{"name":"VirtualRouter"}],"capability":[{"name":"SupportedSourceNatTypes","value":"peraccount","canchooseservicecapability":false},{"name":"RedundantRouter","value":"false","canchooseservicecapability":false}]},{"name":"Dns","provider":[{"name":"VirtualRouter"}]},{"name":"Connectivity","provider":[{"name":"NiciraNvp"}]},{"name":"Vpn","provider":[{"name":"VirtualRouter"}]},{"name":"Dhcp","provider":[{"name":"VirtualRouter"}]},{"name":"UserData","provider":[{"name":"VirtualRouter"}]},{"name":"PortForwarding","provider":[{"name":"VirtualRouter"}]}],"forvpc":false,"ispersistent":false,"egressdefaultpolicy":true,"maxconnections":4096} ] } } diff --git a/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_deleteNetwork.json 
b/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_deleteNetwork.json new file mode 100644 index 0000000000..19494c3595 --- /dev/null +++ b/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_deleteNetwork.json @@ -0,0 +1 @@ +{ "queryasyncjobresultresponse" : {"accountid":"02c9bf08-6f36-44b1-a57f-df0708f90de4","userid":"6ef2b921-4ecf-4651-8188-f9868db73e73","cmd":"org.apache.cloudstack.api.command.user.network.DeleteNetworkCmd","jobstatus":1,"jobprocstatus":0,"jobresultcode":0,"jobresulttype":"object","jobresult":{"success":true},"created":"2014-06-11T10:09:00+0200","jobid":"65789636-d2c8-484c-9d13-47ad3de384ed"} } \ No newline at end of file diff --git a/libcloud/test/compute/test_cloudstack.py b/libcloud/test/compute/test_cloudstack.py index b90c9053b9..091d98b0ee 100644 --- a/libcloud/test/compute/test_cloudstack.py +++ b/libcloud/test/compute/test_cloudstack.py @@ -196,6 +196,63 @@ def test_ex_list_networks(self): fixture_networks[i]['networkofferingid']) self.assertEqual(network.zoneid, fixture_networks[i]['zoneid']) + def test_ex_list_network_offerings(self): + _, fixture = CloudStackMockHttp()._load_fixture( + 'listNetworkOfferings_default.json') + fixture_networkoffers = \ + fixture['listnetworkofferingsresponse']['networkoffering'] + + networkoffers = self.driver.ex_list_network_offerings() + + for i, networkoffer in enumerate(networkoffers): + self.assertEqual(networkoffer.id, fixture_networkoffers[i]['id']) + self.assertEqual(networkoffer.name, + fixture_networkoffers[i]['name']) + self.assertEqual(networkoffer.display_text, + fixture_networkoffers[i]['displaytext']) + self.assertEqual(networkoffer.for_vpc, + fixture_networkoffers[i]['forvpc']) + self.assertEqual(networkoffer.guest_ip_type, + fixture_networkoffers[i]['guestiptype']) + self.assertEqual(networkoffer.service_offering_id, + fixture_networkoffers[i]['serviceofferingid']) + + def test_ex_create_network(self): + _, fixture = CloudStackMockHttp()._load_fixture( + 
'createNetwork_default.json') + + fixture_network = fixture['createnetworkresponse']['network'] + + netoffer = self.driver.ex_list_network_offerings()[0] + location = self.driver.list_locations()[0] + network = self.driver.ex_create_network(display_text='test', + name='test', + network_offering=netoffer, + location=location, + gateway='10.1.1.1', + netmask='255.255.255.0', + network_domain='cloud.local', + vpc_id="2", + project_id="2") + + self.assertEqual(network.name, fixture_network['name']) + self.assertEqual(network.displaytext, fixture_network['displaytext']) + self.assertEqual(network.id, fixture_network['id']) + self.assertEqual(network.extra['gateway'], fixture_network['gateway']) + self.assertEqual(network.extra['netmask'], fixture_network['netmask']) + self.assertEqual(network.networkofferingid, + fixture_network['networkofferingid']) + self.assertEqual(network.extra['vpc_id'], fixture_network['vpcid']) + self.assertEqual(network.extra['project_id'], + fixture_network['projectid']) + + def test_ex_delete_network(self): + + network = self.driver.ex_list_networks()[0] + + result = self.driver.ex_delete_network(network=network) + self.assertTrue(result) + def test_ex_list_projects(self): _, fixture = CloudStackMockHttp()._load_fixture( 'listProjects_default.json') From f8c1a1646dccb885fda10c6de29fad0585802bd1 Mon Sep 17 00:00:00 2001 From: Csaba Hoch Date: Wed, 4 Jun 2014 13:37:02 +0200 Subject: [PATCH 084/315] Fix floating IP initialization The OpenStack_1_1_FloatingIpAddress constructor was called incorrectly: "self" was passed as "pool" instead of "driver". 
Closes #310 Signed-off-by: Tomaz Muraus --- libcloud/compute/drivers/openstack.py | 26 ++++++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/libcloud/compute/drivers/openstack.py b/libcloud/compute/drivers/openstack.py index fd116e8d01..17884797d2 100644 --- a/libcloud/compute/drivers/openstack.py +++ b/libcloud/compute/drivers/openstack.py @@ -2184,8 +2184,11 @@ def _to_floating_ips(self, obj): return [self._to_floating_ip(ip) for ip in ip_elements] def _to_floating_ip(self, obj): - return OpenStack_1_1_FloatingIpAddress(obj['id'], obj['ip'], self, - obj['instance_id']) + return OpenStack_1_1_FloatingIpAddress(id=obj['id'], + ip_address=obj['ip'], + pool=None, + node_id=obj['instance_id'], + driver=self) def ex_list_floating_ips(self): """ @@ -2221,7 +2224,11 @@ def ex_create_floating_ip(self): data = resp.object['floating_ip'] id = data['id'] ip_address = data['ip'] - return OpenStack_1_1_FloatingIpAddress(id, ip_address, self) + return OpenStack_1_1_FloatingIpAddress(id=id, + ip_address=ip_address, + pool=None, + node_id=None, + driver=self) def ex_delete_floating_ip(self, ip): """ @@ -2337,8 +2344,11 @@ def _to_floating_ips(self, obj): return [self._to_floating_ip(ip) for ip in ip_elements] def _to_floating_ip(self, obj): - return OpenStack_1_1_FloatingIpAddress(obj['id'], obj['ip'], self, - obj['instance_id']) + return OpenStack_1_1_FloatingIpAddress(id=obj['id'], + ip_address=obj['ip'], + pool=self, + node_id=obj['instance_id'], + driver=self.connection.driver) def get_floating_ip(self, ip): """ @@ -2364,7 +2374,11 @@ def create_floating_ip(self): data = resp.object['floating_ip'] id = data['id'] ip_address = data['ip'] - return OpenStack_1_1_FloatingIpAddress(id, ip_address, self) + return OpenStack_1_1_FloatingIpAddress(id=id, + ip_address=ip_address, + pool=self, + node_id=None, + driver=self.connection.driver) def delete_floating_ip(self, ip): """ From fcd353d51c9929e4849de47a26a2ab603d8f7216 Mon Sep 17 00:00:00 2001 
From: Lior Goikhburg Date: Thu, 5 Jun 2014 21:28:02 +0400 Subject: [PATCH 085/315] [LIBCLOUD-574] Add methods for routes and route table management to the EC2 driver. Closes #313 Signed-off-by: Tomaz Muraus --- CHANGES.rst | 10 +- libcloud/compute/drivers/ec2.py | 546 ++++++++++++++++++++++++++++++++ 2 files changed, 555 insertions(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index 8a4e152b6c..0c8b3064bf 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -218,11 +218,19 @@ Compute [LIBCLOUD-575, Zak Estrada] - Add support for network management for advanced zones - (ex_list_network_offerings, ex_create_network, ex_delete_network) in the + (ex_list_network_offerings, ex_create_network, ex_delete_network) in the CloudStack driver. (GITHUB-316) [Roeland Kuipers] +- Add extension methods for routes and route table management to the EC2 + driver (ex_list_route_tables, ex_create_route_table, ex_delete_route_table, + ex_associate_route_table, ex_dissociate_route_table, + ex_replace_route_table_association, ex_create_route, ex_delete_route, + ex_replace_route) + (LIBCLOUD-574, GITHUB-313) + [Lior Goikhburg] + Storage ~~~~~~~ diff --git a/libcloud/compute/drivers/ec2.py b/libcloud/compute/drivers/ec2.py index 5f9fb9bcca..9128c66276 100644 --- a/libcloud/compute/drivers/ec2.py +++ b/libcloud/compute/drivers/ec2.py @@ -68,6 +68,9 @@ 'EC2Network', 'EC2NetworkSubnet', 'EC2NetworkInterface', + 'EC2RouteTable', + 'EC2Route', + 'EC2SubnetAssociation', 'ExEC2AvailabilityZone', 'IdempotentParamError' @@ -1495,6 +1498,12 @@ 'xpath': 'attachmentSet/item/deleteOnTermination', 'transform_func': str } + }, + 'route_table': { + 'vpc_id': { + 'xpath': 'vpcId', + 'transform_func': str + } } } @@ -1730,6 +1739,124 @@ def __repr__(self): return (('') % (self.id)) +class EC2RouteTable(object): + """ + Class which stores information about VPC Route Tables. + + Note: This class is VPC specific. 
+ """ + + def __init__(self, id, routes, subnet_associations, + propagating_gateway_ids, extra=None): + """ + :param id: The ID of the route table. + :type id: ``str`` + + :param routes: A list of routes in the route table. + :type routes: ``list`` of :class:`EC2Route` + + :param subnet_associations: A list of associations between the + route table and one or more subnets. + :type subnet_associations: ``list`` of + :class:`EC2SubnetAssociation` + + :param propagating_gateway_ids: The list of IDs of any virtual + private gateways propagating the + routes. + :type propagating_gateway_ids: ``list`` + """ + + self.id = id + self.routes = routes + self.subnet_associations = subnet_associations + self.propagating_gateway_ids = propagating_gateway_ids + self.extra = extra or {} + + def __repr__(self): + return (('') % (self.id)) + + +class EC2Route(object): + """ + Class which stores information about a Route. + + Note: This class is VPC specific. + """ + + def __init__(self, cidr, gateway_id, instance_id, owner_id, + interface_id, state, origin, vpc_peering_connection_id): + """ + :param cidr: The CIDR block used for the destination match. + :type cidr: ``str`` + + :param gateway_id: The ID of a gateway attached to the VPC. + :type gateway_id: ``str`` + + :param instance_id: The ID of a NAT instance in the VPC. + :type instance_id: ``str`` + + :param owner_id: The AWS account ID of the owner of the instance. + :type owner_id: ``str`` + + :param interface_id: The ID of the network interface. + :type interface_id: ``str`` + + :param state: The state of the route (active | blackhole). + :type state: ``str`` + + :param origin: Describes how the route was created. + :type origin: ``str`` + + :param vpc_peering_connection_id: The ID of the VPC + peering connection. 
+ :type vpc_peering_connection_id: ``str`` + """ + + self.cidr = cidr + self.gateway_id = gateway_id + self.instance_id = instance_id + self.owner_id = owner_id + self.interface_id = interface_id + self.state = state + self.origin = origin + self.vpc_peering_connection_id = vpc_peering_connection_id + + def __repr__(self): + return (('') % (self.cidr)) + + +class EC2SubnetAssociation(object): + """ + Class which stores information about Route Table associated with + a given Subnet in a VPC + + Note: This class is VPC specific. + """ + + def __init__(self, id, route_table_id, subnet_id, main=False): + """ + :param id: The ID of the subent association in the VPC. + :type id: ``str`` + + :param route_table_id: The ID of a route table in the VPC. + :type route_table_id: ``str`` + + :param subnet_id: The ID of a subnet in the VPC. + :type subnet_id: ``str`` + + :param main: If true, means this is a main VPC route table. + :type main: ``bool`` + """ + + self.id = id + self.route_table_id = route_table_id + self.subnet_id = subnet_id + self.main = main + + def __repr__(self): + return (('') % (self.id)) + + class BaseEC2NodeDriver(NodeDriver): """ Base Amazon EC2 node driver. @@ -3927,6 +4054,306 @@ def ex_detach_internet_gateway(self, gateway, network): return element == 'true' + def ex_list_route_tables(self, route_table_ids=None, filters=None): + """ + Describes one or more of a VPC's route tables. + These are are used to determine where network traffic is directed. + + :param route_table_ids: Return only route tables matching the + provided route table IDs. If not specified, + a list of all the route tables in the + corresponding region is returned. + :type route_table_ids: ``list`` + + :param filters: The filters so that the response includes + information for only certain route tables. 
+ :type filters: ``dict`` + + :rtype: ``list`` of :class:`.EC2RouteTable` + """ + params = {'Action': 'DescribeRouteTables'} + + if route_table_ids: + for route_table_idx, route_table_id in enumerate(route_table_ids): + route_table_idx += 1 # We want 1-based indexes + route_table_key = 'RouteTableId.%s' % route_table_idx + params[route_table_key] = route_table_id + + if filters: + params.update(self._build_filters(filters)) + + response = self.connection.request(self.path, params=params) + + return self._to_route_tables(response.object) + + def ex_create_route_table(self, network, name=None): + """ + Create a route table within a VPC. + + :param vpc_id: The VPC that the subnet should be created in. + :type vpc_id: :class:`.EC2Network` + + :rtype: :class: `.EC2RouteTable` + """ + params = {'Action': 'CreateRouteTable', + 'VpcId': network.id} + + response = self.connection.request(self.path, params=params).object + element = response.findall(fixxpath(xpath='routeTable', + namespace=NAMESPACE))[0] + + route_table = self._to_route_table(element) + + if name: + self.ex_create_tags(route_table, {'Name': name}) + + return route_table + + def ex_delete_route_table(self, route_table): + """ + Deletes a VPC route table. + + :param route_table: The route table to delete. + :type route_table: :class:`.EC2RouteTable` + + :rtype: ``bool`` + """ + + params = {'Action': 'DeleteRouteTable', + 'RouteTableId': route_table.id} + + result = self.connection.request(self.path, params=params).object + element = findtext(element=result, + xpath='return', + namespace=NAMESPACE) + + return element == 'true' + + def ex_associate_route_table(self, route_table, subnet): + """ + Associates a route table with a subnet within a VPC. + + Note: A route table can be associated with multiple subnets. + + :param route_table: The route table to associate. + :type route_table: :class:`.EC2RouteTable` + + :param subnet: The subnet to associate with. 
+ :type subnet: :class:`.EC2Subnet` + + :return: Route table association ID. + :rtype: ``str`` + """ + + params = {'Action': 'AssociateRouteTable', + 'RouteTableId': route_table.id, + 'SubnetId': subnet.id} + + result = self.connection.request(self.path, params=params).object + association_id = findtext(element=result, + xpath='associationId', + namespace=NAMESPACE) + + return association_id + + def ex_dissociate_route_table(self, subnet_association): + """ + Dissociates a subnet from a route table. + + :param subnet_association: The subnet association object or + subnet association ID. + :type subnet_association: :class:`.EC2SubnetAssociation` or + ``str`` + + :rtype: ``bool`` + """ + + if isinstance(subnet_association, EC2SubnetAssociation): + subnet_association_id = subnet_association.id + else: + subnet_association_id = subnet_association + + params = {'Action': 'DisassociateRouteTable', + 'AssociationId': subnet_association_id} + + result = self.connection.request(self.path, params=params).object + element = findtext(element=result, + xpath='return', + namespace=NAMESPACE) + + return element == 'true' + + def ex_replace_route_table_association(self, subnet_association, + route_table): + """ + Changes the route table associated with a given subnet in a VPC. + + Note: This method can be used to change which table is the main route + table in the VPC (Specify the main route table's association ID + and the route table to be the new main route table). + + :param subnet_association: The subnet association object or + subnet association ID. + :type subnet_association: :class:`.EC2SubnetAssociation` or + ``str`` + + :param route_table: The new route table to associate. + :type route_table: :class:`.EC2RouteTable` + + :return: New route table association ID. 
+ :rtype: ``str`` + """ + + if isinstance(subnet_association, EC2SubnetAssociation): + subnet_association_id = subnet_association.id + else: + subnet_association_id = subnet_association + + params = {'Action': 'ReplaceRouteTableAssociation', + 'AssociationId': subnet_association_id, + 'RouteTableId': route_table.id} + + result = self.connection.request(self.path, params=params).object + new_association_id = findtext(element=result, + xpath='newAssociationId', + namespace=NAMESPACE) + + return new_association_id + + def ex_create_route(self, route_table, cidr, + internet_gateway=None, node=None, + network_interface=None, vpc_peering_connection=None): + """ + Creates a route entry in the route table. + + :param route_table: The route table to create the route in. + :type route_table: :class:`.EC2RouteTable` + + :param cidr: The CIDR block used for the destination match. + :type cidr: ``str`` + + :param internet_gateway: The internet gateway to route + traffic through. + :type internet_gateway: :class:`.VPCInternetGateway` + + :param node: The NAT instance to route traffic through. + :type node: :class:`Node` + + :param network_interface: The network interface of the node + to route traffic through. + :type network_interface: :class:`.EC2NetworkInterface` + + :param vpc_peering_connection: The VPC peering connection. + :type vpc_peering_connection: :class:`.VPCPeeringConnection` + + :rtype: ``bool`` + + Note: You must specify one of the following: internet_gateway, + node, network_interface, vpc_peering_connection. 
+ """ + + params = {'Action': 'CreateRoute', + 'RouteTableId': route_table.id, + 'DestinationCidrBlock': cidr} + + if internet_gateway: + params['GatewayId'] = internet_gateway.id + + if node: + params['InstanceId'] = node.id + + if network_interface: + params['NetworkInterfaceId'] = network_interface.id + + if vpc_peering_connection: + params['VpcPeeringConnectionId'] = vpc_peering_connection.id + + result = self.connection.request(self.path, params=params).object + element = findtext(element=result, + xpath='return', + namespace=NAMESPACE) + + return element == 'true' + + def ex_delete_route(self, route_table, cidr): + """ + Deletes a route entry from the route table. + + :param route_table: The route table to delete the route from. + :type route_table: :class:`.EC2RouteTable` + + :param cidr: The CIDR block used for the destination match. + :type cidr: ``str`` + + :rtype: ``bool`` + """ + + params = {'Action': 'DeleteRoute', + 'RouteTableId': route_table.id, + 'DestinationCidrBlock': cidr} + + result = self.connection.request(self.path, params=params).object + element = findtext(element=result, + xpath='return', + namespace=NAMESPACE) + + return element == 'true' + + def ex_replace_route(self, route_table, cidr, + internet_gateway=None, node=None, + network_interface=None, vpc_peering_connection=None): + """ + Replaces an existing route entry within a route table in a VPC. + + :param route_table: The route table to replace the route in. + :type route_table: :class:`.EC2RouteTable` + + :param cidr: The CIDR block used for the destination match. + :type cidr: ``str`` + + :param internet_gateway: The new internet gateway to route + traffic through. + :type internet_gateway: :class:`.VPCInternetGateway` + + :param node: The new NAT instance to route traffic through. + :type node: :class:`Node` + + :param network_interface: The new network interface of the node + to route traffic through. 
+ :type network_interface: :class:`.EC2NetworkInterface` + + :param vpc_peering_connection: The new VPC peering connection. + :type vpc_peering_connection: :class:`.VPCPeeringConnection` + + :rtype: ``bool`` + + Note: You must specify one of the following: internet_gateway, + node, network_interface, vpc_peering_connection. + """ + + params = {'Action': 'ReplaceRoute', + 'RouteTableId': route_table.id, + 'DestinationCidrBlock': cidr} + + if internet_gateway: + params['GatewayId'] = internet_gateway.id + + if node: + params['InstanceId'] = node.id + + if network_interface: + params['NetworkInterfaceId'] = network_interface.id + + if vpc_peering_connection: + params['VpcPeeringConnectionId'] = vpc_peering_connection.id + + result = self.connection.request(self.path, params=params).object + element = findtext(element=result, + xpath='return', + namespace=NAMESPACE) + + return element == 'true' + def _to_nodes(self, object, xpath): return [self._to_node(el) for el in object.findall(fixxpath(xpath=xpath, @@ -4451,6 +4878,125 @@ def _to_internet_gateway(self, element, name=None): state=state, driver=self.connection.driver, extra={'tags': tags}) + def _to_route_tables(self, response): + return [self._to_route_table(el) for el in response.findall( + fixxpath(xpath='routeTableSet/item', namespace=NAMESPACE)) + ] + + def _to_route_table(self, element): + # route table id + route_table_id = findtext(element=element, + xpath='routeTableId', + namespace=NAMESPACE) + + # Get our tags + tags = self._get_resource_tags(element) + + # Get our extra dictionary + extra = self._get_extra_dict( + element, RESOURCE_EXTRA_ATTRIBUTES_MAP['route_table']) + + # Add tags to the extra dict + extra['tags'] = tags + + # Get routes + routes = self._to_routes(element, 'routeSet/item') + + # Get subnet associations + subnet_associations = self._to_subnet_associations( + element, 'associationSet/item') + + # Get propagating routes virtual private gateways (VGW) IDs + propagating_gateway_ids = [] + 
for el in element.findall(fixxpath(xpath='propagatingVgwSet/item', + namespace=NAMESPACE)): + propagating_gateway_ids.append(findtext(element=el, + xpath='gatewayId', + namespace=NAMESPACE)) + + return EC2RouteTable(route_table_id, routes, subnet_associations, + propagating_gateway_ids, extra=extra) + + def _to_routes(self, element, xpath): + return [self._to_route(el) for el in element.findall( + fixxpath(xpath=xpath, namespace=NAMESPACE)) + ] + + def _to_route(self, element): + """ + Parse the XML element and return a route object + + :rtype: :class: `EC2Route` + """ + + destination_cidr = findtext(element=element, + xpath='destinationCidrBlock', + namespace=NAMESPACE) + + gateway_id = findtext(element=element, + xpath='gatewayId', + namespace=NAMESPACE) + + instance_id = findtext(element=element, + xpath='instanceId', + namespace=NAMESPACE) + + owner_id = findtext(element=element, + xpath='instanceOwnerId', + namespace=NAMESPACE) + + interface_id = findtext(element=element, + xpath='networkInterfaceId', + namespace=NAMESPACE) + + state = findtext(element=element, + xpath='state', + namespace=NAMESPACE) + + origin = findtext(element=element, + xpath='origin', + namespace=NAMESPACE) + + vpc_peering_connection_id = findtext(element=element, + xpath='vpcPeeringConnectionId', + namespace=NAMESPACE) + + return EC2Route(destination_cidr, gateway_id, instance_id, owner_id, + interface_id, state, origin, vpc_peering_connection_id) + + def _to_subnet_associations(self, element, xpath): + return [self._to_subnet_association(el) for el in element.findall( + fixxpath(xpath=xpath, namespace=NAMESPACE)) + ] + + def _to_subnet_association(self, element): + """ + Parse the XML element and return a route table association object + + :rtype: :class: `EC2SubnetAssociation` + """ + + association_id = findtext(element=element, + xpath='routeTableAssociationId', + namespace=NAMESPACE) + + route_table_id = findtext(element=element, + xpath='routeTableId', + namespace=NAMESPACE) + + 
subnet_id = findtext(element=element, + xpath='subnetId', + namespace=NAMESPACE) + + main = findtext(element=element, + xpath='main', + namespace=NAMESPACE) + + main = True if main else False + + return EC2SubnetAssociation(association_id, route_table_id, + subnet_id, main) + def _pathlist(self, key, arr): """ Converts a key and an array of values into AWS query param format. From 7d6d373d62f799bdce5007a7026d8047c436bb9e Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Wed, 18 Jun 2014 08:56:36 +0200 Subject: [PATCH 086/315] Fix ex_list_snapshots for HP Helion cloud. --- CHANGES.rst | 3 +++ libcloud/compute/drivers/openstack.py | 31 ++++++++++++++++----------- 2 files changed, 22 insertions(+), 12 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index 0c8b3064bf..46e804b964 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -231,6 +231,9 @@ Compute (LIBCLOUD-574, GITHUB-313) [Lior Goikhburg] +- Fix ex_list_snapshots for HP Helion OpenStack based driver. + [Tomaz Muraus] + Storage ~~~~~~~ diff --git a/libcloud/compute/drivers/openstack.py b/libcloud/compute/drivers/openstack.py index 17884797d2..3742d523b8 100644 --- a/libcloud/compute/drivers/openstack.py +++ b/libcloud/compute/drivers/openstack.py @@ -2084,18 +2084,25 @@ def _to_volume(self, api_node): } ) - def _to_snapshot(self, api_node): - if 'snapshot' in api_node: - api_node = api_node['snapshot'] - - extra = {'volume_id': api_node['volume_id'], - 'name': api_node['display_name'], - 'created': api_node['created_at'], - 'description': api_node['display_description'], - 'status': api_node['status']} - - snapshot = VolumeSnapshot(id=api_node['id'], driver=self, - size=api_node['size'], extra=extra) + def _to_snapshot(self, data): + if 'snapshot' in data: + data = data['snapshot'] + + volume_id = data.get('volume_id', data.get('volumeId', None)) + display_name = data.get('display_name', data.get('displayName', None)) + created_at = data.get('created_at', data.get('createdAt', None)) + description = 
data.get('display_description', + data.get('displayDescription', None)) + status = data.get('status', None) + + extra = {'volume_id': volume_id, + 'name': display_name, + 'created': created_at, + 'description': description, + 'status': status} + + snapshot = VolumeSnapshot(id=data['id'], driver=self, + size=data['size'], extra=extra) return snapshot def _to_size(self, api_flavor, price=None, bandwidth=None): From 0696e27eb087c30cfb2d403b917407f13c7f3a54 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Wed, 18 Jun 2014 08:58:49 +0200 Subject: [PATCH 087/315] Add missing __repr__ method to VolumeSnapshot class. --- libcloud/compute/base.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/libcloud/compute/base.py b/libcloud/compute/base.py index d971afa35a..142c2cbc82 100644 --- a/libcloud/compute/base.py +++ b/libcloud/compute/base.py @@ -560,6 +560,10 @@ def destroy(self): """ return self.driver.destroy_volume_snapshot(snapshot=self) + def __repr__(self): + return ('' % + (self.id, self.size, self.driver.name)) + class KeyPair(object): """ From 63dbd4284bfedf051bc168acf32876693d1d2c7e Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Wed, 18 Jun 2014 15:06:52 +0200 Subject: [PATCH 088/315] Allow user to specify volume type and number of IOPS when creating a new volume in the EC2 driver by passing ex_volume_type and ex_iops argument to the create_volume method. 
--- CHANGES.rst | 5 ++++ docs/compute/drivers/ec2.rst | 12 +++++++++ .../ec2/create_general_purpose_ssd_volume.py | 8 ++++++ .../ec2/create_provisioned_iops_volume.py | 8 ++++++ libcloud/compute/drivers/ec2.py | 25 +++++++++++++++++-- 5 files changed, 56 insertions(+), 2 deletions(-) create mode 100644 docs/examples/compute/ec2/create_general_purpose_ssd_volume.py create mode 100644 docs/examples/compute/ec2/create_provisioned_iops_volume.py diff --git a/CHANGES.rst b/CHANGES.rst index 46e804b964..18977831f9 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -234,6 +234,11 @@ Compute - Fix ex_list_snapshots for HP Helion OpenStack based driver. [Tomaz Muraus] +- Allow user to specify volume type and number of IOPS when creating a new + volume in the EC2 driver by passing ``ex_volume_type`` and ``ex_iops`` + argument to the ``create_volume`` method. + [Tomaz Muraus] + Storage ~~~~~~~ diff --git a/docs/compute/drivers/ec2.rst b/docs/compute/drivers/ec2.rst index d7d6f71c94..b42ca53d51 100644 --- a/docs/compute/drivers/ec2.rst +++ b/docs/compute/drivers/ec2.rst @@ -49,6 +49,18 @@ Allocate, Associate, Disassociate, and Release an Elastic IP .. literalinclude:: /examples/compute/create_ec2_node_and_associate_elastic_ip.py :language: python +Create a general purpose SSD volume +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. literalinclude:: /examples/compute/ec2/create_general_purpose_ssd_volume.py + :language: python + +Create a provisioned IOPS volume +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. 
literalinclude:: /examples/compute/ec2/create_provisioned_iops_volume.py + :language: python + API Docs -------- diff --git a/docs/examples/compute/ec2/create_general_purpose_ssd_volume.py b/docs/examples/compute/ec2/create_general_purpose_ssd_volume.py new file mode 100644 index 0000000000..1389673a8a --- /dev/null +++ b/docs/examples/compute/ec2/create_general_purpose_ssd_volume.py @@ -0,0 +1,8 @@ +from libcloud.compute.types import Provider +from libcloud.compute.providers import get_driver + +cls = get_driver(Provider.EC2, region='us-east-1') +driver = cls('access key', 'secret key') + +volume = driver.create_volume(size=100, name='Test GP volume', + ex_volume_type='g2') diff --git a/docs/examples/compute/ec2/create_provisioned_iops_volume.py b/docs/examples/compute/ec2/create_provisioned_iops_volume.py new file mode 100644 index 0000000000..6414790a30 --- /dev/null +++ b/docs/examples/compute/ec2/create_provisioned_iops_volume.py @@ -0,0 +1,8 @@ +from libcloud.compute.types import Provider +from libcloud.compute.providers import get_driver + +cls = get_driver(Provider.EC2, region='us-east-1') +driver = cls('access key', 'secret key') + +volume = driver.create_volume(size=100, name='Test IOPS volume', + ex_volume_type='io1', ex_iops=1000) diff --git a/libcloud/compute/drivers/ec2.py b/libcloud/compute/drivers/ec2.py index 9128c66276..22c1a64aea 100644 --- a/libcloud/compute/drivers/ec2.py +++ b/libcloud/compute/drivers/ec2.py @@ -2151,18 +2151,39 @@ def destroy_node(self, node): res = self.connection.request(self.path, params=params).object return self._get_terminate_boolean(res) - def create_volume(self, size, name, location=None, snapshot=None): + def create_volume(self, size, name, location=None, snapshot=None, + ex_volume_type='standard', ex_iops=None): """ :param location: Datacenter in which to create a volume in. 
- :type location: :class:`ExEC2AvailabilityZone` + :type location: :class:`.ExEC2AvailabilityZone` + + :param ex_volume_type: Type of volume to create. + :type ex_volume_type: ``str`` + + :param ex_iops: The number of I/O operations per second (IOPS) + that the volume supports. Only used if ex_volume_type + is io1. + :type ex_iops: ``int`` """ + valid_volume_types = ['standard', 'io1', 'g2'] + params = { 'Action': 'CreateVolume', 'Size': str(size)} + if ex_volume_type and ex_volume_type not in valid_volume_types: + raise ValueError('Invalid volume type specified: %s' % + (ex_volume_type)) + if location is not None: params['AvailabilityZone'] = location.availability_zone.name + if ex_volume_type: + params['VolumeType'] = ex_volume_type + + if ex_volume_type == 'io1' and ex_iops: + params['Iops'] = ex_iops + volume = self._to_volume( self.connection.request(self.path, params=params).object, name=name) From aebdd15aea4f8d81c7fb0245d287653773c06b3e Mon Sep 17 00:00:00 2001 From: Lior Goikhburg Date: Fri, 20 Jun 2014 18:10:40 +0400 Subject: [PATCH 089/315] use existing _pathlist() function to convert key and an array of values into AWS query param format instead of doing this in every function Closes #319 Signed-off-by: Tomaz Muraus --- libcloud/compute/drivers/ec2.py | 15 +++------------ 1 file changed, 3 insertions(+), 12 deletions(-) diff --git a/libcloud/compute/drivers/ec2.py b/libcloud/compute/drivers/ec2.py index 22c1a64aea..db4ed76a7e 100644 --- a/libcloud/compute/drivers/ec2.py +++ b/libcloud/compute/drivers/ec2.py @@ -2544,10 +2544,7 @@ def ex_list_networks(self, network_ids=None, filters=None): params = {'Action': 'DescribeVpcs'} if network_ids: - for network_idx, network_id in enumerate(network_ids): - network_idx += 1 # We want 1-based indexes - network_key = 'VpcId.%s' % network_idx - params[network_key] = network_id + params.update(self._pathlist('VpcId', network_ids)) if filters: params.update(self._build_filters(filters)) @@ -2719,10 +2716,7 @@ def 
ex_get_security_groups(self, group_ids=None, params = {'Action': 'DescribeSecurityGroups'} if group_ids: - for id_idx, group_id in enumerate(group_ids): - id_idx += 1 # We want 1-based indexes - id_key = 'GroupId.%s' % (id_idx) - params[id_key] = group_id + params.update(self._pathlist('GroupId', group_ids)) if group_names: for name_idx, group_name in enumerate(group_names): @@ -4095,10 +4089,7 @@ def ex_list_route_tables(self, route_table_ids=None, filters=None): params = {'Action': 'DescribeRouteTables'} if route_table_ids: - for route_table_idx, route_table_id in enumerate(route_table_ids): - route_table_idx += 1 # We want 1-based indexes - route_table_key = 'RouteTableId.%s' % route_table_idx - params[route_table_key] = route_table_id + params.update(self._pathlist('RouteTableId', route_table_ids)) if filters: params.update(self._build_filters(filters)) From 2bd37b68757be4eaebecf9e70e716519a7b4207b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pablo=20Ordu=C3=B1a?= Date: Thu, 19 Jun 2014 19:34:35 +0200 Subject: [PATCH 090/315] Fix ex_unpause_node in openstack ex_unpause_node in the OpenStack driver was calling the 'pause' action instead of the 'unpause' action. Closes #317 Signed-off-by: Tomaz Muraus --- CHANGES.rst | 4 ++++ libcloud/compute/drivers/openstack.py | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index 18977831f9..839a570abd 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -239,6 +239,10 @@ Compute argument to the ``create_volume`` method. [Tomaz Muraus] +- Fix ex_unpause_node method in the OpenStack driver. 
+ (GITHUB-317) + [Pablo Orduña] + Storage ~~~~~~~ diff --git a/libcloud/compute/drivers/openstack.py b/libcloud/compute/drivers/openstack.py index 3742d523b8..c573febcbe 100644 --- a/libcloud/compute/drivers/openstack.py +++ b/libcloud/compute/drivers/openstack.py @@ -2311,7 +2311,7 @@ def ex_pause_node(self, node): def ex_unpause_node(self, node): uri = '/servers/%s/action' % (node.id) - data = {'pause': None} + data = {'unpause': None} resp = self.connection.request(uri, method='POST', data=data) return resp.status == httplib.ACCEPTED From 7afbfbb55e8d301fb0c62dd03dcf1d117dbb1e4a Mon Sep 17 00:00:00 2001 From: Lior Goikhburg Date: Fri, 20 Jun 2014 17:27:49 +0400 Subject: [PATCH 091/315] Allow nodes to be launched into a specific subnet by adding subnet parameter to create_node() method of ec2 driver Closes #318 Signed-off-by: Tomaz Muraus --- CHANGES.rst | 5 +++++ libcloud/compute/drivers/ec2.py | 6 ++++++ 2 files changed, 11 insertions(+) diff --git a/CHANGES.rst b/CHANGES.rst index 839a570abd..a6e6f25eed 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -243,6 +243,11 @@ Compute (GITHUB-317) [Pablo Orduña] +- Allow user to launch EC2 node in a specific VPC subnet by passing + ``ex_subnet`` argument to the create_node method. + (GITHUB-318) + [Lior Goikhburg] + Storage ~~~~~~~ diff --git a/libcloud/compute/drivers/ec2.py b/libcloud/compute/drivers/ec2.py index db4ed76a7e..1f86789940 100644 --- a/libcloud/compute/drivers/ec2.py +++ b/libcloud/compute/drivers/ec2.py @@ -2047,6 +2047,9 @@ def create_node(self, **kwargs): :keyword ex_ebs_optimized: EBS-Optimized if True :type ex_ebs_optimized: ``bool`` + + :keyword ex_subnet: The subnet to launch the instance into. 
+ :type ex_subnet: :class:`.EC2Subnet` """ image = kwargs["image"] size = kwargs["size"] @@ -2118,6 +2121,9 @@ def create_node(self, **kwargs): if 'ex_ebs_optimized' in kwargs: params['EbsOptimized'] = kwargs['ex_ebs_optimized'] + if 'ex_subnet' in kwargs: + params['SubnetId'] = kwargs['ex_subnet'].id + object = self.connection.request(self.path, params=params).object nodes = self._to_nodes(object, 'instancesSet/item') From e71615f8daadeda5d145929fea6f373e3b445ed5 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Sun, 22 Jun 2014 20:20:33 +0200 Subject: [PATCH 092/315] Bump version to 0.15.0. --- CHANGES.rst | 4 ++-- libcloud/__init__.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index a6e6f25eed..5e2dcca4bc 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,8 +1,8 @@ Changelog ========= -Changes with Apache Libcloud in development -------------------------------------------- +Changes with Apache Libcloud 0.15.0 +----------------------------------- General ~~~~~~~ diff --git a/libcloud/__init__.py b/libcloud/__init__.py index 04de4532b5..8e969a600f 100644 --- a/libcloud/__init__.py +++ b/libcloud/__init__.py @@ -20,7 +20,7 @@ """ __all__ = ['__version__', 'enable_debug'] -__version__ = '0.14.1' +__version__ = '0.15.0' import os From c4148551d062a4e20d4b79369534f1729f4af626 Mon Sep 17 00:00:00 2001 From: Lior Goikhburg Date: Tue, 3 Jun 2014 18:44:41 +0400 Subject: [PATCH 093/315] LIBCLOUD-571 subnet-filters for ec2 driver Closes #306 Signed-off-by: Tomaz Muraus --- CHANGES.rst | 12 ++++++++++++ libcloud/compute/drivers/ec2.py | 18 +++++++++++++++++- 2 files changed, 29 insertions(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index 5e2dcca4bc..a33bc9ec45 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,6 +1,18 @@ Changelog ========= +Changes with Apache Libcloud in development +------------------------------------------- + +Compute +~~~~~~~ + +- Allow user to limit a list of subnets which are returned by 
passing + ``subnet_ids`` and ``filters`` argument to the ``ex_list_subnets`` + method in the EC2 driver. + (LIBCLOUD-571, GITHUB-306) + [Lior Goikhburg] + Changes with Apache Libcloud 0.15.0 ----------------------------------- diff --git a/libcloud/compute/drivers/ec2.py b/libcloud/compute/drivers/ec2.py index 1f86789940..72d9f22f80 100644 --- a/libcloud/compute/drivers/ec2.py +++ b/libcloud/compute/drivers/ec2.py @@ -2610,15 +2610,31 @@ def ex_delete_network(self, vpc): return element == 'true' - def ex_list_subnets(self): + def ex_list_subnets(self, subnet_ids=None, filters=None): """ Return a list of :class:`EC2NetworkSubnet` objects for the current region. + :param subnet_ids: Return only subnets matching the provided + subnet IDs. If not specified, a list of all + the subnets in the corresponding region + is returned. + :type subnet_ids: ``list`` + + :param filters: The filters so that the response includes + information for only certain subnets. + :type filters: ``dict`` + :rtype: ``list`` of :class:`EC2NetworkSubnet` """ params = {'Action': 'DescribeSubnets'} + if subnet_ids: + params.update(self._pathlist('SubnetId', subnet_ids)) + + if filters: + params.update(self._build_filters(filters)) + return self._to_subnets( self.connection.request(self.path, params=params).object ) From ee39aad51021453e4d0425ddcd859b87293c3217 Mon Sep 17 00:00:00 2001 From: Lior Goikhburg Date: Tue, 3 Jun 2014 20:10:57 +0400 Subject: [PATCH 094/315] LIBCLOUD-572 internet-gateways-filters for ec2 driver Closes #307 Signed-off-by: Tomaz Muraus --- CHANGES.rst | 6 ++++++ libcloud/compute/drivers/ec2.py | 19 ++++++++++++++++++- 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index a33bc9ec45..d5ccdc532e 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -13,6 +13,12 @@ Compute (LIBCLOUD-571, GITHUB-306) [Lior Goikhburg] +- Allow user to limit a list of internet gateways which are returned by + passing ``gateway_ids`` and ``filters`` argument to the + 
``ex_list_internet_gateways`` method in the EC2 driver. + (LIBCLOUD-572, GITHUB-307) + [Lior Goikhburg] + Changes with Apache Libcloud 0.15.0 ----------------------------------- diff --git a/libcloud/compute/drivers/ec2.py b/libcloud/compute/drivers/ec2.py index 72d9f22f80..657ae3644e 100644 --- a/libcloud/compute/drivers/ec2.py +++ b/libcloud/compute/drivers/ec2.py @@ -3992,16 +3992,33 @@ def ex_find_or_import_keypair_by_key_material(self, pubkey): return result - def ex_list_internet_gateways(self): + def ex_list_internet_gateways(self, gateway_ids=None, filters=None): """ Describes available Internet gateways and whether or not they are attached to a VPC. These are required for VPC nodes to communicate over the Internet. + :param gateway_ids: Return only internet gateways matching the + provided internet gateway IDs. If not + specified, a list of all the internet + gateways in the corresponding region is + returned. + :type gateway_ids: ``list`` + + :param filters: The filters so that the response includes + information for only certain gateways. + :type filters: ``dict`` + :rtype: ``list`` of :class:`.VPCInternetGateway` """ params = {'Action': 'DescribeInternetGateways'} + if gateway_ids: + params.update(self._pathlist('InternetGatewayId', gateway_ids)) + + if filters: + params.update(self._build_filters(filters)) + response = self.connection.request(self.path, params=params).object return self._to_internet_gateways(response, 'internetGatewaySet/item') From 158380bb8e84bb584f9314cbc5579a80c219f75c Mon Sep 17 00:00:00 2001 From: Lior Goikhburg Date: Mon, 23 Jun 2014 16:51:33 +0400 Subject: [PATCH 095/315] Fix setting Name tags and name attributes when creating some objects in the EC2 driver. 
Closes #322 Signed-off-by: Tomaz Muraus --- libcloud/compute/drivers/ec2.py | 58 +++++++++++++++++++-------------- 1 file changed, 34 insertions(+), 24 deletions(-) diff --git a/libcloud/compute/drivers/ec2.py b/libcloud/compute/drivers/ec2.py index 657ae3644e..77e76299f8 100644 --- a/libcloud/compute/drivers/ec2.py +++ b/libcloud/compute/drivers/ec2.py @@ -1746,12 +1746,15 @@ class EC2RouteTable(object): Note: This class is VPC specific. """ - def __init__(self, id, routes, subnet_associations, + def __init__(self, id, name, routes, subnet_associations, propagating_gateway_ids, extra=None): """ :param id: The ID of the route table. :type id: ``str`` + :param name: The name of the route table. + :type name: ``str`` + :param routes: A list of routes in the route table. :type routes: ``list`` of :class:`EC2Route` @@ -1767,6 +1770,7 @@ def __init__(self, id, routes, subnet_associations, """ self.id = id + self.name = name self.routes = routes self.subnet_associations = subnet_associations self.propagating_gateway_ids = propagating_gateway_ids @@ -2193,7 +2197,10 @@ def create_volume(self, size, name, location=None, snapshot=None, volume = self._to_volume( self.connection.request(self.path, params=params).object, name=name) - self.ex_create_tags(volume, {'Name': name}) + + if self.ex_create_tags(volume, {'Name': name}): + volume.extra['tags']['Name'] = name + return volume def attach_volume(self, node, volume, device): @@ -2245,8 +2252,8 @@ def create_volume_snapshot(self, volume, name=None): response = self.connection.request(self.path, params=params).object snapshot = self._to_snapshot(response, name) - if name: - self.ex_create_tags(snapshot, {'Name': name}) + if name and self.ex_create_tags(snapshot, {'Name': name}): + snapshot.extra['tags']['Name'] = name return snapshot @@ -2586,10 +2593,10 @@ def ex_create_network(self, cidr_block, name=None, element = response.findall(fixxpath(xpath='vpc', namespace=NAMESPACE))[0] - network = self._to_network(element) + network 
= self._to_network(element, name) - if name is not None: - self.ex_create_tags(network, {'Name': name}) + if name and self.ex_create_tags(network, {'Name': name}): + network.extra['tags']['Name'] = name return network @@ -2669,10 +2676,10 @@ def ex_create_subnet(self, vpc_id, cidr_block, element = response.findall(fixxpath(xpath='subnet', namespace=NAMESPACE))[0] - subnet = self._to_subnet(element) + subnet = self._to_subnet(element, name) - if name is not None: - self.ex_create_tags(subnet, {'Name': name}) + if name and self.ex_create_tags(subnet, {'Name': name}): + subnet.extra['tags']['Name'] = name return subnet @@ -3528,9 +3535,8 @@ def ex_create_network_interface(self, subnet, name=None, interface = self._to_interface(element, name) - if name is not None: - tags = {'Name': name} - self.ex_create_tags(resource=interface, tags=tags) + if name and self.ex_create_tags(interface, {'Name': name}): + interface.extra['tags']['Name'] = name return interface @@ -4038,8 +4044,8 @@ def ex_create_internet_gateway(self, name=None): gateway = self._to_internet_gateway(element[0], name) - if name is not None: - self.ex_create_tags(gateway, {'Name': name}) + if name and self.ex_create_tags(gateway, {'Name': name}): + gateway.extra['tags']['Name'] = name return gateway @@ -4153,10 +4159,10 @@ def ex_create_route_table(self, network, name=None): element = response.findall(fixxpath(xpath='routeTable', namespace=NAMESPACE))[0] - route_table = self._to_route_table(element) + route_table = self._to_route_table(element, name=name) - if name: - self.ex_create_tags(route_table, {'Name': name}) + if name and self.ex_create_tags(route_table, {'Name': name}): + route_table.extra['tags']['Name'] = name return route_table @@ -4505,6 +4511,8 @@ def _to_volume(self, element, name=None): extra = self._get_extra_dict( element, RESOURCE_EXTRA_ATTRIBUTES_MAP['volume']) + extra['tags'] = tags + return StorageVolume(id=volId, name=name, size=int(size), @@ -4667,7 +4675,7 @@ def _to_networks(self, 
response): fixxpath(xpath='vpcSet/item', namespace=NAMESPACE)) ] - def _to_network(self, element): + def _to_network(self, element, name=None): # Get the network id vpc_id = findtext(element=element, xpath='vpcId', @@ -4678,7 +4686,7 @@ def _to_network(self, element): # Set our name if the Name key/value if available # If we don't get anything back then use the vpc_id - name = tags.get('Name', vpc_id) + name = name if name else tags.get('Name', vpc_id) cidr_block = findtext(element=element, xpath='cidrBlock', @@ -4740,7 +4748,7 @@ def _to_subnets(self, response): fixxpath(xpath='subnetSet/item', namespace=NAMESPACE)) ] - def _to_subnet(self, element): + def _to_subnet(self, element, name=None): # Get the subnet ID subnet_id = findtext(element=element, xpath='subnetId', @@ -4750,7 +4758,7 @@ def _to_subnet(self, element): tags = self._get_resource_tags(element) # If we don't get anything back then use the subnet_id - name = tags.get('Name', subnet_id) + name = name if name else tags.get('Name', subnet_id) state = findtext(element=element, xpath='state', @@ -4934,7 +4942,7 @@ def _to_route_tables(self, response): fixxpath(xpath='routeTableSet/item', namespace=NAMESPACE)) ] - def _to_route_table(self, element): + def _to_route_table(self, element, name=None): # route table id route_table_id = findtext(element=element, xpath='routeTableId', @@ -4965,7 +4973,9 @@ def _to_route_table(self, element): xpath='gatewayId', namespace=NAMESPACE)) - return EC2RouteTable(route_table_id, routes, subnet_associations, + name = name if name else tags.get('Name', id) + + return EC2RouteTable(route_table_id, name, routes, subnet_associations, propagating_gateway_ids, extra=extra) def _to_routes(self, element, xpath): From 8f704e86f256ba2cfc17251ddacfa41cd7674d98 Mon Sep 17 00:00:00 2001 From: Lior Goikhburg Date: Tue, 24 Jun 2014 19:04:51 +0400 Subject: [PATCH 096/315] use _get_boolean() function instead of parsing api response in every method Closes #324 Signed-off-by: Tomaz Muraus 
--- libcloud/compute/drivers/ec2.py | 165 +++++++++++--------------------- 1 file changed, 57 insertions(+), 108 deletions(-) diff --git a/libcloud/compute/drivers/ec2.py b/libcloud/compute/drivers/ec2.py index 77e76299f8..efd8251951 100644 --- a/libcloud/compute/drivers/ec2.py +++ b/libcloud/compute/drivers/ec2.py @@ -2352,10 +2352,9 @@ def delete_key_pair(self, key_pair): 'Action': 'DeleteKeyPair', 'KeyName': key_pair.name } - result = self.connection.request(self.path, params=params).object - element = findtext(element=result, xpath='return', - namespace=NAMESPACE) - return element == 'true' + res = self.connection.request(self.path, params=params).object + + return self._get_boolean(res) def copy_image(self, image, source_region, name=None, description=None): """ @@ -2611,11 +2610,9 @@ def ex_delete_network(self, vpc): """ params = {'Action': 'DeleteVpc', 'VpcId': vpc.id} - result = self.connection.request(self.path, params=params).object - element = findtext(element=result, xpath='return', - namespace=NAMESPACE) + res = self.connection.request(self.path, params=params).object - return element == 'true' + return self._get_boolean(res) def ex_list_subnets(self, subnet_ids=None, filters=None): """ @@ -2694,12 +2691,9 @@ def ex_delete_subnet(self, subnet): """ params = {'Action': 'DeleteSubnet', 'SubnetId': subnet.id} - result = self.connection.request(self.path, params=params).object - element = findtext(element=result, - xpath='return', - namespace=NAMESPACE) + res = self.connection.request(self.path, params=params).object - return element == 'true' + return self._get_boolean(res) def ex_list_security_groups(self): """ @@ -2801,11 +2795,9 @@ def ex_delete_security_group_by_id(self, group_id): """ params = {'Action': 'DeleteSecurityGroup', 'GroupId': group_id} - result = self.connection.request(self.path, params=params).object - element = findtext(element=result, xpath='return', - namespace=NAMESPACE) + res = self.connection.request(self.path, 
params=params).object - return element == 'true' + return self._get_boolean(res) def ex_delete_security_group_by_name(self, group_name): """ @@ -2818,11 +2810,9 @@ def ex_delete_security_group_by_name(self, group_name): """ params = {'Action': 'DeleteSecurityGroup', 'GroupName': group_name} - result = self.connection.request(self.path, params=params).object - element = findtext(element=result, xpath='return', - namespace=NAMESPACE) + res = self.connection.request(self.path, params=params).object - return element == 'true' + return self._get_boolean(res) def ex_delete_security_group(self, name): """ @@ -2867,10 +2857,9 @@ def ex_authorize_security_group(self, name, from_port, to_port, cidr_ip, 'ToPort': str(to_port), 'CidrIp': cidr_ip} try: - resp = self.connection.request( + res = self.connection.request( self.path, params=params.copy()).object - return bool(findtext(element=resp, xpath='return', - namespace=NAMESPACE)) + return self._get_boolean(res) except Exception: e = sys.exc_info()[1] if e.args[0].find('InvalidPermission.Duplicate') == -1: @@ -2924,11 +2913,9 @@ def ex_authorize_security_group_ingress(self, id, from_port, to_port, params["Action"] = 'AuthorizeSecurityGroupIngress' - result = self.connection.request(self.path, params=params).object - element = findtext(element=result, xpath='return', - namespace=NAMESPACE) + res = self.connection.request(self.path, params=params).object - return element == 'true' + return self._get_boolean(res) def ex_authorize_security_group_egress(self, id, from_port, to_port, cidr_ips, group_pairs=None, @@ -2980,11 +2967,9 @@ def ex_authorize_security_group_egress(self, id, from_port, to_port, params["Action"] = 'AuthorizeSecurityGroupEgress' - result = self.connection.request(self.path, params=params).object - element = findtext(element=result, xpath='return', - namespace=NAMESPACE) + res = self.connection.request(self.path, params=params).object - return element == 'true' + return self._get_boolean(res) def 
ex_revoke_security_group_ingress(self, id, from_port, to_port, cidr_ips=None, group_pairs=None, @@ -3034,11 +3019,9 @@ def ex_revoke_security_group_ingress(self, id, from_port, to_port, params["Action"] = 'RevokeSecurityGroupIngress' - result = self.connection.request(self.path, params=params).object - element = findtext(element=result, xpath='return', - namespace=NAMESPACE) + res = self.connection.request(self.path, params=params).object - return element == 'true' + return self._get_boolean(res) def ex_revoke_security_group_egress(self, id, from_port, to_port, cidr_ips=None, group_pairs=None, @@ -3090,11 +3073,9 @@ def ex_revoke_security_group_egress(self, id, from_port, to_port, params['Action'] = 'RevokeSecurityGroupEgress' - result = self.connection.request(self.path, params=params).object - element = findtext(element=result, xpath='return', - namespace=NAMESPACE) + res = self.connection.request(self.path, params=params).object - return element == 'true' + return self._get_boolean(res) def ex_authorize_security_group_permissive(self, name): """ @@ -3236,11 +3217,10 @@ def ex_create_tags(self, resource, tags): params['Tag.%d.Key' % i] = key params['Tag.%d.Value' % i] = tags[key] - result = self.connection.request(self.path, - params=params.copy()).object - element = findtext(element=result, xpath='return', - namespace=NAMESPACE) - return element == 'true' + res = self.connection.request(self.path, + params=params.copy()).object + + return self._get_boolean(res) def ex_delete_tags(self, resource, tags): """ @@ -3264,11 +3244,10 @@ def ex_delete_tags(self, resource, tags): params['Tag.%d.Key' % i] = key params['Tag.%d.Value' % i] = tags[key] - result = self.connection.request(self.path, - params=params.copy()).object - element = findtext(element=result, xpath='return', - namespace=NAMESPACE) - return element == 'true' + res = self.connection.request(self.path, + params=params.copy()).object + + return self._get_boolean(res) def ex_get_metadata_for_node(self, 
node): """ @@ -3552,11 +3531,9 @@ def ex_delete_network_interface(self, network_interface): params = {'Action': 'DeleteNetworkInterface', 'NetworkInterfaceId': network_interface.id} - result = self.connection.request(self.path, params=params).object - element = findtext(element=result, xpath='return', - namespace=NAMESPACE) + res = self.connection.request(self.path, params=params).object - return element == 'true' + return self._get_boolean(res) def ex_attach_network_interface_to_node(self, network_interface, node, device_index): @@ -3607,11 +3584,9 @@ def ex_detach_network_interface(self, attachment_id, force=False): if force: params['Force'] = True - result = self.connection.request(self.path, params=params).object + res = self.connection.request(self.path, params=params).object - element = findtext(element=result, xpath='return', - namespace=NAMESPACE) - return element == 'true' + return self._get_boolean(res) def ex_modify_instance_attribute(self, node, attributes): """ @@ -3633,11 +3608,10 @@ def ex_modify_instance_attribute(self, node, attributes): params = {'Action': 'ModifyInstanceAttribute'} params.update(attributes) - result = self.connection.request(self.path, - params=params.copy()).object - element = findtext(element=result, xpath='return', - namespace=NAMESPACE) - return element == 'true' + res = self.connection.request(self.path, + params=params.copy()).object + + return self._get_boolean(res) def ex_modify_image_attribute(self, image, attributes): """ @@ -3658,11 +3632,10 @@ def ex_modify_image_attribute(self, image, attributes): params = {'Action': 'ModifyImageAttribute'} params.update(attributes) - result = self.connection.request(self.path, - params=params.copy()).object - element = findtext(element=result, xpath='return', - namespace=NAMESPACE) - return element == 'true' + res = self.connection.request(self.path, + params=params.copy()).object + + return self._get_boolean(res) def ex_change_node_size(self, node, new_size): """ @@ -4061,12 
+4034,9 @@ def ex_delete_internet_gateway(self, gateway): params = {'Action': 'DeleteInternetGateway', 'InternetGatewayId': gateway.id} - result = self.connection.request(self.path, params=params).object - element = findtext(element=result, - xpath='return', - namespace=NAMESPACE) + res = self.connection.request(self.path, params=params).object - return element == 'true' + return self._get_boolean(res) def ex_attach_internet_gateway(self, gateway, network): """ @@ -4084,12 +4054,9 @@ def ex_attach_internet_gateway(self, gateway, network): 'InternetGatewayId': gateway.id, 'VpcId': network.id} - result = self.connection.request(self.path, params=params).object - element = findtext(element=result, - xpath='return', - namespace=NAMESPACE) + res = self.connection.request(self.path, params=params).object - return element == 'true' + return self._get_boolean(res) def ex_detach_internet_gateway(self, gateway, network): """ @@ -4107,12 +4074,9 @@ def ex_detach_internet_gateway(self, gateway, network): 'InternetGatewayId': gateway.id, 'VpcId': network.id} - result = self.connection.request(self.path, params=params).object - element = findtext(element=result, - xpath='return', - namespace=NAMESPACE) + res = self.connection.request(self.path, params=params).object - return element == 'true' + return self._get_boolean(res) def ex_list_route_tables(self, route_table_ids=None, filters=None): """ @@ -4179,12 +4143,9 @@ def ex_delete_route_table(self, route_table): params = {'Action': 'DeleteRouteTable', 'RouteTableId': route_table.id} - result = self.connection.request(self.path, params=params).object - element = findtext(element=result, - xpath='return', - namespace=NAMESPACE) + res = self.connection.request(self.path, params=params).object - return element == 'true' + return self._get_boolean(res) def ex_associate_route_table(self, route_table, subnet): """ @@ -4233,12 +4194,9 @@ def ex_dissociate_route_table(self, subnet_association): params = {'Action': 
'DisassociateRouteTable', 'AssociationId': subnet_association_id} - result = self.connection.request(self.path, params=params).object - element = findtext(element=result, - xpath='return', - namespace=NAMESPACE) + res = self.connection.request(self.path, params=params).object - return element == 'true' + return self._get_boolean(res) def ex_replace_route_table_association(self, subnet_association, route_table): @@ -4325,12 +4283,9 @@ def ex_create_route(self, route_table, cidr, if vpc_peering_connection: params['VpcPeeringConnectionId'] = vpc_peering_connection.id - result = self.connection.request(self.path, params=params).object - element = findtext(element=result, - xpath='return', - namespace=NAMESPACE) + res = self.connection.request(self.path, params=params).object - return element == 'true' + return self._get_boolean(res) def ex_delete_route(self, route_table, cidr): """ @@ -4349,12 +4304,9 @@ def ex_delete_route(self, route_table, cidr): 'RouteTableId': route_table.id, 'DestinationCidrBlock': cidr} - result = self.connection.request(self.path, params=params).object - element = findtext(element=result, - xpath='return', - namespace=NAMESPACE) + res = self.connection.request(self.path, params=params).object - return element == 'true' + return self._get_boolean(res) def ex_replace_route(self, route_table, cidr, internet_gateway=None, node=None, @@ -4404,12 +4356,9 @@ def ex_replace_route(self, route_table, cidr, if vpc_peering_connection: params['VpcPeeringConnectionId'] = vpc_peering_connection.id - result = self.connection.request(self.path, params=params).object - element = findtext(element=result, - xpath='return', - namespace=NAMESPACE) + res = self.connection.request(self.path, params=params).object - return element == 'true' + return self._get_boolean(res) def _to_nodes(self, object, xpath): return [self._to_node(el) From 8aa715ba5739475aceed94a4cd62845f44365121 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Wed, 25 Jun 2014 15:42:48 +0200 Subject: 
[PATCH 097/315] docs: Fix typo, update committer guide. --- docs/committer_guide.rst | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/docs/committer_guide.rst b/docs/committer_guide.rst index 14fec2480f..3287868e2d 100644 --- a/docs/committer_guide.rst +++ b/docs/committer_guide.rst @@ -143,13 +143,14 @@ command.** * Go to the `PyPi release management page`_, find a new release and click on "files" link. -* Once you are there, upload all the release artifacts (.tar.bz2, .tar.gz and - .zip). For ``File Type`` select ``Source`` and for ``Python Version`` select - ``Any (ie. pure Python)``. Make sure to also select and upload a PGP +* Once you are there, upload all the release artifacts (.tar.bz2, .tar.gz, + .zip, and .whl). For ``File Type`` select ``Source`` (except for ``.whl`` + file where you should select ``Python Wheel``) and for ``Python Version`` + select ``Any (ie. pure Python)``. Make sure to also select and upload a PGP signature for each file (``PGP signature (.asc)`` field). Once all the files have been uploaded, the page should look similar to the -screenshot bellow. +screenshot below. .. image:: _static/images/pypi_files_page.png :width: 700px From 8d93bf1383ff1001e3f4155009a784ba179cc0f8 Mon Sep 17 00:00:00 2001 From: Lior Goikhburg Date: Fri, 20 Jun 2014 18:24:55 +0400 Subject: [PATCH 098/315] Node filters for ec2 driver Closes #320 Signed-off-by: Tomaz Muraus --- CHANGES.rst | 5 +++++ libcloud/compute/drivers/ec2.py | 15 ++++++++++++++- 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index d5ccdc532e..c4df822e4b 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -19,6 +19,11 @@ Compute (LIBCLOUD-572, GITHUB-307) [Lior Goikhburg] +- Allow user to filter which nodes are returned by passing ``ex_filters`` + argument to the ``list_nodes`` method in the EC2 driver. 
+ (LIBCLOUD-580, GITHUB-320) + [Lior Goikhburg] + Changes with Apache Libcloud 0.15.0 ----------------------------------- diff --git a/libcloud/compute/drivers/ec2.py b/libcloud/compute/drivers/ec2.py index efd8251951..a8124bf468 100644 --- a/libcloud/compute/drivers/ec2.py +++ b/libcloud/compute/drivers/ec2.py @@ -1879,7 +1879,7 @@ class BaseEC2NodeDriver(NodeDriver): 'terminated': NodeState.TERMINATED } - def list_nodes(self, ex_node_ids=None): + def list_nodes(self, ex_node_ids=None, ex_filters=None): """ List all nodes @@ -1890,21 +1890,34 @@ def list_nodes(self, ex_node_ids=None): :param ex_node_ids: List of ``node.id`` :type ex_node_ids: ``list`` of ``str`` + :param ex_filters: The filters so that the response includes + information for only certain nodes. + :type ex_filters: ``dict`` + :rtype: ``list`` of :class:`Node` """ + params = {'Action': 'DescribeInstances'} + if ex_node_ids: params.update(self._pathlist('InstanceId', ex_node_ids)) + + if ex_filters: + params.update(self._build_filters(ex_filters)) + elem = self.connection.request(self.path, params=params).object + nodes = [] for rs in findall(element=elem, xpath='reservationSet/item', namespace=NAMESPACE): nodes += self._to_nodes(rs, 'instancesSet/item') nodes_elastic_ips_mappings = self.ex_describe_addresses(nodes) + for node in nodes: ips = nodes_elastic_ips_mappings[node.id] node.public_ips.extend(ips) + return nodes def list_sizes(self, location=None): From 13c38debe1b7d9554f224c67086a033a3d2b34c9 Mon Sep 17 00:00:00 2001 From: Lior Goikhburg Date: Mon, 23 Jun 2014 17:33:44 +0400 Subject: [PATCH 099/315] use _build_filters function instead of specifying request params manually Closes #323 Signed-off-by: Tomaz Muraus --- libcloud/compute/drivers/ec2.py | 34 ++++++++++++++++----------------- 1 file changed, 16 insertions(+), 18 deletions(-) diff --git a/libcloud/compute/drivers/ec2.py b/libcloud/compute/drivers/ec2.py index a8124bf468..628ee63bd4 100644 --- a/libcloud/compute/drivers/ec2.py +++ 
b/libcloud/compute/drivers/ec2.py @@ -2015,10 +2015,9 @@ def list_volumes(self, node=None): 'Action': 'DescribeVolumes', } if node: - params.update({ - 'Filter.1.Name': 'attachment.instance-id', - 'Filter.1.Value': node.id, - }) + filters = {'attachment.instance-id': node.id} + params.update(self._build_filters(filters)) + response = self.connection.request(self.path, params=params).object volumes = [self._to_volume(el) for el in response.findall( fixxpath(xpath='volumeSet/item', namespace=NAMESPACE)) @@ -3157,12 +3156,11 @@ def ex_list_availability_zones(self, only_available=True): """ params = {'Action': 'DescribeAvailabilityZones'} + filters = {'region-name': self.region_name} if only_available: - params.update({'Filter.0.Name': 'state'}) - params.update({'Filter.0.Value.0': 'available'}) + filters['state'] = 'available' - params.update({'Filter.1.Name': 'region-name'}) - params.update({'Filter.1.Value.0': self.region_name}) + params.update(self._build_filters(filters)) result = self.connection.request(self.path, params=params.copy()).object @@ -3197,12 +3195,14 @@ def ex_describe_tags(self, resource): :return: dict Node tags :rtype: ``dict`` """ - params = {'Action': 'DescribeTags', - 'Filter.0.Name': 'resource-id', - 'Filter.0.Value.0': resource.id, - 'Filter.1.Name': 'resource-type', - 'Filter.1.Value.0': 'instance', - } + params = {'Action': 'DescribeTags'} + + filters = { + 'resource-id': resource.id, + 'resource-type': 'instance' + } + + params.update(self._build_filters(filters)) result = self.connection.request(self.path, params=params).object @@ -5047,10 +5047,8 @@ def _add_instance_filter(self, params, node): """ Add instance filter to the provided params dictionary. 
""" - params.update({ - 'Filter.0.Name': 'instance-id', - 'Filter.0.Value.0': node.id - }) + filters = {'instance-id': node.id} + params.update(self._build_filters(filters)) return params From 9979467611edde4ffdfe92ebb8d4c0f2168bb669 Mon Sep 17 00:00:00 2001 From: Roeland Kuipers Date: Thu, 26 Jun 2014 18:50:03 +0200 Subject: [PATCH 100/315] Add network_association_id to ex_list_public_ips and CloudstackAddress object Closes #327 Signed-off-by: Sebastien Goasguen --- libcloud/compute/drivers/cloudstack.py | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/libcloud/compute/drivers/cloudstack.py b/libcloud/compute/drivers/cloudstack.py index 921b68ef50..d4c7173f63 100644 --- a/libcloud/compute/drivers/cloudstack.py +++ b/libcloud/compute/drivers/cloudstack.py @@ -339,12 +339,24 @@ def ex_stop(self): class CloudStackAddress(object): """ A public IP address. + + :param id: UUID of the Public IP + :type id: ``str`` + + :param address: The public IP address + :type address: ``str`` + + :param associated_network_id: The ID of the network where this address + has been associated with + :type associated_network_id: ``str`` """ - def __init__(self, id, address, driver): + def __init__(self, id, address, driver, associated_network_id=None,): + self.id = id self.address = address self.driver = driver + self.associated_network_id = associated_network_id def release(self): self.driver.ex_release_public_ip(address=self) @@ -1351,7 +1363,10 @@ def ex_list_public_ips(self): return ips for ip in res['publicipaddress']: - ips.append(CloudStackAddress(ip['id'], ip['ipaddress'], self)) + ips.append(CloudStackAddress(ip['id'], + ip['ipaddress'], + self, + ip['associatednetworkid'])) return ips def ex_allocate_public_ip(self, location=None): From b9f9254a3e3bcb687288082137a1244876c247f2 Mon Sep 17 00:00:00 2001 From: Sebastien Goasguen Date: Fri, 27 Jun 2014 13:38:10 +0200 Subject: [PATCH 101/315] Update CHANGES file with GITHUB-327 --- CHANGES.rst | 4 
++++ 1 file changed, 4 insertions(+) diff --git a/CHANGES.rst b/CHANGES.rst index c4df822e4b..902463fca0 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -7,6 +7,10 @@ Changes with Apache Libcloud in development Compute ~~~~~~~ +- Add network_association_id to ex_list_public_ips and CloudstackAddress object + (GITHUB-327) + [Roeland Kuipers] + - Allow user to limit a list of subnets which are returned by passing ``subnet_ids`` and ``filters`` argument to the ``ex_list_subnets`` method in the EC2 driver. From 7a31c7b2971ce43ae0b66c1eb31bbc3255744d35 Mon Sep 17 00:00:00 2001 From: Marcus Devich Date: Sun, 29 Jun 2014 12:28:32 +0200 Subject: [PATCH 102/315] Fix docstrings, add ex_admin_pass argument to create_node in the Openstack driver. Closes #315 Signed-off-by: Tomaz Muraus --- CHANGES.rst | 5 +++++ .../generate_provider_feature_matrix_table.py | 1 + libcloud/compute/drivers/abiquo.py | 4 ++-- libcloud/compute/drivers/brightbox.py | 17 ++++++++++------- libcloud/compute/drivers/cloudsigma.py | 8 ++++---- libcloud/compute/drivers/ec2.py | 8 ++++---- libcloud/compute/drivers/gogrid.py | 2 +- libcloud/compute/drivers/libvirt_driver.py | 1 + libcloud/compute/drivers/opennebula.py | 2 +- libcloud/compute/drivers/openstack.py | 16 +++++++++++----- 10 files changed, 40 insertions(+), 24 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index c4df822e4b..82a6e671c3 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -24,6 +24,11 @@ Compute (LIBCLOUD-580, GITHUB-320) [Lior Goikhburg] +- Allow user to specify admin password by passing ``ex_admin_pass`` argument + to the ``create_node`` method in the Openstack driver. 
+ (GITHUB-315) + [Marcus Devich] + Changes with Apache Libcloud 0.15.0 ----------------------------------- diff --git a/contrib/generate_provider_feature_matrix_table.py b/contrib/generate_provider_feature_matrix_table.py index 54ab7fedd7..54ef3e615a 100755 --- a/contrib/generate_provider_feature_matrix_table.py +++ b/contrib/generate_provider_feature_matrix_table.py @@ -13,6 +13,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import with_statement import os import sys diff --git a/libcloud/compute/drivers/abiquo.py b/libcloud/compute/drivers/abiquo.py index edf2f25484..cccd5cfa68 100644 --- a/libcloud/compute/drivers/abiquo.py +++ b/libcloud/compute/drivers/abiquo.py @@ -274,8 +274,8 @@ def ex_create_group(self, name, location=None): You can specify the location as well. - :param name: name of the group (required) - :type name: ``str`` + :param group: name of the group (required) + :type group: ``str`` :param location: location were to create the group :type location: :class:`NodeLocation` diff --git a/libcloud/compute/drivers/brightbox.py b/libcloud/compute/drivers/brightbox.py index a7b5ccde6f..8798332ff0 100644 --- a/libcloud/compute/drivers/brightbox.py +++ b/libcloud/compute/drivers/brightbox.py @@ -121,12 +121,15 @@ def _to_size(self, data): ) def _to_location(self, data): - return NodeLocation( - id=data['id'], - name=data['handle'], - country='GB', - driver=self - ) + if data: + return NodeLocation( + id=data['id'], + name=data['handle'], + country='GB', + driver=self + ) + else: + return None def _post(self, path, data={}): headers = {'Content-Type': 'application/json'} @@ -183,7 +186,7 @@ def list_nodes(self): data = self.connection.request('/%s/servers' % self.api_version).object return list(map(self._to_node, data)) - def list_images(self): + def list_images(self, location=None): data = 
self.connection.request('/%s/images' % self.api_version).object return list(map(self._to_image, data)) diff --git a/libcloud/compute/drivers/cloudsigma.py b/libcloud/compute/drivers/cloudsigma.py index 55e472f1e1..29b65cb21b 100644 --- a/libcloud/compute/drivers/cloudsigma.py +++ b/libcloud/compute/drivers/cloudsigma.py @@ -1611,8 +1611,8 @@ def ex_get_tag(self, tag_id): """ Retrieve a single tag. - :param id: ID of the tag to retrieve. - :type id: ``str`` + :param tag_id: ID of the tag to retrieve. + :type tag_id: ``str`` :rtype: ``list`` of :class:`.CloudSigmaTag` objects """ @@ -1674,8 +1674,8 @@ def ex_tag_resources(self, resources, tag): """ Associate tag with the provided resources. - :param resource: Resources to associate a tag with. - :type resource: ``list`` of :class:`libcloud.compute.base.Node` or + :param resources: Resources to associate a tag with. + :type resources: ``list`` of :class:`libcloud.compute.base.Node` or :class:`.CloudSigmaDrive` :param tag: Tag to associate with the resources. diff --git a/libcloud/compute/drivers/ec2.py b/libcloud/compute/drivers/ec2.py index 628ee63bd4..c17f072a8c 100644 --- a/libcloud/compute/drivers/ec2.py +++ b/libcloud/compute/drivers/ec2.py @@ -3490,8 +3490,8 @@ def ex_create_network_interface(self, subnet, name=None, """ Create a network interface within a VPC subnet. - :param node: EC2NetworkSubnet instance - :type node: :class:`EC2NetworkSubnet` + :param subnet: EC2NetworkSubnet instance + :type subnet: :class:`EC2NetworkSubnet` :param name: Optional name of the interface :type name: ``str`` @@ -3630,8 +3630,8 @@ def ex_modify_image_attribute(self, image, attributes): """ Modify image attributes. 
- :param node: Node instance - :type node: :class:`Node` + :param image: NodeImage instance + :type image: :class:`NodeImage` :param attributes: Dictionary with node attributes :type attributes: ``dict`` diff --git a/libcloud/compute/drivers/gogrid.py b/libcloud/compute/drivers/gogrid.py index cc242527b2..6c73e46eb9 100644 --- a/libcloud/compute/drivers/gogrid.py +++ b/libcloud/compute/drivers/gogrid.py @@ -403,7 +403,7 @@ def ex_edit_image(self, **kwargs): :type ex_description: ``str`` :keyword name: name of the image - :type name ``str`` + :type name: ``str`` :rtype: :class:`NodeImage` """ diff --git a/libcloud/compute/drivers/libvirt_driver.py b/libcloud/compute/drivers/libvirt_driver.py index 9b7188503f..3618ac4368 100644 --- a/libcloud/compute/drivers/libvirt_driver.py +++ b/libcloud/compute/drivers/libvirt_driver.py @@ -12,6 +12,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import with_statement import re import os diff --git a/libcloud/compute/drivers/opennebula.py b/libcloud/compute/drivers/opennebula.py index c29833e722..c295cd49f5 100644 --- a/libcloud/compute/drivers/opennebula.py +++ b/libcloud/compute/drivers/opennebula.py @@ -415,9 +415,9 @@ def ex_list_networks(self, location=None): """ List virtual networks on a provider. - :type location: :class:`NodeLocation` :param location: Location from which to request a list of virtual networks. (optional) + :type location: :class:`NodeLocation` :return: List of virtual networks available to be connected to a compute node. 
diff --git a/libcloud/compute/drivers/openstack.py b/libcloud/compute/drivers/openstack.py index c573febcbe..c09cee6e0c 100644 --- a/libcloud/compute/drivers/openstack.py +++ b/libcloud/compute/drivers/openstack.py @@ -1083,7 +1083,7 @@ def __init__(self, id, tenant_id, name, description, driver, rules=None, :type description: ``str`` :keyword rules: Rules associated with this group. - :type description: ``list`` of + :type rules: ``list`` of :class:`OpenStackSecurityGroupRule` :keyword extra: Extra attributes associated with this group. @@ -1259,6 +1259,9 @@ def create_node(self, **kwargs): Can be either ``AUTO`` or ``MANUAL``. :type ex_disk_config: ``str`` + :keyword ex_admin_pass: The root password for the node + :type ex_admin_pass: ``str`` + :keyword ex_availability_zone: Nova availability zone for the node :type ex_availability_zone: ``str`` """ @@ -1345,6 +1348,9 @@ def _create_args_to_params(self, node, **kwargs): if 'ex_disk_config' in kwargs: server_params['OS-DCF:diskConfig'] = kwargs['ex_disk_config'] + if 'ex_admin_pass' in kwargs: + server_params['adminPass'] = kwargs['ex_admin_pass'] + if 'networks' in kwargs: networks = kwargs['networks'] networks = [{'uuid': network.id} for network in networks] @@ -1653,8 +1659,8 @@ def ex_create_snapshot(self, volume, name, description=None, force=False): """ Create a snapshot based off of a volume. - :param node: volume - :type node: :class:`StorageVolume` + :param volume: volume + :type volume: :class:`StorageVolume` :keyword name: New name for the volume snapshot :type name: ``str`` @@ -1680,8 +1686,8 @@ def ex_delete_snapshot(self, snapshot): """ Delete a VolumeSnapshot - :param node: snapshot - :type node: :class:`VolumeSnapshot` + :param snapshot: snapshot + :type snapshot: :class:`VolumeSnapshot` :rtype: ``bool`` """ From 0f369d715b96f7934e5d91a15e7fd59402444fa2 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Sun, 29 Jun 2014 15:24:50 +0200 Subject: [PATCH 103/315] Remove extra comma, re-order changes. 
--- CHANGES.rst | 8 ++++---- libcloud/compute/drivers/cloudstack.py | 3 +-- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index 67c73a02d6..84e2b91813 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -7,10 +7,6 @@ Changes with Apache Libcloud in development Compute ~~~~~~~ -- Add network_association_id to ex_list_public_ips and CloudstackAddress object - (GITHUB-327) - [Roeland Kuipers] - - Allow user to limit a list of subnets which are returned by passing ``subnet_ids`` and ``filters`` argument to the ``ex_list_subnets`` method in the EC2 driver. @@ -28,6 +24,10 @@ Compute (LIBCLOUD-580, GITHUB-320) [Lior Goikhburg] +- Add network_association_id to ex_list_public_ips and CloudstackAddress object + (GITHUB-327) + [Roeland Kuipers] + - Allow user to specify admin password by passing ``ex_admin_pass`` argument to the ``create_node`` method in the Openstack driver. (GITHUB-315) diff --git a/libcloud/compute/drivers/cloudstack.py b/libcloud/compute/drivers/cloudstack.py index d4c7173f63..089e3f08b3 100644 --- a/libcloud/compute/drivers/cloudstack.py +++ b/libcloud/compute/drivers/cloudstack.py @@ -351,8 +351,7 @@ class CloudStackAddress(object): :type associated_network_id: ``str`` """ - def __init__(self, id, address, driver, associated_network_id=None,): - + def __init__(self, id, address, driver, associated_network_id=None): self.id = id self.address = address self.driver = driver From 907cdcee71d34b876923eb9a586426d02068db1e Mon Sep 17 00:00:00 2001 From: David Gay Date: Sat, 28 Jun 2014 14:41:17 -0400 Subject: [PATCH 104/315] Fix a possible race condition in deploy_node which would occur if node is online and can be accessed via SSH, but the SSH key we want to use hasn't been installed yet. 
Closes #331 Signed-off-by: Tomaz Muraus --- CHANGES.rst | 9 +++++++++ libcloud/compute/base.py | 14 ++++++++++---- 2 files changed, 19 insertions(+), 4 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index 84e2b91813..bc28e97598 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -33,6 +33,15 @@ Compute (GITHUB-315) [Marcus Devich] +- Fix a possible race condition in deploy_node which would occur if node + is online and can be accessed via SSH, but the SSH key we want to use hasn't + been installed yet. + + Previously, we would immediately throw if we can connect, but the SSH key + hasn't been installed yet. + (GITHUB-331) + [David Gay] + Changes with Apache Libcloud 0.15.0 ----------------------------------- diff --git a/libcloud/compute/base.py b/libcloud/compute/base.py index 142c2cbc82..99523ac459 100644 --- a/libcloud/compute/base.py +++ b/libcloud/compute/base.py @@ -35,10 +35,17 @@ from libcloud.common.base import ConnectionKey from libcloud.common.base import BaseDriver from libcloud.common.types import LibcloudError +from libcloud.compute.ssh import have_paramiko from libcloud.utils.networking import is_private_subnet from libcloud.utils.networking import is_valid_ip_address +if have_paramiko: + from paramiko.ssh_exception import SSHException + SSH_TIMEOUT_EXCEPTION_CLASSES = (SSHException, IOError, socket.gaierror, + socket.error) +else: + SSH_TIMEOUT_EXCEPTION_CLASSES = (IOError, socket.gaierror, socket.error) # How long to wait for the node to come online after creating it NODE_ONLINE_WAIT_TIMEOUT = 10 * 60 @@ -1372,9 +1379,9 @@ def _ssh_client_connect(self, ssh_client, wait_period=1.5, timeout=300): while time.time() < end: try: ssh_client.connect() - except (IOError, socket.gaierror, socket.error): - # Retry if a connection is refused or timeout - # occurred + except SSH_TIMEOUT_EXCEPTION_CLASSES: + # Retry if a connection is refused, timeout occurred, + # or the connection fails due to failed authentication. 
ssh_client.close() time.sleep(wait_period) continue @@ -1401,7 +1408,6 @@ def _connect_and_run_deployment_script(self, task, node, ssh_hostname, key_files=ssh_key_file, timeout=ssh_timeout) - # Connect to the SSH server running on the node ssh_client = self._ssh_client_connect(ssh_client=ssh_client, timeout=timeout) From 68c73b5ca61d8c8f5531b561a3ffd060da743187 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Wed, 2 Jul 2014 15:28:01 +0200 Subject: [PATCH 105/315] Include a better message in the exception which is thrown when a request in the Rackspace driver ends up in an ERROR state. --- CHANGES.rst | 7 +++++++ libcloud/dns/drivers/rackspace.py | 11 ++++++++++- 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index bc28e97598..0ffeb7cb6c 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -42,6 +42,13 @@ Compute (GITHUB-331) [David Gay] +DNS +~~~ + +- Include a better message in the exception which is thrown when a request + in the Rackspace driver ends up in an ``ERROR`` state. 
+ [Tomaz Muraus] + Changes with Apache Libcloud 0.15.0 ----------------------------------- diff --git a/libcloud/dns/drivers/rackspace.py b/libcloud/dns/drivers/rackspace.py index 55f834f7f9..d71f4c5975 100644 --- a/libcloud/dns/drivers/rackspace.py +++ b/libcloud/dns/drivers/rackspace.py @@ -93,7 +93,15 @@ def get_poll_request_kwargs(self, response, context, request_kwargs): def has_completed(self, response): status = response.object['status'] if status == 'ERROR': - raise LibcloudError(response.object['error']['message'], + data = response.object['error'] + + if 'code' and 'message' in data: + message = '%s - %s (%s)' % (data['code'], data['message'], + data['details']) + else: + message = data['message'] + + raise LibcloudError(message, driver=self.driver) return status == 'COMPLETED' @@ -318,6 +326,7 @@ def update_record(self, record, name=None, type=None, data=None, updated_record = get_new_obj(obj=record, klass=Record, attributes={'type': type, 'data': data, + 'driver': self, 'extra': merged}) return updated_record From 0314a1a4c5f8d08b1084acbc561d31961e361ee1 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Wed, 2 Jul 2014 19:59:16 +0200 Subject: [PATCH 106/315] Fix _ssh_client_connect method to correctly propagate an exception if a key file doesn't exist instead of silently swallowing and ignoring it. --- CHANGES.rst | 5 +++++ libcloud/compute/base.py | 8 ++++++++ 2 files changed, 13 insertions(+) diff --git a/CHANGES.rst b/CHANGES.rst index 0ffeb7cb6c..ead8b2676b 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -42,6 +42,11 @@ Compute (GITHUB-331) [David Gay] +- Propagate an exception in ``deploy_node`` method if user specified an invalid + path to the private key file. Previously this exception was silently swallowed + and ignored. 
+ [Tomaz Muraus] + DNS ~~~ diff --git a/libcloud/compute/base.py b/libcloud/compute/base.py index 99523ac459..f9a9e31d60 100644 --- a/libcloud/compute/base.py +++ b/libcloud/compute/base.py @@ -1380,6 +1380,14 @@ def _ssh_client_connect(self, ssh_client, wait_period=1.5, timeout=300): try: ssh_client.connect() except SSH_TIMEOUT_EXCEPTION_CLASSES: + e = sys.exc_info()[1] + message = str(e).lower() + expected_msg = 'no such file or directory' + + if isinstance(e, IOError) and expected_msg in message: + # Propagate (key) file doesn't exist errors + raise e + # Retry if a connection is refused, timeout occurred, # or the connection fails due to failed authentication. ssh_client.close() From 6df77f6e2d3b4df9c3b23938dc65951d7f0a0059 Mon Sep 17 00:00:00 2001 From: Sebastien Goasguen Date: Wed, 2 Jul 2014 23:41:07 +0200 Subject: [PATCH 107/315] Bump version to 0.15.1 --- CHANGES.rst | 4 ++-- libcloud/__init__.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index ead8b2676b..d7a78612a3 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,8 +1,8 @@ Changelog ========= -Changes with Apache Libcloud in development -------------------------------------------- +Changes with Apache Libcloud 0.15.1 +----------------------------------- Compute ~~~~~~~ diff --git a/libcloud/__init__.py b/libcloud/__init__.py index 8e969a600f..3ce79f0b20 100644 --- a/libcloud/__init__.py +++ b/libcloud/__init__.py @@ -20,7 +20,7 @@ """ __all__ = ['__version__', 'enable_debug'] -__version__ = '0.15.0' +__version__ = '0.15.1' import os From b392c964457e8c3948b615e0036f823ef420bc1a Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Sun, 6 Jul 2014 21:48:01 +0200 Subject: [PATCH 108/315] Modify deploy_node so it's future proof and will also work correctly when authentication bug (https://github.com/paramiko/paramiko/pull/351) has been fixed in paramiko. 
--- libcloud/compute/base.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/libcloud/compute/base.py b/libcloud/compute/base.py index f9a9e31d60..a585128b8d 100644 --- a/libcloud/compute/base.py +++ b/libcloud/compute/base.py @@ -42,8 +42,10 @@ if have_paramiko: from paramiko.ssh_exception import SSHException - SSH_TIMEOUT_EXCEPTION_CLASSES = (SSHException, IOError, socket.gaierror, - socket.error) + from paramiko.ssh_exception import AuthenticationException + + SSH_TIMEOUT_EXCEPTION_CLASSES = (AuthenticationException, SSHException, + IOError, socket.gaierror, socket.error) else: SSH_TIMEOUT_EXCEPTION_CLASSES = (IOError, socket.gaierror, socket.error) From 3d2a38db9e15e67e8ea8639782446960bf968ee4 Mon Sep 17 00:00:00 2001 From: Eric Johnson Date: Tue, 1 Jul 2014 09:03:17 -0700 Subject: [PATCH 109/315] Updating Compute Engine image project list Signed-off-by: Tomaz Muraus --- libcloud/compute/drivers/gce.py | 9 ++++++++- libcloud/test/compute/test_gce.py | 4 ++-- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/libcloud/compute/drivers/gce.py b/libcloud/compute/drivers/gce.py index a1cf75e16b..c216c64179 100644 --- a/libcloud/compute/drivers/gce.py +++ b/libcloud/compute/drivers/gce.py @@ -2305,10 +2305,17 @@ def ex_get_image(self, partial_name): return self._to_node_image(response.object) image = self._match_images(None, partial_name) if not image: - if partial_name.startswith('debian'): + if (partial_name.startswith('debian') or + partial_name.startswith('backports')): image = self._match_images('debian-cloud', partial_name) elif partial_name.startswith('centos'): image = self._match_images('centos-cloud', partial_name) + elif partial_name.startswith('sles'): + image = self._match_images('suse-cloud', partial_name) + elif partial_name.startswith('rhel'): + image = self._match_images('rhel-cloud', partial_name) + elif partial_name.startswith('windows'): + image = self._match_images('windows-cloud', partial_name) elif 
partial_name.startswith('container-vm'): image = self._match_images('google-containers', partial_name) diff --git a/libcloud/test/compute/test_gce.py b/libcloud/test/compute/test_gce.py index 80110d0176..faf6303601 100644 --- a/libcloud/test/compute/test_gce.py +++ b/libcloud/test/compute/test_gce.py @@ -100,8 +100,8 @@ def test_match_images(self): project = 'debian-cloud' image = self.driver._match_images(project, 'debian-7') self.assertEqual(image.name, 'debian-7-wheezy-v20131120') - image = self.driver._match_images(project, 'debian-6') - self.assertEqual(image.name, 'debian-6-squeeze-v20130926') + image = self.driver._match_images(project, 'backports') + self.assertEqual(image.name, 'backports-debian-7-wheezy-v20131127') def test_ex_list_addresses(self): address_list = self.driver.ex_list_addresses() From 784e7beb05df95fe6d85c9a0720c8234670229d4 Mon Sep 17 00:00:00 2001 From: Atsushi Sasaki Date: Sat, 5 Jul 2014 19:55:22 +0900 Subject: [PATCH 110/315] LIBCLOUD-593: CloudStack driver's create_node fails to join network_id when network_id is int Closes #336 Signed-off-by: Sebastien Goasguen --- libcloud/compute/drivers/cloudstack.py | 2 +- .../deployVirtualMachine_deploynetworks.json | 1 + .../listNetworks_deploynetworks.json | 1 + .../listServiceOfferings_deploynetworks.json | 1 + .../listTemplates_deploynetworks.json | 1 + .../cloudstack/listZones_deploynetworks.json | 1 + .../queryAsyncJobResult_1149366.json | 1 + libcloud/test/compute/test_cloudstack.py | 20 +++++++++++++++++++ 8 files changed, 27 insertions(+), 1 deletion(-) create mode 100644 libcloud/test/compute/fixtures/cloudstack/deployVirtualMachine_deploynetworks.json create mode 100644 libcloud/test/compute/fixtures/cloudstack/listNetworks_deploynetworks.json create mode 100644 libcloud/test/compute/fixtures/cloudstack/listServiceOfferings_deploynetworks.json create mode 100644 libcloud/test/compute/fixtures/cloudstack/listTemplates_deploynetworks.json create mode 100644 
libcloud/test/compute/fixtures/cloudstack/listZones_deploynetworks.json create mode 100644 libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_1149366.json diff --git a/libcloud/compute/drivers/cloudstack.py b/libcloud/compute/drivers/cloudstack.py index 089e3f08b3..6f60afed9d 100644 --- a/libcloud/compute/drivers/cloudstack.py +++ b/libcloud/compute/drivers/cloudstack.py @@ -801,7 +801,7 @@ def _create_args_to_params(self, node, **kwargs): server_params['zoneid'] = self.list_locations()[0].id if networks: - networks = ','.join([network.id for network in networks]) + networks = ','.join([str(network.id) for network in networks]) server_params['networkids'] = networks if project: diff --git a/libcloud/test/compute/fixtures/cloudstack/deployVirtualMachine_deploynetworks.json b/libcloud/test/compute/fixtures/cloudstack/deployVirtualMachine_deploynetworks.json new file mode 100644 index 0000000000..166e89bfc0 --- /dev/null +++ b/libcloud/test/compute/fixtures/cloudstack/deployVirtualMachine_deploynetworks.json @@ -0,0 +1 @@ +{ "deployvirtualmachineresponse" : {"jobid":1149366,"id":65385} } diff --git a/libcloud/test/compute/fixtures/cloudstack/listNetworks_deploynetworks.json b/libcloud/test/compute/fixtures/cloudstack/listNetworks_deploynetworks.json new file mode 100644 index 0000000000..ad0bb8095a --- /dev/null +++ b/libcloud/test/compute/fixtures/cloudstack/listNetworks_deploynetworks.json @@ -0,0 +1 @@ +{"listnetworksresponse": { "count": 3, "network": [ { "broadcastdomaintype": "Vlan", "broadcasturi": "vlan://1002", "displaytext": "network:192.168.2.0/24", "dns1": "8.8.8.8", "dns2": "8.8.8.8", "domain": "ROOT", "domainid": 1623, "endip": "192.168.2.233", "gateway": "192.168.2.254", "id": 1823, "isdefault": false, "isshared": true, "issystem": false, "name": "ROOT", "netmask": "255.255.255.0", "networkdomain": "cs1cloud.internal", "networkofferingavailability": "Optional", "networkofferingdisplaytext": "Direct", "networkofferingid": 7, 
"networkofferingname": "DefaultDirectNetworkOffering", "related": 1823, "securitygroupenabled": false, "service": [ { "name": "UserData" }, { "capability": [ { "name": "AllowDnsSuffixModification", "value": "true" } ], "name": "Dns" }, { "capability": [ { "name": "SupportedProtocols", "value": "tcp, udp" }, { "name": "SupportedLbAlgorithms", "value": "roundrobin,leastconn,source" } ], "name": "Lb" }, { "name": "Dhcp" } ], "startip": "192.168.2.1", "state": "Setup", "traffictype": "Guest", "type": "Direct", "vlan": "1002", "zoneid": 1 }, { "account": "testuser", "broadcastdomaintype": "Vlan", "broadcasturi": "vlan://2909", "displaytext": "testuser-network", "dns1": "8.8.8.8", "dns2": "8.8.8.8", "domain": "ROOT", "domainid": 1623, "id": 1547, "isdefault": true, "isshared": false, "issystem": false, "name": "testuser-network", "networkdomain": "cs586cloud.internal", "networkofferingavailability": "Optional", "networkofferingdisplaytext": "Virtual Vlan", "networkofferingid": 6, "networkofferingname": "DefaultVirtualizedNetworkOffering", "related": 1547, "securitygroupenabled": false, "service": [ { "name": "UserData" }, { "capability": [ { "name": "AllowDnsSuffixModification", "value": "true" } ], "name": "Dns" }, { "capability": [ { "name": "SupportedProtocols", "value": "tcp, udp" }, { "name": "SupportedLbAlgorithms", "value": "roundrobin,leastconn,source" } ], "name": "Lb" }, { "name": "Gateway" }, { "name": "Dhcp" }, { "capability": [ { "name": "SupportedVpnTypes", "value": "pptp,l2tp,ipsec" } ], "name": "Vpn" }, { "capability": [ { "name": "MultipleIps", "value": "true" }, { "name": "SupportedSourceNatTypes", "value": "per account" }, { "name": "SupportedProtocols", "value": "tcp,udp,icmp" }, { "name": "TrafficStatistics", "value": "per public ip" }, { "name": "PortForwarding", "value": "true" }, { "name": "StaticNat", "value": "true" } ], "name": "Firewall" } ], "state": "Implemented", "traffictype": "Guest", "type": "Virtual", "zoneid": 2 }, { "account": 
"testuser", "broadcastdomaintype": "Vlan", "broadcasturi": "vlan://3564", "displaytext": "testuser-network", "dns1": "8.8.8.8", "dns2": "8.8.8.8", "domain": "ROOT", "domainid": 1623, "id": 1374, "isdefault": true, "isshared": false, "issystem": false, "name": "testuser-network", "networkdomain": "cs586cloud.internal", "networkofferingavailability": "Optional", "networkofferingdisplaytext": "Virtual Vlan", "networkofferingid": 6, "networkofferingname": "DefaultVirtualizedNetworkOffering", "related": 1374, "securitygroupenabled": false, "service": [ { "name": "UserData" }, { "capability": [ { "name": "AllowDnsSuffixModification", "value": "true" } ], "name": "Dns" }, { "capability": [ { "name": "SupportedProtocols", "value": "tcp, udp" }, { "name": "SupportedLbAlgorithms", "value": "roundrobin,leastconn,source" } ], "name": "Lb" }, { "name": "Gateway" }, { "name": "Dhcp" }, { "capability": [ { "name": "SupportedVpnTypes", "value": "pptp,l2tp,ipsec" } ], "name": "Vpn" }, { "capability": [ { "name": "MultipleIps", "value": "true" }, { "name": "SupportedSourceNatTypes", "value": "per account" }, { "name": "SupportedProtocols", "value": "tcp,udp,icmp" }, { "name": "TrafficStatistics", "value": "per public ip" }, { "name": "PortForwarding", "value": "true" }, { "name": "StaticNat", "value": "true" } ], "name": "Firewall" } ], "state": "Implemented", "traffictype": "Guest", "type": "Virtual", "zoneid": 1 } ] } } diff --git a/libcloud/test/compute/fixtures/cloudstack/listServiceOfferings_deploynetworks.json b/libcloud/test/compute/fixtures/cloudstack/listServiceOfferings_deploynetworks.json new file mode 100644 index 0000000000..944a6f2919 --- /dev/null +++ b/libcloud/test/compute/fixtures/cloudstack/listServiceOfferings_deploynetworks.json @@ -0,0 +1 @@ +{ "listserviceofferingsresponse" : {"count": 3, "serviceoffering": [ {"cpunumber": 2, "cpuspeed": 1600, "created": "2011-09-09T13:14:19+0900", "defaultuse": false, "displaytext": "M4", "id": 21, "issystem": false, 
"limitcpuuse": true, "memory": 4096, "name": "M4", "networkrate": 500, "offerha": false, "storagetype": "shared", "tags": "Shared.auto"}, {"cpunumber": 1, "cpuspeed": 800, "created": "2011-09-09T13:17:52+0900", "defaultuse": false, "displaytext": "XS", "id": 24, "issystem": false, "limitcpuuse": true, "memory": 512, "name": "XS", "networkrate": 100, "offerha": false, "storagetype": "shared", "tags": "Shared.auto"}, {"cpunumber": 1, "cpuspeed": 1600, "created": "2011-09-14T22:51:23+0900", "defaultuse": false, "displaytext": "S2", "id": 30, "issystem": false, "limitcpuuse": true, "memory": 2048, "name": "S2", "networkrate": 500, "offerha": false, "storagetype": "shared", "tags": "Shared.auto"}]}} diff --git a/libcloud/test/compute/fixtures/cloudstack/listTemplates_deploynetworks.json b/libcloud/test/compute/fixtures/cloudstack/listTemplates_deploynetworks.json new file mode 100644 index 0000000000..a9c778c738 --- /dev/null +++ b/libcloud/test/compute/fixtures/cloudstack/listTemplates_deploynetworks.json @@ -0,0 +1 @@ +{ "listtemplatesresponse" : {"count": 2, "template": [ {"account": "admin", "created": "2014-06-06T20:08:49+0900", "crossZones": false, "displaytext": "CentOS 6.5", "domain": "ROOT", "domainid": 1, "format": "OVA", "hypervisor": "VMware", "id": 8028, "isextractable": true, "isfeatured": true, "ispublic": true, "isready": true, "name": "CentOS 6.5 64-bit", "ostypeid": 112, "ostypename": "CentOS 5.5 (64-bit)", "passwordenabled": true, "size": 16106127360, "status": "Download Complete", "templatetype": "USER", "zoneid": 2, "zonename": "zone2"}, {"account": "admin", "created": "2014-06-06T20:08:48+0900", "crossZones": false, "displaytext": "CentOS 6.5", "domain": "ROOT", "domainid": 1, "format": "OVA", "hypervisor": "VMware", "id": 8028, "isextractable": true, "isfeatured": true, "ispublic": true, "isready": true, "name": "CentOS 6.5 64-bit", "ostypeid": 112, "ostypename": "CentOS 5.5 (64-bit)", "passwordenabled": true, "size": 16106127360, "status": 
"Download Complete", "templatetype": "USER", "zoneid": 1, "zonename": "zone1"} ]} } diff --git a/libcloud/test/compute/fixtures/cloudstack/listZones_deploynetworks.json b/libcloud/test/compute/fixtures/cloudstack/listZones_deploynetworks.json new file mode 100644 index 0000000000..b072749410 --- /dev/null +++ b/libcloud/test/compute/fixtures/cloudstack/listZones_deploynetworks.json @@ -0,0 +1 @@ +{ "listzonesresponse" : { "count":2 ,"zone" : [ {"id":1,"name":"zone1","networktype":"Advanced","securitygroupsenabled":false,"allocationstate":"Enabled","zonetoken":"6a3bfa26-67cd-3ff2-867e-20e86b211bb1","dhcpprovider":"VirtualRouter"}, {"id":2,"name":"zone2","networktype":"Advanced","securitygroupsenabled":false,"allocationstate":"Enabled","zonetoken":"8366e550-542d-373d-88e3-ca7c90bc8e6c","dhcpprovider":"VirtualRouter"} ] } } diff --git a/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_1149366.json b/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_1149366.json new file mode 100644 index 0000000000..f3854d325c --- /dev/null +++ b/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_1149366.json @@ -0,0 +1 @@ +{ "queryasyncjobresultresponse" : {"jobid": 1149366, "jobprocstatus": 0, "jobresult": {"virtualmachine": {"account": "testuser", "cpunumber": 2, "cpuspeed": 1600, "created": "2014-07-06T16:40:39+0900", "displayname": "deploynetworks", "domain": "ROOT", "domainid": 1623, "guestosid": 112, "haenable": false, "hypervisor": "VMware", "id": 65385, "memory": 4096, "name": "deploynetworks", "nic": [{"gateway": "10.1.0.1", "id": 87320, "ipaddress": "10.1.0.29", "isdefault": true, "macaddress": "02:00:78:4a:01:9e", "netmask": "255.255.252.0", "networkid": 1374, "traffictype": "Guest", "type": "Virtual"}, {"gateway": "192.168.2.254", "id": 87319, "ipaddress": "192.168.2.55", "isdefault": false, "macaddress": "06:e6:50:00:70:0e", "netmask": "255.255.255.0", "networkid": 1823, "traffictype": "Guest", "type": "Direct"}], "password": 
"password", "passwordenabled": true, "rootdeviceid": 0, "rootdevicetype": "VMFS", "securitygroup": [], "serviceofferingid": 21, "serviceofferingname": "M4", "state": "Running" , "templatedisplaytext": "CentOS 6.5", "templateid": 8028, "templatename": "CentOS 6.5 64-bit", "zoneid": 1, "zonename": "zone1"}}, "jobresultcode": 0, "jobresulttype": "object", "jobstatus": 1} } diff --git a/libcloud/test/compute/test_cloudstack.py b/libcloud/test/compute/test_cloudstack.py index 091d98b0ee..b8bb279860 100644 --- a/libcloud/test/compute/test_cloudstack.py +++ b/libcloud/test/compute/test_cloudstack.py @@ -107,6 +107,26 @@ def test_create_node_default_location_success(self): self.assertEqual(node.private_ips, ['192.168.1.2']) self.assertEqual(node.extra['zone_id'], default_location.id) + def test_create_node_ex_networks(self): + CloudStackMockHttp.fixture_tag = 'deploynetworks' + size = self.driver.list_sizes()[0] + image = self.driver.list_images()[0] + location = self.driver.list_locations()[0] + + networks = [nw for nw in self.driver.ex_list_networks() + if str(nw.zoneid) == str(location.id)] + + node = self.driver.create_node(name='deploynetworks', + location=location, + image=image, + size=size, + networks=networks) + self.assertEqual(node.name, 'deploynetworks') + self.assertEqual(node.extra['size_id'], size.id) + self.assertEqual(node.extra['zone_id'], location.id) + self.assertEqual(node.extra['image_id'], image.id) + self.assertEqual(len(node.private_ips), 2) + def test_create_node_ex_security_groups(self): size = self.driver.list_sizes()[0] image = self.driver.list_images()[0] From fde24958140d45e2bc9cc7d985124646fd6bf2c3 Mon Sep 17 00:00:00 2001 From: Atsushi Sasaki Date: Sun, 6 Jul 2014 14:32:20 +0900 Subject: [PATCH 111/315] LIBCLOUD-594: Add firewall rule support for CloudStack driver Closes #337 Signed-off-by: Sebastien Goasguen --- libcloud/compute/drivers/cloudstack.py | 150 ++++++++++++++++++ .../createFirewallRule_default.json | 1 + 
.../createFirewallRule_firewallicmp.json | 1 + .../deleteFirewallRule_default.json | 1 + .../cloudstack/listFirewallRules_default.json | 1 + .../listFirewallRules_firewallicmp.json | 1 + .../listPublicIpAddresses_firewallicmp.json | 1 + .../queryAsyncJobResult_1149341.json | 1 + .../queryAsyncJobResult_1149342.json | 1 + .../queryAsyncJobResult_1149343.json | 1 + libcloud/test/compute/test_cloudstack.py | 66 ++++++++ 11 files changed, 225 insertions(+) create mode 100644 libcloud/test/compute/fixtures/cloudstack/createFirewallRule_default.json create mode 100644 libcloud/test/compute/fixtures/cloudstack/createFirewallRule_firewallicmp.json create mode 100644 libcloud/test/compute/fixtures/cloudstack/deleteFirewallRule_default.json create mode 100644 libcloud/test/compute/fixtures/cloudstack/listFirewallRules_default.json create mode 100644 libcloud/test/compute/fixtures/cloudstack/listFirewallRules_firewallicmp.json create mode 100644 libcloud/test/compute/fixtures/cloudstack/listPublicIpAddresses_firewallicmp.json create mode 100644 libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_1149341.json create mode 100644 libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_1149342.json create mode 100644 libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_1149343.json diff --git a/libcloud/compute/drivers/cloudstack.py b/libcloud/compute/drivers/cloudstack.py index 6f60afed9d..81b8cb4be9 100644 --- a/libcloud/compute/drivers/cloudstack.py +++ b/libcloud/compute/drivers/cloudstack.py @@ -367,6 +367,61 @@ def __eq__(self, other): return self.__class__ is other.__class__ and self.id == other.id +class CloudStackFirewallRule(object): + """ + A firewall rule. + """ + + def __init__(self, id, address, cidr_list, protocol, + icmp_code=None, icmp_type=None, + start_port=None, end_port=None): + + """ + A Firewall rule. + + @note: This is a non-standard extension API, and only works for + CloudStack. 
+ + :param id: Firewall Rule ID + :type id: ``int`` + + :param address: External IP address + :type address: :class:`CloudStackAddress` + + :param cidr_list: cidr list + :type cidr_list: ``str`` + + :param protocol: TCP/IP Protocol (TCP, UDP) + :type protocol: ``str`` + + :param icmp_code: Error code for this icmp message + :type icmp_code: ``int`` + + :param icmp_type: Type of the icmp message being sent + :type icmp_type: ``int`` + + :param start_port: start of port range + :type start_port: ``int`` + + :param end_port: end of port range + :type end_port: ``int`` + + :rtype: :class:`CloudStackFirewallRule` + """ + + self.id = id + self.address = address + self.cidr_list = cidr_list + self.protocol = protocol + self.icmp_code = icmp_code + self.icmp_type = icmp_type + self.start_port = start_port + self.end_port = end_port + + def __eq__(self, other): + return self.__class__ is other.__class__ and self.id == other.id + + class CloudStackIPForwardingRule(object): """ A NAT/firewall forwarding rule. 
@@ -1401,6 +1456,101 @@ def ex_release_public_ip(self, address): method='GET') return res['success'] + def ex_list_firewall_rules(self): + """ + Lists all Firewall Rules + + :rtype: ``list`` of :class:`CloudStackFirewallRule` + """ + rules = [] + result = self._sync_request(command='listFirewallRules', + method='GET') + if result != {}: + public_ips = self.ex_list_public_ips() + for rule in result['firewallrule']: + addr = [a for a in public_ips if + a.address == rule['ipaddress']] + + rules.append(CloudStackFirewallRule(rule['id'], + addr[0], + rule['cidrlist'], + rule['protocol'], + rule.get('icmpcode'), + rule.get('icmptype'), + rule.get('startport'), + rule.get('endport'))) + + return rules + + def ex_create_firewall_rule(self, address, cidr_list, protocol, + icmp_code=None, icmp_type=None, + start_port=None, end_port=None): + """ + Creates a Firewalle Rule + + :param address: External IP address + :type address: :class:`CloudStackAddress` + + :param cidr_list: cidr list + :type cidr_list: ``str`` + + :param protocol: TCP/IP Protocol (TCP, UDP) + :type protocol: ``str`` + + :param icmp_code: Error code for this icmp message + :type icmp_code: ``int`` + + :param icmp_type: Type of the icmp message being sent + :type icmp_type: ``int`` + + :param start_port: start of port range + :type start_port: ``int`` + + :param end_port: end of port range + :type end_port: ``int`` + + :rtype: :class:`CloudStackFirewallRule` + """ + args = { + 'ipaddressid': address.id, + 'cidrlist': cidr_list, + 'protocol': protocol + } + if icmp_code is not None: + args['icmpcode'] = int(icmp_code) + if icmp_type is not None: + args['icmptype'] = int(icmp_type) + if start_port is not None: + args['startport'] = int(start_port) + if end_port is not None: + args['endport'] = int(end_port) + result = self._async_request(command='createFirewallRule', + params=args, + method='GET') + rule = CloudStackFirewallRule(result['firewallrule']['id'], + address, + cidr_list, + protocol, + icmp_code, + 
icmp_type, + start_port, + end_port) + return rule + + def ex_delete_firewall_rule(self, firewall_rule): + """ + Remove a Firewall rule. + + :param firewall_rule: Firewall rule which should be used + :type firewall_rule: :class:`CloudStackFirewallRule` + + :rtype: ``bool`` + """ + res = self._async_request(command='deleteFirewallRule', + params={'id': firewall_rule.id}, + method='GET') + return res['success'] + def ex_list_port_forwarding_rules(self): """ Lists all Port Forwarding Rules diff --git a/libcloud/test/compute/fixtures/cloudstack/createFirewallRule_default.json b/libcloud/test/compute/fixtures/cloudstack/createFirewallRule_default.json new file mode 100644 index 0000000000..aa9dadda18 --- /dev/null +++ b/libcloud/test/compute/fixtures/cloudstack/createFirewallRule_default.json @@ -0,0 +1 @@ +{ "createfirewallruleresponse" : {"jobid":1149341,"id":172465} } diff --git a/libcloud/test/compute/fixtures/cloudstack/createFirewallRule_firewallicmp.json b/libcloud/test/compute/fixtures/cloudstack/createFirewallRule_firewallicmp.json new file mode 100644 index 0000000000..6f7a63e7c8 --- /dev/null +++ b/libcloud/test/compute/fixtures/cloudstack/createFirewallRule_firewallicmp.json @@ -0,0 +1 @@ +{ "createfirewallruleresponse" : {"jobid":1149343,"id":172466} } diff --git a/libcloud/test/compute/fixtures/cloudstack/deleteFirewallRule_default.json b/libcloud/test/compute/fixtures/cloudstack/deleteFirewallRule_default.json new file mode 100644 index 0000000000..719d96bd81 --- /dev/null +++ b/libcloud/test/compute/fixtures/cloudstack/deleteFirewallRule_default.json @@ -0,0 +1 @@ +{ "deletefirewallruleresponse" : {"jobid":1149342} } diff --git a/libcloud/test/compute/fixtures/cloudstack/listFirewallRules_default.json b/libcloud/test/compute/fixtures/cloudstack/listFirewallRules_default.json new file mode 100644 index 0000000000..7834d9dbb2 --- /dev/null +++ b/libcloud/test/compute/fixtures/cloudstack/listFirewallRules_default.json @@ -0,0 +1 @@ 
+{"listfirewallrulesresponse": {"count": 1, "firewallrule": [ { "cidrlist": "192.168.0.0/16", "id": 171597, "ipaddress": "1.1.1.116", "protocol": "tcp", "startport": "33", "endport": "34", "ipaddressid": 34000, "state": "Active" }]}} diff --git a/libcloud/test/compute/fixtures/cloudstack/listFirewallRules_firewallicmp.json b/libcloud/test/compute/fixtures/cloudstack/listFirewallRules_firewallicmp.json new file mode 100644 index 0000000000..36ec78bfae --- /dev/null +++ b/libcloud/test/compute/fixtures/cloudstack/listFirewallRules_firewallicmp.json @@ -0,0 +1 @@ +{"listfirewallrulesresponse": {"count": 1, "firewallrule": [ { "cidrlist": "192.168.0.0/16", "icmpcode": 0, "icmptype": 8, "id": 52394, "ipaddress": "1.1.1.116", "ipaddressid": 34000, "protocol": "icmp", "state": "Active" } ]}} diff --git a/libcloud/test/compute/fixtures/cloudstack/listPublicIpAddresses_firewallicmp.json b/libcloud/test/compute/fixtures/cloudstack/listPublicIpAddresses_firewallicmp.json new file mode 100644 index 0000000000..ca80e129a1 --- /dev/null +++ b/libcloud/test/compute/fixtures/cloudstack/listPublicIpAddresses_firewallicmp.json @@ -0,0 +1 @@ +{ "listpublicipaddressesresponse" : { "publicipaddress" : [ {"id":34000,"ipaddress":"1.1.1.116","virtualmachineid":"2600","allocated":"2011-06-23T05:20:39+0000","zoneid":1,"zonename":"Sydney","issourcenat":false,"account":"fakeaccount","domainid":801,"domain":"AA000062-libcloud-dev","forvirtualnetwork":true,"isstaticnat":false,"associatednetworkid":860,"networkid":200,"state":"Allocated"}, {"id":33999,"ipaddress":"1.1.1.48","allocated":"2011-06-23T05:20:34+0000","zoneid":1,"zonename":"Sydney","issourcenat":false,"account":"fakeaccount","domainid":801,"domain":"AA000062-libcloud-dev","forvirtualnetwork":true,"isstaticnat":false,"associatednetworkid":860,"networkid":200,"state":"Allocated"}, 
{"id":33998,"ipaddress":"1.1.1.47","allocated":"2011-06-23T05:20:30+0000","zoneid":1,"zonename":"Sydney","issourcenat":false,"account":"fakeaccount","domainid":801,"domain":"AA000062-libcloud-dev","forvirtualnetwork":true,"isstaticnat":false,"associatednetworkid":860,"networkid":200,"state":"Allocated"}, {"id":33970,"ipaddress":"1.1.1.19","allocated":"2011-06-20T04:08:34+0000","zoneid":1,"zonename":"Sydney","issourcenat":true,"account":"fakeaccount","domainid":801,"domain":"AA000062-libcloud-dev","forvirtualnetwork":true,"isstaticnat":false,"associatednetworkid":860,"networkid":200,"state":"Allocated"} ] } } diff --git a/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_1149341.json b/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_1149341.json new file mode 100644 index 0000000000..607bdb774f --- /dev/null +++ b/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_1149341.json @@ -0,0 +1 @@ +{ "queryasyncjobresultresponse" : {"jobid":1149341,"jobstatus":1,"jobprocstatus":0,"jobresultcode":0,"jobresulttype":"object","jobresult":{"firewallrule":{"id":172465,"protocol":"tcp","startport":"33","endport":"34","ipaddressid":34000,"ipaddress":"1.1.1.116","state":"Active","cidrlist":"192.168.0.0/16"}}} } diff --git a/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_1149342.json b/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_1149342.json new file mode 100644 index 0000000000..9b35f8a7ea --- /dev/null +++ b/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_1149342.json @@ -0,0 +1 @@ +{ "queryasyncjobresultresponse" : {"jobid":1149342,"jobstatus":1,"jobprocstatus":0,"jobresultcode":0,"jobresulttype":"object","jobresult":{"success":true}} } diff --git a/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_1149343.json b/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_1149343.json new file mode 100644 index 0000000000..5c1a7f8662 --- /dev/null +++ 
b/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_1149343.json @@ -0,0 +1 @@ +{ "queryasyncjobresultresponse" : {"jobid":1149343,"jobstatus":1,"jobprocstatus":0,"jobresultcode":0,"jobresulttype":"object","jobresult":{"firewallrule":{"id":172466,"protocol":"icmp","ipaddressid":34000,"ipaddress":"1.1.1.116","state":"Active","cidrlist":"192.168.0.0/16","icmptype":8,"icmpcode":0}}} } diff --git a/libcloud/test/compute/test_cloudstack.py b/libcloud/test/compute/test_cloudstack.py index b8bb279860..aade378bda 100644 --- a/libcloud/test/compute/test_cloudstack.py +++ b/libcloud/test/compute/test_cloudstack.py @@ -546,6 +546,72 @@ def test_ex_create_port_forwarding_rule(self): self.assertEqual(rule.private_port, private_port) self.assertEqual(rule.private_end_port, private_end_port) + def test_ex_list_firewall_rules(self): + rules = self.driver.ex_list_firewall_rules() + self.assertEqual(len(rules), 1) + rule = rules[0] + self.assertEqual(rule.address.address, '1.1.1.116') + self.assertEqual(rule.protocol, 'tcp') + self.assertEqual(rule.cidr_list, '192.168.0.0/16') + self.assertIsNone(rule.icmp_code) + self.assertIsNone(rule.icmp_type) + self.assertEqual(rule.start_port, '33') + self.assertEqual(rule.end_port, '34') + + def test_ex_list_firewall_rules_icmp(self): + CloudStackMockHttp.fixture_tag = 'firewallicmp' + rules = self.driver.ex_list_firewall_rules() + self.assertEqual(len(rules), 1) + rule = rules[0] + self.assertEqual(rule.address.address, '1.1.1.116') + self.assertEqual(rule.protocol, 'icmp') + self.assertEqual(rule.cidr_list, '192.168.0.0/16') + self.assertEqual(rule.icmp_code, 0) + self.assertEqual(rule.icmp_type, 8) + self.assertIsNone(rule.start_port) + self.assertIsNone(rule.end_port) + + def test_ex_delete_firewall_rule(self): + rules = self.driver.ex_list_firewall_rules() + res = self.driver.ex_delete_firewall_rule(rules[0]) + self.assertTrue(res) + + def test_ex_create_firewall_rule(self): + address = self.driver.ex_list_public_ips()[0] + 
cidr_list = '192.168.0.0/16' + protocol = 'TCP' + start_port = 33 + end_port = 34 + rule = self.driver.ex_create_firewall_rule(address, + cidr_list, + protocol, + start_port=start_port, + end_port=end_port) + self.assertEqual(rule.address, address) + self.assertEqual(rule.protocol, protocol) + self.assertIsNone(rule.icmp_code) + self.assertIsNone(rule.icmp_type) + self.assertEqual(rule.start_port, start_port) + self.assertEqual(rule.end_port, end_port) + + def test_ex_create_firewall_rule_icmp(self): + address = self.driver.ex_list_public_ips()[0] + cidr_list = '192.168.0.0/16' + protocol = 'icmp' + icmp_code = 0 + icmp_type = 8 + rule = self.driver.ex_create_firewall_rule(address, + cidr_list, + protocol, + icmp_code=icmp_code, + icmp_type=icmp_type) + self.assertEqual(rule.address, address) + self.assertEqual(rule.protocol, protocol) + self.assertEqual(rule.icmp_code, 0) + self.assertEqual(rule.icmp_type, 8) + self.assertIsNone(rule.start_port) + self.assertIsNone(rule.end_port) + def test_ex_list_port_forwarding_rules(self): rules = self.driver.ex_list_port_forwarding_rules() self.assertEqual(len(rules), 1) From fa709b14a06c8419e2110c99be3ac0dd376553f6 Mon Sep 17 00:00:00 2001 From: Sebastien Goasguen Date: Sun, 13 Jul 2014 11:14:26 -0400 Subject: [PATCH 112/315] Updating CHANGES file to add fixes and features from the development trunk --- CHANGES.rst | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/CHANGES.rst b/CHANGES.rst index d7a78612a3..5ffdb692e2 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,6 +1,21 @@ Changelog ========= +Changes with Apache Libcloud in development +------------------------------------------- + +Compute +~~~~~~~ + +- Fix to join networks properly in ``deploy_node`` in the CloudStack + driver. + (LIBCLOUD-593, GITUHB-336) + [Atsushi Sasaki] + +- Create ``CloudStackFirewallRule`` class and corresponding methods. 
+ (LIBCLOUD-594, GITHUB-337) + [Atsushi Sasaki] + Changes with Apache Libcloud 0.15.1 ----------------------------------- From fd008a02329d0222a96c55f3337dbc410ee23f9b Mon Sep 17 00:00:00 2001 From: Sebastien Goasguen Date: Mon, 14 Jul 2014 10:56:28 +0200 Subject: [PATCH 113/315] Improve release instructions --- docs/committer_guide.rst | 70 ++++++++++++++++++++++++++-------------- 1 file changed, 46 insertions(+), 24 deletions(-) diff --git a/docs/committer_guide.rst b/docs/committer_guide.rst index 3287868e2d..5f49c56d34 100644 --- a/docs/committer_guide.rst +++ b/docs/committer_guide.rst @@ -83,12 +83,15 @@ preparing a release. * Make sure tests pass on all the supported Python versions (``tox``) * Make sure ``CHANGES`` file is up to date * Make sure ``__version__`` string in ``libcloud/__init__.py`` is up to date +* Remove the ``tox`` directory with ``rm -rf .tox`` +* Remove the _secrets_ file with ``rm test/secrets.py`` 2. Creating release artifacts ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We have a script that runs the required setup.py commands and then hashes -and signs the files. To run it: +and signs the files. You will need the latest version of ``pip`` and the ``wheel`` +package. To run it: .. sourcecode:: bash @@ -102,7 +105,33 @@ This should result in a set of ``apache-libcloud-${VERSION}.{tar.bz2,tar.gz,zip}{,asc,md5,sha1}`` files that are suitable to be uploaded for a release. -3. Uploading release artifacts to Apache servers +Copy the artifacts in another directory, unpack one of them and test it with ``tox``. + +3. Tagging a release +~~~~~~~~~~~~~~~~~~~~ + +Tag the tentative release with a ``-tentative`` postfix. + +.. sourcecode:: bash + + git tag + +For example: + +.. sourcecode:: bash + + git tag v0.15.0-tentative 105b9610835f99704996d861d613c5a9a8b3f8b1 + +4. 
Upload the release artifacts and start a [VOTE] thread +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Upload all release artifacts including the ``whl`` files to your people.apache.org +space. Then start a [VOTE] thread on the dev@libcloud.apache.org mailing list. + +Once the vote has passed tag the release with a new tag, removing the ``-tentative`` postfix. +Upload the release artifacts to Apache servers and Pypi. + +5. Uploading release artifacts to Apache servers ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * Add release artifacts to the dist SVN repository at @@ -117,20 +146,7 @@ are suitable to be uploaded for a release. are automatically archived and available at https://dist.apache.org/repos/dist/release/libcloud/. -4. Tagging a release -~~~~~~~~~~~~~~~~~~~~ - -.. sourcecode:: bash - - git tag - -For example: - -.. sourcecode:: bash - - git tag v0.13.0 105b9610835f99704996d861d613c5a9a8b3f8b1 - -5. Publishing package to PyPi +6. Publishing package to PyPi ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ **For consistency and security reasons packages are always uploaded to PyPi @@ -156,7 +172,7 @@ screenshot below. :width: 700px :align: center -6. Verifying the release artifact check sums +7. Verifying the release artifact check sums ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To verify that nothing went wrong doing the release process, run the @@ -177,21 +193,25 @@ For example ./dist/verify_checksums.sh apache-libcloud-0.13.2 -7. Updating doap_libcloud.rdf file +8. Updating doap_libcloud.rdf file ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Add information about the new release to the ``doap_libcloud.rdf`` file in the root of the main code repository. -8. Updating website +9. 
Updating website ~~~~~~~~~~~~~~~~~~~ -* Update "News" page (``content/news.mdtext`` file) -* Update "Downloads" page (``content/downloads.mdtext`` file) -* Update "Get it" section in the sidebar (``templates/blocks/other.html`` file) +Check out the website using SVN: ``svn co https://svn.apache.org/repos/asf/libcloud/site/trunk`` + +* Upate the front page (``source/index.html`` file) +* Update "Downloads" page (``source/downloads.md`` file) +* Add a blog entry in the ``_posts`` directory. -9. Sending announcements -~~~~~~~~~~~~~~~~~~~~~~~~ +Build the site locally and make sure everything is correct. Check the ``README.md`` file. + +10. Sending announcements +~~~~~~~~~~~~~~~~~~~~~~~~~ * Send a release announcement to {dev,users}@libcloud.apache.org. If it's a major release also send it to announce@apache.org. @@ -235,6 +255,8 @@ Body:: Release artifacts can be found at . + KEYS file can found at https://dist.apache.org/repos/dist/release/libcloud/KEYS + Please test the release and post your votes. +/- 1 From 2c90e9e5d843b10ff935d8394dd08b7d6ec9596a Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Sun, 20 Jul 2014 11:33:10 +0200 Subject: [PATCH 114/315] Add more badges to the readme. --- README.rst | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/README.rst b/README.rst index 4a0aa76ae6..ba1e5c6270 100644 --- a/README.rst +++ b/README.rst @@ -1,15 +1,24 @@ Apache Libcloud - a unified interface into the cloud ==================================================== -.. image:: https://badge.fury.io/py/apache-libcloud.png - :target: http://badge.fury.io/py/apache-libcloud +.. image:: https://pypip.in/version/apache-libcloud/badge.png + :target: https://pypi.python.org/pypi/apache-libcloud/ .. image:: https://pypip.in/d/apache-libcloud/badge.png - :target: https://crate.io/packages/apache-libcloud/ + :target: https://pypi.python.org/pypi/apache-libcloud/ .. 
image:: https://secure.travis-ci.org/apache/libcloud.png?branch=trunk :target: http://travis-ci.org/apache/libcloud +.. image:: https://pypip.in/py_versions/apache-libcloud/badge.png + :target: https://pypi.python.org/pypi/apache-libcloud/ + +.. image:: https://pypip.in/wheel/apache-libcloud/badge.png + :target: https://pypi.python.org/pypi/apache-libcloud/ + +.. image:: https://pypip.in/license/apache-libcloud/badge.png + :target: https://github.com/apache/libcloud/blob/trunk/LICENSE + Apache Libcloud is a Python library which hides differences between different cloud provider APIs and allows you to manage different cloud resources through a unified and easy to use API. From d12def1d8f428465d7f8cbad6abfc5286f6a8d9b Mon Sep 17 00:00:00 2001 From: Eric Johnson Date: Thu, 17 Jul 2014 16:26:41 +0000 Subject: [PATCH 115/315] adding coreos and opensuse to GCE images Closes #340 Signed-off-by: Tomaz Muraus --- libcloud/compute/drivers/gce.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/libcloud/compute/drivers/gce.py b/libcloud/compute/drivers/gce.py index c216c64179..28bcb485dd 100644 --- a/libcloud/compute/drivers/gce.py +++ b/libcloud/compute/drivers/gce.py @@ -2318,6 +2318,10 @@ def ex_get_image(self, partial_name): image = self._match_images('windows-cloud', partial_name) elif partial_name.startswith('container-vm'): image = self._match_images('google-containers', partial_name) + elif partial_name.startswith('coreos'): + image = self._match_images('coreos-cloud', partial_name) + elif partial_name.startswith('opensuse'): + image = self._match_images('opensuse-cloud', partial_name) return image From a26da95ef1d532ba2379eafc707c358922d462e3 Mon Sep 17 00:00:00 2001 From: Lee Verberne Date: Thu, 10 Jul 2014 13:31:34 -0700 Subject: [PATCH 116/315] Support GCE LB session affinity Add support for sessionAffinity to the GCE Load Balancer driver. sessionAffinity has the effect of causing clients to "stick" to a particular backend server. 
Also, trivial documentation fixes: * resolve ambiguous type specification in create_balancer * fix typo in development doc Closes #341 Signed-off-by: Tomaz Muraus --- CHANGES.rst | 9 +++++++++ docs/development.rst | 4 ++-- libcloud/compute/drivers/gce.py | 9 ++++++++- libcloud/loadbalancer/drivers/gce.py | 13 ++++++++++--- ...entral1_targetPools_lctargetpool_sticky.json | 9 +++++++++ libcloud/test/compute/test_gce.py | 17 +++++++++++++++++ 6 files changed, 55 insertions(+), 6 deletions(-) create mode 100644 libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_lctargetpool_sticky.json diff --git a/CHANGES.rst b/CHANGES.rst index 5ffdb692e2..17999cc9c8 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -16,6 +16,15 @@ Compute (LIBCLOUD-594, GITHUB-337) [Atsushi Sasaki] + +Loadbalancer +~~~~~~~~~~~~ + +- Allow user to specify session affinity algorithm in the GCE driver by passing + ``ex_session_affinity`` argument to the ``create_balancer`` method. + (LIBCLOUD-595, GITHUB-341) + [Lee Verberne, Eric Johnson] + Changes with Apache Libcloud 0.15.1 ----------------------------------- diff --git a/docs/development.rst b/docs/development.rst index 3bd9ea4c6c..8899df9d49 100644 --- a/docs/development.rst +++ b/docs/development.rst @@ -212,8 +212,8 @@ Bad (please avoid): description = kwargs.get('description', None) public_ips = kwargs.get('public_ips', None) -5. When returning a dictionary, document it's structure -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +5. 
When returning a dictionary, document its structure +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Dynamic nature of Python can be very nice and useful, but if (ab)use it in a wrong way it can also make it hard for the API consumer to understand what is diff --git a/libcloud/compute/drivers/gce.py b/libcloud/compute/drivers/gce.py index 28bcb485dd..62164fd502 100644 --- a/libcloud/compute/drivers/gce.py +++ b/libcloud/compute/drivers/gce.py @@ -1350,7 +1350,7 @@ def ex_create_multiple_nodes(self, base_name, size, image, number, return node_list def ex_create_targetpool(self, name, region=None, healthchecks=None, - nodes=None): + nodes=None, session_affinity=None): """ Create a target pool. @@ -1367,6 +1367,10 @@ def ex_create_targetpool(self, name, region=None, healthchecks=None, :keyword nodes: Optional list of nodes to attach to the pool :type nodes: ``list`` of ``str`` or :class:`Node` + :keyword session_affinity: Optional algorithm to use for session + affinity. + :type session_affinity: ``str`` + :return: Target Pool object :rtype: :class:`GCETargetPool` """ @@ -1391,6 +1395,8 @@ def ex_create_targetpool(self, name, region=None, healthchecks=None, else: node_list = [n.extra['selfLink'] for n in nodes] targetpool_data['instances'] = node_list + if session_affinity: + targetpool_data['sessionAffinity'] = session_affinity request = '/regions/%s/targetPools' % (region.name) @@ -3316,6 +3322,7 @@ def _to_targetpool(self, targetpool): extra = {} extra['selfLink'] = targetpool.get('selfLink') extra['description'] = targetpool.get('description') + extra['sessionAffinity'] = targetpool.get('sessionAffinity') region = self.ex_get_region(targetpool['region']) healthcheck_list = [self.ex_get_healthcheck(h.split('/')[-1]) for h in targetpool.get('healthChecks', [])] diff --git a/libcloud/loadbalancer/drivers/gce.py b/libcloud/loadbalancer/drivers/gce.py index 09a32fdb6d..e1fa29956d 100644 --- a/libcloud/loadbalancer/drivers/gce.py +++ 
b/libcloud/loadbalancer/drivers/gce.py @@ -89,7 +89,8 @@ def list_balancers(self, ex_region=None): return balancers def create_balancer(self, name, port, protocol, algorithm, members, - ex_region=None, ex_healthchecks=None, ex_address=None): + ex_region=None, ex_healthchecks=None, ex_address=None, + ex_session_affinity=None): """ Create a new load balancer instance. @@ -126,12 +127,18 @@ def create_balancer(self, name, port, protocol, algorithm, members, :keyword ex_healthchecks: Optional list of healthcheck objects or names to add to the load balancer. :type ex_healthchecks: ``list`` of :class:`GCEHealthCheck` or - ``str`` + ``list`` of ``str`` :keyword ex_address: Optional static address object to be assigned to the load balancer. :type ex_address: C{GCEAddress} + :keyword ex_session_affinity: Optional algorithm to use for session + affinity. This will modify the hashing + algorithm such that a client will tend + to stick to a particular Member. + :type ex_session_affinity: ``str`` + :return: LoadBalancer object :rtype: :class:`LoadBalancer` """ @@ -154,7 +161,7 @@ def create_balancer(self, name, port, protocol, algorithm, members, tp_name = '%s-tp' % name targetpool = self.gce.ex_create_targetpool( tp_name, region=ex_region, healthchecks=ex_healthchecks, - nodes=node_list) + nodes=node_list, session_affinity=ex_session_affinity) # Create the Forwarding rule, but if it fails, delete the target pool. 
try: diff --git a/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_lctargetpool_sticky.json b/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_lctargetpool_sticky.json new file mode 100644 index 0000000000..caf3f87379 --- /dev/null +++ b/libcloud/test/compute/fixtures/gce/regions_us-central1_targetPools_lctargetpool_sticky.json @@ -0,0 +1,9 @@ +{ + "creationTimestamp": "2014-07-11T15:52:43.720-07:00", + "id": "13598380121688918358", + "kind": "compute#targetPool", + "name": "lctargetpool-sticky", + "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", + "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/targetPools/lctargetpool-sticky", + "sessionAffinity": "CLIENT_IP_PROTO" +} \ No newline at end of file diff --git a/libcloud/test/compute/test_gce.py b/libcloud/test/compute/test_gce.py index faf6303601..963675bb23 100644 --- a/libcloud/test/compute/test_gce.py +++ b/libcloud/test/compute/test_gce.py @@ -327,6 +327,17 @@ def test_ex_create_targetpool(self): self.assertEqual(len(targetpool.nodes), len(nodes)) self.assertEqual(targetpool.region.name, region) + def test_ex_create_targetpool_session_affinity(self): + targetpool_name = 'lctargetpool-sticky' + region = 'us-central1' + session_affinity = 'CLIENT_IP_PROTO' + targetpool = self.driver.ex_create_targetpool( + targetpool_name, region=region, + session_affinity=session_affinity) + self.assertEqual(targetpool.name, targetpool_name) + self.assertEqual(targetpool.extra.get('sessionAffinity'), + session_affinity) + def test_ex_create_volume_snapshot(self): snapshot_name = 'lcsnapshot' volume = self.driver.ex_get_volume('lcdisk') @@ -1038,6 +1049,12 @@ def _regions_us_central1_targetPools_lctargetpool(self, method, url, 'regions_us-central1_targetPools_lctargetpool.json') return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + def 
_regions_us_central1_targetPools_lctargetpool_sticky(self, method, url, + body, headers): + body = self.fixtures.load( + 'regions_us-central1_targetPools_lctargetpool_sticky.json') + return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK]) + def _regions_us_central1_targetPools_libcloud_lb_demo_lb_tp( self, method, url, body, headers): body = self.fixtures.load( From e3edf278c3310bd93f5d4e463bcc0373a38d5401 Mon Sep 17 00:00:00 2001 From: Eric Johnson Date: Wed, 16 Jul 2014 15:40:12 +0000 Subject: [PATCH 117/315] Add SSD support to GCE Compute driver Closes #339 Signed-off-by: Tomaz Muraus --- CHANGES.rst | 3 + demos/gce_demo.py | 6 +- libcloud/compute/drivers/gce.py | 71 +- libcloud/compute/drivers/gce.py.orig | 3353 +++++++++++++++++ .../gce/global_snapshots_lcsnapshot.json | 4 +- .../gce/zones_us-central1-a_disks.json | 8 +- .../gce/zones_us-central1-a_disks_lcdisk.json | 6 +- libcloud/test/compute/test_gce.py | 17 +- 8 files changed, 3445 insertions(+), 23 deletions(-) create mode 100644 libcloud/compute/drivers/gce.py.orig diff --git a/CHANGES.rst b/CHANGES.rst index 17999cc9c8..bef4c9d103 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -16,6 +16,9 @@ Compute (LIBCLOUD-594, GITHUB-337) [Atsushi Sasaki] +- Add support for SSD disks to Google Compute driver. 
+ (GITHUB-339) + [Eric Johnson] Loadbalancer ~~~~~~~~~~~~ diff --git a/demos/gce_demo.py b/demos/gce_demo.py index 5f45c68525..3535689d63 100755 --- a/demos/gce_demo.py +++ b/demos/gce_demo.py @@ -190,16 +190,16 @@ def main(): # == Create Node with disk auto-created == if MAX_NODES > 1: - print('Creating Node with auto-created disk:') + print('Creating Node with auto-created SSD:') name = '%s-np-node' % DEMO_BASE_NAME node_1 = gce.create_node(name, 'n1-standard-1', 'debian-7', - ex_tags=['libcloud']) + ex_tags=['libcloud'], ex_disk_type='pd-ssd') print(' Node %s created' % name) # == Create, and attach a disk == print('Creating a new disk:') disk_name = '%s-attach-disk' % DEMO_BASE_NAME - volume = gce.create_volume(1, disk_name) + volume = gce.create_volume(10, disk_name) if volume.attach(node_1): print (' Attached %s to %s' % (volume.name, node_1.name)) diff --git a/libcloud/compute/drivers/gce.py b/libcloud/compute/drivers/gce.py index 62164fd502..a6f3c9f157 100644 --- a/libcloud/compute/drivers/gce.py +++ b/libcloud/compute/drivers/gce.py @@ -1140,7 +1140,8 @@ def ex_create_network(self, name, cidr): def create_node(self, name, size, image, location=None, ex_network='default', ex_tags=None, ex_metadata=None, ex_boot_disk=None, use_existing_disk=True, - external_ip='ephemeral'): + external_ip='ephemeral', ex_disk_type='pd-standard', + ex_disk_auto_delete=True): """ Create a new node and return a node object for the node. @@ -1182,6 +1183,15 @@ def create_node(self, name, size, image, location=None, a GCEAddress object should be passed in. :type external_ip: :class:`GCEAddress` or ``str`` or None + :keyword ex_disk_type: Specify a pd-standard (default) disk or pd-ssd + for an SSD disk. + :type ex_disk_type: ``str`` + + :keyword ex_disk_auto_delete: Indicate that the boot disk should be + deleted when the Node is deleted. Set to + True by default. + :type ex_disk_auto_delete: ``bool`` + :return: A Node object for the new node. 
:rtype: :class:`Node` """ @@ -1198,7 +1208,8 @@ def create_node(self, name, size, image, location=None, if not ex_boot_disk: ex_boot_disk = self.create_volume(None, name, location=location, image=image, - use_existing=use_existing_disk) + use_existing=use_existing_disk, + ex_disk_type=ex_disk_type) if ex_metadata is not None: ex_metadata = {"items": [{"key": k, "value": v} @@ -1207,7 +1218,9 @@ def create_node(self, name, size, image, location=None, request, node_data = self._create_node_req(name, size, image, location, ex_network, ex_tags, ex_metadata, - ex_boot_disk, external_ip) + ex_boot_disk, external_ip, + ex_disk_type, + ex_disk_auto_delete) self.connection.async_request(request, method='POST', data=node_data) return self.ex_get_node(name, location.name) @@ -1217,6 +1230,8 @@ def ex_create_multiple_nodes(self, base_name, size, image, number, ex_tags=None, ex_metadata=None, ignore_errors=True, use_existing_disk=True, poll_interval=2, external_ip='ephemeral', + ex_disk_type='pd-standard', + ex_auto_disk_delete=True, timeout=DEFAULT_TASK_COMPLETION_TIMEOUT): """ Create multiple nodes and return a list of Node objects. @@ -1273,6 +1288,15 @@ def ex_create_multiple_nodes(self, base_name, size, image, number, multiple node creation.) :type external_ip: ``str`` or None + :keyword ex_disk_type: Specify a pd-standard (default) disk or pd-ssd + for an SSD disk. + :type ex_disk_type: ``str`` + + :keyword ex_disk_auto_delete: Indicate that the boot disk should be + deleted when the Node is deleted. Set to + True by default. + :type ex_disk_auto_delete: ``bool`` + :keyword timeout: The number of seconds to wait for all nodes to be created before timing out. 
:type timeout: ``int`` @@ -1298,7 +1322,8 @@ def ex_create_multiple_nodes(self, base_name, size, image, number, 'metadata': ex_metadata, 'ignore_errors': ignore_errors, 'use_existing_disk': use_existing_disk, - 'external_ip': external_ip} + 'external_ip': external_ip, + 'ex_disk_type': ex_disk_type} # List for holding the status information for disk/node creation. status_list = [] @@ -1406,7 +1431,8 @@ def ex_create_targetpool(self, name, region=None, healthchecks=None, return self.ex_get_targetpool(name, region) def create_volume(self, size, name, location=None, snapshot=None, - image=None, use_existing=True): + image=None, use_existing=True, + ex_disk_type='pd-standard'): """ Create a volume (disk). @@ -1432,11 +1458,15 @@ def create_volume(self, size, name, location=None, snapshot=None, of attempting to create a new disk. :type use_existing: ``bool`` + :keyword ex_disk_type: Specify a pd-standard (default) disk or pd-ssd + for an SSD disk. + :type ex_disk_type: ``str`` + :return: Storage Volume object :rtype: :class:`StorageVolume` """ request, volume_data, params = self._create_vol_req( - size, name, location, snapshot, image) + size, name, location, snapshot, image, ex_disk_type) try: self.connection.async_request(request, method='POST', data=volume_data, params=params) @@ -2716,7 +2746,8 @@ def _set_zone(self, zone): def _create_node_req(self, name, size, image, location, network, tags=None, metadata=None, boot_disk=None, - external_ip='ephemeral'): + external_ip='ephemeral', ex_disk_type='pd-standard', + ex_disk_auto_delete=True): """ Returns a request and body to create a new node. This is a helper method to support both :class:`create_node` and @@ -2754,6 +2785,15 @@ def _create_node_req(self, name, size, image, location, network, a GCEAddress object should be passed in. :type external_ip: :class:`GCEAddress` or ``str`` or None + :keyword ex_disk_type: Specify a pd-standard (default) disk or pd-ssd + for an SSD disk. 
+ :type ex_disk_type: ``str`` + + :keyword ex_disk_auto_delete: Indicate that the boot disk should be + deleted when the Node is deleted. Set to + True by default. + :type ex_disk_auto_delete: ``bool`` + :return: A tuple containing a request string and a node_data dict. :rtype: ``tuple`` of ``str`` and ``dict`` """ @@ -2799,7 +2839,7 @@ def _multi_create_disk(self, status, node_attrs): :type status: ``dict`` :param node_attrs: Dictionary for holding node attribute information. - (size, image, location, etc.) + (size, image, location, ex_disk_type, etc.) :type node_attrs: ``dict`` """ disk = None @@ -2818,7 +2858,8 @@ def _multi_create_disk(self, status, node_attrs): # Or, if there is an error, mark as failed. disk_req, disk_data, disk_params = self._create_vol_req( None, status['name'], location=node_attrs['location'], - image=node_attrs['image']) + image=node_attrs['image'], + ex_disk_type=node_attrs['ex_disk_type']) try: disk_res = self.connection.request( disk_req, method='POST', data=disk_data, @@ -2926,7 +2967,7 @@ def _multi_check_node(self, status, node_attrs): node_attrs['location']) def _create_vol_req(self, size, name, location=None, snapshot=None, - image=None): + image=None, ex_disk_type='pd-standard'): """ Assemble the request/data for creating a volume. @@ -2949,6 +2990,9 @@ def _create_vol_req(self, size, name, location=None, snapshot=None, :keyword image: Image to create disk from. 
:type image: :class:`GCENodeImage` or ``str`` or ``None`` + :keyword ex_disk_type: Specify pd-standard (default) or pd-ssd + :type ex_disk_type: ``str`` + :return: Tuple containing the request string, the data dictionary and the URL parameters :rtype: ``tuple`` @@ -2976,6 +3020,12 @@ def _create_vol_req(self, size, name, location=None, snapshot=None, location = location or self.zone if not hasattr(location, 'name'): location = self.ex_get_zone(location) + if ex_disk_type.startswith('https'): + volume_data['type'] = ex_disk_type + else: + volume_data['type'] = 'https://www.googleapis.com/compute/' + volume_data['type'] += '%s/projects/%s/zones/%s/diskTypes/%s' % ( + API_VERSION, self.project, location.name, ex_disk_type) request = '/zones/%s/disks' % (location.name) return request, volume_data, params @@ -3305,6 +3355,7 @@ def _to_storage_volume(self, volume): extra['status'] = volume.get('status') extra['creationTimestamp'] = volume.get('creationTimestamp') extra['description'] = volume.get('description') + extra['type'] = volume.get('type', 'pd-standard').split('/')[-1] return StorageVolume(id=volume['id'], name=volume['name'], size=volume['sizeGb'], driver=self, extra=extra) diff --git a/libcloud/compute/drivers/gce.py.orig b/libcloud/compute/drivers/gce.py.orig new file mode 100644 index 0000000000..c216c64179 --- /dev/null +++ b/libcloud/compute/drivers/gce.py.orig @@ -0,0 +1,3353 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Module for Google Compute Engine Driver. +""" +from __future__ import with_statement + +import datetime +import time +import sys + +from libcloud.common.google import GoogleResponse +from libcloud.common.google import GoogleBaseConnection +from libcloud.common.google import GoogleBaseError +from libcloud.common.google import ResourceNotFoundError +from libcloud.common.google import ResourceExistsError + +from libcloud.compute.base import Node, NodeDriver, NodeImage, NodeLocation +from libcloud.compute.base import NodeSize, StorageVolume, VolumeSnapshot +from libcloud.compute.base import UuidMixin +from libcloud.compute.providers import Provider +from libcloud.compute.types import NodeState + +API_VERSION = 'v1' +DEFAULT_TASK_COMPLETION_TIMEOUT = 180 + + +def timestamp_to_datetime(timestamp): + """ + Return a datetime object that corresponds to the time in an RFC3339 + timestamp. 
+ + :param timestamp: RFC3339 timestamp string + :type timestamp: ``str`` + + :return: Datetime object corresponding to timestamp + :rtype: :class:`datetime.datetime` + """ + # We remove timezone offset and microseconds (Python 2.5 strptime doesn't + # support %f) + ts = datetime.datetime.strptime(timestamp[:-10], '%Y-%m-%dT%H:%M:%S') + tz_hours = int(timestamp[-5:-3]) + tz_mins = int(timestamp[-2:]) * int(timestamp[-6:-5] + '1') + tz_delta = datetime.timedelta(hours=tz_hours, minutes=tz_mins) + return ts + tz_delta + + +class GCEResponse(GoogleResponse): + pass + + +class GCEConnection(GoogleBaseConnection): + """Connection class for the GCE driver.""" + host = 'www.googleapis.com' + responseCls = GCEResponse + + def __init__(self, user_id, key, secure, auth_type=None, + credential_file=None, project=None, **kwargs): + super(GCEConnection, self).__init__(user_id, key, secure=secure, + auth_type=auth_type, + credential_file=credential_file, + **kwargs) + self.request_path = '/compute/%s/projects/%s' % (API_VERSION, + project) + + +class GCEAddress(UuidMixin): + """A GCE Static address.""" + def __init__(self, id, name, address, region, driver, extra=None): + self.id = str(id) + self.name = name + self.address = address + self.region = region + self.driver = driver + self.extra = extra + UuidMixin.__init__(self) + + def destroy(self): + """ + Destroy this address. 
+ + :return: True if successful + :rtype: ``bool`` + """ + return self.driver.ex_destroy_address(address=self) + + def __repr__(self): + return '' % ( + self.id, self.name, self.address) + + +class GCEFailedDisk(object): + """Dummy Node object for disks that are not created.""" + def __init__(self, name, error, code): + self.name = name + self.error = error + self.code = code + + def __repr__(self): + return '' % ( + self.name, self.code) + + +class GCEFailedNode(object): + """Dummy Node object for nodes that are not created.""" + def __init__(self, name, error, code): + self.name = name + self.error = error + self.code = code + + def __repr__(self): + return '' % ( + self.name, self.code) + + +class GCEHealthCheck(UuidMixin): + """A GCE Http Health Check class.""" + def __init__(self, id, name, path, port, interval, timeout, + unhealthy_threshold, healthy_threshold, driver, extra=None): + self.id = str(id) + self.name = name + self.path = path + self.port = port + self.interval = interval + self.timeout = timeout + self.unhealthy_threshold = unhealthy_threshold + self.healthy_threshold = healthy_threshold + self.driver = driver + self.extra = extra + UuidMixin.__init__(self) + + def destroy(self): + """ + Destroy this Health Check. + + :return: True if successful + :rtype: ``bool`` + """ + return self.driver.ex_destroy_healthcheck(healthcheck=self) + + def update(self): + """ + Commit updated healthcheck values. 
+ + :return: Updated Healthcheck object + :rtype: :class:`GCEHealthcheck` + """ + return self.driver.ex_update_healthcheck(healthcheck=self) + + def __repr__(self): + return '' % ( + self.id, self.name, self.path, self.port) + + +class GCEFirewall(UuidMixin): + """A GCE Firewall rule class.""" + def __init__(self, id, name, allowed, network, source_ranges, source_tags, + target_tags, driver, extra=None): + self.id = str(id) + self.name = name + self.network = network + self.allowed = allowed + self.source_ranges = source_ranges + self.source_tags = source_tags + self.target_tags = target_tags + self.driver = driver + self.extra = extra + UuidMixin.__init__(self) + + def destroy(self): + """ + Destroy this firewall. + + :return: True if successful + :rtype: ``bool`` + """ + return self.driver.ex_destroy_firewall(firewall=self) + + def update(self): + """ + Commit updated firewall values. + + :return: Updated Firewall object + :rtype: :class:`GCEFirewall` + """ + return self.driver.ex_update_firewall(firewall=self) + + def __repr__(self): + return '' % ( + self.id, self.name, self.network.name) + + +class GCEForwardingRule(UuidMixin): + def __init__(self, id, name, region, address, protocol, targetpool, driver, + extra=None): + self.id = str(id) + self.name = name + self.region = region + self.address = address + self.protocol = protocol + self.targetpool = targetpool + self.driver = driver + self.extra = extra + UuidMixin.__init__(self) + + def destroy(self): + """ + Destroy this Forwarding Rule + + :return: True if successful + :rtype: ``bool`` + """ + return self.driver.ex_destroy_forwarding_rule(forwarding_rule=self) + + def __repr__(self): + return '' % ( + self.id, self.name, self.address) + + +class GCENodeImage(NodeImage): + """A GCE Node Image class.""" + def __init__(self, id, name, driver, extra=None): + super(GCENodeImage, self).__init__(id, name, driver, extra=extra) + + def delete(self): + """ + Delete this image + + :return: True if successful + 
:rtype: ``bool`` + """ + return self.driver.ex_delete_image(image=self) + + def deprecate(self, replacement, state): + """ + Deprecate this image + + :param replacement: Image to use as a replacement + :type replacement: ``str`` or :class: `GCENodeImage` + + :param state: Deprecation state of this image. Possible values include + \'DELETED\', \'DEPRECATED\' or \'OBSOLETE\'. + :type state: ``str`` + + :return: True if successful + :rtype: ``bool`` + """ + return self.driver.ex_deprecate_image(self, replacement, state) + + +class GCENetwork(UuidMixin): + """A GCE Network object class.""" + def __init__(self, id, name, cidr, driver, extra=None): + self.id = str(id) + self.name = name + self.cidr = cidr + self.driver = driver + self.extra = extra + UuidMixin.__init__(self) + + def destroy(self): + """ + Destroy this network + + :return: True if successful + :rtype: ``bool`` + """ + return self.driver.ex_destroy_network(network=self) + + def __repr__(self): + return '' % ( + self.id, self.name, self.cidr) + + +class GCENodeSize(NodeSize): + """A GCE Node Size (MachineType) class.""" + def __init__(self, id, name, ram, disk, bandwidth, price, driver, + extra=None): + self.extra = extra + super(GCENodeSize, self).__init__(id, name, ram, disk, bandwidth, + price, driver, extra=extra) + + +class GCEProject(UuidMixin): + """GCE Project information.""" + def __init__(self, id, name, metadata, quotas, driver, extra=None): + self.id = str(id) + self.name = name + self.metadata = metadata + self.quotas = quotas + self.driver = driver + self.extra = extra + UuidMixin.__init__(self) + + def __repr__(self): + return '' % (self.id, self.name) + + +class GCERegion(UuidMixin): + def __init__(self, id, name, status, zones, quotas, deprecated, driver, + extra=None): + self.id = str(id) + self.name = name + self.status = status + self.zones = zones + self.quotas = quotas + self.deprecated = deprecated + self.driver = driver + self.extra = extra + UuidMixin.__init__(self) + + def 
__repr__(self): + return '' % ( + self.id, self.name, self.status) + + +class GCESnapshot(VolumeSnapshot): + def __init__(self, id, name, size, status, driver, extra=None): + self.name = name + self.status = status + super(GCESnapshot, self).__init__(id, driver, size, extra) + + +class GCETargetPool(UuidMixin): + def __init__(self, id, name, region, healthchecks, nodes, driver, + extra=None): + self.id = str(id) + self.name = name + self.region = region + self.healthchecks = healthchecks + self.nodes = nodes + self.driver = driver + self.extra = extra + UuidMixin.__init__(self) + + def add_node(self, node): + """ + Add a node to this target pool. + + :param node: Node to add + :type node: ``str`` or :class:`Node` + + :return: True if successful + :rtype: ``bool`` + """ + return self.driver.ex_targetpool_add_node(targetpool=self, node=node) + + def remove_node(self, node): + """ + Remove a node from this target pool. + + :param node: Node to remove + :type node: ``str`` or :class:`Node` + + :return: True if successful + :rtype: ``bool`` + """ + return self.driver.ex_targetpool_remove_node(targetpool=self, + node=node) + + def add_healthcheck(self, healthcheck): + """ + Add a healthcheck to this target pool. + + :param healthcheck: Healthcheck to add + :type healthcheck: ``str`` or :class:`GCEHealthCheck` + + :return: True if successful + :rtype: ``bool`` + """ + return self.driver.ex_targetpool_add_healthcheck( + targetpool=self, healthcheck=healthcheck) + + def remove_healthcheck(self, healthcheck): + """ + Remove a healthcheck from this target pool. 
+ + :param healthcheck: Healthcheck to remove + :type healthcheck: ``str`` or :class:`GCEHealthCheck` + + :return: True if successful + :rtype: ``bool`` + """ + return self.driver.ex_targetpool_remove_healthcheck( + targetpool=self, healthcheck=healthcheck) + + def destroy(self): + """ + Destroy this Target Pool + + :return: True if successful + :rtype: ``bool`` + """ + return self.driver.ex_destroy_targetpool(targetpool=self) + + def __repr__(self): + return '' % ( + self.id, self.name, self.region.name) + + +class GCEZone(NodeLocation): + """Subclass of NodeLocation to provide additional information.""" + def __init__(self, id, name, status, maintenance_windows, deprecated, + driver, extra=None): + self.status = status + self.maintenance_windows = maintenance_windows + self.deprecated = deprecated + self.extra = extra + country = name.split('-')[0] + super(GCEZone, self).__init__(id=str(id), name=name, country=country, + driver=driver) + + @property + def time_until_mw(self): + """ + Returns the time until the next Maintenance Window as a + datetime.timedelta object. + """ + return self._get_time_until_mw() + + @property + def next_mw_duration(self): + """ + Returns the duration of the next Maintenance Window as a + datetime.timedelta object. + """ + return self._get_next_mw_duration() + + def _now(self): + """ + Returns current UTC time. + + Can be overridden in unittests. + """ + return datetime.datetime.utcnow() + + def _get_next_maint(self): + """ + Returns the next Maintenance Window. 
+ + :return: A dictionary containing maintenance window info (or None if + no maintenance windows are scheduled) + The dictionary contains 4 keys with values of type ``str`` + - name: The name of the maintenance window + - description: Description of the maintenance window + - beginTime: RFC3339 Timestamp + - endTime: RFC3339 Timestamp + :rtype: ``dict`` or ``None`` + """ + begin = None + next_window = None + if not self.maintenance_windows: + return None + if len(self.maintenance_windows) == 1: + return self.maintenance_windows[0] + for mw in self.maintenance_windows: + begin_next = timestamp_to_datetime(mw['beginTime']) + if (not begin) or (begin_next < begin): + begin = begin_next + next_window = mw + return next_window + + def _get_time_until_mw(self): + """ + Returns time until next maintenance window. + + :return: Time until next maintenance window (or None if no + maintenance windows are scheduled) + :rtype: :class:`datetime.timedelta` or ``None`` + """ + next_window = self._get_next_maint() + if not next_window: + return None + now = self._now() + next_begin = timestamp_to_datetime(next_window['beginTime']) + return next_begin - now + + def _get_next_mw_duration(self): + """ + Returns the duration of the next maintenance window. + + :return: Duration of next maintenance window (or None if no + maintenance windows are scheduled) + :rtype: :class:`datetime.timedelta` or ``None`` + """ + next_window = self._get_next_maint() + if not next_window: + return None + next_begin = timestamp_to_datetime(next_window['beginTime']) + next_end = timestamp_to_datetime(next_window['endTime']) + return next_end - next_begin + + def __repr__(self): + return '' % (self.id, self.name, + self.status) + + +class GCENodeDriver(NodeDriver): + """ + GCE Node Driver class. + + This is the primary driver for interacting with Google Compute Engine. It + contains all of the standard libcloud methods, plus additional ex_* methods + for more features. 
+ + Note that many methods allow either objects or strings (or lists of + objects/strings). In most cases, passing strings instead of objects will + result in additional GCE API calls. + """ + connectionCls = GCEConnection + api_name = 'googleapis' + name = "Google Compute Engine" + type = Provider.GCE + website = 'https://cloud.google.com/' + + NODE_STATE_MAP = { + "PROVISIONING": NodeState.PENDING, + "STAGING": NodeState.PENDING, + "RUNNING": NodeState.RUNNING, + "STOPPED": NodeState.TERMINATED, + "STOPPING": NodeState.TERMINATED, + "TERMINATED": NodeState.TERMINATED + } + + def __init__(self, user_id, key, datacenter=None, project=None, + auth_type=None, scopes=None, **kwargs): + """ + :param user_id: The email address (for service accounts) or Client ID + (for installed apps) to be used for authentication. + :type user_id: ``str`` + + :param key: The RSA Key (for service accounts) or file path containing + key or Client Secret (for installed apps) to be used for + authentication. + :type key: ``str`` + + :keyword datacenter: The name of the datacenter (zone) used for + operations. + :type datacenter: ``str`` + + :keyword project: Your GCE project name. (required) + :type project: ``str`` + + :keyword auth_type: Accepted values are "SA" or "IA" + ("Service Account" or "Installed Application"). + If not supplied, auth_type will be guessed based + on value of user_id. + :type auth_type: ``str`` + + :keyword scopes: List of authorization URLs. Default is empty and + grants read/write to Compute, Storage, DNS. 
+ :type scopes: ``list`` + """ + self.auth_type = auth_type + self.project = project + self.scopes = scopes + if not self.project: + raise ValueError('Project name must be specified using ' + '"project" keyword.') + super(GCENodeDriver, self).__init__(user_id, key, **kwargs) + + # Cache Zone and Region information to reduce API calls and + # increase speed + self.base_path = '/compute/%s/projects/%s' % (API_VERSION, + self.project) + self.zone_list = self.ex_list_zones() + self.zone_dict = {} + for zone in self.zone_list: + self.zone_dict[zone.name] = zone + if datacenter: + self.zone = self.ex_get_zone(datacenter) + else: + self.zone = None + + self.region_list = self.ex_list_regions() + self.region_dict = {} + for region in self.region_list: + self.region_dict[region.name] = region + + if self.zone: + self.region = self._get_region_from_zone(self.zone) + else: + self.region = None + + def ex_list_addresses(self, region=None): + """ + Return a list of static addresses for a region or all. + + :keyword region: The region to return addresses from. For example: + 'us-central1'. If None, will return addresses from + region of self.zone. If 'all', will return all + addresses. + :type region: ``str`` or ``None`` + + :return: A list of static address objects. 
+ :rtype: ``list`` of :class:`GCEAddress` + """ + list_addresses = [] + region = self._set_region(region) + if region is None: + request = '/aggregated/addresses' + else: + request = '/regions/%s/addresses' % (region.name) + response = self.connection.request(request, method='GET').object + + if 'items' in response: + # The aggregated result returns dictionaries for each region + if region is None: + for v in response['items'].values(): + region_addresses = [self._to_address(a) for a in + v.get('addresses', [])] + list_addresses.extend(region_addresses) + else: + list_addresses = [self._to_address(a) for a in + response['items']] + return list_addresses + + def ex_list_healthchecks(self): + """ + Return the list of health checks. + + :return: A list of health check objects. + :rtype: ``list`` of :class:`GCEHealthCheck` + """ + list_healthchecks = [] + request = '/global/httpHealthChecks' + response = self.connection.request(request, method='GET').object + list_healthchecks = [self._to_healthcheck(h) for h in + response.get('items', [])] + return list_healthchecks + + def ex_list_firewalls(self): + """ + Return the list of firewalls. + + :return: A list of firewall objects. + :rtype: ``list`` of :class:`GCEFirewall` + """ + list_firewalls = [] + request = '/global/firewalls' + response = self.connection.request(request, method='GET').object + list_firewalls = [self._to_firewall(f) for f in + response.get('items', [])] + return list_firewalls + + def ex_list_forwarding_rules(self, region=None): + """ + Return the list of forwarding rules for a region or all. + + :keyword region: The region to return forwarding rules from. For + example: 'us-central1'. If None, will return + forwarding rules from the region of self.region + (which is based on self.zone). If 'all', will + return all forwarding rules. + :type region: ``str`` or :class:`GCERegion` or ``None`` + + :return: A list of forwarding rule objects. 
+ :rtype: ``list`` of :class:`GCEForwardingRule` + """ + list_forwarding_rules = [] + region = self._set_region(region) + if region is None: + request = '/aggregated/forwardingRules' + else: + request = '/regions/%s/forwardingRules' % (region.name) + response = self.connection.request(request, method='GET').object + + if 'items' in response: + # The aggregated result returns dictionaries for each region + if region is None: + for v in response['items'].values(): + region_forwarding_rules = [self._to_forwarding_rule(f) for + f in v.get('forwardingRules', + [])] + list_forwarding_rules.extend(region_forwarding_rules) + else: + list_forwarding_rules = [self._to_forwarding_rule(f) for f in + response['items']] + return list_forwarding_rules + + def list_images(self, ex_project=None): + """ + Return a list of image objects for a project. + + :keyword ex_project: Optional alternate project name. + :type ex_project: ``str`` or ``None`` + + :return: List of GCENodeImage objects + :rtype: ``list`` of :class:`GCENodeImage` + """ + request = '/global/images' + if ex_project is None: + response = self.connection.request(request, method='GET').object + else: + # Save the connection request_path + save_request_path = self.connection.request_path + # Override the connection request path + new_request_path = save_request_path.replace(self.project, + ex_project) + self.connection.request_path = new_request_path + response = self.connection.request(request, method='GET').object + # Restore the connection request_path + self.connection.request_path = save_request_path + list_images = [self._to_node_image(i) for i in + response.get('items', [])] + return list_images + + def list_locations(self): + """ + Return a list of locations (zones). + + The :class:`ex_list_zones` method returns more comprehensive results, + but this is here for compatibility. 
+ + :return: List of NodeLocation objects + :rtype: ``list`` of :class:`NodeLocation` + """ + list_locations = [] + request = '/zones' + response = self.connection.request(request, method='GET').object + list_locations = [self._to_node_location(l) for l in response['items']] + return list_locations + + def ex_list_networks(self): + """ + Return the list of networks. + + :return: A list of network objects. + :rtype: ``list`` of :class:`GCENetwork` + """ + list_networks = [] + request = '/global/networks' + response = self.connection.request(request, method='GET').object + list_networks = [self._to_network(n) for n in + response.get('items', [])] + return list_networks + + def list_nodes(self, ex_zone=None): + """ + Return a list of nodes in the current zone or all zones. + + :keyword ex_zone: Optional zone name or 'all' + :type ex_zone: ``str`` or :class:`GCEZone` or + :class:`NodeLocation` or ``None`` + + :return: List of Node objects + :rtype: ``list`` of :class:`Node` + """ + list_nodes = [] + zone = self._set_zone(ex_zone) + if zone is None: + request = '/aggregated/instances' + else: + request = '/zones/%s/instances' % (zone.name) + + response = self.connection.request(request, method='GET').object + + if 'items' in response: + # The aggregated response returns a dict for each zone + if zone is None: + for v in response['items'].values(): + zone_nodes = [self._to_node(i) for i in + v.get('instances', [])] + list_nodes.extend(zone_nodes) + else: + list_nodes = [self._to_node(i) for i in response['items']] + return list_nodes + + def ex_list_regions(self): + """ + Return the list of regions. + + :return: A list of region objects. 
+ :rtype: ``list`` of :class:`GCERegion` + """ + list_regions = [] + request = '/regions' + response = self.connection.request(request, method='GET').object + list_regions = [self._to_region(r) for r in response['items']] + return list_regions + + def list_sizes(self, location=None): + """ + Return a list of sizes (machineTypes) in a zone. + + :keyword location: Location or Zone for sizes + :type location: ``str`` or :class:`GCEZone` or + :class:`NodeLocation` or ``None`` + + :return: List of GCENodeSize objects + :rtype: ``list`` of :class:`GCENodeSize` + """ + list_sizes = [] + zone = self._set_zone(location) + if zone is None: + request = '/aggregated/machineTypes' + else: + request = '/zones/%s/machineTypes' % (zone.name) + + response = self.connection.request(request, method='GET').object + + if 'items' in response: + # The aggregated response returns a dict for each zone + if zone is None: + for v in response['items'].values(): + zone_sizes = [self._to_node_size(s) for s in + v.get('machineTypes', [])] + list_sizes.extend(zone_sizes) + else: + list_sizes = [self._to_node_size(s) for s in response['items']] + return list_sizes + + def ex_list_snapshots(self): + """ + Return the list of disk snapshots in the project. + + :return: A list of snapshot objects + :rtype: ``list`` of :class:`GCESnapshot` + """ + list_snapshots = [] + request = '/global/snapshots' + response = self.connection.request(request, method='GET').object + list_snapshots = [self._to_snapshot(s) for s in + response.get('items', [])] + return list_snapshots + + def ex_list_targetpools(self, region=None): + """ + Return the list of target pools. 
+ + :return: A list of target pool objects + :rtype: ``list`` of :class:`GCETargetPool` + """ + list_targetpools = [] + region = self._set_region(region) + if region is None: + request = '/aggregated/targetPools' + else: + request = '/regions/%s/targetPools' % (region.name) + response = self.connection.request(request, method='GET').object + + if 'items' in response: + # The aggregated result returns dictionaries for each region + if region is None: + for v in response['items'].values(): + region_targetpools = [self._to_targetpool(t) for t in + v.get('targetPools', [])] + list_targetpools.extend(region_targetpools) + else: + list_targetpools = [self._to_targetpool(t) for t in + response['items']] + return list_targetpools + + def list_volumes(self, ex_zone=None): + """ + Return a list of volumes for a zone or all. + + Will return list from provided zone, or from the default zone unless + given the value of 'all'. + + :keyword ex_zone: The zone to return volumes from. + :type ex_zone: ``str`` or :class:`GCEZone` or + :class:`NodeLocation` or ``None`` + + :return: A list of volume objects. + :rtype: ``list`` of :class:`StorageVolume` + """ + list_volumes = [] + zone = self._set_zone(ex_zone) + if zone is None: + request = '/aggregated/disks' + else: + request = '/zones/%s/disks' % (zone.name) + + response = self.connection.request(request, method='GET').object + if 'items' in response: + # The aggregated response returns a dict for each zone + if zone is None: + for v in response['items'].values(): + zone_volumes = [self._to_storage_volume(d) for d in + v.get('disks', [])] + list_volumes.extend(zone_volumes) + else: + list_volumes = [self._to_storage_volume(d) for d in + response['items']] + return list_volumes + + def ex_list_zones(self): + """ + Return the list of zones. + + :return: A list of zone objects. 
+ :rtype: ``list`` of :class:`GCEZone` + """ + list_zones = [] + request = '/zones' + response = self.connection.request(request, method='GET').object + list_zones = [self._to_zone(z) for z in response['items']] + return list_zones + + def ex_create_address(self, name, region=None): + """ + Create a static address in a region. + + :param name: Name of static address + :type name: ``str`` + + :keyword region: Name of region for the address (e.g. 'us-central1') + :type region: ``str`` or :class:`GCERegion` + + :return: Static Address object + :rtype: :class:`GCEAddress` + """ + region = region or self.region + if not hasattr(region, 'name'): + region = self.ex_get_region(region) + elif region is None: + raise ValueError('REGION_NOT_SPECIFIED', + 'Region must be provided for an address') + address_data = {'name': name} + request = '/regions/%s/addresses' % (region.name) + self.connection.async_request(request, method='POST', + data=address_data) + return self.ex_get_address(name, region=region) + + def ex_create_healthcheck(self, name, host=None, path=None, port=None, + interval=None, timeout=None, + unhealthy_threshold=None, + healthy_threshold=None): + """ + Create an Http Health Check. + + :param name: Name of health check + :type name: ``str`` + + :keyword host: Hostname of health check request. Defaults to empty + and public IP is used instead. + :type host: ``str`` + + :keyword path: The request path for the check. Defaults to /. + :type path: ``str`` + + :keyword port: The TCP port number for the check. Defaults to 80. + :type port: ``int`` + + :keyword interval: How often (in seconds) to check. Defaults to 5. + :type interval: ``int`` + + :keyword timeout: How long to wait before failing. Defaults to 5. + :type timeout: ``int`` + + :keyword unhealthy_threshold: How many failures before marking + unhealthy. Defaults to 2. + :type unhealthy_threshold: ``int`` + + :keyword healthy_threshold: How many successes before marking as + healthy. Defaults to 2. 
+ :type healthy_threshold: ``int``
+
+ :return: Health Check object
+ :rtype: :class:`GCEHealthCheck`
+ """
+ hc_data = {}
+ hc_data['name'] = name
+ if host:
+ hc_data['host'] = host
+ # As of right now, the 'default' values aren't getting set when called
+ # through the API, so set them explicitly
+ hc_data['requestPath'] = path or '/'
+ hc_data['port'] = port or 80
+ hc_data['checkIntervalSec'] = interval or 5
+ hc_data['timeoutSec'] = timeout or 5
+ hc_data['unhealthyThreshold'] = unhealthy_threshold or 2
+ hc_data['healthyThreshold'] = healthy_threshold or 2
+
+ request = '/global/httpHealthChecks'
+
+ self.connection.async_request(request, method='POST', data=hc_data)
+ return self.ex_get_healthcheck(name)
+
+ def ex_create_firewall(self, name, allowed, network='default',
+ source_ranges=None, source_tags=None,
+ target_tags=None):
+ """
+ Create a firewall on a network.
+
+ Firewall rules should be supplied in the "allowed" field. This is a
+ list of dictionaries formatted like so ("ports" is optional)::
+
+ [{"IPProtocol": "",
+ "ports": ""}]
+
+ For example, to allow tcp on port 8080 and udp on all ports, 'allowed'
+ would be::
+
+ [{"IPProtocol": "tcp",
+ "ports": ["8080"]},
+ {"IPProtocol": "udp"}]
+
+ See `Firewall Reference `_ for more information.
+
+ :param name: Name of the firewall to be created
+ :type name: ``str``
+
+ :param allowed: List of dictionaries with rules
+ :type allowed: ``list`` of ``dict``
+
+ :keyword network: The network that the firewall applies to.
+ :type network: ``str`` or :class:`GCENetwork`
+
+ :keyword source_ranges: A list of IP ranges in CIDR format that the
+ firewall should apply to. Defaults to
+ ['0.0.0.0/0']
+ :type source_ranges: ``list`` of ``str``
+
+ :keyword source_tags: A list of source instance tags the rules apply
+ to.
+ :type source_tags: ``list`` of ``str``
+
+ :keyword target_tags: A list of target instance tags the rules apply
+ to.
+ :type target_tags: ``list`` of ``str``
+
+ :return: Firewall object
+ :rtype: :class:`GCEFirewall`
+ """
+ firewall_data = {}
+ if not hasattr(network, 'name'):
+ nw = self.ex_get_network(network)
+ else:
+ nw = network
+
+ firewall_data['name'] = name
+ firewall_data['allowed'] = allowed
+ firewall_data['network'] = nw.extra['selfLink']
+ firewall_data['sourceRanges'] = source_ranges or ['0.0.0.0/0']
+ if source_tags is not None:
+ firewall_data['sourceTags'] = source_tags
+ if target_tags is not None:
+ firewall_data['targetTags'] = target_tags
+
+ request = '/global/firewalls'
+
+ self.connection.async_request(request, method='POST',
+ data=firewall_data)
+ return self.ex_get_firewall(name)
+
+ def ex_create_forwarding_rule(self, name, targetpool, region=None,
+ protocol='tcp', port_range=None,
+ address=None):
+ """
+ Create a forwarding rule.
+
+ :param name: Name of forwarding rule to be created
+ :type name: ``str``
+
+ :param targetpool: Target pool to apply the rule to
+ :type targetpool: ``str`` or :class:`GCETargetPool`
+
+ :keyword region: Region to create the forwarding rule in. Defaults to
+ self.region
+ :type region: ``str`` or :class:`GCERegion`
+
+ :keyword protocol: Should be 'tcp' or 'udp'
+ :type protocol: ``str``
+
+ :keyword port_range: Optional single port number or range separated
+ by a dash. Examples: '80', '5000-5999'.
+ :type port_range: ``str``
+
+ :keyword address: Optional static address for forwarding rule. Must be
+ in same region.
+ :type address: ``str`` or :class:`GCEAddress` + + :return: Forwarding Rule object + :rtype: :class:`GCEForwardingRule` + """ + forwarding_rule_data = {} + region = region or self.region + if not hasattr(region, 'name'): + region = self.ex_get_region(region) + if not hasattr(targetpool, 'name'): + targetpool = self.ex_get_targetpool(targetpool, region) + + forwarding_rule_data['name'] = name + forwarding_rule_data['region'] = region.extra['selfLink'] + forwarding_rule_data['target'] = targetpool.extra['selfLink'] + forwarding_rule_data['protocol'] = protocol.upper() + if address: + if not hasattr(address, 'name'): + address = self.ex_get_address(address, region) + forwarding_rule_data['IPAddress'] = address.extra['selfLink'] + if port_range: + forwarding_rule_data['portRange'] = port_range + + request = '/regions/%s/forwardingRules' % (region.name) + + self.connection.async_request(request, method='POST', + data=forwarding_rule_data) + + return self.ex_get_forwarding_rule(name) + + def ex_create_network(self, name, cidr): + """ + Create a network. + + :param name: Name of network to be created + :type name: ``str`` + + :param cidr: Address range of network in CIDR format. + :type cidr: ``str`` + + :return: Network object + :rtype: :class:`GCENetwork` + """ + network_data = {} + network_data['name'] = name + network_data['IPv4Range'] = cidr + + request = '/global/networks' + + self.connection.async_request(request, method='POST', + data=network_data) + + return self.ex_get_network(name) + + def create_node(self, name, size, image, location=None, + ex_network='default', ex_tags=None, ex_metadata=None, + ex_boot_disk=None, use_existing_disk=True, + external_ip='ephemeral'): + """ + Create a new node and return a node object for the node. + + :param name: The name of the node to create. + :type name: ``str`` + + :param size: The machine type to use. 
+ :type size: ``str`` or :class:`GCENodeSize` + + :param image: The image to use to create the node (or, if attaching + a persistent disk, the image used to create the disk) + :type image: ``str`` or :class:`GCENodeImage` + + :keyword location: The location (zone) to create the node in. + :type location: ``str`` or :class:`NodeLocation` or + :class:`GCEZone` or ``None`` + + :keyword ex_network: The network to associate with the node. + :type ex_network: ``str`` or :class:`GCENetwork` + + :keyword ex_tags: A list of tags to associate with the node. + :type ex_tags: ``list`` of ``str`` or ``None`` + + :keyword ex_metadata: Metadata dictionary for instance. + :type ex_metadata: ``dict`` or ``None`` + + :keyword ex_boot_disk: The boot disk to attach to the instance. + :type ex_boot_disk: :class:`StorageVolume` or ``str`` + + :keyword use_existing_disk: If True and if an existing disk with the + same name/location is found, use that + disk instead of creating a new one. + :type use_existing_disk: ``bool`` + + :keyword external_ip: The external IP address to use. If 'ephemeral' + (default), a new non-static address will be + used. If 'None', then no external address will + be used. To use an existing static IP address, + a GCEAddress object should be passed in. + :type external_ip: :class:`GCEAddress` or ``str`` or None + + :return: A Node object for the new node. 
+ :rtype: :class:`Node` + """ + location = location or self.zone + if not hasattr(location, 'name'): + location = self.ex_get_zone(location) + if not hasattr(size, 'name'): + size = self.ex_get_size(size, location) + if not hasattr(ex_network, 'name'): + ex_network = self.ex_get_network(ex_network) + if not hasattr(image, 'name'): + image = self.ex_get_image(image) + + if not ex_boot_disk: + ex_boot_disk = self.create_volume(None, name, location=location, + image=image, + use_existing=use_existing_disk) + + if ex_metadata is not None: + ex_metadata = {"items": [{"key": k, "value": v} + for k, v in ex_metadata.items()]} + + request, node_data = self._create_node_req(name, size, image, + location, ex_network, + ex_tags, ex_metadata, + ex_boot_disk, external_ip) + self.connection.async_request(request, method='POST', data=node_data) + + return self.ex_get_node(name, location.name) + + def ex_create_multiple_nodes(self, base_name, size, image, number, + location=None, ex_network='default', + ex_tags=None, ex_metadata=None, + ignore_errors=True, use_existing_disk=True, + poll_interval=2, external_ip='ephemeral', + timeout=DEFAULT_TASK_COMPLETION_TIMEOUT): + """ + Create multiple nodes and return a list of Node objects. + + Nodes will be named with the base name and a number. For example, if + the base name is 'libcloud' and you create 3 nodes, they will be + named:: + + libcloud-000 + libcloud-001 + libcloud-002 + + :param base_name: The base name of the nodes to create. + :type base_name: ``str`` + + :param size: The machine type to use. + :type size: ``str`` or :class:`GCENodeSize` + + :param image: The image to use to create the nodes. + :type image: ``str`` or :class:`GCENodeImage` + + :param number: The number of nodes to create. + :type number: ``int`` + + :keyword location: The location (zone) to create the nodes in. 
+ :type location: ``str`` or :class:`NodeLocation` or
+ :class:`GCEZone` or ``None``
+
+ :keyword ex_network: The network to associate with the nodes.
+ :type ex_network: ``str`` or :class:`GCENetwork`
+
+ :keyword ex_tags: A list of tags to associate with the nodes.
+ :type ex_tags: ``list`` of ``str`` or ``None``
+
+ :keyword ex_metadata: Metadata dictionary for instances.
+ :type ex_metadata: ``dict`` or ``None``
+
+ :keyword ignore_errors: If True, don't raise Exceptions if one or
+ more nodes fails.
+ :type ignore_errors: ``bool``
+
+ :keyword use_existing_disk: If True and if an existing disk with the
+ same name/location is found, use that
+ disk instead of creating a new one.
+ :type use_existing_disk: ``bool``
+
+ :keyword poll_interval: Number of seconds between status checks.
+ :type poll_interval: ``int``
+
+ :keyword external_ip: The external IP address to use. If 'ephemeral'
+ (default), a new non-static address will be
+ used. If 'None', then no external address will
+ be used. (Static addresses are not supported for
+ multiple node creation.)
+ :type external_ip: ``str`` or None
+
+ :keyword timeout: The number of seconds to wait for all nodes to be
+ created before timing out.
+ :type timeout: ``int``
+
+ :return: A list of Node objects for the new nodes.
+ :rtype: ``list`` of :class:`Node`
+ """
+ location = location or self.zone
+ if not hasattr(location, 'name'):
+ location = self.ex_get_zone(location)
+ if not hasattr(size, 'name'):
+ size = self.ex_get_size(size, location)
+ if not hasattr(ex_network, 'name'):
+ ex_network = self.ex_get_network(ex_network)
+ if not hasattr(image, 'name'):
+ image = self.ex_get_image(image)
+
+ node_attrs = {'size': size,
+ 'image': image,
+ 'location': location,
+ 'network': ex_network,
+ 'tags': ex_tags,
+ 'metadata': ex_metadata,
+ 'ignore_errors': ignore_errors,
+ 'use_existing_disk': use_existing_disk,
+ 'external_ip': external_ip}
+
+ # List for holding the status information for disk/node creation.
+ status_list = [] + + for i in range(number): + name = '%s-%03d' % (base_name, i) + + status = {'name': name, + 'node_response': None, + 'node': None, + 'disk_response': None, + 'disk': None} + + status_list.append(status) + + # Create disks for nodes + for status in status_list: + self._multi_create_disk(status, node_attrs) + + start_time = time.time() + complete = False + while not complete: + if (time.time() - start_time >= timeout): + raise Exception("Timeout (%s sec) while waiting for multiple " + "instances") + complete = True + time.sleep(poll_interval) + for status in status_list: + # If disk does not yet exist, check on its status + if not status['disk']: + self._multi_check_disk(status, node_attrs) + + # If disk exists, but node does not, create the node or check + # on its status if already in progress. + if status['disk'] and not status['node']: + if not status['node_response']: + self._multi_create_node(status, node_attrs) + else: + self._multi_check_node(status, node_attrs) + # If any of the nodes have not been created (or failed) we are + # not done yet. + if not status['node']: + complete = False + + # Return list of nodes + node_list = [] + for status in status_list: + node_list.append(status['node']) + return node_list + + def ex_create_targetpool(self, name, region=None, healthchecks=None, + nodes=None): + """ + Create a target pool. + + :param name: Name of target pool + :type name: ``str`` + + :keyword region: Region to create the target pool in. 
Defaults to + self.region + :type region: ``str`` or :class:`GCERegion` or ``None`` + + :keyword healthchecks: Optional list of health checks to attach + :type healthchecks: ``list`` of ``str`` or :class:`GCEHealthCheck` + + :keyword nodes: Optional list of nodes to attach to the pool + :type nodes: ``list`` of ``str`` or :class:`Node` + + :return: Target Pool object + :rtype: :class:`GCETargetPool` + """ + region = region or self.region + targetpool_data = {} + targetpool_data['name'] = name + if not hasattr(region, 'name'): + region = self.ex_get_region(region) + targetpool_data['region'] = region.extra['selfLink'] + + if healthchecks: + if not hasattr(healthchecks[0], 'name'): + hc_list = [self.ex_get_healthcheck(h).extra['selfLink'] for h + in healthchecks] + else: + hc_list = [h.extra['selfLink'] for h in healthchecks] + targetpool_data['healthChecks'] = hc_list + if nodes: + if not hasattr(nodes[0], 'name'): + node_list = [self.ex_get_node(n, 'all').extra['selfLink'] for n + in nodes] + else: + node_list = [n.extra['selfLink'] for n in nodes] + targetpool_data['instances'] = node_list + + request = '/regions/%s/targetPools' % (region.name) + + self.connection.async_request(request, method='POST', + data=targetpool_data) + + return self.ex_get_targetpool(name, region) + + def create_volume(self, size, name, location=None, snapshot=None, + image=None, use_existing=True): + """ + Create a volume (disk). + + :param size: Size of volume to create (in GB). Can be None if image + or snapshot is supplied. + :type size: ``int`` or ``str`` or ``None`` + + :param name: Name of volume to create + :type name: ``str`` + + :keyword location: Location (zone) to create the volume in + :type location: ``str`` or :class:`GCEZone` or + :class:`NodeLocation` or ``None`` + + :keyword snapshot: Snapshot to create image from + :type snapshot: :class:`GCESnapshot` or ``str`` or ``None`` + + :keyword image: Image to create disk from. 
+ :type image: :class:`GCENodeImage` or ``str`` or ``None`` + + :keyword use_existing: If True and a disk with the given name already + exists, return an object for that disk instead + of attempting to create a new disk. + :type use_existing: ``bool`` + + :return: Storage Volume object + :rtype: :class:`StorageVolume` + """ + request, volume_data, params = self._create_vol_req( + size, name, location, snapshot, image) + try: + self.connection.async_request(request, method='POST', + data=volume_data, params=params) + except ResourceExistsError: + e = sys.exc_info()[1] + if not use_existing: + raise e + + return self.ex_get_volume(name, location) + + def create_volume_snapshot(self, volume, name): + """ + Create a snapshot of the provided Volume. + + :param volume: A StorageVolume object + :type volume: :class:`StorageVolume` + + :return: A GCE Snapshot object + :rtype: :class:`GCESnapshot` + """ + snapshot_data = {} + snapshot_data['name'] = name + request = '/zones/%s/disks/%s/createSnapshot' % ( + volume.extra['zone'].name, volume.name) + self.connection.async_request(request, method='POST', + data=snapshot_data) + + return self.ex_get_snapshot(name) + + def list_volume_snapshots(self, volume): + """ + List snapshots created from the provided volume. + + For GCE, snapshots are global, but while the volume they were + created from still exists, the source disk for the snapshot is + tracked. + + :param volume: A StorageVolume object + :type volume: :class:`StorageVolume` + + :return: A list of Snapshot objects + :rtype: ``list`` of :class:`GCESnapshot` + """ + volume_snapshots = [] + volume_link = volume.extra['selfLink'] + all_snapshots = self.ex_list_snapshots() + for snapshot in all_snapshots: + if snapshot.extra['sourceDisk'] == volume_link: + volume_snapshots.append(snapshot) + return volume_snapshots + + def ex_update_healthcheck(self, healthcheck): + """ + Update a health check with new values. 
+ + To update, change the attributes of the health check object and pass + the updated object to the method. + + :param healthcheck: A healthcheck object with updated values. + :type healthcheck: :class:`GCEHealthCheck` + + :return: An object representing the new state of the health check. + :rtype: :class:`GCEHealthCheck` + """ + hc_data = {} + hc_data['name'] = healthcheck.name + hc_data['requestPath'] = healthcheck.path + hc_data['port'] = healthcheck.port + hc_data['checkIntervalSec'] = healthcheck.interval + hc_data['timeoutSec'] = healthcheck.timeout + hc_data['unhealthyThreshold'] = healthcheck.unhealthy_threshold + hc_data['healthyThreshold'] = healthcheck.healthy_threshold + if healthcheck.extra['host']: + hc_data['host'] = healthcheck.extra['host'] + if healthcheck.extra['description']: + hc_data['description'] = healthcheck.extra['description'] + + request = '/global/httpHealthChecks/%s' % (healthcheck.name) + + self.connection.async_request(request, method='PUT', + data=hc_data) + + return self.ex_get_healthcheck(healthcheck.name) + + def ex_update_firewall(self, firewall): + """ + Update a firewall with new values. + + To update, change the attributes of the firewall object and pass the + updated object to the method. + + :param firewall: A firewall object with updated values. + :type firewall: :class:`GCEFirewall` + + :return: An object representing the new state of the firewall. 
+ :rtype: :class:`GCEFirewall` + """ + firewall_data = {} + firewall_data['name'] = firewall.name + firewall_data['allowed'] = firewall.allowed + firewall_data['network'] = firewall.network.extra['selfLink'] + if firewall.source_ranges: + firewall_data['sourceRanges'] = firewall.source_ranges + if firewall.source_tags: + firewall_data['sourceTags'] = firewall.source_tags + if firewall.target_tags: + firewall_data['targetTags'] = firewall.target_tags + if firewall.extra['description']: + firewall_data['description'] = firewall.extra['description'] + + request = '/global/firewalls/%s' % (firewall.name) + + self.connection.async_request(request, method='PUT', + data=firewall_data) + + return self.ex_get_firewall(firewall.name) + + def ex_targetpool_add_node(self, targetpool, node): + """ + Add a node to a target pool. + + :param targetpool: The targetpool to add node to + :type targetpool: ``str`` or :class:`GCETargetPool` + + :param node: The node to add + :type node: ``str`` or :class:`Node` + + :returns: True if successful + :rtype: ``bool`` + """ + if not hasattr(targetpool, 'name'): + targetpool = self.ex_get_targetpool(targetpool) + if not hasattr(node, 'name'): + node = self.ex_get_node(node, 'all') + + targetpool_data = {'instances': [{'instance': node.extra['selfLink']}]} + + request = '/regions/%s/targetPools/%s/addInstance' % ( + targetpool.region.name, targetpool.name) + self.connection.async_request(request, method='POST', + data=targetpool_data) + targetpool.nodes.append(node) + return True + + def ex_targetpool_add_healthcheck(self, targetpool, healthcheck): + """ + Add a health check to a target pool. 
+ + :param targetpool: The targetpool to add health check to + :type targetpool: ``str`` or :class:`GCETargetPool` + + :param healthcheck: The healthcheck to add + :type healthcheck: ``str`` or :class:`GCEHealthCheck` + + :returns: True if successful + :rtype: ``bool`` + """ + if not hasattr(targetpool, 'name'): + targetpool = self.ex_get_targetpool(targetpool) + if not hasattr(healthcheck, 'name'): + healthcheck = self.ex_get_healthcheck(healthcheck) + + targetpool_data = {'healthCheck': healthcheck.extra['selfLink']} + + request = '/regions/%s/targetPools/%s/addHealthCheck' % ( + targetpool.region.name, targetpool.name) + self.connection.async_request(request, method='POST', + data=targetpool_data) + targetpool.healthchecks.append(healthcheck) + return True + + def ex_targetpool_remove_node(self, targetpool, node): + """ + Remove a node from a target pool. + + :param targetpool: The targetpool to remove node from + :type targetpool: ``str`` or :class:`GCETargetPool` + + :param node: The node to remove + :type node: ``str`` or :class:`Node` + + :returns: True if successful + :rtype: ``bool`` + """ + if not hasattr(targetpool, 'name'): + targetpool = self.ex_get_targetpool(targetpool) + if not hasattr(node, 'name'): + node = self.ex_get_node(node, 'all') + + targetpool_data = {'instances': [{'instance': node.extra['selfLink']}]} + + request = '/regions/%s/targetPools/%s/removeInstance' % ( + targetpool.region.name, targetpool.name) + self.connection.async_request(request, method='POST', + data=targetpool_data) + # Remove node object from node list + index = None + for i, nd in enumerate(targetpool.nodes): + if nd.name == node.name: + index = i + break + if index is not None: + targetpool.nodes.pop(index) + return True + + def ex_targetpool_remove_healthcheck(self, targetpool, healthcheck): + """ + Remove a health check from a target pool. 
+ + :param targetpool: The targetpool to remove health check from + :type targetpool: ``str`` or :class:`GCETargetPool` + + :param healthcheck: The healthcheck to remove + :type healthcheck: ``str`` or :class:`GCEHealthCheck` + + :returns: True if successful + :rtype: ``bool`` + """ + if not hasattr(targetpool, 'name'): + targetpool = self.ex_get_targetpool(targetpool) + if not hasattr(healthcheck, 'name'): + healthcheck = self.ex_get_healthcheck(healthcheck) + + targetpool_data = {'healthCheck': healthcheck.extra['selfLink']} + + request = '/regions/%s/targetPools/%s/removeHealthCheck' % ( + targetpool.region.name, targetpool.name) + self.connection.async_request(request, method='POST', + data=targetpool_data) + # Remove healthcheck object from healthchecks list + index = None + for i, hc in enumerate(targetpool.healthchecks): + if hc.name == healthcheck.name: + index = i + if index is not None: + targetpool.healthchecks.pop(index) + return True + + def reboot_node(self, node): + """ + Reboot a node. + + :param node: Node to be rebooted + :type node: :class:`Node` + + :return: True if successful, False if not + :rtype: ``bool`` + """ + request = '/zones/%s/instances/%s/reset' % (node.extra['zone'].name, + node.name) + self.connection.async_request(request, method='POST', + data='ignored') + return True + + def ex_set_node_tags(self, node, tags): + """ + Set the tags on a Node instance. + + Note that this updates the node object directly. 
+ + :param node: Node object + :type node: :class:`Node` + + :param tags: List of tags to apply to the object + :type tags: ``list`` of ``str`` + + :return: True if successful + :rtype: ``bool`` + """ + request = '/zones/%s/instances/%s/setTags' % (node.extra['zone'].name, + node.name) + + tags_data = {} + tags_data['items'] = tags + tags_data['fingerprint'] = node.extra['tags_fingerprint'] + + self.connection.async_request(request, method='POST', + data=tags_data) + new_node = self.ex_get_node(node.name, node.extra['zone']) + node.extra['tags'] = new_node.extra['tags'] + node.extra['tags_fingerprint'] = new_node.extra['tags_fingerprint'] + return True + + def ex_set_node_scheduling(self, node, on_host_maintenance=None, + automatic_restart=None): + """Set the maintenance behavior for the node. + + See `Scheduling `_ documentation for more info. + + :param node: Node object + :type node: :class:`Node` + + :keyword on_host_maintenance: Defines whether node should be + terminated or migrated when host machine + goes down. Acceptable values are: + 'MIGRATE' or 'TERMINATE' (If not + supplied, value will be reset to GCE + default value for the instance type.) + :type on_host_maintenance: ``str`` + + :keyword automatic_restart: Defines whether the instance should be + automatically restarted when it is + terminated by Compute Engine. (If not + supplied, value will be set to the GCE + default value for the instance type.) + :type automatic_restart: ``bool`` + + :return: True if successful. 
+ :rtype: ``bool`` + """ + if not hasattr(node, 'name'): + node = self.ex_get_node(node, 'all') + if on_host_maintenance is not None: + on_host_maintenance = on_host_maintenance.upper() + ohm_values = ['MIGRATE', 'TERMINATE'] + if on_host_maintenance not in ohm_values: + raise ValueError('on_host_maintenance must be one of %s' % + ','.join(ohm_values)) + + request = '/zones/%s/instances/%s/setScheduling' % ( + node.extra['zone'].name, node.name) + + scheduling_data = {} + if on_host_maintenance is not None: + scheduling_data['onHostMaintenance'] = on_host_maintenance + if automatic_restart is not None: + scheduling_data['automaticRestart'] = automatic_restart + + self.connection.async_request(request, method='POST', + data=scheduling_data) + + new_node = self.ex_get_node(node.name, node.extra['zone']) + node.extra['scheduling'] = new_node.extra['scheduling'] + + ohm = node.extra['scheduling'].get('onHostMaintenance') + ar = node.extra['scheduling'].get('automaticRestart') + + success = True + if on_host_maintenance not in [None, ohm]: + success = False + if automatic_restart not in [None, ar]: + success = False + + return success + + def deploy_node(self, name, size, image, script, location=None, + ex_network='default', ex_tags=None): + """ + Create a new node and run a script on start-up. + + :param name: The name of the node to create. + :type name: ``str`` + + :param size: The machine type to use. + :type size: ``str`` or :class:`GCENodeSize` + + :param image: The image to use to create the node. + :type image: ``str`` or :class:`GCENodeImage` + + :param script: File path to start-up script + :type script: ``str`` + + :keyword location: The location (zone) to create the node in. + :type location: ``str`` or :class:`NodeLocation` or + :class:`GCEZone` or ``None`` + + :keyword ex_network: The network to associate with the node. + :type ex_network: ``str`` or :class:`GCENetwork` + + :keyword ex_tags: A list of tags to associate with the node. 
+ :type ex_tags: ``list`` of ``str`` or ``None`` + + :return: A Node object for the new node. + :rtype: :class:`Node` + """ + with open(script, 'r') as f: + script_data = f.read() + metadata = {'items': [{'key': 'startup-script', + 'value': script_data}]} + + return self.create_node(name, size, image, location=location, + ex_network=ex_network, ex_tags=ex_tags, + ex_metadata=metadata) + + def attach_volume(self, node, volume, device=None, ex_mode=None, + ex_boot=False): + """ + Attach a volume to a node. + + If volume is None, a scratch disk will be created and attached. + + :param node: The node to attach the volume to + :type node: :class:`Node` + + :param volume: The volume to attach. If none, a scratch disk will be + attached. + :type volume: :class:`StorageVolume` or ``None`` + + :keyword device: The device name to attach the volume as. Defaults to + volume name. + :type device: ``str`` + + :keyword ex_mode: Either 'READ_WRITE' or 'READ_ONLY' + :type ex_mode: ``str`` + + :keyword ex_boot: If true, disk will be attached as a boot disk + :type ex_boot: ``bool`` + + :return: True if successful + :rtype: ``bool`` + """ + volume_data = {} + if volume is None: + volume_data['type'] = 'SCRATCH' + else: + volume_data['type'] = 'PERSISTENT' + volume_data['source'] = volume.extra['selfLink'] + volume_data['kind'] = 'compute#attachedDisk' + volume_data['mode'] = ex_mode or 'READ_WRITE' + + if device: + volume_data['deviceName'] = device + else: + volume_data['deviceName'] = volume.name + + volume_data['boot'] = ex_boot + + request = '/zones/%s/instances/%s/attachDisk' % ( + node.extra['zone'].name, node.name) + self.connection.async_request(request, method='POST', + data=volume_data) + return True + + def detach_volume(self, volume, ex_node=None): + """ + Detach a volume from a node. 
+
+        :param volume: Volume object to detach
+        :type volume: :class:`StorageVolume`
+
+        :keyword ex_node: Node object to detach volume from (required)
+        :type ex_node: :class:`Node`
+
+        :return: True if successful
+        :rtype: ``bool``
+        """
+        if not ex_node:
+            return False
+        request = '/zones/%s/instances/%s/detachDisk?deviceName=%s' % (
+            ex_node.extra['zone'].name, ex_node.name, volume.name)
+
+        self.connection.async_request(request, method='POST',
+                                      data='ignored')
+        return True
+
+    def ex_set_volume_auto_delete(self, volume, node, auto_delete=True):
+        """
+        Sets the auto-delete flag for a volume attached to a node.
+
+        :param volume: Volume object to auto-delete
+        :type volume: :class:`StorageVolume`
+
+        :param node: Node object to auto-delete volume from
+        :type node: :class:`Node`
+
+        :keyword auto_delete: Flag to set for the auto-delete value
+        :type auto_delete: ``bool`` (default True)
+
+        :return: True if successful
+        :rtype: ``bool``
+        """
+        request = '/zones/%s/instances/%s/setDiskAutoDelete' % (
+            node.extra['zone'].name, node.name
+        )
+        delete_params = {
+            'deviceName': volume.name,
+            'autoDelete': auto_delete,
+        }
+        self.connection.async_request(request, method='POST',
+                                      params=delete_params)
+        return True
+
+    def ex_destroy_address(self, address):
+        """
+        Destroy a static address.
+
+        :param address: Address object to destroy
+        :type address: :class:`GCEAddress`
+
+        :return: True if successful
+        :rtype: ``bool``
+        """
+        request = '/regions/%s/addresses/%s' % (address.region.name,
+                                                address.name)
+
+        self.connection.async_request(request, method='DELETE')
+        return True
+
+    def ex_delete_image(self, image):
+        """
+        Delete a specific image resource.
+
+        :param image: Image object to delete
+        :type image: ``str`` or :class:`GCENodeImage`
+
+        :return: True if successful
+        :rtype: ``bool``
+        """
+        if not hasattr(image, 'name'):
+            image = self.ex_get_image(image)
+
+        request = '/global/images/%s' % (image.name)
+        self.connection.async_request(request, method='DELETE')
+        return True
+
+    def ex_deprecate_image(self, image, replacement, state=None):
+        """
+        Deprecate a specific image resource.
+
+        :param image: Image object to deprecate
+        :type image: ``str`` or :class:`GCENodeImage`
+
+        :param replacement: Image object to use as a replacement
+        :type replacement: ``str`` or :class:`GCENodeImage`
+
+        :param state: State of the image
+        :type state: ``str``
+
+        :return: True if successful
+        :rtype: ``bool``
+        """
+        if not hasattr(image, 'name'):
+            image = self.ex_get_image(image)
+
+        if not hasattr(replacement, 'name'):
+            replacement = self.ex_get_image(replacement)
+
+        if state is None:
+            state = 'DEPRECATED'
+
+        possible_states = ['DELETED', 'DEPRECATED', 'OBSOLETE']
+
+        if state not in possible_states:
+            raise ValueError('state must be one of %s'
+                             % ','.join(possible_states))
+
+        image_data = {
+            'state': state,
+            'replacement': replacement.extra['selfLink'],
+        }
+
+        request = '/global/images/%s/deprecate' % (image.name)
+
+        self.connection.request(
+            request, method='POST', data=image_data).object
+
+        return True
+
+    def ex_destroy_healthcheck(self, healthcheck):
+        """
+        Destroy a healthcheck.
+
+        :param healthcheck: Health check object to destroy
+        :type healthcheck: :class:`GCEHealthCheck`
+
+        :return: True if successful
+        :rtype: ``bool``
+        """
+        request = '/global/httpHealthChecks/%s' % (healthcheck.name)
+        self.connection.async_request(request, method='DELETE')
+        return True
+
+    def ex_destroy_firewall(self, firewall):
+        """
+        Destroy a firewall.
+ + :param firewall: Firewall object to destroy + :type firewall: :class:`GCEFirewall` + + :return: True if successful + :rtype: ``bool`` + """ + request = '/global/firewalls/%s' % (firewall.name) + self.connection.async_request(request, method='DELETE') + return True + + def ex_destroy_forwarding_rule(self, forwarding_rule): + """ + Destroy a forwarding rule. + + :param forwarding_rule: Forwarding Rule object to destroy + :type forwarding_rule: :class:`GCEForwardingRule` + + :return: True if successful + :rtype: ``bool`` + """ + request = '/regions/%s/forwardingRules/%s' % ( + forwarding_rule.region.name, forwarding_rule.name) + self.connection.async_request(request, method='DELETE') + return True + + def ex_destroy_network(self, network): + """ + Destroy a network. + + :param network: Network object to destroy + :type network: :class:`GCENetwork` + + :return: True if successful + :rtype: ``bool`` + """ + request = '/global/networks/%s' % (network.name) + self.connection.async_request(request, method='DELETE') + return True + + def destroy_node(self, node, destroy_boot_disk=False): + """ + Destroy a node. + + :param node: Node object to destroy + :type node: :class:`Node` + + :keyword destroy_boot_disk: If true, also destroy the node's + boot disk. (Note that this keyword is not + accessible from the node's .destroy() + method.) + :type destroy_boot_disk: ``bool`` + + :return: True if successful + :rtype: ``bool`` + """ + request = '/zones/%s/instances/%s' % (node.extra['zone'].name, + node.name) + self.connection.async_request(request, method='DELETE') + if destroy_boot_disk and node.extra['boot_disk']: + node.extra['boot_disk'].destroy() + return True + + def ex_destroy_multiple_nodes(self, node_list, ignore_errors=True, + destroy_boot_disk=False, poll_interval=2, + timeout=DEFAULT_TASK_COMPLETION_TIMEOUT): + """ + Destroy multiple nodes at once. 
+
+        :param node_list: List of nodes to destroy
+        :type node_list: ``list`` of :class:`Node`
+
+        :keyword ignore_errors: If true, don't raise an exception if one or
+                                more nodes fails to be destroyed.
+        :type ignore_errors: ``bool``
+
+        :keyword destroy_boot_disk: If true, also destroy the nodes' boot
+                                    disks.
+        :type destroy_boot_disk: ``bool``
+
+        :keyword poll_interval: Number of seconds between status checks.
+        :type poll_interval: ``int``
+
+        :keyword timeout: Number of seconds to wait for all nodes to be
+                          destroyed.
+        :type timeout: ``int``
+
+        :return: A list of boolean values.  One for each node.  True means
+                 that the node was successfully destroyed.
+        :rtype: ``list`` of ``bool``
+        """
+        status_list = []
+        complete = False
+        start_time = time.time()
+        for node in node_list:
+            request = '/zones/%s/instances/%s' % (node.extra['zone'].name,
+                                                  node.name)
+            try:
+                response = self.connection.request(request,
+                                                   method='DELETE').object
+            except GoogleBaseError:
+                self._catch_error(ignore_errors=ignore_errors)
+                response = None
+
+            status = {'node': node,
+                      'node_success': False,
+                      'node_response': response,
+                      'disk_success': not destroy_boot_disk,
+                      'disk_response': None}
+
+            status_list.append(status)
+
+        while not complete:
+            if (time.time() - start_time >= timeout):
+                raise Exception("Timeout (%s sec) while waiting to delete "
+                                "multiple instances" % timeout)
+            complete = True
+            for status in status_list:
+                # If one of the operations is running, check the status
+                operation = status['node_response'] or status['disk_response']
+                delete_disk = False
+                if operation:
+                    no_errors = True
+                    try:
+                        response = self.connection.request(
+                            operation['selfLink']).object
+                    except GoogleBaseError:
+                        self._catch_error(ignore_errors=ignore_errors)
+                        no_errors = False
+                        response = {'status': 'DONE'}
+                    if response['status'] == 'DONE':
+                        # If a node was deleted, update status and indicate
+                        # that the disk is ready to be deleted.
+ if status['node_response']: + status['node_response'] = None + status['node_success'] = no_errors + delete_disk = True + else: + status['disk_response'] = None + status['disk_success'] = no_errors + # If we are destroying disks, and the node has been deleted, + # destroy the disk. + if delete_disk and destroy_boot_disk: + boot_disk = status['node'].extra['boot_disk'] + if boot_disk: + request = '/zones/%s/disks/%s' % ( + boot_disk.extra['zone'].name, boot_disk.name) + try: + response = self.connection.request( + request, method='DELETE').object + except GoogleBaseError: + self._catch_error(ignore_errors=ignore_errors) + no_errors = False + response = None + status['disk_response'] = response + else: # If there is no boot disk, ignore + status['disk_success'] = True + operation = status['node_response'] or status['disk_response'] + if operation: + time.sleep(poll_interval) + complete = False + + success = [] + for status in status_list: + s = status['node_success'] and status['disk_success'] + success.append(s) + return success + + def ex_destroy_targetpool(self, targetpool): + """ + Destroy a target pool. + + :param targetpool: TargetPool object to destroy + :type targetpool: :class:`GCETargetPool` + + :return: True if successful + :rtype: ``bool`` + """ + request = '/regions/%s/targetPools/%s' % (targetpool.region.name, + targetpool.name) + + self.connection.async_request(request, method='DELETE') + return True + + def destroy_volume(self, volume): + """ + Destroy a volume. + + :param volume: Volume object to destroy + :type volume: :class:`StorageVolume` + + :return: True if successful + :rtype: ``bool`` + """ + request = '/zones/%s/disks/%s' % (volume.extra['zone'].name, + volume.name) + self.connection.async_request(request, method='DELETE') + return True + + def destroy_volume_snapshot(self, snapshot): + """ + Destroy a snapshot. 
+ + :param snapshot: Snapshot object to destroy + :type snapshot: :class:`GCESnapshot` + + :return: True if successful + :rtype: ``bool`` + """ + request = '/global/snapshots/%s' % (snapshot.name) + self.connection.async_request(request, method='DELETE') + return True + + def ex_get_address(self, name, region=None): + """ + Return an Address object based on an address name and optional region. + + :param name: The name of the address + :type name: ``str`` + + :keyword region: The region to search for the address in (set to + 'all' to search all regions) + :type region: ``str`` :class:`GCERegion` or ``None`` + + :return: An Address object for the address + :rtype: :class:`GCEAddress` + """ + region = self._set_region(region) or self._find_zone_or_region( + name, 'addresses', region=True, res_name='Address') + request = '/regions/%s/addresses/%s' % (region.name, name) + response = self.connection.request(request, method='GET').object + return self._to_address(response) + + def ex_get_healthcheck(self, name): + """ + Return a HealthCheck object based on the healthcheck name. + + :param name: The name of the healthcheck + :type name: ``str`` + + :return: A GCEHealthCheck object + :rtype: :class:`GCEHealthCheck` + """ + request = '/global/httpHealthChecks/%s' % (name) + response = self.connection.request(request, method='GET').object + return self._to_healthcheck(response) + + def ex_get_firewall(self, name): + """ + Return a Firewall object based on the firewall name. + + :param name: The name of the firewall + :type name: ``str`` + + :return: A GCEFirewall object + :rtype: :class:`GCEFirewall` + """ + request = '/global/firewalls/%s' % (name) + response = self.connection.request(request, method='GET').object + return self._to_firewall(response) + + def ex_get_forwarding_rule(self, name, region=None): + """ + Return a Forwarding Rule object based on the forwarding rule name. 
+ + :param name: The name of the forwarding rule + :type name: ``str`` + + :keyword region: The region to search for the rule in (set to 'all' + to search all regions). + :type region: ``str`` or ``None`` + + :return: A GCEForwardingRule object + :rtype: :class:`GCEForwardingRule` + """ + region = self._set_region(region) or self._find_zone_or_region( + name, 'forwardingRules', region=True, res_name='ForwardingRule') + request = '/regions/%s/forwardingRules/%s' % (region.name, name) + response = self.connection.request(request, method='GET').object + return self._to_forwarding_rule(response) + + def ex_get_image(self, partial_name): + """ + Return an GCENodeImage object based on the name or link provided. + + :param partial_name: The name, partial name, or full path of a GCE + image. + :type partial_name: ``str`` + + :return: GCENodeImage object based on provided information or None if + an image with that name is not found. + :rtype: :class:`GCENodeImage` or ``None`` + """ + if partial_name.startswith('https://'): + response = self.connection.request(partial_name, method='GET') + return self._to_node_image(response.object) + image = self._match_images(None, partial_name) + if not image: + if (partial_name.startswith('debian') or + partial_name.startswith('backports')): + image = self._match_images('debian-cloud', partial_name) + elif partial_name.startswith('centos'): + image = self._match_images('centos-cloud', partial_name) + elif partial_name.startswith('sles'): + image = self._match_images('suse-cloud', partial_name) + elif partial_name.startswith('rhel'): + image = self._match_images('rhel-cloud', partial_name) + elif partial_name.startswith('windows'): + image = self._match_images('windows-cloud', partial_name) + elif partial_name.startswith('container-vm'): + image = self._match_images('google-containers', partial_name) + + return image + + def ex_get_network(self, name): + """ + Return a Network object based on a network name. 
+ + :param name: The name of the network + :type name: ``str`` + + :return: A Network object for the network + :rtype: :class:`GCENetwork` + """ + request = '/global/networks/%s' % (name) + response = self.connection.request(request, method='GET').object + return self._to_network(response) + + def ex_get_node(self, name, zone=None): + """ + Return a Node object based on a node name and optional zone. + + :param name: The name of the node + :type name: ``str`` + + :keyword zone: The zone to search for the node in. If set to 'all', + search all zones for the instance. + :type zone: ``str`` or :class:`GCEZone` or + :class:`NodeLocation` or ``None`` + + :return: A Node object for the node + :rtype: :class:`Node` + """ + zone = self._set_zone(zone) or self._find_zone_or_region( + name, 'instances', res_name='Node') + request = '/zones/%s/instances/%s' % (zone.name, name) + response = self.connection.request(request, method='GET').object + return self._to_node(response) + + def ex_get_project(self): + """ + Return a Project object with project-wide information. + + :return: A GCEProject object + :rtype: :class:`GCEProject` + """ + response = self.connection.request('', method='GET').object + return self._to_project(response) + + def ex_get_size(self, name, zone=None): + """ + Return a size object based on a machine type name and zone. + + :param name: The name of the node + :type name: ``str`` + + :keyword zone: The zone to search for the machine type in + :type zone: ``str`` or :class:`GCEZone` or + :class:`NodeLocation` or ``None`` + + :return: A GCENodeSize object for the machine type + :rtype: :class:`GCENodeSize` + """ + zone = zone or self.zone + if not hasattr(zone, 'name'): + zone = self.ex_get_zone(zone) + request = '/zones/%s/machineTypes/%s' % (zone.name, name) + response = self.connection.request(request, method='GET').object + return self._to_node_size(response) + + def ex_get_snapshot(self, name): + """ + Return a Snapshot object based on snapshot name. 
+ + :param name: The name of the snapshot + :type name: ``str`` + + :return: A GCESnapshot object for the snapshot + :rtype: :class:`GCESnapshot` + """ + request = '/global/snapshots/%s' % (name) + response = self.connection.request(request, method='GET').object + return self._to_snapshot(response) + + def ex_get_volume(self, name, zone=None): + """ + Return a Volume object based on a volume name and optional zone. + + :param name: The name of the volume + :type name: ``str`` + + :keyword zone: The zone to search for the volume in (set to 'all' to + search all zones) + :type zone: ``str`` or :class:`GCEZone` or :class:`NodeLocation` + or ``None`` + + :return: A StorageVolume object for the volume + :rtype: :class:`StorageVolume` + """ + zone = self._set_zone(zone) or self._find_zone_or_region( + name, 'disks', res_name='Volume') + request = '/zones/%s/disks/%s' % (zone.name, name) + response = self.connection.request(request, method='GET').object + return self._to_storage_volume(response) + + def ex_get_region(self, name): + """ + Return a Region object based on the region name. + + :param name: The name of the region. + :type name: ``str`` + + :return: A GCERegion object for the region + :rtype: :class:`GCERegion` + """ + if name.startswith('https://'): + short_name = self._get_components_from_path(name)['name'] + request = name + else: + short_name = name + request = '/regions/%s' % (name) + # Check region cache first + if short_name in self.region_dict: + return self.region_dict[short_name] + # Otherwise, look up region information + response = self.connection.request(request, method='GET').object + return self._to_region(response) + + def ex_get_targetpool(self, name, region=None): + """ + Return a TargetPool object based on a name and optional region. + + :param name: The name of the target pool + :type name: ``str`` + + :keyword region: The region to search for the target pool in (set to + 'all' to search all regions). 
+ :type region: ``str`` or :class:`GCERegion` or ``None`` + + :return: A TargetPool object for the pool + :rtype: :class:`GCETargetPool` + """ + region = self._set_region(region) or self._find_zone_or_region( + name, 'targetPools', region=True, res_name='TargetPool') + request = '/regions/%s/targetPools/%s' % (region.name, name) + response = self.connection.request(request, method='GET').object + return self._to_targetpool(response) + + def ex_get_zone(self, name): + """ + Return a Zone object based on the zone name. + + :param name: The name of the zone. + :type name: ``str`` + + :return: A GCEZone object for the zone or None if not found + :rtype: :class:`GCEZone` or ``None`` + """ + if name.startswith('https://'): + short_name = self._get_components_from_path(name)['name'] + request = name + else: + short_name = name + request = '/zones/%s' % (name) + # Check zone cache first + if short_name in self.zone_dict: + return self.zone_dict[short_name] + # Otherwise, look up zone information + try: + response = self.connection.request(request, method='GET').object + except ResourceNotFoundError: + return None + return self._to_zone(response) + + def ex_copy_image(self, name, url, description=None): + """ + Copy an image to your image collection. + + :param name: The name of the image + :type name: ``str`` + + :param url: The URL to the image. The URL can start with `gs://` + :param url: ``str`` + + :param description: The description of the image + :type description: ``str`` + + :return: NodeImage object based on provided information or None if an + image with that name is not found. 
+ :rtype: :class:`NodeImage` or ``None`` + """ + + # the URL for an image can start with gs:// + if url.startswith('gs://'): + url = url.replace('gs://', 'https://storage.googleapis.com/', 1) + + image_data = { + 'name': name, + 'description': description, + 'sourceType': 'RAW', + 'rawDisk': { + 'source': url, + }, + } + + request = '/global/images' + self.connection.async_request(request, method='POST', + data=image_data) + return self.ex_get_image(name) + + def _ex_connection_class_kwargs(self): + return {'auth_type': self.auth_type, + 'project': self.project, + 'scopes': self.scopes} + + def _catch_error(self, ignore_errors=False): + """ + Catch an exception and raise it unless asked to ignore it. + + :keyword ignore_errors: If true, just return the error. Otherwise, + raise the error. + :type ignore_errors: ``bool`` + + :return: The exception that was raised. + :rtype: :class:`Exception` + """ + e = sys.exc_info()[1] + if ignore_errors: + return e + else: + raise e + + def _get_components_from_path(self, path): + """ + Return a dictionary containing name & zone/region from a request path. + + :param path: HTTP request path (e.g. + '/project/pjt-name/zones/us-central1-a/instances/mynode') + :type path: ``str`` + + :return: Dictionary containing name and zone/region of resource + :rtype ``dict`` + """ + region = None + zone = None + glob = False + components = path.split('/') + name = components[-1] + if components[-4] == 'regions': + region = components[-3] + elif components[-4] == 'zones': + zone = components[-3] + elif components[-3] == 'global': + glob = True + + return {'name': name, 'region': region, 'zone': zone, 'global': glob} + + def _get_region_from_zone(self, zone): + """ + Return the Region object that contains the given Zone object. 
+ + :param zone: Zone object + :type zone: :class:`GCEZone` + + :return: Region object that contains the zone + :rtype: :class:`GCERegion` + """ + for region in self.region_list: + zones = [z.name for z in region.zones] + if zone.name in zones: + return region + + def _find_zone_or_region(self, name, res_type, region=False, + res_name=None): + """ + Find the zone or region for a named resource. + + :param name: Name of resource to find + :type name: ``str`` + + :param res_type: Type of resource to find. + Examples include: 'disks', 'instances' or 'addresses' + :type res_type: ``str`` + + :keyword region: If True, search regions instead of zones + :type region: ``bool`` + + :keyword res_name: The name of the resource type for error messages. + Examples: 'Volume', 'Node', 'Address' + :keyword res_name: ``str`` + + :return: Zone/Region object for the zone/region for the resource. + :rtype: :class:`GCEZone` or :class:`GCERegion` + """ + if region: + rz = 'region' + else: + rz = 'zone' + rz_name = None + res_name = res_name or res_type + request = '/aggregated/%s' % (res_type) + res_list = self.connection.request(request).object + for k, v in res_list['items'].items(): + for res in v.get(res_type, []): + if res['name'] == name: + rz_name = k.replace('%ss/' % (rz), '') + break + if not rz_name: + raise ResourceNotFoundError( + '%s \'%s\' not found in any %s.' % (res_name, name, rz), + None, None) + else: + getrz = getattr(self, 'ex_get_%s' % (rz)) + return getrz(rz_name) + + def _match_images(self, project, partial_name): + """ + Find the latest image, given a partial name. + + For example, providing 'debian-7' will return the image object for the + most recent image with a name that starts with 'debian-7' in the + supplied project. If no project is given, it will search your own + project. + + :param project: The name of the project to search for images. + Examples include: 'debian-cloud' and 'centos-cloud'. 
+ :type project: ``str`` or ``None`` + + :param partial_name: The full name or beginning of a name for an + image. + :type partial_name: ``str`` + + :return: The latest image object that matches the partial name or None + if no matching image is found. + :rtype: :class:`GCENodeImage` or ``None`` + """ + project_images = self.list_images(project) + partial_match = [] + for image in project_images: + if image.name == partial_name: + return image + if image.name.startswith(partial_name): + ts = timestamp_to_datetime(image.extra['creationTimestamp']) + if not partial_match or partial_match[0] < ts: + partial_match = [ts, image] + + if partial_match: + return partial_match[1] + + def _set_region(self, region): + """ + Return the region to use for listing resources. + + :param region: A name, region object, None, or 'all' + :type region: ``str`` or :class:`GCERegion` or ``None`` + + :return: A region object or None if all regions should be considered + :rtype: :class:`GCERegion` or ``None`` + """ + region = region or self.region + + if region == 'all' or region is None: + return None + + if not hasattr(region, 'name'): + region = self.ex_get_region(region) + return region + + def _set_zone(self, zone): + """ + Return the zone to use for listing resources. + + :param zone: A name, zone object, None, or 'all' + :type region: ``str`` or :class:`GCEZone` or ``None`` + + :return: A zone object or None if all zones should be considered + :rtype: :class:`GCEZone` or ``None`` + """ + zone = zone or self.zone + + if zone == 'all' or zone is None: + return None + + if not hasattr(zone, 'name'): + zone = self.ex_get_zone(zone) + return zone + + def _create_node_req(self, name, size, image, location, network, + tags=None, metadata=None, boot_disk=None, + external_ip='ephemeral'): + """ + Returns a request and body to create a new node. This is a helper + method to support both :class:`create_node` and + :class:`ex_create_multiple_nodes`. 
+ + :param name: The name of the node to create. + :type name: ``str`` + + :param size: The machine type to use. + :type size: :class:`GCENodeSize` + + :param image: The image to use to create the node (or, if using a + persistent disk, the image the disk was created from). + :type image: :class:`GCENodeImage` + + :param location: The location (zone) to create the node in. + :type location: :class:`NodeLocation` or :class:`GCEZone` + + :param network: The network to associate with the node. + :type network: :class:`GCENetwork` + + :keyword tags: A list of tags to associate with the node. + :type tags: ``list`` of ``str`` + + :keyword metadata: Metadata dictionary for instance. + :type metadata: ``dict`` + + :keyword boot_disk: Persistent boot disk to attach. + :type :class:`StorageVolume` + + :keyword external_ip: The external IP address to use. If 'ephemeral' + (default), a new non-static address will be + used. If 'None', then no external address will + be used. To use an existing static IP address, + a GCEAddress object should be passed in. + :type external_ip: :class:`GCEAddress` or ``str`` or None + + :return: A tuple containing a request string and a node_data dict. 
+ :rtype: ``tuple`` of ``str`` and ``dict`` + """ + node_data = {} + node_data['machineType'] = size.extra['selfLink'] + node_data['name'] = name + if tags: + node_data['tags'] = {'items': tags} + if metadata: + node_data['metadata'] = metadata + + if boot_disk: + disks = [{'kind': 'compute#attachedDisk', + 'boot': True, + 'type': 'PERSISTENT', + 'mode': 'READ_WRITE', + 'deviceName': boot_disk.name, + 'zone': boot_disk.extra['zone'].extra['selfLink'], + 'source': boot_disk.extra['selfLink']}] + node_data['disks'] = disks + else: + node_data['image'] = image.extra['selfLink'] + + ni = [{'kind': 'compute#instanceNetworkInterface', + 'network': network.extra['selfLink']}] + if external_ip: + access_configs = [{'name': 'External NAT', + 'type': 'ONE_TO_ONE_NAT'}] + if hasattr(external_ip, 'address'): + access_configs[0]['natIP'] = external_ip.address + ni[0]['accessConfigs'] = access_configs + node_data['networkInterfaces'] = ni + + request = '/zones/%s/instances' % (location.name) + + return request, node_data + + def _multi_create_disk(self, status, node_attrs): + """Create disk for ex_create_multiple_nodes. + + :param status: Dictionary for holding node/disk creation status. + (This dictionary is modified by this method) + :type status: ``dict`` + + :param node_attrs: Dictionary for holding node attribute information. + (size, image, location, etc.) + :type node_attrs: ``dict`` + """ + disk = None + # Check for existing disk + if node_attrs['use_existing_disk']: + try: + disk = self.ex_get_volume(status['name'], + node_attrs['location']) + except ResourceNotFoundError: + pass + + if disk: + status['disk'] = disk + else: + # Create disk and return response object back in the status dict. + # Or, if there is an error, mark as failed. 
+ disk_req, disk_data, disk_params = self._create_vol_req( + None, status['name'], location=node_attrs['location'], + image=node_attrs['image']) + try: + disk_res = self.connection.request( + disk_req, method='POST', data=disk_data, + params=disk_params).object + except GoogleBaseError: + e = self._catch_error( + ignore_errors=node_attrs['ignore_errors']) + error = e.value + code = e.code + disk_res = None + status['disk'] = GCEFailedDisk(status['name'], + error, code) + status['disk_response'] = disk_res + + def _multi_check_disk(self, status, node_attrs): + """Check disk status for ex_create_multiple_nodes. + + :param status: Dictionary for holding node/disk creation status. + (This dictionary is modified by this method) + :type status: ``dict`` + + :param node_attrs: Dictionary for holding node attribute information. + (size, image, location, etc.) + :type node_attrs: ``dict`` + """ + error = None + try: + response = self.connection.request( + status['disk_response']['selfLink']).object + except GoogleBaseError: + e = self._catch_error(ignore_errors=node_attrs['ignore_errors']) + error = e.value + code = e.code + response = {'status': 'DONE'} + if response['status'] == 'DONE': + status['disk_response'] = None + if error: + status['disk'] = GCEFailedDisk(status['name'], error, code) + else: + status['disk'] = self.ex_get_volume(status['name'], + node_attrs['location']) + + def _multi_create_node(self, status, node_attrs): + """Create node for ex_create_multiple_nodes. + + :param status: Dictionary for holding node/disk creation status. + (This dictionary is modified by this method) + :type status: ``dict`` + + :param node_attrs: Dictionary for holding node attribute information. + (size, image, location, etc.) + :type node_attrs: ``dict`` + """ + # If disk has an error, set the node as failed and return + if hasattr(status['disk'], 'error'): + status['node'] = status['disk'] + return + + # Create node and return response object in status dictionary. 
+ # Or, if there is an error, mark as failed. + request, node_data = self._create_node_req( + status['name'], node_attrs['size'], node_attrs['image'], + node_attrs['location'], node_attrs['network'], node_attrs['tags'], + node_attrs['metadata'], boot_disk=status['disk'], + external_ip=node_attrs['external_ip']) + try: + node_res = self.connection.request( + request, method='POST', data=node_data).object + except GoogleBaseError: + e = self._catch_error(ignore_errors=node_attrs['ignore_errors']) + error = e.value + code = e.code + node_res = None + status['node'] = GCEFailedNode(status['name'], + error, code) + status['node_response'] = node_res + + def _multi_check_node(self, status, node_attrs): + """Check node status for ex_create_multiple_nodes. + + :param status: Dictionary for holding node/disk creation status. + (This dictionary is modified by this method) + :type status: ``dict`` + + :param node_attrs: Dictionary for holding node attribute information. + (size, image, location, etc.) + :type node_attrs: ``dict`` + """ + error = None + try: + response = self.connection.request( + status['node_response']['selfLink']).object + except GoogleBaseError: + e = self._catch_error(ignore_errors=node_attrs['ignore_errors']) + error = e.value + code = e.code + response = {'status': 'DONE'} + if response['status'] == 'DONE': + status['node_response'] = None + if error: + status['node'] = GCEFailedNode(status['name'], + error, code) + else: + status['node'] = self.ex_get_node(status['name'], + node_attrs['location']) + + def _create_vol_req(self, size, name, location=None, snapshot=None, + image=None): + """ + Assemble the request/data for creating a volume. + + Used by create_volume and ex_create_multiple_nodes + + :param size: Size of volume to create (in GB). Can be None if image + or snapshot is supplied. 
+ :type size: ``int`` or ``str`` or ``None`` + + :param name: Name of volume to create + :type name: ``str`` + + :keyword location: Location (zone) to create the volume in + :type location: ``str`` or :class:`GCEZone` or + :class:`NodeLocation` or ``None`` + + :keyword snapshot: Snapshot to create image from + :type snapshot: :class:`GCESnapshot` or ``str`` or ``None`` + + :keyword image: Image to create disk from. + :type image: :class:`GCENodeImage` or ``str`` or ``None`` + + :return: Tuple containing the request string, the data dictionary and + the URL parameters + :rtype: ``tuple`` + """ + volume_data = {} + params = None + volume_data['name'] = name + if size: + volume_data['sizeGb'] = str(size) + if image: + if not hasattr(image, 'name'): + image = self.ex_get_image(image) + params = {'sourceImage': image.extra['selfLink']} + volume_data['description'] = 'Image: %s' % ( + image.extra['selfLink']) + if snapshot: + if not hasattr(snapshot, 'name'): + # Check for full URI to not break backward-compatibility + if snapshot.startswith('https'): + snapshot = self._get_components_from_path(snapshot)['name'] + snapshot = self.ex_get_snapshot(snapshot) + snapshot_link = snapshot.extra['selfLink'] + volume_data['sourceSnapshot'] = snapshot_link + volume_data['description'] = 'Snapshot: %s' % (snapshot_link) + location = location or self.zone + if not hasattr(location, 'name'): + location = self.ex_get_zone(location) + request = '/zones/%s/disks' % (location.name) + + return request, volume_data, params + + def _to_address(self, address): + """ + Return an Address object from the json-response dictionary. + + :param address: The dictionary describing the address. 
+ :type address: ``dict`` + + :return: Address object + :rtype: :class:`GCEAddress` + """ + extra = {} + + region = self.ex_get_region(address['region']) + + extra['selfLink'] = address.get('selfLink') + extra['status'] = address.get('status') + extra['creationTimestamp'] = address.get('creationTimestamp') + + return GCEAddress(id=address['id'], name=address['name'], + address=address['address'], + region=region, driver=self, extra=extra) + + def _to_healthcheck(self, healthcheck): + """ + Return a HealthCheck object from the json-response dictionary. + + :param healthcheck: The dictionary describing the healthcheck. + :type healthcheck: ``dict`` + + :return: HealthCheck object + :rtype: :class:`GCEHealthCheck` + """ + extra = {} + extra['selfLink'] = healthcheck.get('selfLink') + extra['creationTimestamp'] = healthcheck.get('creationTimestamp') + extra['description'] = healthcheck.get('description') + extra['host'] = healthcheck.get('host') + + return GCEHealthCheck( + id=healthcheck['id'], name=healthcheck['name'], + path=healthcheck.get('requestPath'), port=healthcheck.get('port'), + interval=healthcheck.get('checkIntervalSec'), + timeout=healthcheck.get('timeoutSec'), + unhealthy_threshold=healthcheck.get('unhealthyThreshold'), + healthy_threshold=healthcheck.get('healthyThreshold'), + driver=self, extra=extra) + + def _to_firewall(self, firewall): + """ + Return a Firewall object from the json-response dictionary. + + :param firewall: The dictionary describing the firewall. 
+ :type firewall: ``dict`` + + :return: Firewall object + :rtype: :class:`GCEFirewall` + """ + extra = {} + extra['selfLink'] = firewall.get('selfLink') + extra['creationTimestamp'] = firewall.get('creationTimestamp') + extra['description'] = firewall.get('description') + extra['network_name'] = self._get_components_from_path( + firewall['network'])['name'] + + network = self.ex_get_network(extra['network_name']) + source_ranges = firewall.get('sourceRanges') + source_tags = firewall.get('sourceTags') + target_tags = firewall.get('targetTags') + + return GCEFirewall(id=firewall['id'], name=firewall['name'], + allowed=firewall.get('allowed'), network=network, + source_ranges=source_ranges, + source_tags=source_tags, + target_tags=target_tags, + driver=self, extra=extra) + + def _to_forwarding_rule(self, forwarding_rule): + """ + Return a Forwarding Rule object from the json-response dictionary. + + :param forwarding_rule: The dictionary describing the rule. + :type forwarding_rule: ``dict`` + + :return: ForwardingRule object + :rtype: :class:`GCEForwardingRule` + """ + extra = {} + extra['selfLink'] = forwarding_rule.get('selfLink') + extra['portRange'] = forwarding_rule.get('portRange') + extra['creationTimestamp'] = forwarding_rule.get('creationTimestamp') + extra['description'] = forwarding_rule.get('description') + + region = self.ex_get_region(forwarding_rule['region']) + targetpool = self.ex_get_targetpool( + self._get_components_from_path(forwarding_rule['target'])['name']) + + return GCEForwardingRule(id=forwarding_rule['id'], + name=forwarding_rule['name'], region=region, + address=forwarding_rule.get('IPAddress'), + protocol=forwarding_rule.get('IPProtocol'), + targetpool=targetpool, + driver=self, extra=extra) + + def _to_network(self, network): + """ + Return a Network object from the json-response dictionary. + + :param network: The dictionary describing the network. 
+ :type network: ``dict`` + + :return: Network object + :rtype: :class:`GCENetwork` + """ + extra = {} + + extra['selfLink'] = network.get('selfLink') + extra['gatewayIPv4'] = network.get('gatewayIPv4') + extra['description'] = network.get('description') + extra['creationTimestamp'] = network.get('creationTimestamp') + + return GCENetwork(id=network['id'], name=network['name'], + cidr=network.get('IPv4Range'), + driver=self, extra=extra) + + def _to_node_image(self, image): + """ + Return an Image object from the json-response dictionary. + + :param image: The dictionary describing the image. + :type image: ``dict`` + + :return: Image object + :rtype: :class:`GCENodeImage` + """ + extra = {} + extra['preferredKernel'] = image.get('preferredKernel', None) + extra['description'] = image.get('description', None) + extra['creationTimestamp'] = image.get('creationTimestamp') + extra['selfLink'] = image.get('selfLink') + extra['deprecated'] = image.get('deprecated', None) + + return GCENodeImage(id=image['id'], name=image['name'], driver=self, + extra=extra) + + def _to_node_location(self, location): + """ + Return a Location object from the json-response dictionary. + + :param location: The dictionary describing the location. + :type location: ``dict`` + + :return: Location object + :rtype: :class:`NodeLocation` + """ + return NodeLocation(id=location['id'], name=location['name'], + country=location['name'].split('-')[0], + driver=self) + + def _to_node(self, node): + """ + Return a Node object from the json-response dictionary. + + :param node: The dictionary describing the node. 
+ :type node: ``dict`` + + :return: Node object + :rtype: :class:`Node` + """ + public_ips = [] + private_ips = [] + extra = {} + + extra['status'] = node.get('status') + extra['description'] = node.get('description') + extra['zone'] = self.ex_get_zone(node['zone']) + extra['image'] = node.get('image') + extra['machineType'] = node.get('machineType') + extra['disks'] = node.get('disks', []) + extra['networkInterfaces'] = node.get('networkInterfaces') + extra['id'] = node['id'] + extra['selfLink'] = node.get('selfLink') + extra['name'] = node['name'] + extra['metadata'] = node.get('metadata', {}) + extra['tags_fingerprint'] = node['tags']['fingerprint'] + extra['scheduling'] = node.get('scheduling', {}) + extra['deprecated'] = True if node.get('deprecated', None) else False + + for disk in extra['disks']: + if disk.get('boot') and disk.get('type') == 'PERSISTENT': + bd = self._get_components_from_path(disk['source']) + extra['boot_disk'] = self.ex_get_volume(bd['name'], bd['zone']) + + if 'items' in node['tags']: + tags = node['tags']['items'] + else: + tags = [] + extra['tags'] = tags + + for network_interface in node.get('networkInterfaces', []): + private_ips.append(network_interface.get('networkIP')) + for access_config in network_interface.get('accessConfigs', []): + public_ips.append(access_config.get('natIP')) + + # For the node attributes, use just machine and image names, not full + # paths. Full paths are available in the "extra" dict. + if extra['image']: + image = self._get_components_from_path(extra['image'])['name'] + else: + image = None + size = self._get_components_from_path(node['machineType'])['name'] + + return Node(id=node['id'], name=node['name'], + state=self.NODE_STATE_MAP[node['status']], + public_ips=public_ips, private_ips=private_ips, + driver=self, size=size, image=image, extra=extra) + + def _to_node_size(self, machine_type): + """ + Return a Size object from the json-response dictionary. 
+ + :param machine_type: The dictionary describing the machine. + :type machine_type: ``dict`` + + :return: Size object + :rtype: :class:`GCENodeSize` + """ + extra = {} + extra['selfLink'] = machine_type.get('selfLink') + extra['zone'] = self.ex_get_zone(machine_type['zone']) + extra['description'] = machine_type.get('description') + extra['guestCpus'] = machine_type.get('guestCpus') + extra['creationTimestamp'] = machine_type.get('creationTimestamp') + try: + price = self._get_size_price(size_id=machine_type['name']) + except KeyError: + price = None + + return GCENodeSize(id=machine_type['id'], name=machine_type['name'], + ram=machine_type.get('memoryMb'), + disk=machine_type.get('imageSpaceGb'), + bandwidth=0, price=price, driver=self, extra=extra) + + def _to_project(self, project): + """ + Return a Project object from the json-response dictionary. + + :param project: The dictionary describing the project. + :type project: ``dict`` + + :return: Project object + :rtype: :class:`GCEProject` + """ + extra = {} + extra['selfLink'] = project.get('selfLink') + extra['creationTimestamp'] = project.get('creationTimestamp') + extra['description'] = project.get('description') + metadata = project['commonInstanceMetadata'].get('items') + + return GCEProject(id=project['id'], name=project['name'], + metadata=metadata, quotas=project.get('quotas'), + driver=self, extra=extra) + + def _to_region(self, region): + """ + Return a Region object from the json-response dictionary. + + :param region: The dictionary describing the region. 
+ :type region: ``dict`` + + :return: Region object + :rtype: :class:`GCERegion` + """ + extra = {} + extra['selfLink'] = region.get('selfLink') + extra['creationTimestamp'] = region.get('creationTimestamp') + extra['description'] = region.get('description') + + quotas = region.get('quotas') + zones = [self.ex_get_zone(z) for z in region.get('zones', [])] + # Work around a bug that will occasionally list missing zones in the + # region output + zones = [z for z in zones if z is not None] + deprecated = region.get('deprecated') + + return GCERegion(id=region['id'], name=region['name'], + status=region.get('status'), zones=zones, + quotas=quotas, deprecated=deprecated, + driver=self, extra=extra) + + def _to_snapshot(self, snapshot): + """ + Return a Snapshot object from the json-response dictionary. + + :param snapshot: The dictionary describing the snapshot + :type snapshot: ``dict`` + + :return: Snapshot object + :rtype: :class:`VolumeSnapshot` + """ + extra = {} + extra['selfLink'] = snapshot.get('selfLink') + extra['creationTimestamp'] = snapshot.get('creationTimestamp') + extra['sourceDisk'] = snapshot.get('sourceDisk') + + return GCESnapshot(id=snapshot['id'], name=snapshot['name'], + size=snapshot['diskSizeGb'], + status=snapshot.get('status'), driver=self, + extra=extra) + + def _to_storage_volume(self, volume): + """ + Return a Volume object from the json-response dictionary. + + :param volume: The dictionary describing the volume. 
+ :type volume: ``dict`` + + :return: Volume object + :rtype: :class:`StorageVolume` + """ + extra = {} + extra['selfLink'] = volume.get('selfLink') + extra['zone'] = self.ex_get_zone(volume['zone']) + extra['status'] = volume.get('status') + extra['creationTimestamp'] = volume.get('creationTimestamp') + extra['description'] = volume.get('description') + + return StorageVolume(id=volume['id'], name=volume['name'], + size=volume['sizeGb'], driver=self, extra=extra) + + def _to_targetpool(self, targetpool): + """ + Return a Target Pool object from the json-response dictionary. + + :param targetpool: The dictionary describing the volume. + :type targetpool: ``dict`` + + :return: Target Pool object + :rtype: :class:`GCETargetPool` + """ + extra = {} + extra['selfLink'] = targetpool.get('selfLink') + extra['description'] = targetpool.get('description') + region = self.ex_get_region(targetpool['region']) + healthcheck_list = [self.ex_get_healthcheck(h.split('/')[-1]) for h + in targetpool.get('healthChecks', [])] + node_list = [] + for n in targetpool.get('instances', []): + # Nodes that do not exist can be part of a target pool. If the + # node does not exist, use the URL of the node instead of the node + # object. + comp = self._get_components_from_path(n) + try: + node = self.ex_get_node(comp['name'], comp['zone']) + except ResourceNotFoundError: + node = n + node_list.append(node) + + return GCETargetPool(id=targetpool['id'], name=targetpool['name'], + region=region, healthchecks=healthcheck_list, + nodes=node_list, driver=self, extra=extra) + + def _to_zone(self, zone): + """ + Return a Zone object from the json-response dictionary. + + :param zone: The dictionary describing the zone. 
+ :type zone: ``dict`` + + :return: Zone object + :rtype: :class:`GCEZone` + """ + extra = {} + extra['selfLink'] = zone.get('selfLink') + extra['creationTimestamp'] = zone.get('creationTimestamp') + extra['description'] = zone.get('description') + + deprecated = zone.get('deprecated') + + return GCEZone(id=zone['id'], name=zone['name'], status=zone['status'], + maintenance_windows=zone.get('maintenanceWindows'), + deprecated=deprecated, driver=self, extra=extra) diff --git a/libcloud/test/compute/fixtures/gce/global_snapshots_lcsnapshot.json b/libcloud/test/compute/fixtures/gce/global_snapshots_lcsnapshot.json index 9b071a941e..12c6cf0398 100644 --- a/libcloud/test/compute/fixtures/gce/global_snapshots_lcsnapshot.json +++ b/libcloud/test/compute/fixtures/gce/global_snapshots_lcsnapshot.json @@ -1,7 +1,7 @@ { "creationTimestamp": "2013-12-16T13:03:51.345-08:00", "description": "", - "diskSizeGb": "1", + "diskSizeGb": "10", "id": "17482266715940883688", "kind": "compute#snapshot", "name": "lcsnapshot", @@ -9,4 +9,4 @@ "sourceDisk": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/disks/lcdisk", "sourceDiskId": "-2511816066479461182", "status": "READY" -} \ No newline at end of file +} diff --git a/libcloud/test/compute/fixtures/gce/zones_us-central1-a_disks.json b/libcloud/test/compute/fixtures/gce/zones_us-central1-a_disks.json index 68d40fc9a0..fc39c99c58 100644 --- a/libcloud/test/compute/fixtures/gce/zones_us-central1-a_disks.json +++ b/libcloud/test/compute/fixtures/gce/zones_us-central1-a_disks.json @@ -6,8 +6,10 @@ "id": "08045379695757218000", "kind": "compute#disk", "name": "lcdisk", + "description": "I'm a happy little SSD", + "type": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/diskTypes/pd-ssd", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/disks/lcdisk", - "sizeGb": "1", + "sizeGb": "10", "status": "READY", "zone": 
"https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" }, @@ -17,6 +19,8 @@ "id": "0265567475385851075", "kind": "compute#disk", "name": "node-name", + "description": "I'm a happy little disk", + "type": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/diskTypes/pd-standard", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/disks/node-name", "sizeGb": "10", "sourceImage": "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-wheezy-v20131120", @@ -27,4 +31,4 @@ ], "kind": "compute#diskList", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/disks" -} \ No newline at end of file +} diff --git a/libcloud/test/compute/fixtures/gce/zones_us-central1-a_disks_lcdisk.json b/libcloud/test/compute/fixtures/gce/zones_us-central1-a_disks_lcdisk.json index d88ba6ee42..af66fb8522 100644 --- a/libcloud/test/compute/fixtures/gce/zones_us-central1-a_disks_lcdisk.json +++ b/libcloud/test/compute/fixtures/gce/zones_us-central1-a_disks_lcdisk.json @@ -3,8 +3,10 @@ "id": "16109451798967042451", "kind": "compute#disk", "name": "lcdisk", + "description": "I'm a happy little SSD", + "type": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/diskTypes/pd-ssd", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a/disks/lcdisk", - "sizeGb": "1", + "sizeGb": "10", "status": "READY", "zone": "https://www.googleapis.com/compute/v1/projects/project_name/zones/us-central1-a" -} \ No newline at end of file +} diff --git a/libcloud/test/compute/test_gce.py b/libcloud/test/compute/test_gce.py index 963675bb23..7d3855e6f6 100644 --- a/libcloud/test/compute/test_gce.py +++ b/libcloud/test/compute/test_gce.py @@ -343,11 +343,19 @@ def test_ex_create_volume_snapshot(self): volume = self.driver.ex_get_volume('lcdisk') snapshot = volume.snapshot(snapshot_name) 
self.assertEqual(snapshot.name, snapshot_name) - self.assertEqual(snapshot.size, '1') + self.assertEqual(snapshot.size, '10') + + def test_create_volume_ssd(self): + volume_name = 'lcdisk' + size = 10 + volume = self.driver.create_volume(size, volume_name, + ex_disk_type='pd-ssd') + self.assertTrue(isinstance(volume, StorageVolume)) + self.assertEqual(volume.extra['type'], 'pd-ssd') def test_create_volume(self): volume_name = 'lcdisk' - size = 1 + size = 10 volume = self.driver.create_volume(size, volume_name) self.assertTrue(isinstance(volume, StorageVolume)) self.assertEqual(volume.name, volume_name) @@ -595,15 +603,16 @@ def test_ex_get_snapshot(self): snapshot_name = 'lcsnapshot' snapshot = self.driver.ex_get_snapshot(snapshot_name) self.assertEqual(snapshot.name, snapshot_name) - self.assertEqual(snapshot.size, '1') + self.assertEqual(snapshot.size, '10') self.assertEqual(snapshot.status, 'READY') def test_ex_get_volume(self): volume_name = 'lcdisk' volume = self.driver.ex_get_volume(volume_name) self.assertEqual(volume.name, volume_name) - self.assertEqual(volume.size, '1') + self.assertEqual(volume.size, '10') self.assertEqual(volume.extra['status'], 'READY') + self.assertEqual(volume.extra['type'], 'pd-ssd') def test_ex_get_zone(self): zone_name = 'us-central1-b' From feea74d3fd76b421f05b9cda8b85221b875bdb8e Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Mon, 21 Jul 2014 12:17:09 +0200 Subject: [PATCH 118/315] Also run Python 3.4 on travis. --- .travis.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.travis.yml b/.travis.yml index 302c91d0f9..1c029d25b5 100644 --- a/.travis.yml +++ b/.travis.yml @@ -11,6 +11,7 @@ env: - TOX_ENV=pypy - TOX_ENV=py32 - TOX_ENV=py33 + - TOX_ENV=py34 - TOX_ENV=docs install: From 075383509e507f88b2c105b4f5fde5b4d4af5e68 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Mon, 21 Jul 2014 15:52:19 +0200 Subject: [PATCH 119/315] Remove duplicated file. 
--- libcloud/compute/drivers/gce.py.orig | 3353 -------------------------- 1 file changed, 3353 deletions(-) delete mode 100644 libcloud/compute/drivers/gce.py.orig diff --git a/libcloud/compute/drivers/gce.py.orig b/libcloud/compute/drivers/gce.py.orig deleted file mode 100644 index c216c64179..0000000000 --- a/libcloud/compute/drivers/gce.py.orig +++ /dev/null @@ -1,3353 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -Module for Google Compute Engine Driver. 
-""" -from __future__ import with_statement - -import datetime -import time -import sys - -from libcloud.common.google import GoogleResponse -from libcloud.common.google import GoogleBaseConnection -from libcloud.common.google import GoogleBaseError -from libcloud.common.google import ResourceNotFoundError -from libcloud.common.google import ResourceExistsError - -from libcloud.compute.base import Node, NodeDriver, NodeImage, NodeLocation -from libcloud.compute.base import NodeSize, StorageVolume, VolumeSnapshot -from libcloud.compute.base import UuidMixin -from libcloud.compute.providers import Provider -from libcloud.compute.types import NodeState - -API_VERSION = 'v1' -DEFAULT_TASK_COMPLETION_TIMEOUT = 180 - - -def timestamp_to_datetime(timestamp): - """ - Return a datetime object that corresponds to the time in an RFC3339 - timestamp. - - :param timestamp: RFC3339 timestamp string - :type timestamp: ``str`` - - :return: Datetime object corresponding to timestamp - :rtype: :class:`datetime.datetime` - """ - # We remove timezone offset and microseconds (Python 2.5 strptime doesn't - # support %f) - ts = datetime.datetime.strptime(timestamp[:-10], '%Y-%m-%dT%H:%M:%S') - tz_hours = int(timestamp[-5:-3]) - tz_mins = int(timestamp[-2:]) * int(timestamp[-6:-5] + '1') - tz_delta = datetime.timedelta(hours=tz_hours, minutes=tz_mins) - return ts + tz_delta - - -class GCEResponse(GoogleResponse): - pass - - -class GCEConnection(GoogleBaseConnection): - """Connection class for the GCE driver.""" - host = 'www.googleapis.com' - responseCls = GCEResponse - - def __init__(self, user_id, key, secure, auth_type=None, - credential_file=None, project=None, **kwargs): - super(GCEConnection, self).__init__(user_id, key, secure=secure, - auth_type=auth_type, - credential_file=credential_file, - **kwargs) - self.request_path = '/compute/%s/projects/%s' % (API_VERSION, - project) - - -class GCEAddress(UuidMixin): - """A GCE Static address.""" - def __init__(self, id, name, address, 
region, driver, extra=None): - self.id = str(id) - self.name = name - self.address = address - self.region = region - self.driver = driver - self.extra = extra - UuidMixin.__init__(self) - - def destroy(self): - """ - Destroy this address. - - :return: True if successful - :rtype: ``bool`` - """ - return self.driver.ex_destroy_address(address=self) - - def __repr__(self): - return '' % ( - self.id, self.name, self.address) - - -class GCEFailedDisk(object): - """Dummy Node object for disks that are not created.""" - def __init__(self, name, error, code): - self.name = name - self.error = error - self.code = code - - def __repr__(self): - return '' % ( - self.name, self.code) - - -class GCEFailedNode(object): - """Dummy Node object for nodes that are not created.""" - def __init__(self, name, error, code): - self.name = name - self.error = error - self.code = code - - def __repr__(self): - return '' % ( - self.name, self.code) - - -class GCEHealthCheck(UuidMixin): - """A GCE Http Health Check class.""" - def __init__(self, id, name, path, port, interval, timeout, - unhealthy_threshold, healthy_threshold, driver, extra=None): - self.id = str(id) - self.name = name - self.path = path - self.port = port - self.interval = interval - self.timeout = timeout - self.unhealthy_threshold = unhealthy_threshold - self.healthy_threshold = healthy_threshold - self.driver = driver - self.extra = extra - UuidMixin.__init__(self) - - def destroy(self): - """ - Destroy this Health Check. - - :return: True if successful - :rtype: ``bool`` - """ - return self.driver.ex_destroy_healthcheck(healthcheck=self) - - def update(self): - """ - Commit updated healthcheck values. 
- - :return: Updated Healthcheck object - :rtype: :class:`GCEHealthcheck` - """ - return self.driver.ex_update_healthcheck(healthcheck=self) - - def __repr__(self): - return '' % ( - self.id, self.name, self.path, self.port) - - -class GCEFirewall(UuidMixin): - """A GCE Firewall rule class.""" - def __init__(self, id, name, allowed, network, source_ranges, source_tags, - target_tags, driver, extra=None): - self.id = str(id) - self.name = name - self.network = network - self.allowed = allowed - self.source_ranges = source_ranges - self.source_tags = source_tags - self.target_tags = target_tags - self.driver = driver - self.extra = extra - UuidMixin.__init__(self) - - def destroy(self): - """ - Destroy this firewall. - - :return: True if successful - :rtype: ``bool`` - """ - return self.driver.ex_destroy_firewall(firewall=self) - - def update(self): - """ - Commit updated firewall values. - - :return: Updated Firewall object - :rtype: :class:`GCEFirewall` - """ - return self.driver.ex_update_firewall(firewall=self) - - def __repr__(self): - return '' % ( - self.id, self.name, self.network.name) - - -class GCEForwardingRule(UuidMixin): - def __init__(self, id, name, region, address, protocol, targetpool, driver, - extra=None): - self.id = str(id) - self.name = name - self.region = region - self.address = address - self.protocol = protocol - self.targetpool = targetpool - self.driver = driver - self.extra = extra - UuidMixin.__init__(self) - - def destroy(self): - """ - Destroy this Forwarding Rule - - :return: True if successful - :rtype: ``bool`` - """ - return self.driver.ex_destroy_forwarding_rule(forwarding_rule=self) - - def __repr__(self): - return '' % ( - self.id, self.name, self.address) - - -class GCENodeImage(NodeImage): - """A GCE Node Image class.""" - def __init__(self, id, name, driver, extra=None): - super(GCENodeImage, self).__init__(id, name, driver, extra=extra) - - def delete(self): - """ - Delete this image - - :return: True if successful - 
class GCENetwork(UuidMixin):
    """A GCE Network object class."""
    def __init__(self, id, name, cidr, driver, extra=None):
        self.id = str(id)
        self.name = name
        self.cidr = cidr
        self.driver = driver
        self.extra = extra
        UuidMixin.__init__(self)

    def destroy(self):
        """
        Destroy this network

        :return: True if successful
        :rtype: ``bool``
        """
        return self.driver.ex_destroy_network(network=self)

    def __repr__(self):
        # Fix: the previous bare '' template had no conversion specifiers,
        # so '%'-formatting it with a 3-tuple raised TypeError.
        return '<GCENetwork id="%s" name="%s" cidr="%s">' % (
            self.id, self.name, self.cidr)


class GCENodeSize(NodeSize):
    """A GCE Node Size (MachineType) class."""
    def __init__(self, id, name, ram, disk, bandwidth, price, driver,
                 extra=None):
        self.extra = extra
        super(GCENodeSize, self).__init__(id, name, ram, disk, bandwidth,
                                          price, driver, extra=extra)


class GCEProject(UuidMixin):
    """GCE Project information."""
    def __init__(self, id, name, metadata, quotas, driver, extra=None):
        self.id = str(id)
        self.name = name
        self.metadata = metadata
        self.quotas = quotas
        self.driver = driver
        self.extra = extra
        UuidMixin.__init__(self)

    def __repr__(self):
        # Fix: restored repr template (empty '' % tuple raised TypeError).
        return '<GCEProject id="%s" name="%s">' % (self.id, self.name)


class GCERegion(UuidMixin):
    """A GCE Region object class (collection of zones)."""
    def __init__(self, id, name, status, zones, quotas, deprecated, driver,
                 extra=None):
        self.id = str(id)
        self.name = name
        self.status = status
        self.zones = zones
        self.quotas = quotas
        self.deprecated = deprecated
        self.driver = driver
        self.extra = extra
        UuidMixin.__init__(self)

    def __repr__(self):
        # Fix: restored repr template (empty '' % tuple raised TypeError).
        return '<GCERegion id="%s" name="%s" status="%s">' % (
            self.id, self.name, self.status)


class GCESnapshot(VolumeSnapshot):
    """A GCE disk snapshot object class."""
    def __init__(self, id, name, size, status, driver, extra=None):
        self.name = name
        self.status = status
        super(GCESnapshot, self).__init__(id, driver, size, extra)


class GCETargetPool(UuidMixin):
    """A GCE Target Pool object class (a load-balanced group of nodes)."""
    def __init__(self, id, name, region, healthchecks, nodes, driver,
                 extra=None):
        self.id = str(id)
        self.name = name
        self.region = region
        self.healthchecks = healthchecks
        self.nodes = nodes
        self.driver = driver
        self.extra = extra
        UuidMixin.__init__(self)

    def add_node(self, node):
        """
        Add a node to this target pool.

        :param node: Node to add
        :type node: ``str`` or :class:`Node`

        :return: True if successful
        :rtype: ``bool``
        """
        return self.driver.ex_targetpool_add_node(targetpool=self, node=node)

    def remove_node(self, node):
        """
        Remove a node from this target pool.

        :param node: Node to remove
        :type node: ``str`` or :class:`Node`

        :return: True if successful
        :rtype: ``bool``
        """
        return self.driver.ex_targetpool_remove_node(targetpool=self,
                                                     node=node)

    def add_healthcheck(self, healthcheck):
        """
        Add a healthcheck to this target pool.

        :param healthcheck: Healthcheck to add
        :type healthcheck: ``str`` or :class:`GCEHealthCheck`

        :return: True if successful
        :rtype: ``bool``
        """
        return self.driver.ex_targetpool_add_healthcheck(
            targetpool=self, healthcheck=healthcheck)

    def remove_healthcheck(self, healthcheck):
        """
        Remove a healthcheck from this target pool.

        :param healthcheck: Healthcheck to remove
        :type healthcheck: ``str`` or :class:`GCEHealthCheck`

        :return: True if successful
        :rtype: ``bool``
        """
        return self.driver.ex_targetpool_remove_healthcheck(
            targetpool=self, healthcheck=healthcheck)

    def destroy(self):
        """
        Destroy this Target Pool

        :return: True if successful
        :rtype: ``bool``
        """
        return self.driver.ex_destroy_targetpool(targetpool=self)

    def __repr__(self):
        # Fix: restored repr template (empty '' % tuple raised TypeError).
        return '<GCETargetPool id="%s" name="%s" region="%s">' % (
            self.id, self.name, self.region.name)


class GCEZone(NodeLocation):
    """Subclass of NodeLocation to provide additional information."""
    def __init__(self, id, name, status, maintenance_windows, deprecated,
                 driver, extra=None):
        self.status = status
        self.maintenance_windows = maintenance_windows
        self.deprecated = deprecated
        self.extra = extra
        # Zone names look like 'us-central1-a'; the leading token is used
        # as the (approximate) country for the NodeLocation base class.
        country = name.split('-')[0]
        super(GCEZone, self).__init__(id=str(id), name=name, country=country,
                                      driver=driver)

    @property
    def time_until_mw(self):
        """
        Returns the time until the next Maintenance Window as a
        datetime.timedelta object.
        """
        return self._get_time_until_mw()

    @property
    def next_mw_duration(self):
        """
        Returns the duration of the next Maintenance Window as a
        datetime.timedelta object.
        """
        return self._get_next_mw_duration()

    def _now(self):
        """
        Returns current UTC time.

        Can be overridden in unittests.
        """
        return datetime.datetime.utcnow()

    def _get_next_maint(self):
        """
        Returns the next Maintenance Window.

        :return: A dictionary containing maintenance window info (or None if
                 no maintenance windows are scheduled)
                 The dictionary contains 4 keys with values of type ``str``
                     - name: The name of the maintenance window
                     - description: Description of the maintenance window
                     - beginTime: RFC3339 Timestamp
                     - endTime: RFC3339 Timestamp
        :rtype: ``dict`` or ``None``
        """
        begin = None
        next_window = None
        if not self.maintenance_windows:
            return None
        if len(self.maintenance_windows) == 1:
            return self.maintenance_windows[0]
        # Pick the window with the earliest begin time.
        for mw in self.maintenance_windows:
            begin_next = timestamp_to_datetime(mw['beginTime'])
            if (not begin) or (begin_next < begin):
                begin = begin_next
                next_window = mw
        return next_window

    def _get_time_until_mw(self):
        """
        Returns time until next maintenance window.

        :return: Time until next maintenance window (or None if no
                 maintenance windows are scheduled)
        :rtype: :class:`datetime.timedelta` or ``None``
        """
        next_window = self._get_next_maint()
        if not next_window:
            return None
        now = self._now()
        next_begin = timestamp_to_datetime(next_window['beginTime'])
        return next_begin - now

    def _get_next_mw_duration(self):
        """
        Returns the duration of the next maintenance window.

        :return: Duration of next maintenance window (or None if no
                 maintenance windows are scheduled)
        :rtype: :class:`datetime.timedelta` or ``None``
        """
        next_window = self._get_next_maint()
        if not next_window:
            return None
        next_begin = timestamp_to_datetime(next_window['beginTime'])
        next_end = timestamp_to_datetime(next_window['endTime'])
        return next_end - next_begin

    def __repr__(self):
        # Fix: restored repr template (empty '' % tuple raised TypeError).
        return '<GCEZone id="%s" name="%s" status="%s">' % (
            self.id, self.name, self.status)
- - Note that many methods allow either objects or strings (or lists of - objects/strings). In most cases, passing strings instead of objects will - result in additional GCE API calls. - """ - connectionCls = GCEConnection - api_name = 'googleapis' - name = "Google Compute Engine" - type = Provider.GCE - website = 'https://cloud.google.com/' - - NODE_STATE_MAP = { - "PROVISIONING": NodeState.PENDING, - "STAGING": NodeState.PENDING, - "RUNNING": NodeState.RUNNING, - "STOPPED": NodeState.TERMINATED, - "STOPPING": NodeState.TERMINATED, - "TERMINATED": NodeState.TERMINATED - } - - def __init__(self, user_id, key, datacenter=None, project=None, - auth_type=None, scopes=None, **kwargs): - """ - :param user_id: The email address (for service accounts) or Client ID - (for installed apps) to be used for authentication. - :type user_id: ``str`` - - :param key: The RSA Key (for service accounts) or file path containing - key or Client Secret (for installed apps) to be used for - authentication. - :type key: ``str`` - - :keyword datacenter: The name of the datacenter (zone) used for - operations. - :type datacenter: ``str`` - - :keyword project: Your GCE project name. (required) - :type project: ``str`` - - :keyword auth_type: Accepted values are "SA" or "IA" - ("Service Account" or "Installed Application"). - If not supplied, auth_type will be guessed based - on value of user_id. - :type auth_type: ``str`` - - :keyword scopes: List of authorization URLs. Default is empty and - grants read/write to Compute, Storage, DNS. 
- :type scopes: ``list`` - """ - self.auth_type = auth_type - self.project = project - self.scopes = scopes - if not self.project: - raise ValueError('Project name must be specified using ' - '"project" keyword.') - super(GCENodeDriver, self).__init__(user_id, key, **kwargs) - - # Cache Zone and Region information to reduce API calls and - # increase speed - self.base_path = '/compute/%s/projects/%s' % (API_VERSION, - self.project) - self.zone_list = self.ex_list_zones() - self.zone_dict = {} - for zone in self.zone_list: - self.zone_dict[zone.name] = zone - if datacenter: - self.zone = self.ex_get_zone(datacenter) - else: - self.zone = None - - self.region_list = self.ex_list_regions() - self.region_dict = {} - for region in self.region_list: - self.region_dict[region.name] = region - - if self.zone: - self.region = self._get_region_from_zone(self.zone) - else: - self.region = None - - def ex_list_addresses(self, region=None): - """ - Return a list of static addresses for a region or all. - - :keyword region: The region to return addresses from. For example: - 'us-central1'. If None, will return addresses from - region of self.zone. If 'all', will return all - addresses. - :type region: ``str`` or ``None`` - - :return: A list of static address objects. 
- :rtype: ``list`` of :class:`GCEAddress` - """ - list_addresses = [] - region = self._set_region(region) - if region is None: - request = '/aggregated/addresses' - else: - request = '/regions/%s/addresses' % (region.name) - response = self.connection.request(request, method='GET').object - - if 'items' in response: - # The aggregated result returns dictionaries for each region - if region is None: - for v in response['items'].values(): - region_addresses = [self._to_address(a) for a in - v.get('addresses', [])] - list_addresses.extend(region_addresses) - else: - list_addresses = [self._to_address(a) for a in - response['items']] - return list_addresses - - def ex_list_healthchecks(self): - """ - Return the list of health checks. - - :return: A list of health check objects. - :rtype: ``list`` of :class:`GCEHealthCheck` - """ - list_healthchecks = [] - request = '/global/httpHealthChecks' - response = self.connection.request(request, method='GET').object - list_healthchecks = [self._to_healthcheck(h) for h in - response.get('items', [])] - return list_healthchecks - - def ex_list_firewalls(self): - """ - Return the list of firewalls. - - :return: A list of firewall objects. - :rtype: ``list`` of :class:`GCEFirewall` - """ - list_firewalls = [] - request = '/global/firewalls' - response = self.connection.request(request, method='GET').object - list_firewalls = [self._to_firewall(f) for f in - response.get('items', [])] - return list_firewalls - - def ex_list_forwarding_rules(self, region=None): - """ - Return the list of forwarding rules for a region or all. - - :keyword region: The region to return forwarding rules from. For - example: 'us-central1'. If None, will return - forwarding rules from the region of self.region - (which is based on self.zone). If 'all', will - return all forwarding rules. - :type region: ``str`` or :class:`GCERegion` or ``None`` - - :return: A list of forwarding rule objects. 
- :rtype: ``list`` of :class:`GCEForwardingRule` - """ - list_forwarding_rules = [] - region = self._set_region(region) - if region is None: - request = '/aggregated/forwardingRules' - else: - request = '/regions/%s/forwardingRules' % (region.name) - response = self.connection.request(request, method='GET').object - - if 'items' in response: - # The aggregated result returns dictionaries for each region - if region is None: - for v in response['items'].values(): - region_forwarding_rules = [self._to_forwarding_rule(f) for - f in v.get('forwardingRules', - [])] - list_forwarding_rules.extend(region_forwarding_rules) - else: - list_forwarding_rules = [self._to_forwarding_rule(f) for f in - response['items']] - return list_forwarding_rules - - def list_images(self, ex_project=None): - """ - Return a list of image objects for a project. - - :keyword ex_project: Optional alternate project name. - :type ex_project: ``str`` or ``None`` - - :return: List of GCENodeImage objects - :rtype: ``list`` of :class:`GCENodeImage` - """ - request = '/global/images' - if ex_project is None: - response = self.connection.request(request, method='GET').object - else: - # Save the connection request_path - save_request_path = self.connection.request_path - # Override the connection request path - new_request_path = save_request_path.replace(self.project, - ex_project) - self.connection.request_path = new_request_path - response = self.connection.request(request, method='GET').object - # Restore the connection request_path - self.connection.request_path = save_request_path - list_images = [self._to_node_image(i) for i in - response.get('items', [])] - return list_images - - def list_locations(self): - """ - Return a list of locations (zones). - - The :class:`ex_list_zones` method returns more comprehensive results, - but this is here for compatibility. 
- - :return: List of NodeLocation objects - :rtype: ``list`` of :class:`NodeLocation` - """ - list_locations = [] - request = '/zones' - response = self.connection.request(request, method='GET').object - list_locations = [self._to_node_location(l) for l in response['items']] - return list_locations - - def ex_list_networks(self): - """ - Return the list of networks. - - :return: A list of network objects. - :rtype: ``list`` of :class:`GCENetwork` - """ - list_networks = [] - request = '/global/networks' - response = self.connection.request(request, method='GET').object - list_networks = [self._to_network(n) for n in - response.get('items', [])] - return list_networks - - def list_nodes(self, ex_zone=None): - """ - Return a list of nodes in the current zone or all zones. - - :keyword ex_zone: Optional zone name or 'all' - :type ex_zone: ``str`` or :class:`GCEZone` or - :class:`NodeLocation` or ``None`` - - :return: List of Node objects - :rtype: ``list`` of :class:`Node` - """ - list_nodes = [] - zone = self._set_zone(ex_zone) - if zone is None: - request = '/aggregated/instances' - else: - request = '/zones/%s/instances' % (zone.name) - - response = self.connection.request(request, method='GET').object - - if 'items' in response: - # The aggregated response returns a dict for each zone - if zone is None: - for v in response['items'].values(): - zone_nodes = [self._to_node(i) for i in - v.get('instances', [])] - list_nodes.extend(zone_nodes) - else: - list_nodes = [self._to_node(i) for i in response['items']] - return list_nodes - - def ex_list_regions(self): - """ - Return the list of regions. - - :return: A list of region objects. 
- :rtype: ``list`` of :class:`GCERegion` - """ - list_regions = [] - request = '/regions' - response = self.connection.request(request, method='GET').object - list_regions = [self._to_region(r) for r in response['items']] - return list_regions - - def list_sizes(self, location=None): - """ - Return a list of sizes (machineTypes) in a zone. - - :keyword location: Location or Zone for sizes - :type location: ``str`` or :class:`GCEZone` or - :class:`NodeLocation` or ``None`` - - :return: List of GCENodeSize objects - :rtype: ``list`` of :class:`GCENodeSize` - """ - list_sizes = [] - zone = self._set_zone(location) - if zone is None: - request = '/aggregated/machineTypes' - else: - request = '/zones/%s/machineTypes' % (zone.name) - - response = self.connection.request(request, method='GET').object - - if 'items' in response: - # The aggregated response returns a dict for each zone - if zone is None: - for v in response['items'].values(): - zone_sizes = [self._to_node_size(s) for s in - v.get('machineTypes', [])] - list_sizes.extend(zone_sizes) - else: - list_sizes = [self._to_node_size(s) for s in response['items']] - return list_sizes - - def ex_list_snapshots(self): - """ - Return the list of disk snapshots in the project. - - :return: A list of snapshot objects - :rtype: ``list`` of :class:`GCESnapshot` - """ - list_snapshots = [] - request = '/global/snapshots' - response = self.connection.request(request, method='GET').object - list_snapshots = [self._to_snapshot(s) for s in - response.get('items', [])] - return list_snapshots - - def ex_list_targetpools(self, region=None): - """ - Return the list of target pools. 
- - :return: A list of target pool objects - :rtype: ``list`` of :class:`GCETargetPool` - """ - list_targetpools = [] - region = self._set_region(region) - if region is None: - request = '/aggregated/targetPools' - else: - request = '/regions/%s/targetPools' % (region.name) - response = self.connection.request(request, method='GET').object - - if 'items' in response: - # The aggregated result returns dictionaries for each region - if region is None: - for v in response['items'].values(): - region_targetpools = [self._to_targetpool(t) for t in - v.get('targetPools', [])] - list_targetpools.extend(region_targetpools) - else: - list_targetpools = [self._to_targetpool(t) for t in - response['items']] - return list_targetpools - - def list_volumes(self, ex_zone=None): - """ - Return a list of volumes for a zone or all. - - Will return list from provided zone, or from the default zone unless - given the value of 'all'. - - :keyword ex_zone: The zone to return volumes from. - :type ex_zone: ``str`` or :class:`GCEZone` or - :class:`NodeLocation` or ``None`` - - :return: A list of volume objects. - :rtype: ``list`` of :class:`StorageVolume` - """ - list_volumes = [] - zone = self._set_zone(ex_zone) - if zone is None: - request = '/aggregated/disks' - else: - request = '/zones/%s/disks' % (zone.name) - - response = self.connection.request(request, method='GET').object - if 'items' in response: - # The aggregated response returns a dict for each zone - if zone is None: - for v in response['items'].values(): - zone_volumes = [self._to_storage_volume(d) for d in - v.get('disks', [])] - list_volumes.extend(zone_volumes) - else: - list_volumes = [self._to_storage_volume(d) for d in - response['items']] - return list_volumes - - def ex_list_zones(self): - """ - Return the list of zones. - - :return: A list of zone objects. 
- :rtype: ``list`` of :class:`GCEZone` - """ - list_zones = [] - request = '/zones' - response = self.connection.request(request, method='GET').object - list_zones = [self._to_zone(z) for z in response['items']] - return list_zones - - def ex_create_address(self, name, region=None): - """ - Create a static address in a region. - - :param name: Name of static address - :type name: ``str`` - - :keyword region: Name of region for the address (e.g. 'us-central1') - :type region: ``str`` or :class:`GCERegion` - - :return: Static Address object - :rtype: :class:`GCEAddress` - """ - region = region or self.region - if not hasattr(region, 'name'): - region = self.ex_get_region(region) - elif region is None: - raise ValueError('REGION_NOT_SPECIFIED', - 'Region must be provided for an address') - address_data = {'name': name} - request = '/regions/%s/addresses' % (region.name) - self.connection.async_request(request, method='POST', - data=address_data) - return self.ex_get_address(name, region=region) - - def ex_create_healthcheck(self, name, host=None, path=None, port=None, - interval=None, timeout=None, - unhealthy_threshold=None, - healthy_threshold=None): - """ - Create an Http Health Check. - - :param name: Name of health check - :type name: ``str`` - - :keyword host: Hostname of health check request. Defaults to empty - and public IP is used instead. - :type host: ``str`` - - :keyword path: The request path for the check. Defaults to /. - :type path: ``str`` - - :keyword port: The TCP port number for the check. Defaults to 80. - :type port: ``int`` - - :keyword interval: How often (in seconds) to check. Defaults to 5. - :type interval: ``int`` - - :keyword timeout: How long to wait before failing. Defaults to 5. - :type timeout: ``int`` - - :keyword unhealthy_threshold: How many failures before marking - unhealthy. Defaults to 2. - :type unhealthy_threshold: ``int`` - - :keyword healthy_threshold: How many successes before marking as - healthy. Defaults to 2. 
- :type healthy_threshold: ``int`` - - :return: Health Check object - :rtype: :class:`GCEHealthCheck` - """ - hc_data = {} - hc_data['name'] = name - if host: - hc_data['host'] = host - # As of right now, the 'default' values aren't getting set when called - # through the API, so set them explicitly - hc_data['requestPath'] = path or '/' - hc_data['port'] = port or 80 - hc_data['checkIntervalSec'] = interval or 5 - hc_data['timeoutSec'] = timeout or 5 - hc_data['unhealthyThreshold'] = unhealthy_threshold or 2 - hc_data['healthyThreshold'] = healthy_threshold or 2 - - request = '/global/httpHealthChecks' - - self.connection.async_request(request, method='POST', data=hc_data) - return self.ex_get_healthcheck(name) - - def ex_create_firewall(self, name, allowed, network='default', - source_ranges=None, source_tags=None, - target_tags=None): - """ - Create a firewall on a network. - - Firewall rules should be supplied in the "allowed" field. This is a - list of dictionaries formated like so ("ports" is optional):: - - [{"IPProtocol": "", - "ports": ""}] - - For example, to allow tcp on port 8080 and udp on all ports, 'allowed' - would be:: - - [{"IPProtocol": "tcp", - "ports": ["8080"]}, - {"IPProtocol": "udp"}] - - See `Firewall Reference `_ for more information. - - :param name: Name of the firewall to be created - :type name: ``str`` - - :param allowed: List of dictionaries with rules - :type allowed: ``list`` of ``dict`` - - :keyword network: The network that the firewall applies to. - :type network: ``str`` or :class:`GCENetwork` - - :keyword source_ranges: A list of IP ranges in CIDR format that the - firewall should apply to. Defaults to - ['0.0.0.0/0'] - :type source_ranges: ``list`` of ``str`` - - :keyword source_tags: A list of source instance tags the rules apply - to. - :type source_tags: ``list`` of ``str`` - - :keyword target_tags: A list of target instance tags the rules apply - to. 
- :type target_tags: ``list`` of ``str`` - - :return: Firewall object - :rtype: :class:`GCEFirewall` - """ - firewall_data = {} - if not hasattr(network, 'name'): - nw = self.ex_get_network(network) - else: - nw = network - - firewall_data['name'] = name - firewall_data['allowed'] = allowed - firewall_data['network'] = nw.extra['selfLink'] - firewall_data['sourceRanges'] = source_ranges or ['0.0.0.0/0'] - if source_tags is not None: - firewall_data['sourceTags'] = source_tags - if target_tags is not None: - firewall_data['targetTags'] = target_tags - - request = '/global/firewalls' - - self.connection.async_request(request, method='POST', - data=firewall_data) - return self.ex_get_firewall(name) - - def ex_create_forwarding_rule(self, name, targetpool, region=None, - protocol='tcp', port_range=None, - address=None): - """ - Create a forwarding rule. - - :param name: Name of forwarding rule to be created - :type name: ``str`` - - :param targetpool: Target pool to apply the rule to - :param targetpool: ``str`` or :class:`GCETargetPool` - - :keyword region: Region to create the forwarding rule in. Defaults to - self.region - :type region: ``str`` or :class:`GCERegion` - - :keyword protocol: Should be 'tcp' or 'udp' - :type protocol: ``str`` - - :keyword port_range: Optional single port number or range separated - by a dash. Examples: '80', '5000-5999'. - :type port_range: ``str`` - - :keyword address: Optional static address for forwarding rule. Must be - in same region. 
- :type address: ``str`` or :class:`GCEAddress` - - :return: Forwarding Rule object - :rtype: :class:`GCEForwardingRule` - """ - forwarding_rule_data = {} - region = region or self.region - if not hasattr(region, 'name'): - region = self.ex_get_region(region) - if not hasattr(targetpool, 'name'): - targetpool = self.ex_get_targetpool(targetpool, region) - - forwarding_rule_data['name'] = name - forwarding_rule_data['region'] = region.extra['selfLink'] - forwarding_rule_data['target'] = targetpool.extra['selfLink'] - forwarding_rule_data['protocol'] = protocol.upper() - if address: - if not hasattr(address, 'name'): - address = self.ex_get_address(address, region) - forwarding_rule_data['IPAddress'] = address.extra['selfLink'] - if port_range: - forwarding_rule_data['portRange'] = port_range - - request = '/regions/%s/forwardingRules' % (region.name) - - self.connection.async_request(request, method='POST', - data=forwarding_rule_data) - - return self.ex_get_forwarding_rule(name) - - def ex_create_network(self, name, cidr): - """ - Create a network. - - :param name: Name of network to be created - :type name: ``str`` - - :param cidr: Address range of network in CIDR format. - :type cidr: ``str`` - - :return: Network object - :rtype: :class:`GCENetwork` - """ - network_data = {} - network_data['name'] = name - network_data['IPv4Range'] = cidr - - request = '/global/networks' - - self.connection.async_request(request, method='POST', - data=network_data) - - return self.ex_get_network(name) - - def create_node(self, name, size, image, location=None, - ex_network='default', ex_tags=None, ex_metadata=None, - ex_boot_disk=None, use_existing_disk=True, - external_ip='ephemeral'): - """ - Create a new node and return a node object for the node. - - :param name: The name of the node to create. - :type name: ``str`` - - :param size: The machine type to use. 
- :type size: ``str`` or :class:`GCENodeSize` - - :param image: The image to use to create the node (or, if attaching - a persistent disk, the image used to create the disk) - :type image: ``str`` or :class:`GCENodeImage` - - :keyword location: The location (zone) to create the node in. - :type location: ``str`` or :class:`NodeLocation` or - :class:`GCEZone` or ``None`` - - :keyword ex_network: The network to associate with the node. - :type ex_network: ``str`` or :class:`GCENetwork` - - :keyword ex_tags: A list of tags to associate with the node. - :type ex_tags: ``list`` of ``str`` or ``None`` - - :keyword ex_metadata: Metadata dictionary for instance. - :type ex_metadata: ``dict`` or ``None`` - - :keyword ex_boot_disk: The boot disk to attach to the instance. - :type ex_boot_disk: :class:`StorageVolume` or ``str`` - - :keyword use_existing_disk: If True and if an existing disk with the - same name/location is found, use that - disk instead of creating a new one. - :type use_existing_disk: ``bool`` - - :keyword external_ip: The external IP address to use. If 'ephemeral' - (default), a new non-static address will be - used. If 'None', then no external address will - be used. To use an existing static IP address, - a GCEAddress object should be passed in. - :type external_ip: :class:`GCEAddress` or ``str`` or None - - :return: A Node object for the new node. 
- :rtype: :class:`Node` - """ - location = location or self.zone - if not hasattr(location, 'name'): - location = self.ex_get_zone(location) - if not hasattr(size, 'name'): - size = self.ex_get_size(size, location) - if not hasattr(ex_network, 'name'): - ex_network = self.ex_get_network(ex_network) - if not hasattr(image, 'name'): - image = self.ex_get_image(image) - - if not ex_boot_disk: - ex_boot_disk = self.create_volume(None, name, location=location, - image=image, - use_existing=use_existing_disk) - - if ex_metadata is not None: - ex_metadata = {"items": [{"key": k, "value": v} - for k, v in ex_metadata.items()]} - - request, node_data = self._create_node_req(name, size, image, - location, ex_network, - ex_tags, ex_metadata, - ex_boot_disk, external_ip) - self.connection.async_request(request, method='POST', data=node_data) - - return self.ex_get_node(name, location.name) - - def ex_create_multiple_nodes(self, base_name, size, image, number, - location=None, ex_network='default', - ex_tags=None, ex_metadata=None, - ignore_errors=True, use_existing_disk=True, - poll_interval=2, external_ip='ephemeral', - timeout=DEFAULT_TASK_COMPLETION_TIMEOUT): - """ - Create multiple nodes and return a list of Node objects. - - Nodes will be named with the base name and a number. For example, if - the base name is 'libcloud' and you create 3 nodes, they will be - named:: - - libcloud-000 - libcloud-001 - libcloud-002 - - :param base_name: The base name of the nodes to create. - :type base_name: ``str`` - - :param size: The machine type to use. - :type size: ``str`` or :class:`GCENodeSize` - - :param image: The image to use to create the nodes. - :type image: ``str`` or :class:`GCENodeImage` - - :param number: The number of nodes to create. - :type number: ``int`` - - :keyword location: The location (zone) to create the nodes in. 
- :type location: ``str`` or :class:`NodeLocation` or - :class:`GCEZone` or ``None`` - - :keyword ex_network: The network to associate with the nodes. - :type ex_network: ``str`` or :class:`GCENetwork` - - :keyword ex_tags: A list of tags to assiciate with the nodes. - :type ex_tags: ``list`` of ``str`` or ``None`` - - :keyword ex_metadata: Metadata dictionary for instances. - :type ex_metadata: ``dict`` or ``None`` - - :keyword ignore_errors: If True, don't raise Exceptions if one or - more nodes fails. - :type ignore_errors: ``bool`` - - :keyword use_existing_disk: If True and if an existing disk with the - same name/location is found, use that - disk instead of creating a new one. - :type use_existing_disk: ``bool`` - - :keyword poll_interval: Number of seconds between status checks. - :type poll_interval: ``int`` - - :keyword external_ip: The external IP address to use. If 'ephemeral' - (default), a new non-static address will be - used. If 'None', then no external address will - be used. (Static addresses are not supported for - multiple node creation.) - :type external_ip: ``str`` or None - - :keyword timeout: The number of seconds to wait for all nodes to be - created before timing out. - :type timeout: ``int`` - - :return: A list of Node objects for the new nodes. - :rtype: ``list`` of :class:`Node` - """ - location = location or self.zone - if not hasattr(location, 'name'): - location = self.ex_get_zone(location) - if not hasattr(size, 'name'): - size = self.ex_get_size(size, location) - if not hasattr(ex_network, 'name'): - ex_network = self.ex_get_network(ex_network) - if not hasattr(image, 'name'): - image = self.ex_get_image(image) - - node_attrs = {'size': size, - 'image': image, - 'location': location, - 'network': ex_network, - 'tags': ex_tags, - 'metadata': ex_metadata, - 'ignore_errors': ignore_errors, - 'use_existing_disk': use_existing_disk, - 'external_ip': external_ip} - - # List for holding the status information for disk/node creation. 
- status_list = [] - - for i in range(number): - name = '%s-%03d' % (base_name, i) - - status = {'name': name, - 'node_response': None, - 'node': None, - 'disk_response': None, - 'disk': None} - - status_list.append(status) - - # Create disks for nodes - for status in status_list: - self._multi_create_disk(status, node_attrs) - - start_time = time.time() - complete = False - while not complete: - if (time.time() - start_time >= timeout): - raise Exception("Timeout (%s sec) while waiting for multiple " - "instances") - complete = True - time.sleep(poll_interval) - for status in status_list: - # If disk does not yet exist, check on its status - if not status['disk']: - self._multi_check_disk(status, node_attrs) - - # If disk exists, but node does not, create the node or check - # on its status if already in progress. - if status['disk'] and not status['node']: - if not status['node_response']: - self._multi_create_node(status, node_attrs) - else: - self._multi_check_node(status, node_attrs) - # If any of the nodes have not been created (or failed) we are - # not done yet. - if not status['node']: - complete = False - - # Return list of nodes - node_list = [] - for status in status_list: - node_list.append(status['node']) - return node_list - - def ex_create_targetpool(self, name, region=None, healthchecks=None, - nodes=None): - """ - Create a target pool. - - :param name: Name of target pool - :type name: ``str`` - - :keyword region: Region to create the target pool in. 
Defaults to - self.region - :type region: ``str`` or :class:`GCERegion` or ``None`` - - :keyword healthchecks: Optional list of health checks to attach - :type healthchecks: ``list`` of ``str`` or :class:`GCEHealthCheck` - - :keyword nodes: Optional list of nodes to attach to the pool - :type nodes: ``list`` of ``str`` or :class:`Node` - - :return: Target Pool object - :rtype: :class:`GCETargetPool` - """ - region = region or self.region - targetpool_data = {} - targetpool_data['name'] = name - if not hasattr(region, 'name'): - region = self.ex_get_region(region) - targetpool_data['region'] = region.extra['selfLink'] - - if healthchecks: - if not hasattr(healthchecks[0], 'name'): - hc_list = [self.ex_get_healthcheck(h).extra['selfLink'] for h - in healthchecks] - else: - hc_list = [h.extra['selfLink'] for h in healthchecks] - targetpool_data['healthChecks'] = hc_list - if nodes: - if not hasattr(nodes[0], 'name'): - node_list = [self.ex_get_node(n, 'all').extra['selfLink'] for n - in nodes] - else: - node_list = [n.extra['selfLink'] for n in nodes] - targetpool_data['instances'] = node_list - - request = '/regions/%s/targetPools' % (region.name) - - self.connection.async_request(request, method='POST', - data=targetpool_data) - - return self.ex_get_targetpool(name, region) - - def create_volume(self, size, name, location=None, snapshot=None, - image=None, use_existing=True): - """ - Create a volume (disk). - - :param size: Size of volume to create (in GB). Can be None if image - or snapshot is supplied. - :type size: ``int`` or ``str`` or ``None`` - - :param name: Name of volume to create - :type name: ``str`` - - :keyword location: Location (zone) to create the volume in - :type location: ``str`` or :class:`GCEZone` or - :class:`NodeLocation` or ``None`` - - :keyword snapshot: Snapshot to create image from - :type snapshot: :class:`GCESnapshot` or ``str`` or ``None`` - - :keyword image: Image to create disk from. 
- :type image: :class:`GCENodeImage` or ``str`` or ``None`` - - :keyword use_existing: If True and a disk with the given name already - exists, return an object for that disk instead - of attempting to create a new disk. - :type use_existing: ``bool`` - - :return: Storage Volume object - :rtype: :class:`StorageVolume` - """ - request, volume_data, params = self._create_vol_req( - size, name, location, snapshot, image) - try: - self.connection.async_request(request, method='POST', - data=volume_data, params=params) - except ResourceExistsError: - e = sys.exc_info()[1] - if not use_existing: - raise e - - return self.ex_get_volume(name, location) - - def create_volume_snapshot(self, volume, name): - """ - Create a snapshot of the provided Volume. - - :param volume: A StorageVolume object - :type volume: :class:`StorageVolume` - - :return: A GCE Snapshot object - :rtype: :class:`GCESnapshot` - """ - snapshot_data = {} - snapshot_data['name'] = name - request = '/zones/%s/disks/%s/createSnapshot' % ( - volume.extra['zone'].name, volume.name) - self.connection.async_request(request, method='POST', - data=snapshot_data) - - return self.ex_get_snapshot(name) - - def list_volume_snapshots(self, volume): - """ - List snapshots created from the provided volume. - - For GCE, snapshots are global, but while the volume they were - created from still exists, the source disk for the snapshot is - tracked. - - :param volume: A StorageVolume object - :type volume: :class:`StorageVolume` - - :return: A list of Snapshot objects - :rtype: ``list`` of :class:`GCESnapshot` - """ - volume_snapshots = [] - volume_link = volume.extra['selfLink'] - all_snapshots = self.ex_list_snapshots() - for snapshot in all_snapshots: - if snapshot.extra['sourceDisk'] == volume_link: - volume_snapshots.append(snapshot) - return volume_snapshots - - def ex_update_healthcheck(self, healthcheck): - """ - Update a health check with new values. 
- - To update, change the attributes of the health check object and pass - the updated object to the method. - - :param healthcheck: A healthcheck object with updated values. - :type healthcheck: :class:`GCEHealthCheck` - - :return: An object representing the new state of the health check. - :rtype: :class:`GCEHealthCheck` - """ - hc_data = {} - hc_data['name'] = healthcheck.name - hc_data['requestPath'] = healthcheck.path - hc_data['port'] = healthcheck.port - hc_data['checkIntervalSec'] = healthcheck.interval - hc_data['timeoutSec'] = healthcheck.timeout - hc_data['unhealthyThreshold'] = healthcheck.unhealthy_threshold - hc_data['healthyThreshold'] = healthcheck.healthy_threshold - if healthcheck.extra['host']: - hc_data['host'] = healthcheck.extra['host'] - if healthcheck.extra['description']: - hc_data['description'] = healthcheck.extra['description'] - - request = '/global/httpHealthChecks/%s' % (healthcheck.name) - - self.connection.async_request(request, method='PUT', - data=hc_data) - - return self.ex_get_healthcheck(healthcheck.name) - - def ex_update_firewall(self, firewall): - """ - Update a firewall with new values. - - To update, change the attributes of the firewall object and pass the - updated object to the method. - - :param firewall: A firewall object with updated values. - :type firewall: :class:`GCEFirewall` - - :return: An object representing the new state of the firewall. 
- :rtype: :class:`GCEFirewall` - """ - firewall_data = {} - firewall_data['name'] = firewall.name - firewall_data['allowed'] = firewall.allowed - firewall_data['network'] = firewall.network.extra['selfLink'] - if firewall.source_ranges: - firewall_data['sourceRanges'] = firewall.source_ranges - if firewall.source_tags: - firewall_data['sourceTags'] = firewall.source_tags - if firewall.target_tags: - firewall_data['targetTags'] = firewall.target_tags - if firewall.extra['description']: - firewall_data['description'] = firewall.extra['description'] - - request = '/global/firewalls/%s' % (firewall.name) - - self.connection.async_request(request, method='PUT', - data=firewall_data) - - return self.ex_get_firewall(firewall.name) - - def ex_targetpool_add_node(self, targetpool, node): - """ - Add a node to a target pool. - - :param targetpool: The targetpool to add node to - :type targetpool: ``str`` or :class:`GCETargetPool` - - :param node: The node to add - :type node: ``str`` or :class:`Node` - - :returns: True if successful - :rtype: ``bool`` - """ - if not hasattr(targetpool, 'name'): - targetpool = self.ex_get_targetpool(targetpool) - if not hasattr(node, 'name'): - node = self.ex_get_node(node, 'all') - - targetpool_data = {'instances': [{'instance': node.extra['selfLink']}]} - - request = '/regions/%s/targetPools/%s/addInstance' % ( - targetpool.region.name, targetpool.name) - self.connection.async_request(request, method='POST', - data=targetpool_data) - targetpool.nodes.append(node) - return True - - def ex_targetpool_add_healthcheck(self, targetpool, healthcheck): - """ - Add a health check to a target pool. 
- - :param targetpool: The targetpool to add health check to - :type targetpool: ``str`` or :class:`GCETargetPool` - - :param healthcheck: The healthcheck to add - :type healthcheck: ``str`` or :class:`GCEHealthCheck` - - :returns: True if successful - :rtype: ``bool`` - """ - if not hasattr(targetpool, 'name'): - targetpool = self.ex_get_targetpool(targetpool) - if not hasattr(healthcheck, 'name'): - healthcheck = self.ex_get_healthcheck(healthcheck) - - targetpool_data = {'healthCheck': healthcheck.extra['selfLink']} - - request = '/regions/%s/targetPools/%s/addHealthCheck' % ( - targetpool.region.name, targetpool.name) - self.connection.async_request(request, method='POST', - data=targetpool_data) - targetpool.healthchecks.append(healthcheck) - return True - - def ex_targetpool_remove_node(self, targetpool, node): - """ - Remove a node from a target pool. - - :param targetpool: The targetpool to remove node from - :type targetpool: ``str`` or :class:`GCETargetPool` - - :param node: The node to remove - :type node: ``str`` or :class:`Node` - - :returns: True if successful - :rtype: ``bool`` - """ - if not hasattr(targetpool, 'name'): - targetpool = self.ex_get_targetpool(targetpool) - if not hasattr(node, 'name'): - node = self.ex_get_node(node, 'all') - - targetpool_data = {'instances': [{'instance': node.extra['selfLink']}]} - - request = '/regions/%s/targetPools/%s/removeInstance' % ( - targetpool.region.name, targetpool.name) - self.connection.async_request(request, method='POST', - data=targetpool_data) - # Remove node object from node list - index = None - for i, nd in enumerate(targetpool.nodes): - if nd.name == node.name: - index = i - break - if index is not None: - targetpool.nodes.pop(index) - return True - - def ex_targetpool_remove_healthcheck(self, targetpool, healthcheck): - """ - Remove a health check from a target pool. 
- - :param targetpool: The targetpool to remove health check from - :type targetpool: ``str`` or :class:`GCETargetPool` - - :param healthcheck: The healthcheck to remove - :type healthcheck: ``str`` or :class:`GCEHealthCheck` - - :returns: True if successful - :rtype: ``bool`` - """ - if not hasattr(targetpool, 'name'): - targetpool = self.ex_get_targetpool(targetpool) - if not hasattr(healthcheck, 'name'): - healthcheck = self.ex_get_healthcheck(healthcheck) - - targetpool_data = {'healthCheck': healthcheck.extra['selfLink']} - - request = '/regions/%s/targetPools/%s/removeHealthCheck' % ( - targetpool.region.name, targetpool.name) - self.connection.async_request(request, method='POST', - data=targetpool_data) - # Remove healthcheck object from healthchecks list - index = None - for i, hc in enumerate(targetpool.healthchecks): - if hc.name == healthcheck.name: - index = i - if index is not None: - targetpool.healthchecks.pop(index) - return True - - def reboot_node(self, node): - """ - Reboot a node. - - :param node: Node to be rebooted - :type node: :class:`Node` - - :return: True if successful, False if not - :rtype: ``bool`` - """ - request = '/zones/%s/instances/%s/reset' % (node.extra['zone'].name, - node.name) - self.connection.async_request(request, method='POST', - data='ignored') - return True - - def ex_set_node_tags(self, node, tags): - """ - Set the tags on a Node instance. - - Note that this updates the node object directly. 
- - :param node: Node object - :type node: :class:`Node` - - :param tags: List of tags to apply to the object - :type tags: ``list`` of ``str`` - - :return: True if successful - :rtype: ``bool`` - """ - request = '/zones/%s/instances/%s/setTags' % (node.extra['zone'].name, - node.name) - - tags_data = {} - tags_data['items'] = tags - tags_data['fingerprint'] = node.extra['tags_fingerprint'] - - self.connection.async_request(request, method='POST', - data=tags_data) - new_node = self.ex_get_node(node.name, node.extra['zone']) - node.extra['tags'] = new_node.extra['tags'] - node.extra['tags_fingerprint'] = new_node.extra['tags_fingerprint'] - return True - - def ex_set_node_scheduling(self, node, on_host_maintenance=None, - automatic_restart=None): - """Set the maintenance behavior for the node. - - See `Scheduling `_ documentation for more info. - - :param node: Node object - :type node: :class:`Node` - - :keyword on_host_maintenance: Defines whether node should be - terminated or migrated when host machine - goes down. Acceptable values are: - 'MIGRATE' or 'TERMINATE' (If not - supplied, value will be reset to GCE - default value for the instance type.) - :type on_host_maintenance: ``str`` - - :keyword automatic_restart: Defines whether the instance should be - automatically restarted when it is - terminated by Compute Engine. (If not - supplied, value will be set to the GCE - default value for the instance type.) - :type automatic_restart: ``bool`` - - :return: True if successful. 
- :rtype: ``bool`` - """ - if not hasattr(node, 'name'): - node = self.ex_get_node(node, 'all') - if on_host_maintenance is not None: - on_host_maintenance = on_host_maintenance.upper() - ohm_values = ['MIGRATE', 'TERMINATE'] - if on_host_maintenance not in ohm_values: - raise ValueError('on_host_maintenance must be one of %s' % - ','.join(ohm_values)) - - request = '/zones/%s/instances/%s/setScheduling' % ( - node.extra['zone'].name, node.name) - - scheduling_data = {} - if on_host_maintenance is not None: - scheduling_data['onHostMaintenance'] = on_host_maintenance - if automatic_restart is not None: - scheduling_data['automaticRestart'] = automatic_restart - - self.connection.async_request(request, method='POST', - data=scheduling_data) - - new_node = self.ex_get_node(node.name, node.extra['zone']) - node.extra['scheduling'] = new_node.extra['scheduling'] - - ohm = node.extra['scheduling'].get('onHostMaintenance') - ar = node.extra['scheduling'].get('automaticRestart') - - success = True - if on_host_maintenance not in [None, ohm]: - success = False - if automatic_restart not in [None, ar]: - success = False - - return success - - def deploy_node(self, name, size, image, script, location=None, - ex_network='default', ex_tags=None): - """ - Create a new node and run a script on start-up. - - :param name: The name of the node to create. - :type name: ``str`` - - :param size: The machine type to use. - :type size: ``str`` or :class:`GCENodeSize` - - :param image: The image to use to create the node. - :type image: ``str`` or :class:`GCENodeImage` - - :param script: File path to start-up script - :type script: ``str`` - - :keyword location: The location (zone) to create the node in. - :type location: ``str`` or :class:`NodeLocation` or - :class:`GCEZone` or ``None`` - - :keyword ex_network: The network to associate with the node. - :type ex_network: ``str`` or :class:`GCENetwork` - - :keyword ex_tags: A list of tags to associate with the node. 
- :type ex_tags: ``list`` of ``str`` or ``None`` - - :return: A Node object for the new node. - :rtype: :class:`Node` - """ - with open(script, 'r') as f: - script_data = f.read() - metadata = {'items': [{'key': 'startup-script', - 'value': script_data}]} - - return self.create_node(name, size, image, location=location, - ex_network=ex_network, ex_tags=ex_tags, - ex_metadata=metadata) - - def attach_volume(self, node, volume, device=None, ex_mode=None, - ex_boot=False): - """ - Attach a volume to a node. - - If volume is None, a scratch disk will be created and attached. - - :param node: The node to attach the volume to - :type node: :class:`Node` - - :param volume: The volume to attach. If none, a scratch disk will be - attached. - :type volume: :class:`StorageVolume` or ``None`` - - :keyword device: The device name to attach the volume as. Defaults to - volume name. - :type device: ``str`` - - :keyword ex_mode: Either 'READ_WRITE' or 'READ_ONLY' - :type ex_mode: ``str`` - - :keyword ex_boot: If true, disk will be attached as a boot disk - :type ex_boot: ``bool`` - - :return: True if successful - :rtype: ``bool`` - """ - volume_data = {} - if volume is None: - volume_data['type'] = 'SCRATCH' - else: - volume_data['type'] = 'PERSISTENT' - volume_data['source'] = volume.extra['selfLink'] - volume_data['kind'] = 'compute#attachedDisk' - volume_data['mode'] = ex_mode or 'READ_WRITE' - - if device: - volume_data['deviceName'] = device - else: - volume_data['deviceName'] = volume.name - - volume_data['boot'] = ex_boot - - request = '/zones/%s/instances/%s/attachDisk' % ( - node.extra['zone'].name, node.name) - self.connection.async_request(request, method='POST', - data=volume_data) - return True - - def detach_volume(self, volume, ex_node=None): - """ - Detach a volume from a node. 
- - :param volume: Volume object to detach - :type volume: :class:`StorageVolume` - - :keyword ex_node: Node object to detach volume from (required) - :type ex_node: :class:`Node` - - :return: True if successful - :rtype: ``bool`` - """ - if not ex_node: - return False - request = '/zones/%s/instances/%s/detachDisk?deviceName=%s' % ( - ex_node.extra['zone'].name, ex_node.name, volume.name) - - self.connection.async_request(request, method='POST', - data='ignored') - return True - - def ex_set_volume_auto_delete(self, volume, node, auto_delete=True): - """ - Sets the auto-delete flag for a volume attached to a node. - - :param volume: Volume object to auto-delete - :type volume: :class:`StorageVolume` - - :param ex_node: Node object to auto-delete volume from - :type ex_node: :class:`Node` - - :keyword auto_delete: Flag to set for the auto-delete value - :type auto_delete: ``bool`` (default True) - - :return: True if successfull - :rtype: ``bool`` - """ - request = '/zones/%s/instances/%s/setDiskAutoDelete' % ( - node.extra['zone'].name, node.name - ) - delete_params = { - 'deviceName': volume, - 'autoDelete': auto_delete, - } - self.connection.async_request(request, method='POST', - params=delete_params) - return True - - def ex_destroy_address(self, address): - """ - Destroy a static address. - - :param address: Address object to destroy - :type address: :class:`GCEAddress` - - :return: True if successful - :rtype: ``bool`` - """ - request = '/regions/%s/addresses/%s' % (address.region.name, - address.name) - - self.connection.async_request(request, method='DELETE') - return True - - def ex_delete_image(self, image): - """ - Delete a specific image resource. 
- - :param image: Image object to delete - :type image: ``str`` or :class:`GCENodeImage` - - :return: True if successfull - :rtype: ``bool`` - """ - if not hasattr(image, 'name'): - image = self.ex_get_image(image) - - request = '/global/images/%s' % (image.name) - self.connection.async_request(request, method='DELETE') - return True - - def ex_deprecate_image(self, image, replacement, state=None): - """ - Deprecate a specific image resource. - - :param image: Image object to deprecate - :type image: ``str`` or :class: `GCENodeImage` - - :param replacement: Image object to use as a replacement - :type replacement: ``str`` or :class: `GCENodeImage` - - :param state: State of the image - :type state: ``str`` - - :return: True if successfull - :rtype: ``bool`` - """ - if not hasattr(image, 'name'): - image = self.ex_get_image(image) - - if not hasattr(replacement, 'name'): - replacement = self.ex_get_image(replacement) - - if state is None: - state = 'DEPRECATED' - - possible_states = ['DELETED', 'DEPRECATED', 'OBSOLETE'] - - if state not in possible_states: - raise ValueError('state must be one of %s' - % ','.join(possible_states)) - - image_data = { - 'state': state, - 'replacement': replacement.extra['selfLink'], - } - - request = '/global/images/%s/deprecate' % (image.name) - - self.connection.request( - request, method='POST', data=image_data).object - - return True - - def ex_destroy_healthcheck(self, healthcheck): - """ - Destroy a healthcheck. - - :param healthcheck: Health check object to destroy - :type healthcheck: :class:`GCEHealthCheck` - - :return: True if successful - :rtype: ``bool`` - """ - request = '/global/httpHealthChecks/%s' % (healthcheck.name) - self.connection.async_request(request, method='DELETE') - return True - - def ex_destroy_firewall(self, firewall): - """ - Destroy a firewall. 
- - :param firewall: Firewall object to destroy - :type firewall: :class:`GCEFirewall` - - :return: True if successful - :rtype: ``bool`` - """ - request = '/global/firewalls/%s' % (firewall.name) - self.connection.async_request(request, method='DELETE') - return True - - def ex_destroy_forwarding_rule(self, forwarding_rule): - """ - Destroy a forwarding rule. - - :param forwarding_rule: Forwarding Rule object to destroy - :type forwarding_rule: :class:`GCEForwardingRule` - - :return: True if successful - :rtype: ``bool`` - """ - request = '/regions/%s/forwardingRules/%s' % ( - forwarding_rule.region.name, forwarding_rule.name) - self.connection.async_request(request, method='DELETE') - return True - - def ex_destroy_network(self, network): - """ - Destroy a network. - - :param network: Network object to destroy - :type network: :class:`GCENetwork` - - :return: True if successful - :rtype: ``bool`` - """ - request = '/global/networks/%s' % (network.name) - self.connection.async_request(request, method='DELETE') - return True - - def destroy_node(self, node, destroy_boot_disk=False): - """ - Destroy a node. - - :param node: Node object to destroy - :type node: :class:`Node` - - :keyword destroy_boot_disk: If true, also destroy the node's - boot disk. (Note that this keyword is not - accessible from the node's .destroy() - method.) - :type destroy_boot_disk: ``bool`` - - :return: True if successful - :rtype: ``bool`` - """ - request = '/zones/%s/instances/%s' % (node.extra['zone'].name, - node.name) - self.connection.async_request(request, method='DELETE') - if destroy_boot_disk and node.extra['boot_disk']: - node.extra['boot_disk'].destroy() - return True - - def ex_destroy_multiple_nodes(self, node_list, ignore_errors=True, - destroy_boot_disk=False, poll_interval=2, - timeout=DEFAULT_TASK_COMPLETION_TIMEOUT): - """ - Destroy multiple nodes at once. 
- - :param node_list: List of nodes to destroy - :type node_list: ``list`` of :class:`Node` - - :keyword ignore_errors: If true, don't raise an exception if one or - more nodes fails to be destroyed. - :type ignore_errors: ``bool`` - - :keyword destroy_boot_disk: If true, also destroy the nodes' boot - disks. - :type destroy_boot_disk: ``bool`` - - :keyword poll_interval: Number of seconds between status checks. - :type poll_interval: ``int`` - - :keyword timeout: Number of seconds to wait for all nodes to be - destroyed. - :type timeout: ``int`` - - :return: A list of boolean values. One for each node. True means - that the node was successfully destroyed. - :rtype: ``list`` of ``bool`` - """ - status_list = [] - complete = False - start_time = time.time() - for node in node_list: - request = '/zones/%s/instances/%s' % (node.extra['zone'].name, - node.name) - try: - response = self.connection.request(request, - method='DELETE').object - except GoogleBaseError: - self._catch_error(ignore_errors=ignore_errors) - response = None - - status = {'node': node, - 'node_success': False, - 'node_response': response, - 'disk_success': not destroy_boot_disk, - 'disk_response': None} - - status_list.append(status) - - while not complete: - if (time.time() - start_time >= timeout): - raise Exception("Timeout (%s sec) while waiting to delete " - "multiple instances") - complete = True - for status in status_list: - # If one of the operations is running, check the status - operation = status['node_response'] or status['disk_response'] - delete_disk = False - if operation: - no_errors = True - try: - response = self.connection.request( - operation['selfLink']).object - except GoogleBaseError: - self._catch_error(ignore_errors=ignore_errors) - no_errors = False - response = {'status': 'DONE'} - if response['status'] == 'DONE': - # If a node was deleted, update status and indicate - # that the disk is ready to be deleted. 
- if status['node_response']: - status['node_response'] = None - status['node_success'] = no_errors - delete_disk = True - else: - status['disk_response'] = None - status['disk_success'] = no_errors - # If we are destroying disks, and the node has been deleted, - # destroy the disk. - if delete_disk and destroy_boot_disk: - boot_disk = status['node'].extra['boot_disk'] - if boot_disk: - request = '/zones/%s/disks/%s' % ( - boot_disk.extra['zone'].name, boot_disk.name) - try: - response = self.connection.request( - request, method='DELETE').object - except GoogleBaseError: - self._catch_error(ignore_errors=ignore_errors) - no_errors = False - response = None - status['disk_response'] = response - else: # If there is no boot disk, ignore - status['disk_success'] = True - operation = status['node_response'] or status['disk_response'] - if operation: - time.sleep(poll_interval) - complete = False - - success = [] - for status in status_list: - s = status['node_success'] and status['disk_success'] - success.append(s) - return success - - def ex_destroy_targetpool(self, targetpool): - """ - Destroy a target pool. - - :param targetpool: TargetPool object to destroy - :type targetpool: :class:`GCETargetPool` - - :return: True if successful - :rtype: ``bool`` - """ - request = '/regions/%s/targetPools/%s' % (targetpool.region.name, - targetpool.name) - - self.connection.async_request(request, method='DELETE') - return True - - def destroy_volume(self, volume): - """ - Destroy a volume. - - :param volume: Volume object to destroy - :type volume: :class:`StorageVolume` - - :return: True if successful - :rtype: ``bool`` - """ - request = '/zones/%s/disks/%s' % (volume.extra['zone'].name, - volume.name) - self.connection.async_request(request, method='DELETE') - return True - - def destroy_volume_snapshot(self, snapshot): - """ - Destroy a snapshot. 
- - :param snapshot: Snapshot object to destroy - :type snapshot: :class:`GCESnapshot` - - :return: True if successful - :rtype: ``bool`` - """ - request = '/global/snapshots/%s' % (snapshot.name) - self.connection.async_request(request, method='DELETE') - return True - - def ex_get_address(self, name, region=None): - """ - Return an Address object based on an address name and optional region. - - :param name: The name of the address - :type name: ``str`` - - :keyword region: The region to search for the address in (set to - 'all' to search all regions) - :type region: ``str`` :class:`GCERegion` or ``None`` - - :return: An Address object for the address - :rtype: :class:`GCEAddress` - """ - region = self._set_region(region) or self._find_zone_or_region( - name, 'addresses', region=True, res_name='Address') - request = '/regions/%s/addresses/%s' % (region.name, name) - response = self.connection.request(request, method='GET').object - return self._to_address(response) - - def ex_get_healthcheck(self, name): - """ - Return a HealthCheck object based on the healthcheck name. - - :param name: The name of the healthcheck - :type name: ``str`` - - :return: A GCEHealthCheck object - :rtype: :class:`GCEHealthCheck` - """ - request = '/global/httpHealthChecks/%s' % (name) - response = self.connection.request(request, method='GET').object - return self._to_healthcheck(response) - - def ex_get_firewall(self, name): - """ - Return a Firewall object based on the firewall name. - - :param name: The name of the firewall - :type name: ``str`` - - :return: A GCEFirewall object - :rtype: :class:`GCEFirewall` - """ - request = '/global/firewalls/%s' % (name) - response = self.connection.request(request, method='GET').object - return self._to_firewall(response) - - def ex_get_forwarding_rule(self, name, region=None): - """ - Return a Forwarding Rule object based on the forwarding rule name. 
- - :param name: The name of the forwarding rule - :type name: ``str`` - - :keyword region: The region to search for the rule in (set to 'all' - to search all regions). - :type region: ``str`` or ``None`` - - :return: A GCEForwardingRule object - :rtype: :class:`GCEForwardingRule` - """ - region = self._set_region(region) or self._find_zone_or_region( - name, 'forwardingRules', region=True, res_name='ForwardingRule') - request = '/regions/%s/forwardingRules/%s' % (region.name, name) - response = self.connection.request(request, method='GET').object - return self._to_forwarding_rule(response) - - def ex_get_image(self, partial_name): - """ - Return an GCENodeImage object based on the name or link provided. - - :param partial_name: The name, partial name, or full path of a GCE - image. - :type partial_name: ``str`` - - :return: GCENodeImage object based on provided information or None if - an image with that name is not found. - :rtype: :class:`GCENodeImage` or ``None`` - """ - if partial_name.startswith('https://'): - response = self.connection.request(partial_name, method='GET') - return self._to_node_image(response.object) - image = self._match_images(None, partial_name) - if not image: - if (partial_name.startswith('debian') or - partial_name.startswith('backports')): - image = self._match_images('debian-cloud', partial_name) - elif partial_name.startswith('centos'): - image = self._match_images('centos-cloud', partial_name) - elif partial_name.startswith('sles'): - image = self._match_images('suse-cloud', partial_name) - elif partial_name.startswith('rhel'): - image = self._match_images('rhel-cloud', partial_name) - elif partial_name.startswith('windows'): - image = self._match_images('windows-cloud', partial_name) - elif partial_name.startswith('container-vm'): - image = self._match_images('google-containers', partial_name) - - return image - - def ex_get_network(self, name): - """ - Return a Network object based on a network name. 
- - :param name: The name of the network - :type name: ``str`` - - :return: A Network object for the network - :rtype: :class:`GCENetwork` - """ - request = '/global/networks/%s' % (name) - response = self.connection.request(request, method='GET').object - return self._to_network(response) - - def ex_get_node(self, name, zone=None): - """ - Return a Node object based on a node name and optional zone. - - :param name: The name of the node - :type name: ``str`` - - :keyword zone: The zone to search for the node in. If set to 'all', - search all zones for the instance. - :type zone: ``str`` or :class:`GCEZone` or - :class:`NodeLocation` or ``None`` - - :return: A Node object for the node - :rtype: :class:`Node` - """ - zone = self._set_zone(zone) or self._find_zone_or_region( - name, 'instances', res_name='Node') - request = '/zones/%s/instances/%s' % (zone.name, name) - response = self.connection.request(request, method='GET').object - return self._to_node(response) - - def ex_get_project(self): - """ - Return a Project object with project-wide information. - - :return: A GCEProject object - :rtype: :class:`GCEProject` - """ - response = self.connection.request('', method='GET').object - return self._to_project(response) - - def ex_get_size(self, name, zone=None): - """ - Return a size object based on a machine type name and zone. - - :param name: The name of the node - :type name: ``str`` - - :keyword zone: The zone to search for the machine type in - :type zone: ``str`` or :class:`GCEZone` or - :class:`NodeLocation` or ``None`` - - :return: A GCENodeSize object for the machine type - :rtype: :class:`GCENodeSize` - """ - zone = zone or self.zone - if not hasattr(zone, 'name'): - zone = self.ex_get_zone(zone) - request = '/zones/%s/machineTypes/%s' % (zone.name, name) - response = self.connection.request(request, method='GET').object - return self._to_node_size(response) - - def ex_get_snapshot(self, name): - """ - Return a Snapshot object based on snapshot name. 
- - :param name: The name of the snapshot - :type name: ``str`` - - :return: A GCESnapshot object for the snapshot - :rtype: :class:`GCESnapshot` - """ - request = '/global/snapshots/%s' % (name) - response = self.connection.request(request, method='GET').object - return self._to_snapshot(response) - - def ex_get_volume(self, name, zone=None): - """ - Return a Volume object based on a volume name and optional zone. - - :param name: The name of the volume - :type name: ``str`` - - :keyword zone: The zone to search for the volume in (set to 'all' to - search all zones) - :type zone: ``str`` or :class:`GCEZone` or :class:`NodeLocation` - or ``None`` - - :return: A StorageVolume object for the volume - :rtype: :class:`StorageVolume` - """ - zone = self._set_zone(zone) or self._find_zone_or_region( - name, 'disks', res_name='Volume') - request = '/zones/%s/disks/%s' % (zone.name, name) - response = self.connection.request(request, method='GET').object - return self._to_storage_volume(response) - - def ex_get_region(self, name): - """ - Return a Region object based on the region name. - - :param name: The name of the region. - :type name: ``str`` - - :return: A GCERegion object for the region - :rtype: :class:`GCERegion` - """ - if name.startswith('https://'): - short_name = self._get_components_from_path(name)['name'] - request = name - else: - short_name = name - request = '/regions/%s' % (name) - # Check region cache first - if short_name in self.region_dict: - return self.region_dict[short_name] - # Otherwise, look up region information - response = self.connection.request(request, method='GET').object - return self._to_region(response) - - def ex_get_targetpool(self, name, region=None): - """ - Return a TargetPool object based on a name and optional region. - - :param name: The name of the target pool - :type name: ``str`` - - :keyword region: The region to search for the target pool in (set to - 'all' to search all regions). 
- :type region: ``str`` or :class:`GCERegion` or ``None`` - - :return: A TargetPool object for the pool - :rtype: :class:`GCETargetPool` - """ - region = self._set_region(region) or self._find_zone_or_region( - name, 'targetPools', region=True, res_name='TargetPool') - request = '/regions/%s/targetPools/%s' % (region.name, name) - response = self.connection.request(request, method='GET').object - return self._to_targetpool(response) - - def ex_get_zone(self, name): - """ - Return a Zone object based on the zone name. - - :param name: The name of the zone. - :type name: ``str`` - - :return: A GCEZone object for the zone or None if not found - :rtype: :class:`GCEZone` or ``None`` - """ - if name.startswith('https://'): - short_name = self._get_components_from_path(name)['name'] - request = name - else: - short_name = name - request = '/zones/%s' % (name) - # Check zone cache first - if short_name in self.zone_dict: - return self.zone_dict[short_name] - # Otherwise, look up zone information - try: - response = self.connection.request(request, method='GET').object - except ResourceNotFoundError: - return None - return self._to_zone(response) - - def ex_copy_image(self, name, url, description=None): - """ - Copy an image to your image collection. - - :param name: The name of the image - :type name: ``str`` - - :param url: The URL to the image. The URL can start with `gs://` - :param url: ``str`` - - :param description: The description of the image - :type description: ``str`` - - :return: NodeImage object based on provided information or None if an - image with that name is not found. 
- :rtype: :class:`NodeImage` or ``None`` - """ - - # the URL for an image can start with gs:// - if url.startswith('gs://'): - url = url.replace('gs://', 'https://storage.googleapis.com/', 1) - - image_data = { - 'name': name, - 'description': description, - 'sourceType': 'RAW', - 'rawDisk': { - 'source': url, - }, - } - - request = '/global/images' - self.connection.async_request(request, method='POST', - data=image_data) - return self.ex_get_image(name) - - def _ex_connection_class_kwargs(self): - return {'auth_type': self.auth_type, - 'project': self.project, - 'scopes': self.scopes} - - def _catch_error(self, ignore_errors=False): - """ - Catch an exception and raise it unless asked to ignore it. - - :keyword ignore_errors: If true, just return the error. Otherwise, - raise the error. - :type ignore_errors: ``bool`` - - :return: The exception that was raised. - :rtype: :class:`Exception` - """ - e = sys.exc_info()[1] - if ignore_errors: - return e - else: - raise e - - def _get_components_from_path(self, path): - """ - Return a dictionary containing name & zone/region from a request path. - - :param path: HTTP request path (e.g. - '/project/pjt-name/zones/us-central1-a/instances/mynode') - :type path: ``str`` - - :return: Dictionary containing name and zone/region of resource - :rtype ``dict`` - """ - region = None - zone = None - glob = False - components = path.split('/') - name = components[-1] - if components[-4] == 'regions': - region = components[-3] - elif components[-4] == 'zones': - zone = components[-3] - elif components[-3] == 'global': - glob = True - - return {'name': name, 'region': region, 'zone': zone, 'global': glob} - - def _get_region_from_zone(self, zone): - """ - Return the Region object that contains the given Zone object. 
- - :param zone: Zone object - :type zone: :class:`GCEZone` - - :return: Region object that contains the zone - :rtype: :class:`GCERegion` - """ - for region in self.region_list: - zones = [z.name for z in region.zones] - if zone.name in zones: - return region - - def _find_zone_or_region(self, name, res_type, region=False, - res_name=None): - """ - Find the zone or region for a named resource. - - :param name: Name of resource to find - :type name: ``str`` - - :param res_type: Type of resource to find. - Examples include: 'disks', 'instances' or 'addresses' - :type res_type: ``str`` - - :keyword region: If True, search regions instead of zones - :type region: ``bool`` - - :keyword res_name: The name of the resource type for error messages. - Examples: 'Volume', 'Node', 'Address' - :keyword res_name: ``str`` - - :return: Zone/Region object for the zone/region for the resource. - :rtype: :class:`GCEZone` or :class:`GCERegion` - """ - if region: - rz = 'region' - else: - rz = 'zone' - rz_name = None - res_name = res_name or res_type - request = '/aggregated/%s' % (res_type) - res_list = self.connection.request(request).object - for k, v in res_list['items'].items(): - for res in v.get(res_type, []): - if res['name'] == name: - rz_name = k.replace('%ss/' % (rz), '') - break - if not rz_name: - raise ResourceNotFoundError( - '%s \'%s\' not found in any %s.' % (res_name, name, rz), - None, None) - else: - getrz = getattr(self, 'ex_get_%s' % (rz)) - return getrz(rz_name) - - def _match_images(self, project, partial_name): - """ - Find the latest image, given a partial name. - - For example, providing 'debian-7' will return the image object for the - most recent image with a name that starts with 'debian-7' in the - supplied project. If no project is given, it will search your own - project. - - :param project: The name of the project to search for images. - Examples include: 'debian-cloud' and 'centos-cloud'. 
- :type project: ``str`` or ``None`` - - :param partial_name: The full name or beginning of a name for an - image. - :type partial_name: ``str`` - - :return: The latest image object that matches the partial name or None - if no matching image is found. - :rtype: :class:`GCENodeImage` or ``None`` - """ - project_images = self.list_images(project) - partial_match = [] - for image in project_images: - if image.name == partial_name: - return image - if image.name.startswith(partial_name): - ts = timestamp_to_datetime(image.extra['creationTimestamp']) - if not partial_match or partial_match[0] < ts: - partial_match = [ts, image] - - if partial_match: - return partial_match[1] - - def _set_region(self, region): - """ - Return the region to use for listing resources. - - :param region: A name, region object, None, or 'all' - :type region: ``str`` or :class:`GCERegion` or ``None`` - - :return: A region object or None if all regions should be considered - :rtype: :class:`GCERegion` or ``None`` - """ - region = region or self.region - - if region == 'all' or region is None: - return None - - if not hasattr(region, 'name'): - region = self.ex_get_region(region) - return region - - def _set_zone(self, zone): - """ - Return the zone to use for listing resources. - - :param zone: A name, zone object, None, or 'all' - :type region: ``str`` or :class:`GCEZone` or ``None`` - - :return: A zone object or None if all zones should be considered - :rtype: :class:`GCEZone` or ``None`` - """ - zone = zone or self.zone - - if zone == 'all' or zone is None: - return None - - if not hasattr(zone, 'name'): - zone = self.ex_get_zone(zone) - return zone - - def _create_node_req(self, name, size, image, location, network, - tags=None, metadata=None, boot_disk=None, - external_ip='ephemeral'): - """ - Returns a request and body to create a new node. This is a helper - method to support both :class:`create_node` and - :class:`ex_create_multiple_nodes`. 
- - :param name: The name of the node to create. - :type name: ``str`` - - :param size: The machine type to use. - :type size: :class:`GCENodeSize` - - :param image: The image to use to create the node (or, if using a - persistent disk, the image the disk was created from). - :type image: :class:`GCENodeImage` - - :param location: The location (zone) to create the node in. - :type location: :class:`NodeLocation` or :class:`GCEZone` - - :param network: The network to associate with the node. - :type network: :class:`GCENetwork` - - :keyword tags: A list of tags to associate with the node. - :type tags: ``list`` of ``str`` - - :keyword metadata: Metadata dictionary for instance. - :type metadata: ``dict`` - - :keyword boot_disk: Persistent boot disk to attach. - :type :class:`StorageVolume` - - :keyword external_ip: The external IP address to use. If 'ephemeral' - (default), a new non-static address will be - used. If 'None', then no external address will - be used. To use an existing static IP address, - a GCEAddress object should be passed in. - :type external_ip: :class:`GCEAddress` or ``str`` or None - - :return: A tuple containing a request string and a node_data dict. 
- :rtype: ``tuple`` of ``str`` and ``dict`` - """ - node_data = {} - node_data['machineType'] = size.extra['selfLink'] - node_data['name'] = name - if tags: - node_data['tags'] = {'items': tags} - if metadata: - node_data['metadata'] = metadata - - if boot_disk: - disks = [{'kind': 'compute#attachedDisk', - 'boot': True, - 'type': 'PERSISTENT', - 'mode': 'READ_WRITE', - 'deviceName': boot_disk.name, - 'zone': boot_disk.extra['zone'].extra['selfLink'], - 'source': boot_disk.extra['selfLink']}] - node_data['disks'] = disks - else: - node_data['image'] = image.extra['selfLink'] - - ni = [{'kind': 'compute#instanceNetworkInterface', - 'network': network.extra['selfLink']}] - if external_ip: - access_configs = [{'name': 'External NAT', - 'type': 'ONE_TO_ONE_NAT'}] - if hasattr(external_ip, 'address'): - access_configs[0]['natIP'] = external_ip.address - ni[0]['accessConfigs'] = access_configs - node_data['networkInterfaces'] = ni - - request = '/zones/%s/instances' % (location.name) - - return request, node_data - - def _multi_create_disk(self, status, node_attrs): - """Create disk for ex_create_multiple_nodes. - - :param status: Dictionary for holding node/disk creation status. - (This dictionary is modified by this method) - :type status: ``dict`` - - :param node_attrs: Dictionary for holding node attribute information. - (size, image, location, etc.) - :type node_attrs: ``dict`` - """ - disk = None - # Check for existing disk - if node_attrs['use_existing_disk']: - try: - disk = self.ex_get_volume(status['name'], - node_attrs['location']) - except ResourceNotFoundError: - pass - - if disk: - status['disk'] = disk - else: - # Create disk and return response object back in the status dict. - # Or, if there is an error, mark as failed. 
- disk_req, disk_data, disk_params = self._create_vol_req( - None, status['name'], location=node_attrs['location'], - image=node_attrs['image']) - try: - disk_res = self.connection.request( - disk_req, method='POST', data=disk_data, - params=disk_params).object - except GoogleBaseError: - e = self._catch_error( - ignore_errors=node_attrs['ignore_errors']) - error = e.value - code = e.code - disk_res = None - status['disk'] = GCEFailedDisk(status['name'], - error, code) - status['disk_response'] = disk_res - - def _multi_check_disk(self, status, node_attrs): - """Check disk status for ex_create_multiple_nodes. - - :param status: Dictionary for holding node/disk creation status. - (This dictionary is modified by this method) - :type status: ``dict`` - - :param node_attrs: Dictionary for holding node attribute information. - (size, image, location, etc.) - :type node_attrs: ``dict`` - """ - error = None - try: - response = self.connection.request( - status['disk_response']['selfLink']).object - except GoogleBaseError: - e = self._catch_error(ignore_errors=node_attrs['ignore_errors']) - error = e.value - code = e.code - response = {'status': 'DONE'} - if response['status'] == 'DONE': - status['disk_response'] = None - if error: - status['disk'] = GCEFailedDisk(status['name'], error, code) - else: - status['disk'] = self.ex_get_volume(status['name'], - node_attrs['location']) - - def _multi_create_node(self, status, node_attrs): - """Create node for ex_create_multiple_nodes. - - :param status: Dictionary for holding node/disk creation status. - (This dictionary is modified by this method) - :type status: ``dict`` - - :param node_attrs: Dictionary for holding node attribute information. - (size, image, location, etc.) - :type node_attrs: ``dict`` - """ - # If disk has an error, set the node as failed and return - if hasattr(status['disk'], 'error'): - status['node'] = status['disk'] - return - - # Create node and return response object in status dictionary. 
- # Or, if there is an error, mark as failed. - request, node_data = self._create_node_req( - status['name'], node_attrs['size'], node_attrs['image'], - node_attrs['location'], node_attrs['network'], node_attrs['tags'], - node_attrs['metadata'], boot_disk=status['disk'], - external_ip=node_attrs['external_ip']) - try: - node_res = self.connection.request( - request, method='POST', data=node_data).object - except GoogleBaseError: - e = self._catch_error(ignore_errors=node_attrs['ignore_errors']) - error = e.value - code = e.code - node_res = None - status['node'] = GCEFailedNode(status['name'], - error, code) - status['node_response'] = node_res - - def _multi_check_node(self, status, node_attrs): - """Check node status for ex_create_multiple_nodes. - - :param status: Dictionary for holding node/disk creation status. - (This dictionary is modified by this method) - :type status: ``dict`` - - :param node_attrs: Dictionary for holding node attribute information. - (size, image, location, etc.) - :type node_attrs: ``dict`` - """ - error = None - try: - response = self.connection.request( - status['node_response']['selfLink']).object - except GoogleBaseError: - e = self._catch_error(ignore_errors=node_attrs['ignore_errors']) - error = e.value - code = e.code - response = {'status': 'DONE'} - if response['status'] == 'DONE': - status['node_response'] = None - if error: - status['node'] = GCEFailedNode(status['name'], - error, code) - else: - status['node'] = self.ex_get_node(status['name'], - node_attrs['location']) - - def _create_vol_req(self, size, name, location=None, snapshot=None, - image=None): - """ - Assemble the request/data for creating a volume. - - Used by create_volume and ex_create_multiple_nodes - - :param size: Size of volume to create (in GB). Can be None if image - or snapshot is supplied. 
- :type size: ``int`` or ``str`` or ``None`` - - :param name: Name of volume to create - :type name: ``str`` - - :keyword location: Location (zone) to create the volume in - :type location: ``str`` or :class:`GCEZone` or - :class:`NodeLocation` or ``None`` - - :keyword snapshot: Snapshot to create image from - :type snapshot: :class:`GCESnapshot` or ``str`` or ``None`` - - :keyword image: Image to create disk from. - :type image: :class:`GCENodeImage` or ``str`` or ``None`` - - :return: Tuple containing the request string, the data dictionary and - the URL parameters - :rtype: ``tuple`` - """ - volume_data = {} - params = None - volume_data['name'] = name - if size: - volume_data['sizeGb'] = str(size) - if image: - if not hasattr(image, 'name'): - image = self.ex_get_image(image) - params = {'sourceImage': image.extra['selfLink']} - volume_data['description'] = 'Image: %s' % ( - image.extra['selfLink']) - if snapshot: - if not hasattr(snapshot, 'name'): - # Check for full URI to not break backward-compatibility - if snapshot.startswith('https'): - snapshot = self._get_components_from_path(snapshot)['name'] - snapshot = self.ex_get_snapshot(snapshot) - snapshot_link = snapshot.extra['selfLink'] - volume_data['sourceSnapshot'] = snapshot_link - volume_data['description'] = 'Snapshot: %s' % (snapshot_link) - location = location or self.zone - if not hasattr(location, 'name'): - location = self.ex_get_zone(location) - request = '/zones/%s/disks' % (location.name) - - return request, volume_data, params - - def _to_address(self, address): - """ - Return an Address object from the json-response dictionary. - - :param address: The dictionary describing the address. 
- :type address: ``dict`` - - :return: Address object - :rtype: :class:`GCEAddress` - """ - extra = {} - - region = self.ex_get_region(address['region']) - - extra['selfLink'] = address.get('selfLink') - extra['status'] = address.get('status') - extra['creationTimestamp'] = address.get('creationTimestamp') - - return GCEAddress(id=address['id'], name=address['name'], - address=address['address'], - region=region, driver=self, extra=extra) - - def _to_healthcheck(self, healthcheck): - """ - Return a HealthCheck object from the json-response dictionary. - - :param healthcheck: The dictionary describing the healthcheck. - :type healthcheck: ``dict`` - - :return: HealthCheck object - :rtype: :class:`GCEHealthCheck` - """ - extra = {} - extra['selfLink'] = healthcheck.get('selfLink') - extra['creationTimestamp'] = healthcheck.get('creationTimestamp') - extra['description'] = healthcheck.get('description') - extra['host'] = healthcheck.get('host') - - return GCEHealthCheck( - id=healthcheck['id'], name=healthcheck['name'], - path=healthcheck.get('requestPath'), port=healthcheck.get('port'), - interval=healthcheck.get('checkIntervalSec'), - timeout=healthcheck.get('timeoutSec'), - unhealthy_threshold=healthcheck.get('unhealthyThreshold'), - healthy_threshold=healthcheck.get('healthyThreshold'), - driver=self, extra=extra) - - def _to_firewall(self, firewall): - """ - Return a Firewall object from the json-response dictionary. - - :param firewall: The dictionary describing the firewall. 
- :type firewall: ``dict`` - - :return: Firewall object - :rtype: :class:`GCEFirewall` - """ - extra = {} - extra['selfLink'] = firewall.get('selfLink') - extra['creationTimestamp'] = firewall.get('creationTimestamp') - extra['description'] = firewall.get('description') - extra['network_name'] = self._get_components_from_path( - firewall['network'])['name'] - - network = self.ex_get_network(extra['network_name']) - source_ranges = firewall.get('sourceRanges') - source_tags = firewall.get('sourceTags') - target_tags = firewall.get('targetTags') - - return GCEFirewall(id=firewall['id'], name=firewall['name'], - allowed=firewall.get('allowed'), network=network, - source_ranges=source_ranges, - source_tags=source_tags, - target_tags=target_tags, - driver=self, extra=extra) - - def _to_forwarding_rule(self, forwarding_rule): - """ - Return a Forwarding Rule object from the json-response dictionary. - - :param forwarding_rule: The dictionary describing the rule. - :type forwarding_rule: ``dict`` - - :return: ForwardingRule object - :rtype: :class:`GCEForwardingRule` - """ - extra = {} - extra['selfLink'] = forwarding_rule.get('selfLink') - extra['portRange'] = forwarding_rule.get('portRange') - extra['creationTimestamp'] = forwarding_rule.get('creationTimestamp') - extra['description'] = forwarding_rule.get('description') - - region = self.ex_get_region(forwarding_rule['region']) - targetpool = self.ex_get_targetpool( - self._get_components_from_path(forwarding_rule['target'])['name']) - - return GCEForwardingRule(id=forwarding_rule['id'], - name=forwarding_rule['name'], region=region, - address=forwarding_rule.get('IPAddress'), - protocol=forwarding_rule.get('IPProtocol'), - targetpool=targetpool, - driver=self, extra=extra) - - def _to_network(self, network): - """ - Return a Network object from the json-response dictionary. - - :param network: The dictionary describing the network. 
- :type network: ``dict`` - - :return: Network object - :rtype: :class:`GCENetwork` - """ - extra = {} - - extra['selfLink'] = network.get('selfLink') - extra['gatewayIPv4'] = network.get('gatewayIPv4') - extra['description'] = network.get('description') - extra['creationTimestamp'] = network.get('creationTimestamp') - - return GCENetwork(id=network['id'], name=network['name'], - cidr=network.get('IPv4Range'), - driver=self, extra=extra) - - def _to_node_image(self, image): - """ - Return an Image object from the json-response dictionary. - - :param image: The dictionary describing the image. - :type image: ``dict`` - - :return: Image object - :rtype: :class:`GCENodeImage` - """ - extra = {} - extra['preferredKernel'] = image.get('preferredKernel', None) - extra['description'] = image.get('description', None) - extra['creationTimestamp'] = image.get('creationTimestamp') - extra['selfLink'] = image.get('selfLink') - extra['deprecated'] = image.get('deprecated', None) - - return GCENodeImage(id=image['id'], name=image['name'], driver=self, - extra=extra) - - def _to_node_location(self, location): - """ - Return a Location object from the json-response dictionary. - - :param location: The dictionary describing the location. - :type location: ``dict`` - - :return: Location object - :rtype: :class:`NodeLocation` - """ - return NodeLocation(id=location['id'], name=location['name'], - country=location['name'].split('-')[0], - driver=self) - - def _to_node(self, node): - """ - Return a Node object from the json-response dictionary. - - :param node: The dictionary describing the node. 
- :type node: ``dict`` - - :return: Node object - :rtype: :class:`Node` - """ - public_ips = [] - private_ips = [] - extra = {} - - extra['status'] = node.get('status') - extra['description'] = node.get('description') - extra['zone'] = self.ex_get_zone(node['zone']) - extra['image'] = node.get('image') - extra['machineType'] = node.get('machineType') - extra['disks'] = node.get('disks', []) - extra['networkInterfaces'] = node.get('networkInterfaces') - extra['id'] = node['id'] - extra['selfLink'] = node.get('selfLink') - extra['name'] = node['name'] - extra['metadata'] = node.get('metadata', {}) - extra['tags_fingerprint'] = node['tags']['fingerprint'] - extra['scheduling'] = node.get('scheduling', {}) - extra['deprecated'] = True if node.get('deprecated', None) else False - - for disk in extra['disks']: - if disk.get('boot') and disk.get('type') == 'PERSISTENT': - bd = self._get_components_from_path(disk['source']) - extra['boot_disk'] = self.ex_get_volume(bd['name'], bd['zone']) - - if 'items' in node['tags']: - tags = node['tags']['items'] - else: - tags = [] - extra['tags'] = tags - - for network_interface in node.get('networkInterfaces', []): - private_ips.append(network_interface.get('networkIP')) - for access_config in network_interface.get('accessConfigs', []): - public_ips.append(access_config.get('natIP')) - - # For the node attributes, use just machine and image names, not full - # paths. Full paths are available in the "extra" dict. - if extra['image']: - image = self._get_components_from_path(extra['image'])['name'] - else: - image = None - size = self._get_components_from_path(node['machineType'])['name'] - - return Node(id=node['id'], name=node['name'], - state=self.NODE_STATE_MAP[node['status']], - public_ips=public_ips, private_ips=private_ips, - driver=self, size=size, image=image, extra=extra) - - def _to_node_size(self, machine_type): - """ - Return a Size object from the json-response dictionary. 
- - :param machine_type: The dictionary describing the machine. - :type machine_type: ``dict`` - - :return: Size object - :rtype: :class:`GCENodeSize` - """ - extra = {} - extra['selfLink'] = machine_type.get('selfLink') - extra['zone'] = self.ex_get_zone(machine_type['zone']) - extra['description'] = machine_type.get('description') - extra['guestCpus'] = machine_type.get('guestCpus') - extra['creationTimestamp'] = machine_type.get('creationTimestamp') - try: - price = self._get_size_price(size_id=machine_type['name']) - except KeyError: - price = None - - return GCENodeSize(id=machine_type['id'], name=machine_type['name'], - ram=machine_type.get('memoryMb'), - disk=machine_type.get('imageSpaceGb'), - bandwidth=0, price=price, driver=self, extra=extra) - - def _to_project(self, project): - """ - Return a Project object from the json-response dictionary. - - :param project: The dictionary describing the project. - :type project: ``dict`` - - :return: Project object - :rtype: :class:`GCEProject` - """ - extra = {} - extra['selfLink'] = project.get('selfLink') - extra['creationTimestamp'] = project.get('creationTimestamp') - extra['description'] = project.get('description') - metadata = project['commonInstanceMetadata'].get('items') - - return GCEProject(id=project['id'], name=project['name'], - metadata=metadata, quotas=project.get('quotas'), - driver=self, extra=extra) - - def _to_region(self, region): - """ - Return a Region object from the json-response dictionary. - - :param region: The dictionary describing the region. 
- :type region: ``dict`` - - :return: Region object - :rtype: :class:`GCERegion` - """ - extra = {} - extra['selfLink'] = region.get('selfLink') - extra['creationTimestamp'] = region.get('creationTimestamp') - extra['description'] = region.get('description') - - quotas = region.get('quotas') - zones = [self.ex_get_zone(z) for z in region.get('zones', [])] - # Work around a bug that will occasionally list missing zones in the - # region output - zones = [z for z in zones if z is not None] - deprecated = region.get('deprecated') - - return GCERegion(id=region['id'], name=region['name'], - status=region.get('status'), zones=zones, - quotas=quotas, deprecated=deprecated, - driver=self, extra=extra) - - def _to_snapshot(self, snapshot): - """ - Return a Snapshot object from the json-response dictionary. - - :param snapshot: The dictionary describing the snapshot - :type snapshot: ``dict`` - - :return: Snapshot object - :rtype: :class:`VolumeSnapshot` - """ - extra = {} - extra['selfLink'] = snapshot.get('selfLink') - extra['creationTimestamp'] = snapshot.get('creationTimestamp') - extra['sourceDisk'] = snapshot.get('sourceDisk') - - return GCESnapshot(id=snapshot['id'], name=snapshot['name'], - size=snapshot['diskSizeGb'], - status=snapshot.get('status'), driver=self, - extra=extra) - - def _to_storage_volume(self, volume): - """ - Return a Volume object from the json-response dictionary. - - :param volume: The dictionary describing the volume. 
- :type volume: ``dict`` - - :return: Volume object - :rtype: :class:`StorageVolume` - """ - extra = {} - extra['selfLink'] = volume.get('selfLink') - extra['zone'] = self.ex_get_zone(volume['zone']) - extra['status'] = volume.get('status') - extra['creationTimestamp'] = volume.get('creationTimestamp') - extra['description'] = volume.get('description') - - return StorageVolume(id=volume['id'], name=volume['name'], - size=volume['sizeGb'], driver=self, extra=extra) - - def _to_targetpool(self, targetpool): - """ - Return a Target Pool object from the json-response dictionary. - - :param targetpool: The dictionary describing the volume. - :type targetpool: ``dict`` - - :return: Target Pool object - :rtype: :class:`GCETargetPool` - """ - extra = {} - extra['selfLink'] = targetpool.get('selfLink') - extra['description'] = targetpool.get('description') - region = self.ex_get_region(targetpool['region']) - healthcheck_list = [self.ex_get_healthcheck(h.split('/')[-1]) for h - in targetpool.get('healthChecks', [])] - node_list = [] - for n in targetpool.get('instances', []): - # Nodes that do not exist can be part of a target pool. If the - # node does not exist, use the URL of the node instead of the node - # object. - comp = self._get_components_from_path(n) - try: - node = self.ex_get_node(comp['name'], comp['zone']) - except ResourceNotFoundError: - node = n - node_list.append(node) - - return GCETargetPool(id=targetpool['id'], name=targetpool['name'], - region=region, healthchecks=healthcheck_list, - nodes=node_list, driver=self, extra=extra) - - def _to_zone(self, zone): - """ - Return a Zone object from the json-response dictionary. - - :param zone: The dictionary describing the zone. 
- :type zone: ``dict`` - - :return: Zone object - :rtype: :class:`GCEZone` - """ - extra = {} - extra['selfLink'] = zone.get('selfLink') - extra['creationTimestamp'] = zone.get('creationTimestamp') - extra['description'] = zone.get('description') - - deprecated = zone.get('deprecated') - - return GCEZone(id=zone['id'], name=zone['name'], status=zone['status'], - maintenance_windows=zone.get('maintenanceWindows'), - deprecated=deprecated, driver=self, extra=extra) From b09c1bef78e13688e3c5739e5fd620e226a16672 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Mon, 28 Jul 2014 16:07:50 +0200 Subject: [PATCH 120/315] Add two new default node states: - NodeState.SUSPENDED - NodeState.ERROR --- CHANGES.rst | 4 ++++ libcloud/compute/types.py | 6 ++++++ 2 files changed, 10 insertions(+) diff --git a/CHANGES.rst b/CHANGES.rst index bef4c9d103..4d7842ee73 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -7,6 +7,10 @@ Changes with Apache Libcloud in development Compute ~~~~~~~ +- Add two new default node states - ``NodeState.SUSPENDED`` and + ``NodeState.ERROR``. + [Tomaz Muraus] + - Fix to join networks properly in ``deploy_node`` in the CloudStack driver. (LIBCLOUD-593, GITUHB-336) diff --git a/libcloud/compute/types.py b/libcloud/compute/types.py index 9f7a308cce..602d8e849b 100644 --- a/libcloud/compute/types.py +++ b/libcloud/compute/types.py @@ -184,6 +184,10 @@ class NodeState(object): :cvar TERMINATED: Node is terminated. This node can't be started later on. :cvar STOPPED: Node is stopped. This node can be started later on. :cvar PENDING: Node is pending. + :cvar STOPPED: Node is stopped. + :cvar SUSPENDED: Node is suspended. + :cvar ERROR: Node is an error state. Usually no operations can be performed + on the node once it ends up in the error state. :cvar UNKNOWN: Node state is unknown. 
""" RUNNING = 0 @@ -192,6 +196,8 @@ class NodeState(object): PENDING = 3 UNKNOWN = 4 STOPPED = 5 + SUSPENDED = 6 + ERROR = 7 class Architecture(object): From 054f29a94eef433fe208641b5f63ca2f0c613672 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Mon, 28 Jul 2014 18:52:44 +0200 Subject: [PATCH 121/315] Add new driver for VMware vSphere. --- CHANGES.rst | 4 + .../images/provider_logos/vmware_vsphere.png | Bin 0 -> 71341 bytes .../_supported_methods_block_storage.rst | 2 + .../_supported_methods_image_management.rst | 2 + ..._supported_methods_key_pair_management.rst | 2 + docs/compute/_supported_methods_main.rst | 2 + docs/compute/_supported_providers.rst | 2 + docs/compute/drivers/vsphere.rst | 76 ++++ docs/examples/compute/vsphere/connect_host.py | 8 + docs/examples/compute/vsphere/connect_url.py | 8 + .../vsphere/connect_url_custom_port.py | 8 + libcloud/compute/drivers/vsphere.py | 349 ++++++++++++++++++ libcloud/compute/providers.py | 2 + libcloud/compute/types.py | 1 + libcloud/utils/decorators.py | 57 +++ 15 files changed, 523 insertions(+) create mode 100644 docs/_static/images/provider_logos/vmware_vsphere.png create mode 100644 docs/compute/drivers/vsphere.rst create mode 100644 docs/examples/compute/vsphere/connect_host.py create mode 100644 docs/examples/compute/vsphere/connect_url.py create mode 100644 docs/examples/compute/vsphere/connect_url_custom_port.py create mode 100644 libcloud/compute/drivers/vsphere.py create mode 100644 libcloud/utils/decorators.py diff --git a/CHANGES.rst b/CHANGES.rst index 4d7842ee73..5282f0b703 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -7,6 +7,10 @@ Changes with Apache Libcloud in development Compute ~~~~~~~ +- Add new driver for VMware vSphere (http://www.vmware.com/products/vsphere/) + based clouds. + [Tomaz Muraus] + - Add two new default node states - ``NodeState.SUSPENDED`` and ``NodeState.ERROR``. 
[Tomaz Muraus] diff --git a/docs/_static/images/provider_logos/vmware_vsphere.png b/docs/_static/images/provider_logos/vmware_vsphere.png new file mode 100644 index 0000000000000000000000000000000000000000..152c73369e901c6d0bca15803e29a926b212de02 GIT binary patch literal 71341 zcmeFZWmMJe+cpRYxG8Dr?hYvlflY&?bO{1VcQ>*DX^@l@N$Kv825FEE3F(%UoNND| z=l%4oS!-skcV@oKE$VjHFRn9=^Ei&{4p&o=!$K!TM?gTpQjnL{KtOl`9wQDzP{A{9 z&)?>Nzfj*P$w?zT!hd~jD@*{-pgGFxxqufQ;lB}Y=FmQaC!e}1D9b!uM#sSCBbql< z-$6j2Mo^HJ)bw09$n^3h&}x3je$!^o0F!`32C}0=kiId0V&e>?9s1M-DHjrUar!`T z>={6JCA_yI6hroDu@RH?BlA~gYSxHv)vS>b@mljwXv~lEM~`EViiNe7gO2hVr&Gr@noLPLQ5 zU$5Yxqx{#$AR$QbPlK1CLI3^coc}#v^Z&&$|HrBSzi^%ZRuK#bpxBnN8`9D|s z{|}4*c?bTV3;+K+O9cHtfAij_TU{SX4Mk%u4F?E~C*FVZI43RwDskUZ@Ec8`K!V%f zBLX!7@?ZJ>D}kHaKSg8a`gWoRnI;_PsY=l5oe>0)#X?h3^1nYz?c`V+&|EGx>$dot zKO7ah$4a2dLk$Zouo~wZxomL3Y{Pb&qCZi65Pa5*Cd#zxDaGo>EQBx^=+m+ z0VGL6v|=I0CPT-TW$@>gxboC+_5|#S%oBJqVJ5IoE`s- zU_DsSiCX{6o2aAtZtMZ?V9pOAe{&jw8XWzG`qzwoB3i~9#5`3 za?KW>TTasBzns^zoELri&+FxhcN;kBiKi&|DF_4)3oIScpKeg*o4~7{yyBNU|BepH z;W|1rI?4yyYE~SiAO!H^`--Z#q(ogyD`fdLqRPAbdFfp}jzPdOM)q%qbX9yL z&J@%JK`bjPk`vFilROLlf+4kFM+jQT9Dm&FxK9edMBU~q?(c}}U3LulO4*=_;d?(d zu8WITOUk9yu%Wa0%B9|7l;pq55k8#1MD+7n&xrP+RLKS(w<}rpOdvoe%%@;GS-sf*9p^`&Yc?R$@m%?*Bj)Gl6Eji6`za96Bj#0{i8V1|t7xH*wpbawSV> z@a(>VKkPMiZiiRU+gd!j zHl;>?piwNQew4&gh|p#T|F7HGAEovwInsolmFy*ikdSar#}(%+rL>D_Xf~&K;i)nt zFF!xmYyWb>}nq^oOah?$?m*K4vU1j-kWgNZs~zWMjBb7CTSU)R{|yQ6OoYU)}jG<73NJOZ2u4@qdKA7O3tG*8J92h-T&Z6Z^P-g(9Tr@*e_?D@rh zc&i>KFE4Lge7vc)+&A6-l!06#^a*LGmKNd9pFb---r^dk-Lq&{Jdz+uxF?X;TL0$w zBlF4nS{}keU03;m#`az*XNw&ogAVgArbPXT{P`zxFX+Ef=)udk4hJ*_Q0FBQ6hnf}|`w{~hMXA)uZgzDGx*L0&oH!yr)U_zX2ea& z(-r@k?JX|R`*}@yHaAIJ0%*9%=O`&!7R9N%OZlD7ZvAqG_4sEd)y5voPbyTYpYYiWKO-3Y^`L^pYWb5Ziun z*KEV}b7)8%gt7#6pb58S`&HBxExZBBD?(7Fxz&9bh`rFr{FfrGUKwM_hnF5O4mkL{ zoAXSmOn&4a+QHs_ru$7|;m~JlxFIVE?>&o_55Z?OJc2y4XFgp|M%X?_cuv-gH%sYQ zru}IDCYAc{!I^-F3S4do%X3UFN}2xvcMU~m;&+|g+P_+1%RW~dbxlKRT8E(CNtSH1 
zJlTur_!rP?NtEo-`FQ|LG&&SqL)FhC>B&s;lwBu&Me!s%o1ZKS+)9VU;ap zLSmWd*pW(q&``+!h_ib?ci_8NGkMvnb`Ikn@wbUl8?F!h&YBn>uf`eoE6*#Pb70gS z2|>5=+`#tHEtcftpQ~HtSfu~H9w@UkK3b^YS|LZrYNS>=0z%WsS^RvhjAbd#XtPXU zjqi63GQ>cFdZ(P<-TyFW7?U4=e6r;2j>sEHN3gmwhlEk*7*c?#G1b)6{BeAhP9uNL`kzYF83L`%f*(jt0d32_7X4KKf^o&u5d$|qHXOY+oYHeM zTzf0(JXTs53m&xzBa2eEh{=BQb50l5kz|Z|a*O-{YfB$G&B52M+HZ+pJeRj#R~UjC zN6z3tO%AaaPnWS5Y4Os(l>8^?QHjut<<*9Al9Q55dmBRLYApmo46ghYYFx6?W{&5! zCZ4x-kx-O>0Z|a@iZK^rl7#g_i78|`G<#Qw1jd3B3ofu}$`lm~1{@bOp}Xx8vq%a& z&Ia{z`ue1$M;~oR^Y5T~m5mKr<8_YX zQ}-h=1c|R+l3_)Qq;1&HfG}ZhCAuDY;bIB}5(e@?oM&pzB<#4<`Gnf$qf{$}wW0cJ zO1KL6GZlOd^2PDXrG%4HQ;005y~WDR92_zriSPUaq*3VqQ?s4?@$k%jE(!wS+X5{~(%46KE%id##NFE_%aZ z2#$n(q=2Fdf-MJD6st^^Z6f2aFH&90$GFKAkr9C)C-QoZF6AsN2c$&a;!9L}Jl+h> zHB9AdzE7GSD)Q*@)3qp~l!C5C>-)$4^$h$k&OgBe5{s7LSCV3Px*8}a5fSL6x27V+ ze*T_IoPYk*#XxqQgi*|ODN=`i^eU#*g z)CWgZHt=|YBR@9qaZADF{MEBWN<;Hs7uSm*R=T{^E;M{WQekYBE@P6{zU@VsX=w1; zXdT%iKr4B{dhPovF-gb7K^+YtJgT%nd+bTKr%t_K3zf7vhCyh{uWq9x3-cyTeq;sG zax@yd-Xz=GH*F-mnrMR=Gs6au*!aE)^cT`U2^eT*Wg?oM&t2q{7j@c1ho6@gI9ib+ zdE+d49LbH@Q;dw$efX|PgLiiGj6xWuSM%Oh|CT``dQ%bOF1rl{Oy`?lC`qDr)NJfVbq@E!GPiHExXO z%3)`Ctau4+XSjT|(hnv-uD4q}84>(#GF_~tTk{_4@?aj{&CuSSv)Og`qj8&ej%xO+ zovA|XG(m@zuiMRedB`Q&WnDj$*s7d26$zd{fA{!s4{EBFm6g1zs@(hcjQX{fL6lLueH05&A8E!(=tu_LynrZc4)`LMTO^SFOj;Q zo>xy*rm$Pr6Kv5ywu;ujskfDeRqs&Vk7k6cir*8E3ED?3+Dz5k8AMTtJm2V#l^e?v z+u!v3?YV(9oGysn@o-~Pwrw?APH}T}9Q*t6;fk2o93o1osuAnc9g1~*cQyuJxKiCJ zq>zx1y1#LoTU+nWcP3J~O_AVB<=rIL{%?!yV6g%F<;#~l(Udn_$qafY%abK*r(`~S)Sh5!cALXmHov$`cKF^bOa^g``Qc_n}H+?86FBf>U zN()_DGW@f-Nh2soG?KI@0tEHCVUZ_vK8fUNEC_+A0iR<;VPoY$LAQaxn9O zYlRgT7srX=L%w>QZSBmI83bpiWQusK-yV1LS9Cl?OioS?$aI{$y6%i4E;qZ`j9<-z zpsxye^d}%7pcWVJaEEs8$^bk7RZu_!Q620-#Mv)5)DhOdA(N8zgdRpWT++ z{zn977v!`$Oz0ovHN}(x$EL3#BYtS%#U=>(%@r!Q9L43*ZrsGxXWcggJ6<%?_+@OU z&1#r`9X};&Hd~~>W3nwH)0g~NIR{}wv-12zO>=QFR+%n$*lM>Fxg}6}Y4b^eBEnJ4 z0ozT)h!!UfqN#vfjEv-u`1X5f#5?R_rODNb0q7=x&iZc}MR&*fh+qYh5Dtr1sn}FY 
zAx4Y-(D%(x1G2aYC}JIApL6I_i-?dJR}k4Zka4Dzs6f}w#>D&UW{t@ja=fOL1=qfO z$WllZ%%$nL+ZxS`WXkeHdK+->*0psA*1x>{#u^@LW%_lBbFErhTGv+{kI`0B1*TI4 z%4p(L|Ni|We(@f&yyZyv{AgJu$A!zdl{i-HlK9pA`4o)G1H>nfX&17g+qBle(2&yg zol**C@8jLr<7&D?I}g>+;9zG@I6jZpUvqN{i~PGbN(k~vL&rVVnCKZMfWV~{x-^a} zUfZUB{|q*+e#9W{y>-98xoA9U-mkP@7GB&4eu~|P!POcFB817JLqjBpu9u*apDEu3KV&JAk z8nw93Qy%~B5T^{db2{Ijvk)&hXiN=(n z#J4T7CjgKW;hxNGi z>=~u^4h^WG-xLxU;dtTp^6+$kE_U^^5@+~ZW&p(jFxlMP1Ygq!uHfX{2BYe5miAAb zZSY+^QyOX$LD$TIt}G;wc~WwYKlC)i>Qafsn5JQga8kf{ca_aFoDbd;1sG~Mw^fTe5$h5RKYjgo;vjo+jfQTb z^!yD5{9Tz5qq@serkm-hu`J&@J&g(mG_c&vGB}lSsg);aMzXCxHtFXnqIR$U@HBKX z8aU~E`J@Ij$b{?zJ}q7sVe^OcudoWyPoIrQBhX){8UMnw*Lh{Qa$dPDY2&CWHU2hw zIT3bloR!mLxpkIfpcGDC#>dA8J*D(SXjt*K$?W?4v6I@^dzazv&>^7vXt`y7^9~-t*y4BG zF(OBe!hUNYD9J#K>cZ4hFnM3R;IW#dzxy{9Ap8FPd#H8$Y#F(ksS%pmwO z6Q2d6BO?)u`-^T3e|fV(tUSf-vh_NOl%F;Kcf)VLD+<(S8NuqgvZlX^)X_R-uwPwM7pP{($Wi$blHq2ck|LL zjT?AEw(w4p_UGDg=NfpZI6VwJr)qMV+ORl@>-`@IK{Ih$3oIx0Iz@o@2hfRjcX#Vg zay?nt*`+)D@70WrjSDw?R)d~8f;%t38U&t*8BAc>Xxh#yY4CeS%#Fa+eod<)dfL<2 za1U-gH;5U~5p!0?v;gWkYTBlJoyN!V3&Jf6zpsCXxHjHC-VoJ0{5C%BUoJNi5xqO< z28Hr<=9U+Dv*Qp)1G(GMpTUGvQ0zD?j{g;Ad832ID7@Ere(nmsn8rdX-qol-5b;Un z`;j!dnEjPDk@ow`Mg7P#i=pH-c;RU|ZYSf<$ZH1^-0F{@#{KdZZty|xw}~uVTyhQe z%htnQygPGMbON?>2o9Y$4*KTZ2G!#a zcqy%}uFlKe2h3~~6{vv^XW0|^N-4r)4VDwMhAt!g*C1%%eYVuNjg&J5LphDF(;@|| zkN-6y(OX|0aJGaUYTSSE0SKsa`2DK%;nL@|&)FM*cU*;Xuv@<>PpnhbAo%8!K^9JEPB@=`PPzvQkme~Q{(CRQ>IiQ{D%5WbFq-3F?@+vg2mFFTBz z$m18?ogZI1LdD1Msdtb#4Wi8$Z%29XoR+Z!tq=N_pf85Pr)RoXS3y;^A0%JoXr_oD zdi=?<$8Xc&RGzt3FPFAs5iv1}HwCXdDLj@CQ-<7rou&&q^od<91^+wS7QFV_Uv3fH z8q1Cquv;K^JJ`++h$*RP7p8rkLjx~11uB{PHpkG}a-%A+=BdcjTO;Wc-((^aTekgP zQ3<;rd;(~!m+zr@zq}hPT-@j5-%T#t>k(|_pR5i_^=j7t#(=4%^ExD+^HY-%gj7Ya{W-0%t1G?CvL&x&-NgV_D zT#@0m^#u)APW9D4dP<*zDpcI?v;uf#x$bzp?a=37Coy!LkP&*kJ3CG3xI+QGuUqi9 zZ!qXu?+zF0h z^X;7XwfdWChqxRXMq7fIu0muyO8Ky2DL(nRNz9DX(@<&Ly@y|!CF@X9voaHwoCtys zp>}rKNNUlz-Ir?~vo}@s-bC&?$t+BTrrgnXGLr84)~Q6EQm(=<_-e&n*c7oJ 
z0*k9@k?*Imv}5?_*oY9+^o>zEerLxSqk~yTIrUTzh$oK^$B*^|w5H&y8+X1%l1kGP zZ||-69QOif>-Q@o4s-!>fCLoL;!Pl>HKW29zXKlcadmvC01`0tJ?&+o!9y_Viy|TB zHbL+QncCQ}TxX;6nier|cT`mdAUT2>d4xa^Y_~-+SfmkjA%QPJ0gLiF*oYO60r9sH zN@g#w5RZTW28AB}S;Js}*BSY;_vB0U>sYs{PBR2q7$`kg5i!YyxM^rRaVXqqh%wif z=(hit7ht`Y=&khC(X!Ly{gq%Ulftaq@Rt`)L2+WG^gEx>4-rNQmh`=t)@fV|B^&M! z#ioiu!Faw}zi8hbEB*jMAQtoHd(LHS*b-`EjUCc$k%NtyviL+IRaPLcZI|S=6Ez_n zfnrywwKN;ge*G>?i%)I=%PnWv>RcRWE5I~9S9cBiQh1j+i8M`u*;LLtSipjCx$l4K zuo(15eu72!e2J2aPg%;s4Pkovm4V0B!sVOmv4Dp+B|R-^sp0|t!LXDbHat{pN^+fb zy=vViX9mv3Rirym^l`_M5))0}1C^*3ukWk3ikaPS{r{nWY5Q3+rxEGt5?Bw%-TajG zhudR0ZsOP9Hy*$v9+Mw|jR{2?SO#AEWepmCSF-tSv<)|ge*Qe^d`gwKw9m}K(iukO zPkj5~L)1&USkb=_kiovcPSP}+v&Y1)443?G58a+oC?7!cqW%e!vTcRml`^xk7GKnh zcpUYh5E%xF-|wnkBtLmR)HUgMwH)?hOcZ~Awj$PM5l{r6Mi^+^0XQE1MpcXuTkJgU zOV5FOi@LV<^MG#t>0BHD$%o2=5dv$^F{hA;tf7vz$cP(-@#*9v+E7BuOe2%>k7TBABBpn^-%?b_DANlyi%84{3q7_zv0QE; zHTa30Kb|gRJH8*0_!$ZB=S$co4>!AwSOg6vo^1?Xz9|E3?h-og3IdDWtmYgm>3qNb zHyU0ilC{&lRh{RWx!9jAjFRV`tG2*E#Uh1Kh`7&b z|GBx?4+FDj{y`HvxcvZw+Oef&`(2X2hg~oT&|Adbf+s<7lQv|l8c7$7Ex8#^dF!uN z$Xhq~_1iZjfFq12du9B%9adUN@l$grKsn5f(W^8;eE9u%8(?X>(;Nfls5LN?_k+=u zZ0S1I?ErvO(DG4DIzOVIiO(z~_ucR0#*SCK754?_%+fO8Hlz2!^mM(K#~~pzGa7ts zv1jT>&=aR=Egg6!x1NnY!9lP2IW|_|w-djnCFCijiQ5YiiC|7JV~j;Nu#DDJazK|Z zD}S=-WYzZq%>wsraapn*|4pV=LdJVvbAHAgA-iU{K=s(Tb*F5RR9_ii+Y}@_qTy5Ai8;|%~ZM%|9#TyQ)l`W zzbhkn@wDotaD_nrGBO6T`GGFtdzjb6hN9uI1qyh<&WN`@Z zxyorUkYgkPkGH=OVbG(#XfmR7wK!abQtfN?x-o3TABUH+t|D;)`=YkB@+H;r1*vPi zioWfdcnKlAHUk(q0_*v-Nal&$)GWi5bkhfdTk7fJWP?>*&uR0|k#S;H^V3|RSmC+L zEmS?nav$DF%|9esqS^}M@bSw@^DpIF5$zY?aM%c)*s=V|b%JBj_JRFK@rTn`mWH1t)nIwREyKZQd@Zk# zdEbWzJr@05ARPW ztHIbaH!Zd8r~iE6aLO1o*wjANRBprjZdgF`^LP*>Vle$+tm{1EyY5#Vuv`t^^lzYR8FlOi7EDC zsS8Dnr~h0+A67goVjT!4i_>~EfE@e{e?93^Ycj~oZ`3kfS0SmEQ7 zU(E2EY&_C_INj6x|J%~Zv)Jz3R0tc=8jGGsw#`DG8!dYservb)>pPqw(bx*UxR`yb z8WAZnLI1*)dS2GU><@q-JzzA^5xv>TVM z!f@;ZXO&m4H)0)#U2;)#xB8T!*y1-vVCF3*Qq!Ezb8(j8>Ms9ouEWe@Q86Nk2o=g--7128foo1`o{ 
zT8<|wfr#ONrFL0!lcPMo4Z|WQG3;{;Yhq=J<3K`uEjtIr*O@iJ^w#i77%tT6gpsy| zAEVzX^ii~338l8xIx*#%bjk8f#?xm8i`Fkb5TQq?GJWfcYh(S4nIg$wV#uOAnzz=) zqZlVkTzkn@w)b&>5;MjJDqKu0)8e4@ zPw`3Kbf}WYv4Z-jn?or~dD2EkEUc_u*^hT0aPXp; zvVHOBWTSq7W)Leh3ThkxAx@jCaPZGEVPJl=)a0i4>20eg4^Xiuz-)jTz;Wuy2Bs0p zH*>fM89ql%PQCtQS3L?$nJ@rS(jL7p4@g|lj9C~!NCd%$yP;%`Zg7;3bvCmz^DjJ3 zRwd!2o^HLZUf0jUnd#h<^C?v(WqLs4t^H)H=m9VV?@X+CU-;o9OdO7B>sA~9!{s!{ zkYaMO+6f;PV&{^?z$9+BP}>RE3bR+2V02o`^jPTxby9C%ZK~sLlMBv%Nnwk*_Ba)0 zIHLm)6#>W4DaPI`fod$MP_YR`z=^=MF{dY9!OVftg$&iBzo1l<1Til|F?OoD2b(xdd1cP0hGwOuv1L`cYX999~wMl_&r5l=ExuwZiUBXY- z9O(^c-E}o3YU)Uzs0D4@BC8o6I-Cd=T93;u#CN%`xOF38ixd6gkTPC~_!9BSpX7b; zi@I+(u!Yg4P_4U4;t4uz!_bt6kna19q6pH71j!L4wd3!QilnI5T@`~OIn8D;WY-Sq z#g+LYcCo;{pLjcx0o{g+`rde&_CPnuqgV%=%;GBlxyRAcmujqB`8_pVUE&0u<&J>I zMrb%7Vgf-Xy-(GIhGSVV@7h;SD3 zV5tfJ#fQ-z&?fik@vOfA4!d!;AieTkFS_r;zipwz+9_+XS%5?K2v#)qE0{J(|M|XJ z^4y4a`}-M)E^B>c4n2VNgNgZE!cnE?K~=xw{l#o9KvhnWuvxHdMW`;ym}CN&`!i+T zfSP#26u(Lk1(57Zz^R8l-c&pmYZ0A-cw+o>t6g#!lB^+o;Ng>2`Tln3FFT~>^84ol zTiB^kF>Pn{GUD=@8pH<)Mk6z92$54z3PZ6f@qZG=%ySV~Yn)(xm-Z4(F$I-{CIoke zo;JA>CGfIer+afUD3mkHg!HjEMb1jTke*wKrQ0}oya}CkaG-w3gt)Ld>cH(QoA4;H znY&R8tzlGwtzuEE<`(~rpuF7N7j-d(`^C~I^8~(0>te>(xW8XR$z^b91gO&N+o;k! 
zMn6)r%eei^am23=RrpwJlN5u&*+^}bnRxFX5KOiU0Jo!|1;L<03K+123e5ZZl7HO+ zhKq@Q(06w---+Kw3H8)dR@iM1_HcjO&;RJuPZ2g{g6^`0l?ZsEwcj3f1J=6I zaSaM*$aolJnIZs+UmfD==)qSN4FcG&r_B$bLq?Sh-Q(ZPYCmA=$`%9+hsSmbpNiKs%R}UhfhPXs8B0XPWlun2?uM*O)sqNypTZq z2~UaWchW98t=pB_@_bU~KT%wh-Lfs5WvKSJ461YqXs&Am3V{}Pi{BO##b3t;9YLOI zC>El-l~S^(n8bMSxx~d(7jI#xk-5#ea7?B*#wR_!W>v&F*Wq8N_J%^1T&I*3(jNi+ zU|ad#6wau4W<#70szKFBy%FaV$cZaLHwP1&%hs^f_&&aB|L+CHw=IU8cMn$`{c!dJ zE?WVn&8jlbg!<1&s*n@yOu12{|NTvG+x>cP?t zI}r{E0dNO+Wm*8#nC%_Z*q7WF?VPTjH3Rg;4Q^*+clw^9u`wl}uwY<<>#JLCyIP4< zP8XQ2Pz&t#OvIJtw;q*_NtQ(a@c1O>5494rxV5yz1t&3To7UP}p-Z|%DPA$eJ{}vq z`iMf=lf;R-m#rF{ut{kZbeE78Z+kL%)FKENJ$uIY3v5lHtUv3?Z0ZowIun`BOu(RRf zNT$eDk-|dwgT429IMX`b02q{hEY#WXSdM?qBcB3wA^P!dTinUc1yBHhqra95HaZ6P z(FD){aAACSQBzxceZi)p%uV$Z?3aV6hIF*Pa{q&6V{MVOX)%Xi$fUG#-8}NQbM?wB zf(S|W&G<>AQh!X1BH2IwRyhBRMvG92meB&?SyYBWBO^ z!-6fV>FC@|hg!=P#j*Ur49ba`=yp=p2<`raNfeCt>QC+9$hwShqHe(4Et60yTEW~z z=F1nA(5_)~tNbo?X}kutR@}*gB7$(!j&535oS3Bk9-iJmE67a=?ui!SMOJRyoXY0*CvTrC%bnbPQ8-(whp$8Q+iWEL>*th|Fu@hiC&10Zwcz2S)- z86E9gYI5Q6yKx0B9Hzt8UIW?|m+euOs<}jGB zA+Y5=9liE5t?7ih>IPfCd}B4_r-)2+0Y&eSCaa9{m?W12++Dan zN>Y6cvflCL=B+Kqb?PJe0GLQbP=*0%@_lwTVfY>}W7u$90H2n@C}{?)7H-9_HHhFd z*VT#-He7HAwjsEN0WdilaIByMN=2;HsO35EqObw6fTz}OQNZG;!DUR02tIcM7Oc|u zpEDf20((lwDPT?0`|UJK6}7AVlz}m2y~qscn~t3*L_MP0Tpf3KI(lQ$C!oUjE#-Is z$$@8V5oaad6FC-WoInK&H~V70+`I-D=t!G%pmKg)iE$YG%m5hEaD{Vr$c_O{$0f}W-z(=1S^h>x8YKsY0Zn!%}@+kLRb%3Mr8%9)=CS^ zk4X{b5gQD`#MK^Yn1wUg{wAa2kDhRC{1J|_Vs@_&ylIW%Yu5@EE&4^GVJNA%smVs7 z+s4@}oEgVv_Lw`E*(Xj2n$WPduGc6*pa|wYs3RDVZ$M;eyvCrDXQbR35XKIdT?pajID( z4+=PP-OlpiEGMjMJ^}i6KOg6c`Mh++5tDPk)w0Li;qYp>FD#kEV9x>f_0LpiIZV#x z9oj$J+Z(#uARuHwt2LtpI_r&a(pafYH$PLEK|^0*w!eM}@}Vbi>2v{BnrK$w@n*JT zyiheev3Ui|xju$8AF};#tjda&8Acy%8BygVi~b?0)TpOrJ(Y7oS=4CXRE7*a!@}Lg zDPXasrw9v))>HX7py@(a`;$`EoMd-WuTImhcnfnXh;C4S*Ue;^CdP#; z>1aXiPU=089rNsr)yV9LDZ{s29iLq8W(+e)k01|nJdRi3 zDzjl%NBi}9B-i@S$RfMC+&)D0P^v>{`)}q$w&jAGf)G+7W&tmu*|99GZVK@=2mv=QTP>1N6)I 
zmktw8GV{bfJ7{p7H9PEOLs}$)BANS$A$wZ2n5C{hFk!aCRki~k~gL_`xKZdS3jNBreTXR6Bmd@Z5q2`(Xww#%_g zHO(kF1kc-imPnClYe1t2PjUAB?m22me-pA{TA-j*Fq@IY6tdwisp#&}c!8vqm*zLL zsG{iYyt4Gh1y!WcQdAZ@KcS^vPDl!j5)*dAEN#cL;t5o9V0chqOA!X;e93LrNFjRn zcZ0)Q!;;ujUaQGGI{!GPbX+>w)%+CW;x~8xfYv&lkfT}yvK`o_q1Coz0Jk2A9Jf9J z+WP)Wm6w|Y?7Ec*04z#-d3jC7T)@>_D?=cn$VL$d0Xg)=mX8a3qQJ;ssPw+rOMTma z9PsFa@MC8p*9qiMe=VtDHbvpaX%rtg`ii1%0n)6Y3l`j2keHMNR9=v%z2G!;L|V8c zDP4dC#jFq0^?0Stcyt0@TW8<>%#D4ORy}(Qc$ogVDzj7g7 zkzI|R6`MRQ%ER{BW_MvwS3+DeDt_rlnD6Z}Yk8 zgU`#ob8z<=Fl1>m#hbvLv3F~iUCM}vtVGTcl>xcxmrJHg2VEWg~RW%Qe&G=ZkDe1ZN4U81IvRHDIAoFI9R%0&v zwhL{*>IEqLyn9oqqMW7%w%jR{X*kuA$C&HvdIA59^>k((Bd{tzgA+muv4Yt^Nda#DIAu$?frs%Dxl*i>e zsvsy*Q9WcgcwC^ZNdg<0Vd#oOG;kpIvl9S`ok^a>=0YzC659orNkKZ-tc)s)t{`{k zX6>EtPCebu^q_=Gw^P@$NffbX_ZwOQNmKo#E$>)|J+XL_xacS7>X`_0bGER+TjEU3#9Q|ym4&tT&kT=cRtc2eCf z2;aj*c)~)YeqeW0Ac_0cQdwzP2>E!rl-_&AdS(7HD#2PMuVGU~Z;@9^Vx{LJg03Ko zLHLdg-zNZz2O8WA2Mjv+udYB7POD8C02GAj_Gsqs{8pd;fI$!(!n`kKvzDd#&b{26?G(m4XTc8N>+jRv+GNF{v4kNYzFe0QE>b#Nd^ zL=1z{0d*N)fY-$@fw71lcxObVp%zg^;DP<9(feFjr0aX<2nl(44Pt;XB_O=gnw5+{vfTElm|7#efPc76=b*TXXmRtbziYQ9{~qQYotK zpzXoTIGZvse5)#oBhwww>j4x;&occr6pJjG_nt_PK#$4tw)=Q>+E3M= zH1vzO6%94T&>2m?&YJM-8fXpu?KlbJbV*(M>N-QKLZ6Lu+|T%D*ABPZNz&+}aVKZI z^e$3~jlxWi)8D_?I`qO*`)n3sq&SCHorIOkoM5TtuDPL3x|#9WoGbaLr0<)Ypp=WC z%zNXf=Gz{!vQsnQ=5!yb1Q#?xjbZd*1$m4#*vUAgd?P5pI-=_RoY!S>wI(*5u8fz= zOZmlXCnnR1v|@iqF^%0s56p3AN$o+&6>%xG>9Brr_0;n-?^P7Z3$TG|4K~_j+Kme5 zNEKeLTplj=Yo>^O$&mvIZLG0%2z+rs;rLR!p`y+142TKPzY`NZD|!}Dt^qS41?C!! 
z7>2CW2RI#9Tounc9SgK1oD?0GyX({IGQ3zd`Q+IPd0=|SOJGthpY`*qzklu+}!+fO~F4A>IPA-8twREpS73nf2{ ziKtD+O&_%vFiA3VV(oIHXG$tZ?5UI0D(vfiHf6O=`=RrqzFTKaQeTD5e6vnU-<{Gh z%hj?A1@ACAPtwI_bhd)s@w;*E_#gY`wTjO%UOrH|x{=ObHRj%fYpThO%Vy72I1 zfUgry)Wb%<41X^2SaOnU)8rGz8?@a?9u9r~A;U>_a?AeHNqE{PCV_T<7-o##nDNBC zi{7o+y%%%-GmzRQF?b+Ep*klYODP5?#C{)K8Hv7>7=*BhkpkOSWJ!XnA4W)@E(=>0 zqZ<}0!8iUlj+fMxDU2FaIJj;WFs+Ng43%Lr zfwfd6W$p1A6$;E6vS?Zn?stQg#Mo3teX2^4Yoh})vi4+o7Mq=$myBXw)f15@5J(D{ zjLe97^DI`yti`grqM(%&jy@E+kfCzv)3bsw%9_Iv+jJ%f5LMo?8OSH4)F7vw@to8W zi5?}e>h?e$+gPzS0&|%S2oQ$mnakOeL5kiZI9#VSqv-Q0|s% zw!frC$g%vewq5$w;*3uS_FcbwUl;qY@NQnh=jXy2y1JiYU)?a%xU>O!L;+*-8Zb)4 zE=(KdBuj(FIF_Xt&%V8UOvt$tl30#-mYO*9Go;Q zhCX?fXGysHht39EsHWPCc z{S+8xJ12q>wExYR`%PPO1{sMbb^)=RsfI<=N)Lxr2I`v}7dnzY<@hsX{#uetqbuas zmXl()ptRM&2OrJvS*{1d1PCz2kN!Z2Q~2TohGe&Ldiwm=`gktDJ`Je+xswKm&shKZ z_R1`G9J!g`wnxAdzaupOo73Qwewt0W!rX)S^=fcxw_?nD-M7H)Juo;J&XQs|n(^{_ zTm13M_$_l*WiA}Q1IwUIoK3sSwhG)>3t|TDMgoFd2-x-V4p>0vKo@*na`NTN7t85l zLSSSUa^3p{_8vGQ!B>mZsQD{c9T=C#a*K00y2Rs zL8o)RGIbbk5z$TIPU@xPUlTjTMU^QGjqducnqC-2~+v3G+yV+ z1dS{D(uP1w*>1s}B{?h(UTDI*L577kAHi1uLc$o9j=@H>t;yI>i4s-Q6X3VofbV~S z^Qu5M{pB?Zb}xdxODG2Qw&D75IbXjbfIBW*Vcg~j_o=|gJ%B7odwpod#Kbs{HNU~P z91NS0na#yZidyuU-9J`D-0b$e9N=tW_ zbV&&Uq96?t(gKReUC+7q_r8A}!Sg}(-p^WVt~ti|&RAbo*Vaz=+E!C!an%!)!E=LX znJ+X;oVsiFDaKSu3uSHtY^D|IJgKcPhL7bsKWBsK;BPiWeA?oWR)s-u;xhGGfErlxAHudT)RN{Kj(n&Oiy`lbzGA1;_lE!SWhZunS1)eV%@uZZ!?=>@FXM(> zZ#u-wIY^Y%{J)t#VZTnUHl{$Do?bvGE{*a6Z-lbR-60ToN*6(`2wd2}NKL-#qA8Hd zN)+Zo$M9CwDnc3w$V)0?VS7`#fWcCkH8R461Pp;hO-et3aGkiE=EeUV^bxa0E&P`4 zz)Dt`Xo0+eX-_5Npe7_}Wsc63s_B+Jn+Bb*Q_$Dh1b1+Guy}P_AM^pi(DdXsU1wxR zTiM>ORSypNh7<^6CGohUYX)_jr#Z1tEnq^tU?`m{ldG<~M+BkZd8QD~7eRQ*Wn zctVu7KYuAJQ!ZqPin~w2ZR~|}=X^{n6Yr1OdJ?QJv#$Lm%-_V^-Q|Vr8Qi9Xqp#O6e1eAR_XIgRdSc`S}R~K7}>;{i5mf4dAz_y$G{0|MYyh zkr8^E=8ZBxMuW<7bh|~&V;$)YhjApWT+HWvUssTPtmPgJsL~j|l+ZR$gdAHS;^#BD z)KD0w&#<_Fl=KROu(D2C?9uY5mqfnQWo~^8@~~Kv+K#jVs=QGG{fdW7xwvUUP!bt7`9u zRp%)`1yk(5OqSwYDNNFHru0R0|P}YLVqokbT5LQ!J>0UT%i&X 
zE!d(-1)PaMaN-{HUw}QO4{~O+JvUBEp~eAqcg#oP;F|-`p|Qa~$pzwnezdH(%H)3z z(tSM+$eRQ9rvEZ*VGfHakXq(j#~2?VI{i0353?sivsHqkTv(eeel_Z2ngh#=2 zjP+YLi;sG79WSXOPm8egwldCwR1|cmGbZ?CEO}f6l%i^`e z2B|9L6pyH20qQ^CDo!(+oiJQ&6Kzyk#K@&YTeR4nI>a$RM^gZH`jr_x?W zgiUHTnW!KP9w#0s)-(C|$#M_JHm7DTOivp)x5_P$-V$huKWu3uwKH-p*XTt1O4*k<7nSdsE{9#0cgZy=UDj z>H{uM)})Su=@%;n0LlVUrIM2dqGUsMB}tE^Bx#qbKq?z*&Qph+_DAg)C&v(CkDEyU zslkkn+GZ)QKu^Z38+v73QXPg(i>ijXP>vxxj*}yit%s?^>`=&{RJD*4HTX7d@w~m09PvPAk={ysK!}pDe~(zDSfH{0X??N z(yMu`TEiX+U&r>4E08h*4o~+!;8@Q%?En!x^P{gV>}8EFAcwY~`$|^&QUDMx7QTAe z{~%Dngq_cVTOkG{TdQvw)|f4ES5HzzO zGX|8L1HH)qii$RO4I_bS`8gwT>=Jv>$2w4A`Qn~e?_8gI%A3bf$!|TMv2Dki!T(B& zx58L-VG+gf6>e^2CGz_cny7@pDwL{>ahhsD=a`0vO0~@x4i5#bGHe9W%%WMNqJBln zeZw&wA}lF4=WVLx{|)bC6qj#rX-=OWavMc#(R?(I%T;>6aoO7e>o*VoM+mFN_#5HT zV=_EyZ3Z(hcWe0RDLQfzQLl`NhD3(s^I@=x{^(mpAl-&kx9s}1Jy%}d$#8O5jh40- ztgg3#?D5~v_-`l0+q=mM#S=w$(S^Ou=?F6i``uN&rC5)gj4HNXXvU4cG<$f)BERuLJywE2>)zUR3I;J9cwK^q{f1bCZEidU+S9cH*bAGi!58e{XYWrkz4Y)nJ@^E;8CRG0MAlBO14Xw>M9!6&u$7$6z>(b>hhk|<47EF& zr5u26*1H-y&nebe^n9)N2dELjcXan!fwV8@>)Y~N=t4bCRxs@8;08C;M+TPv5{uD& z@RUJMg07|r`rYld5)>UYF)^vLG0@isk<73@VC!yhnr8|DMq;BzTP*M+(}X6}d_&)Z z9ZV^dU|YM{c>xD4Z!ok{E&CQ@^x0WkV!jLgE-^_>8|5zdCcsWB0rvL zs_L~TziYT*_hC&-fUj57%HIW7x6?{=0wQ-?EpKmS1hTvXGr( zc{hpNbu+xDzeTxZ-pB1IjaL1~hNh7^Rv$S?9qFhjjFYcX3KkZ0*Mu?UrowvSraYGw z{nNnVjt>KCXh3?HiU0MM@eiY!nH+T+Urr@+xTI&t<1nFEz`yB)~s8_`72A04D01`TqP{!%+<$EbNq9hcl zPZhe%{4R^?g%{%S!6!XSnYp2FVk23FoXIq^rQc=Mf1j!5r79d6lQmMWyVoAt=BLUchUO0hw?4-!p7*@KxeJmlWl%MPavgQ_8g6-q|}sTAmL&c zrgQ`&_B>ucueP*pmAbv{i(sZ4DwvOt_*#m>{lGR2lR(mi`s2y*c+7nLK>K*7u+EdZ zv{6rS$#9-pIB9EOXPsF z(D;2VFz`s7d`n=>2f6W@Hg!8)P6?fqT;&IowH7_1KbgJ)qv$3u(Vn%ql`W&O&v`C| zf&*t>GE8(iO6Da-A*iu_iM|5@74LJxM|tF0}hk*Jn!j# zp!Oy~N`IICNoz*VxtmNyLs3D2bm;GPYoNwMR>TmuT&f~Vdd5_uxU8ML$tWiKt_?%I zoD?;mEH$rGnz{_J^!};^5dy(wH#Aud5t%owo}Zm5pE|kYMNrJdwx4BD`cm3F(q(;> z7HT$~SWNw`3=KW!1NM`x;<3I37h6?va*+HbS`DGmgocf z)5pqFzkTEdxPylwx3l-O2s87jroI7DoIGSz$_qJ$l5^noQ>HBG+7WpX5;AM5AO4+3 
z{!e+sJHwd~186h@i|iybiO8^p*0xQBtJ@ZMXp7{NeULHd~7ld@X_PPQkw= zfPv~bh+^W-hdQ#IPl~kJT2k@Thr@`Dv`bqjU8VLr`YqCAC12b6W1&8YC1e=CJPdb) z0N0ayO3TGMhw;$)kF29?MOsh&Oq4Et9{Xbm9I_rC|E6s!{E{Dz8_Xo zr<2`Ou?89RdV1I-lf%BITa&xA&OVoS`#*csIhosicgRT zOQb;{vZ2!)C2#wSxGA>$b~7KUyEGocsF`Gkv*b~6k5*XB(Y=W&@*O3n!s zOpNdXV-uoJBKRZpW6cFogjNo20OusbegL0OZ=9@Xb~=Ct zEI5&7utD}osI06B1xd#QkXPPQC;#J-F!EPZmsnlHp!Me%*3=m zqEK1IJ!2$6@KQACxp|1@-d*( zmT~trl~FBZ$;QEpTIu|;xqeW3SNrJzj^{VGHd^w;KfTv?F!T@Wq9A++&cVrv4xghh z1OT-IMJ06hT7li0_Ud@ml({@fp6=#$?fy6z_m{p{(9OoHc^%io*+Nusf>9~Jb7oIM z0R;o7S$k@wI7q#J&(#!&qRRclvWJ+AaAt7?_$N6;8XIWjpP=t5XTfO(LwKV@$2M>Z zfU2PQJBR0!SBZ`o3PXk@)C70XCK=3H0X5p0Q zgn{6#&;5b|e1)OUK)-Qjpb$^Ks3QLqpU`DnVxnQJ*e=5Q$5-u4;+LrzNLWMibZEzX zJi#JJ$F{_MDd(tJE_C8+>Wa4!i*Edh9jQcM`#^btgg}?3949 zMILnA8vP{07Lqp4gujDut7uy=l+FT9yl0~v?CgW>o?E=sw}o=A4v5q|Ms1QLm^h8f z9m&^N_gN5}7TreX^{#MqMFu3Fw%>%_($(f8{R}Sl7#&J79L*`x{MlqUg$(0Ph&RZl zxyAUK|HSBpPjm)&-M|~xh1KO;m57V{u?pSDLsi_6DLgpWKafs$w>c;PIVg9Mv6qCU z`c20B)GMag6K2%2H&__{KjqfXSOUbIud_er-k4B-Ot58S zKkV!?2ZS=(ss(}clBs--p4cK~n(YR# zxehq=|A1!_l$2%FD8U~fKVy_E?Xz!xX!-WX1}uP04s_cm0*(NBCgNlgA1M!*_>A9u zD5KOURgxiyR%F7~dEU7EtPfr5W3>eeM(~aW%I`PhG;Fs;JJ(h_4lD46aO1RD@bSvRnq>w&adWeOY zloLU6nnrn(P(P~q(!SnqX!Src#$T0layqFGx`5FkzxJ)xtOE`J zg9k-+i7e997lO?fb~7mN&0?2f%httSB6x&dZ);NNkDV7=7}yku8Bk({HAU=%xOx;v z2jrwIUzCvi{?j&%$HMqai^tSjM9%v`CDSt@|6TQCZ%fU(SH7RJ5z+-(_9hlRSe8sE znsd395{p*JsP6Ve1sV!%HQ4=_MYwx8^=(+sV!p5a z(<|>1NEU$Q{2TZhA~ziDx)rUlg6qn4 zl9NbXn^+qiHr9>aEVn>Eu`#?P(uOZk7+mI6BijH9aPU#0$PiLJg=L&=%**+m|9Jr@ z8n+OuNoV)OeT^@>U^D%yHm%tx#<*%mm&~_PHmV(8+SQBBSpuIHR19qiHofH5MS^`6 zHq!Ym7s34_j7u^K!#|qp+d#d3r_t$S=kw$6kX>zUnjM}#T-Z4|r0{;VS!p)WM$^079)!dW#cs9@9#C_)z z`GIbsb~Vaf!{Hn{b}XDC@{w30?hIbMrekZ|4r=9b4kX5ED0A6y&NUp7nK(^`Qjvxt zDdt}C_TwoEL@WZV$_mtT;n&9W?m7=K8COb=bzy2hj&yfU9oeLKAx58wwiL>oP~kZR zB+aIsJ=BtXpdkgkwmEN1-FRU?ADLx>{SuVjA|jbUVcP- zJA*pU(ptc|)1PTpM*M5ag5RfPJ~-v<-89qZVkDZHk!!#RseDpVO@NM^7|-N5$>C4i zZm*YyksRewj*!nW3W5J+=!TUndi 
z*85DVH8>#hFvo06VO%-JqmB(R%OBwncKZ;Q?mfBk1ljqABSt3de<3;+T}tew2?xo= z>Sw92zc+a~%B5SEM+)xXIB;9*y908TECg8+`0j(Zn3L1+74%F)FaK5}vh{@fb_-7Y4l}OTV_Z|=ybEN}5Mua{^B|4%@X3qcYHOv0 zzfVWC38Dw-=ZuM^K+ov=i~G?1768+@!RTJ*HC7`5jp8f3vYuE_k`4T^C&a&A&)Zv# z50DIb5AE?zSZ?B=3Olf$z|iKdFP}G8#~PN}HgB7P_cO;avU3o;<8_cjeIMyDGD{kk zupfy&9uJso*Xq>$ zlEmQHQA>R7BaX@PF(3~ubys3AZf2Zu<+Czc|0_ZLVufCcOl0XM`zepDL?OY7C+-SF z_(>2kP*d`^?bV^)RWmJqRD3x zB|31((2Yn#F@_qdTA7}wq^96 zsNbr1DAr7;TnC4bQ2PzCb&WOg0|)F*^xf;NyYp-tFgQvQVR1g7Q8s;Sd8Mq&0`Vql~G$%CUoH_yT6 zP)8fcZ>K$XfH~wfL{aZ#+=+_IC!k%8dCW_;8%HkyBTaEx&2pV}tUJg-(&3ZgCMaA} z;nDjc5V)Ud$qe%`92O(n+QUkmm=+X8R`)kSZx>FgC4W>QKHiLPFKzAF6PUl@haX8K5R375xNgA3q~jfKs-384fV0X9Mq^# zie~y| zH9p#Fh!aYiN4l~ShV$B_jvLAHbTLpk*A5(dQu@1M5_1v9{-wd1bTWHmf}EwEt1SIz z_o;i}XC9nhY%=nXSqQNcqb3*rZQd^C{?_99HtG1HTiZbFUA#HYR7-#33;t8_z%{vQ zuD?%&-00Pe8B7J@10R|fJ={*wIBhrAXZRN}wqy}qHY@ju6e=E8he3N{avqjD1TSSTg5yB7(qTKbNe6SW=q+1ljW^x{@Bcgn}Y& z;ra?HW23ML$>qHSO(D*xIDj-j%J3Oz;|cO=*T+U=kk#}5$)02&%~GqPHef_76RXZ~ z9aj>LlvFZ$^pH<|^%@H7RVm6@CPtn~Mf0HA+`6Glo+q(9*(x%4W5(8IDUzlGFA8k@ zVKh}D^>kLe?|@TjkX?vn|jQhiyjUa6x-nh#ST0VTe_*R!~|<@dDB)*FGGA zzZh_->)E~ivaBZt)-J@540-+nB;dP&DIqZUAprqY#x-}-;=dnG^sxc>1t+(AfP8`^ z(&^iiR8V?tLH`2qkb8ng+!i2o^#L0VkM$6y>soJ*XDkZV`m_b9Lo!v?__>`XTC{B! 
z7oV<$zL}2yN`ZRPcS(W(t+&;G*rjCEHX_;1!vPWDnkUe=( zZL8jia!jDtWtXeUqokZ6t#=>u5dxI*zbHM-l~I=pekHMJ0FCIS?|pRcW8NkBZ|M$tu|N7z6?mfHC#aS7NPpa$)A^zfR~+XD7xwQp|0fKmwA zjk4PInnAb~C@d(T*RB@u90b{*OCcNkydl;ULfmx`)eOIEc_+x z?VCiG+71jL5KX>r0K(g2hkV~RpphNBt+72oQBe31QLQ{0GWA?Ni<|avLT;bb133oE zQ1XfmY8V9kD17yKQ!!^N5ZDd@K3P*;*{R_#&d0IgIgN+@tErOA51sT5v1aNrNTx5AGFAXmSI6wd-LF8NH!%}T{Q7-0 zv9g1s;3@uT$}{#9w9wYrZLhPaiT0}X4xJ!8g!6>1r=S=v*z}&> zq1eHI#G%pk9?a|k?-Md8=QJ(yZUfdVOf#RK|DBJN6xpO@i>G3jJ z+|L^t7)RoGYjs}}a%^>=rXp#3)r3lQYtiSDVpj z@!vHtA!bN~Pt1@Yhjkm|Z;2DnGa-?C&` zm#&*B`MMFcY?Q}wM(aP@6-MHQr`wxh|4N7=4rt6NsNoIS3y7eZZBuiRFF!NXRv5J# zSv@Ff)NMgORq(R1!y)b1jjB#S!Yb=WM6te<8#03PykJMAcc2(LCU_kc%c#Ub@Y~Ca z6p^7(J`*p(^A1x?L6I)Ed}ZtQBX1Mat~1fo#!wpr8UbP;xquwS%=m<*s3)jn1z9uo z(YKTG#&%B>Z8E5KB3ZW2#h`r;bR$$<-hncfpUoIzro9Am0~9YyeEs6bX?YKHe+OXl zw^tof!1o7fZcyN1Ehb_)76fP3LCvQKih6+BO>MsV2?!H|gy?j}H7w-w+Qw9hW=rUg z(Zdlij7hr5kb4(Cmd4E1ivRb<`Ir?Vy zQbJ14<|M}XXw@ijm7=o2!3GciXFQD=nOq4Q!Xpu(?7Q;UK^$Q)N>GKMHzV}^UKLMk zMl9FPi#RP5rhBzHvNa*>T(wy)O8TIgrjLJVK(uKuYq?1o#n@XoRt9N>WvOZ#z?f@Y zWkWqw%OtR|IN1HSXD)sibiNKrXpreGiTJ9F%fp_wB)=B>{PK9}$O%)R0`Gcq^6u-S(|uAOL1qIR@0=a34NLB(0Na8nXAi+=}az<+X7IG(_^+RIjHaTv*~Y}3`4w&q<0?^GsVsmOm!AG&#sL=#@r*osM#A^L7VqHR3O&a_g$mF+ZL|Lz;q&kD1N-R=GSrsiE$r1r#s6Tf-UK1FQxdD$zvErC;7Lvc$FYf&p#i>^a5*5$DM!i+|iW+hw9jyEV7j6*2dTmPyS4ib(6X6uya!r`&GuLdM5=}muMp@4Gg{V|>>KcfasyYve@J3* zbLW$s2C<4f?Xre{aFod%cQyR-q+y?-P0@JlNEQ~%9ELs`!tE@UzM5&9eiw}c?eL#S z<|`b*Q`M}<49wLbFPMA7Y#umYJ%F*WDAd=ZN1{)`IJjhGE>O5Rkw`EjdGOTvUZAx8 z&MTdhw_Qw0OZb?L#x;(r&*vb9^Meo1q#PDK%vp$zWA^KhIM~;6+1WW|o9%oIamjh(JDhI|;cYenq`7?QTG=L($}Wv% zTNZDE3Ro>iAc7Hen*I!>Pr(cWNy#lp^@UU<2*6ujjHtZ#@2TB_@^6qvDdN3r38dfN zD+&O~grLX(_OumivNEO!F$ZFYt=oZW!7B;%Q@I_cDWN)Ml5b`RB(qNE-~Hp~Y&6x@ zjsR49h=!6S;6ef#L(fjUb3x4L?TdfE6Kdradw*vop7K)&7~83|q>vWzP$@*CW5)2O z81xmP8wjD$hx4P*+Y>(Io=PVbXr1GK5Vg09z;I@_&bnPEKeDK!*E2(;F1l%H1rfL3}+1FS(K4ZKYpbtdI4hXZqHj)y%Tn6?bp*RTqjXQ zyKXQJHW?Ziicz1Ho%(T8xUICq6)v}bZQnj)y+figUh}kP)3ENMS{yp~p@bQo5Q!t|;4*?}S 
zXU09B0#AP)q6?T;L`2*nr1@oUHt2_s$U{iUnX2yCn z5oc~l&jKhRTD|1WIx3Ph7#@NEF0(w30W8P0UI~ENiD9F}w*i@LXubo;ej}OBm=)}R zdKJPSxW%o8Tyi?H0jcEjGqgA1xo|GI%n8^-{#qce*z1;(jb$t80R0YgfkuU-#tqhWn=r8%< zd70U}lhG-qmdQ6rB12Ifw(=5Z!i+NcX8tRgZZlE11`o&H%WD;Ls&l{k7)vNG`8;eS zFE~xxV1|)o4^zozedURZlZhutm2CjIc?kH>^Dbs`1l>#)4;1FnFGnf8al?RgS4}On z^ydaL#Dez-0v>{IJc4IhUmh&rfI$gh?Ss6(@tD;3evj2k4d0Agqw^ z*$|;8V1U?wFpq}&U$B;j!5-JxD6D%greZ|vx6T_VxF6qx`$klU0ZthH26d&x&Ijv+ zysFh|s0oooirRbc`lPiEySQMRh2LcXUBS+cfW_556-`4_sV)xIL+{FIG$^9M`^?6D zXxgl7qP3pQyTqEzj11-XIZwrXuB(Wi^)`==swO-(F9D@=KX={t#b@+xLdxz#VFP@T zmya!um`oTNCplmX9{dQo1%l5}#lC4#Ow_=@<3CM7JPKmwJT@czY+4ic04GDc*C#-P zUHa3+)R2gnc&D3z30y-V4fTNM_3PIZlp$>(Tm{M5^WW=0tqrAmE$$FP;1s0~oIgK5 zS7a?@V`1rA`*$Vuck|JQvD=dws3#82)gj>ZgHI%$C$jJ>Ftz}daC7bcpMQ_xHiamZ z#h#J67k43B;kaSRD$wHy^<0Iu>b+mbvWzHF=0M2l6GYK3PE>SxjGqzKTg>q@Eu}jU zu!h}@Bw-TqD`^`C6J@l>&@kA;(H^0AWl*Y=$}{MV3zza@F&?R7{NBc@A3ur+_o1Y> z4b{aO>g0$f%uq1)B69uB1j`P^`o(fqG}OSLCsAtG`eqvc-$OOY-wA(F2qz*ie|0c& zMrs)o%9Pad77wyO!{)%TwM|q~VxyS$daUfNxMejp21-O;k{IFmn4Tvf`Uu9_Zh^-q ze__iU@J_&Z8Q+&{LfPSm;NIOXbDZcyPe>4Pwl%ufeAdI&2sG;_mY^;)_Wd|*4*^te zfImIe-kdy=rDVP>t=3SXS02=n+`(6_5dF=mD?rExN|-=M4=|qQMIm_JjK$nZLWtDY z6{#&3@xCf)wsyh0`UjIKZ%4<@ZIk8J z;}}cAE`7R~EdMT&C_HsYNyT7zT!Gvc<`PJoY`(g4-0hqBk5I(ATs`@mXCg(MreO=4 z`I_mAoszNimitfbC)Vj+_eak{UVq^!vB$x(ZXv<4t@UT^S*D{)qlrpliKH0;SJgy7 zwpwxt(+u2Cb9 zjzN<`vEwdOi4X z$tGKBSwx9bN(fG@YoDe)na%)z22l6-GdBrTA{?%nYv&K0RjJVk} zdWQ*XL1YyjmWt!jDC2Y^C%^s@h3mg1L<|OdsR^6=1ZbL3Ut_am`Ut2Xlt)j?*6JA$ zWp_T8v*v@j6F~;)%5MXA$@qa&um3+BaS@n8#Xjn!j9Lz*vDw+3As{utu6|;04J&yHkI&cv)&+C8Bu zLeyYa&bxCfm(u1&c0>`OX?z4q+Cx@*im}DNLi$0Jt}d{c18!&M;@K^6WO3Bx8u7VE za=)4ySsJ5QOKorZ-1XEaEjhUI!q0DsLG#4XRo!EH;fg;TSA86mpUINd75GgV0K?)g~-hIB;nTw;MZ>Y)>*CJT1c$fzltk zr!xFL`Nd3>fG3XjDT@x%8oG!W)8c^4G_bWhA*Y!^PDTivUupr*1y;M~FYD}m!FU?b zcC36fo(;lBkSK~lSV854NPenhnhy{DxVuv|U%Pj&nqPzdy zQ3Kp9K#v?8z6}P>vUB&p5WyX!p}Zl$@lW-qp+&$m4hSn^p!iJ!e}@fL$3@FCutS4b zmEq@(RcN{xDASmV8}fbTRN4Vc;&8rIV|7?CB02E*g0u2F$?qfHaDwKT$SLpe# 
zC*Te;030l5xodyRv`0cgz;^@K)Rb=@hFV$CLSt1-pZBzAk4S{vctGW|8%$42D;xs! zz4$Z#j{NSeRe}{jY_d}1CW#)fKuA{TD8KmQ-vzdZi@lkr`=y_{{GUU2bh7(@d!qar zgC#0y5G4tszqEt@fBs+S6-DTu%YJBbk|nSgK^R9~l&?^f6WmANUfw~4GPv5-E(rsC z4g#9p&}3{p&o2(bhwb1RfdBD>A(XKPb0{JJz0d|I=a6?|THMhD#NNO?3;GT;Weze& z=dA`~t;sbQRcm%D$j|yxH5lQ3ziY<-pBErS84WlqzI6FN0m@Gdvwim~iQB5#ZhhwT zH~zdlQfZIU2os1oPVA~;-sXj_H`LG&m+%zKguiV6!-ThXw4Bishcx)?`OU-1ht0CD zHA82s6@ncjP;AVKaB{ZenZi9YGtmeglVkVr44tRO`@Xy%XZWJZwLW2MVNW{81DE8) z`qy&UofZ04+#-7EJuz*7M|N@1(D)eL56|#XI{!L!T z>bEI$ZDADED=7UiBmCpu_*km;ZUw2x;d=PA|8BOyMhOak4-XH6`8{BOk*k+Zpn!lt z0QkVpmOlI=21jY7_btSaf#8r&!_L#XrP@H!Q2b(&FH6LmN$~miJfAKwRt@Uy1OV>E zEI1>&JD8x1vJbRnUS3@-4=xKlL6?ruKaFQk z@}@;Vkq#Pd1s?bzkY8(XUSNZ!XNlvzdmkBz0>NJ2=lhXEr4tA?^SQ8}cGhlz&1VEy z+M$rIJRJdmtUP*T?n3~kg&kCrVFAV$37FM{0d+M4#1Y_2Xei0pZ)g8r1<(*ZSg2}n zUFL<3Nl-rzf@y-l@Mw66?EF8wy*@R=I%)nD zd`{6{GM>g_jwY}YtShvHz?^ZB?NJ;iv1Lr|4RMZp)JzI*-n@@PNd%9x9w^h8Y&QnP z6pRt!Y_&K^|CnXlL)oV+XjdV!9@%oWBWe#`$RB5rp^K&VQeG4^3w!a&FeB?+omf@u zQZP=oX);!}>DQNMDUmG6j!Z1#Oc6ua*N4pWO9jp^M7~lBk=iNwz9gBf3A^7=CK`gS zSJ1pY=n}iIus}lxdf*Uf5hBz8(BfuQ3GHKKD5!V}%8P>5R+0oB(1!;Dj0#@d|NE~X zu5<Kj8astQq(T9zd-Evmm#C>;%mOf`%AC z$Mh4-?D(?Bz&-TeuL7=c-^@i|m|qlK7C(R{bP>473j?`Yr_OD#)VvXrRW*E-9kJ=Q_svD`D zD7-MEaX(YCJbKTvNfbSTf?0rs{AQVT-&3|k^_4*ZvN|8OT9#?+XIj3v!<+9KT@2gg zM})QmDygF)T@+L_CrqmKs;IUM{PCuWu95Ri(nI004pQ!dT;Eicmf6H2+ct81bhd4B zhk{WOtk!zVe{**s#E?VBKzy*3JIhr9I;4|vvV|fL3#A# z8!TM7;l?A*B-ar}Dbp*sD)v&fpOKoxNUGsX%2caGXE}I(OT8$zl+6YfCXbC_c1q(K z1C!(aw?b$FO)XzB)*>04Lo*P(v?yWGDMLT`W6Z^hElv88>szKy8`a@sgS7qboqHG} zvo`A`6if0p2d3K5*u-pl5Ko_FXqc#)Z$ZEQ>+r(sh#ZI*5}rhT)pD{Cd-o&7yIzV=!e~Mpx{HTtY%Ojnfx6M1!I( zCnLie-_j$!qvuqCx^EW6aKT^eKzc!7Zq5i=J>!V|Y@aT)yp#pRq7$ynBPlAev z&uF@;W9g@hV-G!l1bM0GzOn47`R^q`A5;k;I)$S#`#ohs^AMA%XIx$!qo`SUM^5-$ z*0Q=fQ>@U`z?jfibq2iHh?E!@d(=pzJz0|RqHX0dslW*@tra=Lb76aSF#_BSEXYvFj^}F>DABp_0c;@g_?NqS`L<~Kc;3-640HQ3|VWB z{j&NVHBd5mV5;!{9=O8TNFh~z8Fs!PT++ZI=`YzB-)#CT_JvNlny3;LUSRBglN2I$ 
zT94J?lqTm#E2ITDixf<_fMeu$cnZaVsK!^@U*uUng}Sd%+i(BSh&D^0MLnWWmHpoM z)b|wWL(82y#-3jsVwPBAyZedx6R*3fr6JLv`W$KU6?`A}1^U-k3xRL#9s34m9->5` zpke38;E)tazM)~zGbLMoDZF_`>Jv|K`LsCdC%iU8m7)CqJyS&5%7_e7l?72thD^9| zsc0Nr3V+>?5;J*SP0Rm#x1#=KgodP`u?IXXeLzy z9n&q>aATwEYazoWBdp@rI5scZ&T&G-ue7ee560x)e)h6jY%2E|vJ7ecHIH}6IxJq2 z9E1K+B`kqsCh6Niz;6B>CP|B&j0!uNtis4`_A1(JAmv5?rw?^oOC93}H0##!{Ly3g zKUGFA|L@8!lqHCkUzwo1v?U8!=EUOS(XKdIWW6T{O;k|AfG>C(368KMh3Ovfk|Y%i zQYzZ{a$2(}vJlC0!=z~nXknJ_IhL8l-?9lb+nqzy(ur!&ids6loUs^Wid_C7`ZbDZ zelgoxsdJ#`UYG=C>ZRy3prgo=FV3zIm5tWtCQ2F47)LV59rl%3N%GAgb!Q$2YciYAlc1q?}G zZT`3K#IqQkOR1aKKbr8FoTGf;C{n6Gi8!jzla}LJaXFqW`Sll9NewZ{Y8*foc@qBP ziRCRNH_TIh3+;z!brU8UDN0U^bTVxLI(GrH%)GZc{OFvjZ8PO}Qdb=E1xCYoBNq8! zPDV>W@ITri)+pc@4<9-D%BxswQ)>L~`HUD(m&{7_W#ihsuk;@7CsM0XWU6m$!p@g8 z&6Z>UB*(((lsGUa$l|yNH{rH*AZ*vbOn7Iu=d{YX9cHUC<8^X7=JMiAWj9-!(Es;o zDD)Dbd;5NueVQOcf(tizJ(Z3s!-P90XrR%42gi!9)O(^&c0AIu9TIaU{R2^-o-AHv zmEawF7}Y)KzH;Isv4%Wr-^V6iS{=V83D79S+ zYcwMu6<3Y6eP@$w^lV<(t(jH~BL5!7(P{qpB8oOE z6JA8^p~oj+F#N*ll1TaD$Id&;@d@lux>IKsrqQ_N;)pkMEoxM zq^($e&)mfxVP+aXQ)~rDu+T#l<%HA9-%@laduW=9S8)ZjisVH+7h!VU-`m`27*fe- z-zS$15bz8?{Qn;(20G?0y){lZHu4Id_iKF|dzO<&rpee}vS*RpI5~1_MmbikJ(0(< zUk}Q2RgWfkQ5{Ft!H?5E)g!&(e1MMHPC_J)Byn)IPwAJaD{I6M8JLX8NtQmnF5qRB zWo1t!j=Lz@iY<$S24i7^+H$fAr7?#*FAGc<%WG60Gt<}!IbPA=Vi0h@fJX?+2)*z+ zo>1g&m*f^B!1UBw360R8lcg3<)HxN}u+aU#2QT>5<@??F?Gc&;9iIf#8t4Q2jdzKv za+C7EV=!fg)L&w6b;SM{(>Iw>3F7)UAFEznJI;QYdeJP$87VV?#8^g0;DL#nO@`&= zz{vj!y<_r#VOw>QL!ePPa_8SsCoOhXcAcdkD=d3E5z8{aBM4FRHN?FVBSoa{jTAbw z7j>?kZ0pMyb7v-dR-jKq1Nv5L9sf=-j7`>VB|2+=`=wp^daV6m;sowAJ z!x=w&r(vm%YpWH>V+>UsdvD7{C^CwBfDiVmiOC3>vg6XANd2~R2L9F{d?T`SZ zRCY0@zS_!OINIBXbNylBHm>~4Ytgi=6-YU2gEX9~&Y+~>hG*nPTIfOKNrGyFTuhme z#fk31ADuQRSKYy5jC#Q))Rcxdc`RX-!xG948^GxMDo?q?iHdoG5HUu3IIZZr`i$-g zp=#6ro(DEWK#`?s5lv0sgNTDoYW+jczB)FQ7n+I!<=eVm!U1%gjvIj^pRvTVqz zzT@U8lZeA}omK;uL`eyK%=-(SGCE}1-BFUcZ#P?IKep<-pxX;R$z_NbwfuY?oRHxi zHE4xKr%g2ELlm{i6w)@2aaH*tF;T&Crd@`G=92L>LxjA5-nB?devij>#~vL&iDK?F 
zIbD^{YkJ0HB2}+7m2uRI|2sCls7wqH*1xH<3esamG)750J^E$5ko~$| z^hcc7|B90no~ux7(39pIbtE`5ev2iDVCtZ@aC|N}Gw!5Hv#aCSSXqO)-e81}{n2jTN?&=KlRB6;=1$pQ7C%1Cz|EzJrItngo-UEmAGc+*86T9=_2<+_GQU{<_xllPhfkqXT0R$KHeFz_RYAmmN7sqzOA+_2 z+%mV+;qX3Ffv>X!=J;L+YMd6ZDo^@+F;h5BO1@%xwKA4kZ_I%8mTPh{T3KQ&YSq=s z4`HgRWI(1%_0FZ;DtAX3Ee)4+pQzC|x7wYNRk@=Bk1%J|>WzDfw}M$5!*AzZ%)oaZ zvbd(#)TesFsyo?=$Mz&%&q#)e{`ZN;vyk4zjbo*x6XtH6h|uEXp~HNIWcGDgT&r)xrkU(MOW8~Yz*Y;(2uE2_eQW!e>S8x zbePljmdM$jjpG(x-Xgxg3R*|TY1!)&k$(z3jGieNcnT!(d(=&%pBmf=v2mLRpQoxg z+Tjf0Vox6CbfkU^Qeo=i6kX47NBeo!sXrlOr_4}r6Sp6uYuy8_7Gp&3bWCI}0ayJv zf^N5XR(G^~o6JVQ{sDzpu-mh@7j9mPIyEo83voXTiVM%q>~V>=#DB9`0Do5h9>7EK z0-CkXd4>RjO}IpMoOFo1)5auY}JdK8(9< zn`n;8t9NVat1C7ct4eCai3qpQ@cmyF>n-GqI^8>8l_cm~+ICvJ{VDX_!`Gs@o!JZt z)a)F@qVWzk#VH%JF0EDC`DH0n$JdE-Q!(FOBW}U3>F&_H?L+w5{etMjXzLMu(u-o7 z%mJ$WjuDqkM|hr_-iUx7;*%(yoSjb)Rx`{lGP6mMN6pK4M+%pqX+nMVtMQdxWMW$h zBa?KkehF8tklhD?oW7bZ9hGZ8+)O#kzvCuex6t3H(IBRNZomI4P2DNW$k#n-lsa=( zdTmw#JD$;msdHP~txF5*g2~e8bq-S}x9EDQO6dNvFe;tW;3V2XZ!YCbjvp^>Z+U#^ z!PAjw%8xkAX7QqtPfhbu6MZW-CST(3PHMuecjcJ(Z-YmU#1?faPD#M4@Nis#Dd)~% z7~ek_2xpXt#s->aC$t)>AMfgb-WdyA<~=~K@c`Ez(zSq~bI3>s6`2w=RUuON<|*`c z;7%~_g7a09-q@Bn-CvzUk%~>%@+NxS6!KiclTdWTS~nI7uQem zvs?LC=wa8pmgsbTKfO6yD}8@<<{ha@*H}J5Tgf5roTV|9kZn3M6YC9~KAv~hF$?PN zJl@ANSH`ir_ZHl{p0w>)YyEQW+GBz@`3hpT_Ajy?ytF%SOn#YOgFBfuBPwe}Ou1C} zQtVsD<~v5;{ULn2m}IOUVSPhx4?L7G21RZQkabyONDg+!s)tD^-b@tnM1nezkgE$w zJVRpZ%r60G{sgD09(sgeXzT`@x(7%?-zvtDJB4~Z9?_9BfHO3fQFPDTpJ~u?zEF&S zp3ZA(`C)``y(+VTwQTR?D2Jg;y8N}5v|Fqh!=+|}+y|)Q)-nwSXc%WS#ku!Tdd`(n zuH(Am(%;o3d3%5+H@1wiCi#6PNM9X%=ui-$fJi4CkRF4g)IT18wel#-F@a^~2}snB<{gVm2aq?7OXjCZ z>Y`*#tg6|qXkK$GxcPAH;CZHqOv=F%nrN3ZHtZs2YB_1t0sJrB;b#stA$M>lUJg5)StIbTiH^)=`nWGnz zSid*eTC+c;0H;XNG4+lcr4z6Zw867 zh153W8#!2Lyg^1rX4oDm2`S`>z7ns#d;L2}3R*=129QW`BqRywP+B+T|64notlD%7 zzbQg=%fhyc-fIp%tZAf<-n;NNjSQ}mHaDINV# z7j*#Mnm@nC>47))m6Cx~&y$a>18IIG9_o#k7zZ3on&-G}2IeT%GBRIeP-4B!#Lk=u zTx_YVJ|rec6H1v@4bkFIJ>_auVQX7 
z#2x!ziQ6xokD@K4Z0PnR*I)5;Yr31%pR^#B^mB#LfdKQ|VnTm!x@J%8Y*2?OGkfy> zrsSiyE&AW|?L!revz|XHF;|s6RXo-a`_6A!6?5c`8H4dJE&lV<9}x199EBuyo6rvmb#jhipG?pIk7J3^UCnVn;K$wZAo*iDK52 zulehxdWPyDl6KR`;(E!v9v&B=0-Yg>23`qy#HZ&>ra;4d#qD!R}~ZV5qC3 z6AD?5Rv@Ch7;z5_(|f1C2FWmt4}-3}@5@(1XL}7XPk=1sehVlXVD_SQQ~AHuVZ&AD zLLKVY;r31EPL1>53aTfROF4zCb^?NYbLDe?2PAH(9%c?dM!gvmXGXPUp{Ut=aS%(a z&7m@UuSqq)CE4YzVxz96q`ps~ot1%mDCY6nOVng#wC-zmk|zR8IJYyN_78v0_TY?q zv9ux^Q>UyjN{heamP+@H=f~}uoYjhbrRT9s$+;>--^ACS*Wrln`lapPP99H77JQq> zr+b(ngBMPSO-4zHsptQA76JJq;lj)Gcu3jWq2b!kFsO{%p^NWo0W&4ql(Sy}%}7>!_EV_Em*=c*d816Sr6(Cr4vw zaZhcPpT8mVuwIOi$E!mSClJT9rA^)c`kU)9(FZ%~vlA6xosKkWa?75vGON5|B18)? zVP#azZJB)dpa#|R#;Et5OTLwDzQpWSKIp_@lahw8*Oq_c06rul@nvd}a-A0(oADxo zY^P2b!y&N@a9>nALY!rL4F5H$WB)MV@+%T1G?=P)ohl{oO}q#mrwmIIPTo?l*nkN0 z?xl_|(;}XxF|OC><+Zk{UMmt)EY9Z4k?E{A3|rzv#*)NHUg!Mu>&Ne1|AUxZGg=I$ z!F@P2zMnu{^T@$-*g z0YR$%tvQyE<;<{0K?XJFGNe-hZ?VY*<#~0l_X?ijdr|Zt1Q9@*sa7R`&Kwc;g(cYn zbH(tz6(%6xxsfRdhE6iu!MtM~3;!nOR*e9DfUZxzmXjm->fjl|m>k z_TKe;j1+uKW^|0=8w_(!)fFC0A9zhVrqS}Z?=kg9*I-O(8%BYK=+bG+bycNqcy`siJ4Ppb$M*+X z6sd|4ev}_!AzP#BBlYk8o!LIGeS^k6grUf>v2iO_=P85DmUEF=31+pvFk-*uDglQ| z2jr86D$)8Wm{I>X7fDI$SKTFAOlJ%=%Fxoo2ksXGc|f^Moq{B}Q+RxXa(u@j4rkoU#>ic4E{dSZXX@s&Nv z;%atxeISF9+oaolx*yqm#kL(nZcl`6r12|)C>vYbQ}a3Q8ACRF|FmGX#lU`!$oe;H$ugyyt%i3Qr|a!A@=d@6A?`bdL#dfQN8n{>f;=%ia7wXP~>20grYPkDV>%xlyudE#)2W zAOEifFha9boy(L_=M+Q_e@I#-oB~lh!`AW{QYt9HdyQ4Q!ALK@*u}1B<+&zCfpffYfNz0;b8#zf z`+2|o!E5!00}a@b(E<_gO%f(kYX#Fft68%OPwlyX?aQGH_#H4#e>2n|Z(u4L>-s}V z+WBRnLw(nf<%!i?mi#y6Z|osT14Pu!cTT*@S?3Z$n9eUP$7RemJFse zaDHML((w{+&PM@i-qA6UxWaCNQ@;JERf~JOLP-{9e6!EK)jE?w0DwfB2StG-*JWJHi^^$e%(v@)XK#($!mzU9_%tBfEBtuG@wA9nNu}Iuo-Fo(7fZ$J4IdP$DqO8$^8yg3ER1xN_ ziy4ui!NmN}Y(RSa{w*#sI&WVrwDZ6;XMTSE)88a;z{#^c_rqC&IN$fcyekJt35eHw z;g+lgrUax!w(d(r=JQD2D9rKmA-7AzGZEtK{}W7@G+bW^yyODbRW;Pl(7;1F9mtIu zAu&USE@G(!C6_btPCbOKBhd$l(eOPouwVLk6N`Xa2J}jZ;0J;^^591!G%^J4w7$O1 z;+JIn-fn~n5q<=mAIJjn5~PqhLQl?YIz{{7#71uQ2w4Fz*`0uV(gwykEG0PUHV}P2 
zOvCvsI#A$BZ$ht6rjg5*TrRg+M;rYN$L5nVe?`!XuR$H%k+wT2t@#?MlC^Ii+o0VW z@;2%U&ZtR|x$bh)K4w^iGwcN=I*To7e}*bE|4^xmwa5htdzpY>W;r`u)5HL0oUIqG>Bqpods9%r z#M3JbQe*X(Vq~C|hl=d}y+Cw08hgR`%^d98)C-rBXz_1N=;A&H9Y{47MeE=HvkkZt z23!U$D?jXUKpeZ)8v`m#DBK`J~W*3j7}t3nxdJ4SCf@0neXE5OFQg#C6$mb=2Mas&OoS zQ#2l&s5jzn@*sCo#Cn27>r2PewhAc@iHPFVq|vPhU<|yrgQRc|x z1sm~XAar{~cyj{v^Nz&EIjk32dK1~M%bEcy68-sv z_o98D^Sq>)=zA_D6mimYb8L4z_XvIrL@)9kWzuWVD>uE0?{+~+o46LwJlLG7+*H8N zRraHe!D_p8!<1B2kvR2&-cVw2>9^I2mDEm>qs)Z`hIwl>?Yo@=*;;Zl58X^DhpAl! z(o9-RE(*DO+dfgUPocvy@dI9j6r;?mLqu&?IQ37NDHLL}i=O2|3IbD$aM0gBpT3FR zV<19k)DzC7C;y({ zLC-4!Nv1~t#UPk^niA`RsA4@pl#aamHmRBWPa%REfrNu2Bw3i<;0?o4zY|}*b@_oY zdIQR9vz{&+(w)y22a_pjcy;20x-O;}o+$cXYB4#Qe{6A{eTvOXPcyAkh;c=WSGbF1 z*WZzS1SY_eGAWF@vUk_m6@@#^tTw-y@V2!MyP!R(!FiF2q5A!mp9T}FjUAED8uPY| zaf`oBw-ux3nHdw;t?y2&kwYXaJJA;AT2=@B4EZu0+@BjFTrY{TqWebObrovqhJ3l z_WcG*y4%@lZ|0kQtZ3c2lR)`!Whm5}2 zW-nHsZ;LSq7#4()(FzQ&ye`4TWCe=!i+%Hdu|Sa)>KAogY$8I8Iv`6-bNljt%&c=@ z8&uN3rWgez01Lzl1>Y~M9p|zAM!z3+Kt>fsbhRF^fA0K|(!kt{!EXOG;(pxEm~_D+ zkC+X>)Uz)+id@?fNNTE`A%!(4%jn?fw;%HY@)shWMSeD@x)D+^{Bpbb9~gR$!NFny z#yQGoRi#>=c@;Xlo7F?T+yeOd_46ZGbU#XG@!^H3GIgXHdB@LXRn+sS5vsfz8$HLl zGZxC9I)`p?BZ@f3Rq+vxomBOEZayp~yrM^I^A0qmv3?f}(c4ZBOY~-yo}`6euf<_+ zx;~ST_Bi|{XP6MnU14X!$_UgV9H!^#of*T-vm`sa@6>6nYiT|h;j%V-co?O$QM2!T zavYD!>G1sZZoOF9PbICBXJmnYexg#(lFQSLQ;(R?CZ+1X)S53N$*y;Z)6&MNlR1uN z!9hW;%E}6;y!t{o0n^VT5MZzWAKa=To4XyfxQHDG0ZV{wVqNA(AnY6)aQb`#^=+*k zQ?+D0EJwr^j=)OZ9^C;oGGv#GgP#09?v7I8V(E&BLinA^CNu=fDsL{m8jOx`U92D@ z3rfum{hw!00$G7T9nmH0R~Q$zwm~6?U}=#R&g(Q;Qt~dLj0_F49VhMjbBn=B3mQsF zvz$MQ3I$by7Yn|xm2geJ?&JMEc&}f<|C9R(GEv>U>RA(j_RxuLYX@}a=Z%7Baa5u^7Woz=T$Jn5j!|9`Tya(T` z0%E~{*n)mn^PnrCR}eajfb*;kf}QqYk))*Bu&|KEfmr zKj5VX`L=X+fFv}2OaaIkVzdZFgEh2G7MSbGVI@FOiENad=55pvKf-J`_KHSK;3Ej2 zp-{vECH;u!OUSQd)&$g0f}82ngqazlO}$5sL?C*K?-4au7p3*rUdGu8<=Qj?G3t z<_U9eyU${V9HsSoiU$27=BbE+rHzq&za)BEX$kg!D` zeqORl$M6S7rSM>!FxpLD>M|N*jXc-gaa8Q5Ek(3lv7I$mgw+LY3ck8`{zay)-VGBp zok?yubsk7oQKF}dd^*G`rJm7IqtxCz3mUY@`kh2{^?GEQJV~0^_}*y4Z*p 
z7#(j|0xK*R8Ywx?!+5CZ3>JjJ2l9A-+BNFuuRiI&ME51nNXy>6+iE~+MIaO#R$HLL zwXA*VFRe6Kfe_|{5H8ZqIcaRa!iU)AbStDf6^n?L zFJM*1fd!JKb#!mf2^qmrDSuu1co&vd>lgB3zzy}mWgr?LhZ~T4Iu3~VlRm_*YXRYP zM(stmLm8Iv@uMb#7#s_k)rg#q%Rh1YhpcSiuq7eu4|*PW5_gfp6mAhqoMVpO-a=5r zkmC|3imYQJZyb?Hr=es@U+vz1Lx)8EfLZLfrYQT?&JMEpL4hs^RcPT=e1Q&ouK!*s zqy*iq2CAb%P;$PLqD;tss^Y9#nY=;1;RGS(PD9$Xp#M@aCJ)uDGr`>T_tM2ZrS~?E zNeI6uQ5Ca1O3vdajW2Z)Wz^tqyw*96^3(Qb!bL24!!np2}7 zonB_#pH$lZJ8pOjiGB^up)qrcvybC_EY{xYS;}JLHIRjoCCNr-1uQMsFZfXFbFQIA zQ*aLX{3}togH5@QL~B75d1*viD_qPjpbX4jVqG!SIJW=gQ}p5SZs>oJmq6mEKZL_x z9`1JtVhR{!4Mz4FIki9<4}Y}L>qut_;p@K8`G*TxO8o}4_oOy5{XvomgB`r*YHTTl zT__NP7t6>#!u5C$H}YRs)s~--mNS?FY;6Ax*bpP9CGxpLT&Z)_)p=uk54`y(I3&g2 zCHz3(7iDD}u9xs($3gNZ6*R2}1|NO@@dJq<25UW%a7uu3i0n7X@&t|(JI|v%V2FuX zq6MQoq>e#CvF^@|eMQbK*k`aBz;M~#i+G%cB)=d(71%Ep<9V{#O>IR**8pPJ3*M8| z^rtZ;`Dit^Nw~Fh1{$Tu1oQ&r(S^4Hs2|JPjK|N8{`qUlg%vEL?CevZ>LN4T-2d;P z6aIu08=Y+?hy4M%Sv_$JuH`H$-$z}BUurw8H*|u{V#l*qWlXDg32l7NBZKFjh;^Np zvt$M4B?#`J5H=ovOXZO1wwMzQuHGtDuDMxi`aVVt`~BgEQo93pR7DC z$IwWq==veAWj(&jBxGi9k4bl!iw<-c^3Ug?I*&X^k7u zu}gwAfT6E~>{fqv?+3*q$H4F4(2S@amM5RK-dT{Rce3b*V;idAUc+z}Yi+?WA0K{< z6MFrXSi=LnA=a~cH9D@z1g~=g_vkPMhS^{&pW#*`#S5J5jZ#cmmwl(u*|whrIOgT5 zAL|&0raR(MH#U|0o@QgCgdc`xQzZ-(CpeA|)>!s=5~?u~sWDmhd0Fj`NS|mpJa7s%k5NE_Y#tH7{C^#vF$ath;#mSc%O{vD5Q`+4y{GQ$ zgwt5?Rxf#8oQrNczdwV4 z>(PkNjKhK^60r4ByisM>N#Vbc13Ez|*y|rHdCv&VJ%YPcG}00|eyaIOLe$x(M_Vy{ zi(SW^OBN-RdODWhS%Y@+;JzB(cZ|WmH(riQ@4rv_qZjdb-o(h`@q4~U+k`3c%ygbP z7XPq}E15*PYr2$Mi{1qD=Dj5;?7uZImE(1?okLD|$KsQv6N$sEWYuWlkSt-#g+MRM z0In3Nt32G>m zt};*?feSma!4;M)qQ=PCy0FYBL9hw^DU7u~@hvS9NChT+IXA}Nwli{fr5j=hLZB&x zaPN>&*wUszsfHfJ1Wjds2aXkzTHu;tjYrQ5PE4)QGq~WPbE_C#Z8#Vqxk2cb0dtjL zgTgv6zePpm0mtpZX`3@>yFRX5aq||z{<@;M>)k-6d;hhRBnk_{M1%fs%>W9Q`Ppw| z)=ep3C&;dW?;)TE`FppUst_x1+5;kJDS7n%)k|q3Xw<~L!b*=aJ zHAB7ve)^WDn3g>rJMCrK_ghiKKVQfYq4dd>R5MnmuA=Oo3Zr345iSsZ-`3;MxM*iz zk?Y1I9OF@vFhapzVDu-dD;IRvi->0upEBNm$892R#y~`n+NCfaYP9tsDZQafHZtMe zs4XJk5IM7XTdb{3z 
zwm-80aF_qEnRSr4TAb^Ni$n?`0X&Lw4L$`;7M{@N?1qNfWVwDO7`Bk~4780u;E_Yj zmd-UBP`~evY}*L|A4*2x5-?f6qOi8KxGQW>4SlzC=GCEb5aHT~z4cf*Om^wVUzbbM0^7be-EZcB~m zJN)O<6Q@zL-nb!E!q3-Zr7L=VpVSmDxRbxRPuj4R5$)PkuM-@S z?Sz$S`Vq}Nh(syBnz&gpJ(~Hu?gT%EK#|UqxgO>6UAuR52~NbD*LV~X;tb@Z-c>6r z{}ygnOeDBaU~6LBE+|C%jUngL3@NSJFQ%Zofnv6aTqB_|J+s*-tT+tN61XF)r_mfC zu`e*Civv`c_d5$4_uA=;3RiW&8t?Q0_GV~r_MJZmgVtH_xpu9>_`y&=LY7q0TiL)QfKIE zLulkw!xi+3CKo^XH0=9BxlQ&WI3}Z+hOgboRjF!I{F=ji-12>St#q|gd`W1^0-M7Cz6ro z7x>%luVrcq1U!d+%CzKBcO^P*&^%l12%kau5RLs;D zX5tUiONy#QstH9NbfYmoOXym4+UIn#^Txtd(2@)Z zXny+ZJIWU0tuuJ~)KK<2Oz(?>dINm{0DV0)%Pa7;kt&bS-gTpf(bQ!5j2`?@a2cyD zG(gDqe?$ZEUMM#rdD{QzgtkDyWAz+LMRPV?cnAL7#qk5_+epp`8YKmMXt?Fo_sFJv z?&sPRt5{aVx|Ar6pL{X&qI+{UI?ynaaQKY1`+S1F>qh~( z_^{?EBuKhJa&ES#&yvREMbm3qaB`DHhuz{Zl2pp{Mep7H5Yp6YFENI8S5Z&g1^VJ@ zG#`nvt^ELj!-5x5j~tZwZ-#~Ev}w?hlFhRzZ+pluq8KNyd$o0}+sNQ%rx6c~yD^;Q zI>t#KTy=fEefzcyTW3qMPiuj#UvfU1E!7M*lTA5eq6ivHVnh%bQ?u)Giyxr-{?6De zI;4#z4@8ufmVs6l=?@`Pmv!JN^dOJ2C+?=Zz*ZU9eh~%>LOKI#ASIZjDWsl?Ae3!t zuVFr}RF|Qg*SFkT-YdQ?h1viCsd>=;SM9Zu0Qnl&4I{t*MgANoVS3Q4E$Xb3b&_ z-4t6Wf7*vde5)<3kRyD z8rgz4aE1CnCEeEv__B2GeWi~2Bf(GafQQ`tWzA4r2YNe5hZ~_jfsPG9KxBzO;YRY; zgOi?F*TYyA0jD*=IS)+CM8K^f(*X@yVRC(Fo;Z^2enFbYkG^Q!b!_KB;IX3VcR zzQ?O}?F~>YzNx7phH-GkV(-hMhT^B=1N%rL3r4}ln{BX;9s*voxaf%=@RvW2k0*QY zl=H?KKhVqiF%1Xhq7`&#q6qL&c45ZIj)tpU&#A>r7s@J#f9g$n7kf-mN#hWDoz!(( zPZ|rpI8J{}izlaJi=vH+%b`^ESlvxy-tUpTF|;k`;NGx!@>ObiFuVt|HZ+GZh4_pA zWo>IUxA-eO)){Xu25FLz#!&_1>KCGEH2sC-y^Yeuagts$g|8T163A3Cm)JH} zO7mldp;I<%tW=EsWc7hEw4}7^NoT}KF!l|FLHrl`MAx-$_Q$8Ob&E%O)MI@s${STQ ziHI4-b+`obcBJVj{rTz>_IUlES>)RLYH$R-PnusoO;8j0R3)_iGRQ&Y}HsE_odtrq$?+?uviRH(&XQ;21oyn0qvPLOl!p(9~b>Wo#Kp7{x2N;jB0J zL^H<`jd9(RXzadRU-@{XcJYs;_22yrx>p9F3J+EKFaP}fa2dTt&X{3>Ep8O=>qc)i z@O1X1z?p}`1v9t5g}~w=o>Bp8jt*!l&Cab-@< z*b`AF1svHw$3OC5`9dJI=JJB23wJ@k!4=Ep_Fb3RV2GNwJrjXC9d1%X#v57nMlTGn zN749DuxMn5!!Z3ifHk_{cnJ>p){F&c=4oN=8bpe>U(uLvQ1uYi{ZCOj()O>gqsD?r z;#s@VS4ZmJz$@sf9NEk|V$DS`d^rIZk5y3=$f#Kh!E;1%Y^RYD>rkMGTBvK 
zWeS(tSdkVvYYh>05l1s)vc0?}>WC>f+v6~IgY?u!M=;Rta&M##kDg9h279dPc1EVL ztIGS(>Zd$GYPi`;u3aw=8?$EGZBa9F&{2*AJXdrmT-Dyx5q>-2KHN|$B%FRADsEnQDg%LP?-k#&EB|^)a zv-;*z4`0I+N(3~Y561(CjW#DHCRkJ~mS2g%%mHnoh7wob(2ycKds(+0$_*LxWvZ`! z8j_>hiZYg$mn7)?9tN?WTLo9T50=jrsm*qfEXgH+w?UPQ*-c8dQHrx-s5V zR!{}&c4#!}iK$5_#vJEw{0a$;iCS{OI0NA=^$ZvFff*_6>2Vx)A^FirtQ-%`icxU+0_R1asb39|@nhPWj=oe)HnDh4TG$a{g7GuSE(}(Zofe5@qp0EgBF>v=Su! zP9Ua;*$E+OKD{_L^K3z6Jg~2?L-a!@j2EG5i@#@Z4^H4P;AkH_PuQO3$eB!bnWT@R zCZ0Wq3eMj3ADkLoKCYOUn7viUG?&cQMPksGU*-y25qtbK*oME z8?rX%^Tf0?n$U25#|Bqgo+oaUTp2gWJT$tlz@D7OFB8I0+Z6S=Gi;NNDD4#2D*O6~ zj2;^E8)k_#VXR8Kvo{idMQPL?GMHc{%M6UgtU6g{c#NP;Oak5aFa2L%-uKv;WCt#s z6D0>0YnK{A2ldGq_3A+=I3_D@I3uCy5Es6@HN#b-hH@QQvoWDNunm#tsc%aRf54Au}8S$62q$?JR zucd6sIDC}2*JmKkWjC!@lzkGYeA!(LEqtcb+TE_#t&wOq+-fT?FD~w&|E*NOj^;(5 zQqQB8Z~QYeA!C#&QGQvDs`){-P21r)2c|>xTz0BU&9f&=OLEV%EU0Lej}7o_ZVwhx z@>Yh;{leZpVCw0TkRHzY^%Jvg{B$kW|5L<_XT0Hdu+|&&RYxM!!E|1K+cG6Z@x1dO zc5A=jkxBdOy3T)8@cQXnzK|^M5zca?x1cXy(QS9w?&<&GBXv3J9Wy5U^Y;0vd09LW)sDr}7t2pw?M% zox^Axq4c;GJ~pxP_=^V2fK=lW-?LjK1Y1anz6>Rc`&iMo z^5@5@ka<8y@ir#j%e3V`7NY16IjSukIYRC3Uz^wY^6RhAcOm>9J7TG4(iSFaF1DW} zlCNtsy4`=5?X@;iL$a&w+tosV60>Mb_c=IHH`}OGX9H)_1QkaNo3zsu)6i3EGd5{v z?$U@xF1aW!L^xlpM~!05AI;^hUz9}mys-eeF;eZRpyQV6;zB#9=l0*@D4J#aEapAG zotHQB+z&L*YF}SnhwqdYp(H`!4q699T?JEHpV=qU59%ApFgvD*Jn~jW2te31$x!;a zqN0mx7G}1(Uer!p_0S?LH!LvDh=3W2;C^#d1)CtImOE}Re9sGKKhkM}QND-@%tA;q z58qzw`mq`oHLq&?=jTJ$<}AgxYA55}HZi$B5-262;TUg+N=W|v;p$J`r8)WbjT>IB zZfpL7U$sMW_kGB-b#sV{Tc*gYR=d_)rSWJ;4xZMLT2^XkEi$s-$U~vfs!IEe`%%m` zR590)C(nw0R*&~Es>+g1qvbHLix?8J*^ip-@!O0)s>Hq#N|s-f+v=s|k|R8Iu-?SR z#EPkicksu3MkY&v5_?B-(^r|Wbf52O_?>tWDPjgW+yq@1vZB)~|NFBet!-}J{^?SX zOWXQA1&D6i7m{op9oe@>U&cXyn)7%b#wjs-dwb=yRm^6l4Li9FtZ%KJRDxVCFjI5} z_5qq{%PeV&SH8o$c7|X~pa9vc5COkz7mG=KoA2AYKirh@nNgiNW=`*#T zP>-{S&1_CDdrKv66^s(8KmSjmhAlA>i}|xwD^BTer{uKhYz2mTwEVLOsjY|#5A2R{ zI$jU@$$31SN@fdGQrg$5*y${sm}_lugwm0JmQBArnr$UC{z4->P*v!RS4}?OeuvZ(=R{Pgj z_25ceoqqXv{i;-nXgj&Vx$+QK)wv-Ya}7@kyHZ+aua_ 
z?Dc<~mG6)JM(0_2$(#-;Q`MW(1jjDSb~H4Ce#_@f%ZHI)iA_gp+=3JiTI%;bu(7W4 zC13d+nM~?eC^1w~WY~T@e=8;5Q!M`|jFHU_SLLN5?t2zmJ6p*nRg&j&Fc%0bIO1k)cP-ExRVHxNIWuklQQ*`~*43vo5ha>b z<JaVe!==^kiFRE zP^2hK)0r*pq4k0f25AciH#dQcIP}JAHB}Z$e<{)ff$U$o(dPyXE$o10miw<3%UJH< z;^94lF{-`fl~@@z+rihQHrhwM7!7jO+h*slGYT3m$X}MB+Iww;3OUjmdg#c7V$8^y zgSa72Hs@?7qqKXP=MNbU7Ms)SxQFto7pDM!(pL6P4Qs%Um%^G5` zix)sgWe`*`ib*>A%5&QVXW-+`B*i+~wX8_VgYf9mjyLwK7{0RQV^P5qHVzaa$!W9U z0%Izc-xkbe4ZCEVDa*BPF`kP*+&Dt%inO_jLtu=7<?Purva(|jNE#D1?zjlA| zr>KnO-+O<)rIIfH^AQqutNiR1{n~`c#Vv$)W>WoM5fwG1E^A+8#9Tz67D}SlbKxKs zHQ!%kgG0EHy^)V6+3R^E(qu*JuFWJ~FCb4ZhhtWpccZygzZ9-9(hXA>?VtuneCe(W zAk(eM?w+65Cxz_uY7p+Ynq)C=Ce;rP4x;6rLTl8av27c+H#_NVm^D$(p2Ij^>4t?a zkrp=BwC`^wCM)6M)B{)J&EQt;uZ#YJD7@G*W%MC?XY8~X9VQaVj5jGCyGeZE!*En9 zz=^&gRgZRgeK~mkwv^&w#!-q>NyJ8h*%C@I=A&4N*N+V3FnvrdJ#-W-dcyiqjGneX zWqeO3e%LCZoQTe^x$`7+i042q6mr%|C)9tdDbN+uV%=^+4QiH3EWFj;9q_v9^Kb|o zlT|Pw8g+aGLG21Fq0SURxNT<**Y09LZr$v!AMDR~?#g+HKfvXdz%XzlY*Lc)9yR+# z?72f7F2n0^&w-2xuc89^u%HL!v_R{(nQ~ZjBw#=SJ@)&5;FS3HIngm|K~4BeHmGvW z^u1dxfuEtVkkkyTkLU0bK+e>=kNq24moxIFv#hC6BT5+Zs_`9UbE-2&1wus$VuE>f z`|XUiGdJTau)SJ|2pOHWgwkPkugHxBB}L1P~cm&78sS}h=*|MrtU zI7!>29`{TrCq;9rQ{}EN`zS&9MsWX?j+`}`-@;YljnJUbL}=vT3KCJv?g<^WZki>3 zv|25(;PXULtvw{@Kl!Oar*rAuZm_fSie%A?z7PMM^^up-aELrX^u+fIfG$v7B{3tOXTW;|g8O^} z^rd*HvPyhD4rbr$bnl;1|5z+a=!RB9xP4ltC2NHCAt2QvSeAq2DX$#LT*)s2!hn5& z)m%9nC*sLsp~?hLhfvkz^Ws(-G~!>d3^jwh$#tt1qPg}l%BFYn-iSp$ewm>AjCzv^ zrH1m`gaLuvQ@?`V4*Im4WAzyQe^OH(fSo8HwJ=%}+@sl|4v%H1vr*^><#w7B1K zGSVY$o9X%*P=;8#*TVthQq^%!h3}2464uGO;IzwPXkTVkU!!v)im**TwaM|Ul#I+8 z9V*HMJ65T`97A8EZI8rAa68)8*L-KI(veqz3Jb4O{uRA4kdEqXaxd_#_v;P4?OUw> zbP~s_Lh$}U;$Q_RQ&$n82ejHb8#TZdkZB(1N7p%54)a{6j1q)h3A6CP$qmZwwyr*h z;-E$n%fEkqAw%?cn7AV^2N&kwK`o7{x&ol%5I{^ThNV@MYSVkbRn7`J!c%t!zxnx_@)`2jktN^^ZcB(9&njW^Gh+kvjrazrR)p*klUi$s}lRU|kKZ2}*Ibt_Oid$j;(Gw3yg z*5J@n<^8G$^WuGV=&GU~)IqlvT@M5{tAkp3`sh1?f?`dxlYa*Kn*Tkqpk7fSDRbI; zUs`r8r(x1+={HBq?-SgXt2HXltH)f+m%d!f|7^H2YN~8sSAE(scklR}AXMV691v>Z z?tL#gaOWC{f2L1NQ_1}_ 
zCInXmBg!ni%5pWP%k<;w9(f0Gp0}5JWm59YSXHoT6kXeIB9{ym@fmeyFO-(6Ur(Ru zWYR1@|G{TNUb^TKlV+pfU~(&!gg(UJB1FI}&gN~5NA>E*c76A9bPL7|WjVt~OSn`0 zRUWPn^iE%AObl1ur}%pR_&xK;nTnd%xe)R2-RUte!UH=e9I{ROsIPs$-)HXX)_JVS zahf!6Zd`&wnc(mexeGf>9enhBktjT1!@uwIX7@4GKZpjJS)UQ}`@Y%Hurzn>WzWHR z*AgTcpF3>cBTBEptKW{uDIa8sfF|jy$g+z;9Iw_l%e~@V%WB!_8y`B3!|C`YrLIlB`@|C3bWyIc;Le9U`FzD#tG7~y=MxM#yR1G8y0hT z^F4T0^zPAY$`A2x310`5k2C4@i!+!eu0tq~BdLR+UIhSaA*#%sw7X8X0_d|@Vd zBe^Q0`Xi7-;@sU08wF>U{Yqi6)SKHXQ%ryYKNpJNcIA@>_kG{a& z{>L-noJ>zw+A~ka-`72_RQF}oAPr%lc`p@5N+*5a{Y|piOYUI97Hqwh7()M)xu1CC z!z^-#e9x+7TIzRb8c3#3Bk_vRsRMolaHAdbB>nnf@AHp7>EBpUBhA%=&0icZw3w2A zbD9;MGPSHW+kJRlU@68B>a%)%rqb2D`%3vqGTCfP4V8~XjG-oXxuI!WCvoQn_M0#U z`cI!jvFeyQj@ME@ajT}W3dHFKQ@BhGJ6vtrAFkOsyiJ)(yS>G8d#DTgP35jR2-Vqb zk;1piF-8mmc&I%(P)8LgQj zk?BN&vI#w%l%<>c@Au>T!(pMzb+0w&m}6Y$brrp8`3&QccFu>zoGDf4z(vKrT9F*G!dcgU&YL5P z&?xtrvRoHkTT9Cns%`QC;&Xy38M}(v(C6#tr%Yur23iA;<^PDx>gcvoN`Q0uCfGH8 zH_+pv0M?PVbZ~fhc#1R&RNSo#zq-Ke*%|7tzSsb|C~HX?s*(Pm8o2E(gHJ&iUtqw~ zzf3k~aUf}D2TI*G{a7cEhNdF0j6d%WTs5E(MYdP}ic3p-q5FvT_ae&K&%{jet9?oW zBskVgeiPo)A4P|60y=NGt+|vjUK`y6Q=2@#qc;+Vj4E(G8S`(!9I+^_Wd*?xLMSld zko6vFn`yHfvz@v6)&r#_uqcTcuW@$rY}7uQd@)eLM&v{}Y*ta&f#c6P9T1<~OT^jy zS*5dgAZgBmNMbn?8Tr*q%TGV#0OMs}A{b%*qh?iF+&vH4EPFxNOp`0ZgdTQN`;54< zhEH2_(fz*KHJ*haTu=`+7tWRS#9ue`FIj>2x=-u3qFP`v4-UmY8je6h*)-`C7@Nq~ z-b3e(XCMu$PIsImMjV<<22Hxm^gUOMj#~!rWi=S;+b!7?W)PmbuPy+MU`~oJl$_x& z_4yCc(iN2eB=Ml#M`yP3nt+HXq9Pb-cL3$F(vC!d@C}@|gF5`~%|JGeAh5aeD0Koo zj6^t;eQ44H&?FPzkl2mr0nX#@`s6e4-3LNZN|>)rVBW%?wf5{xDEi~u1Bu?wV-GBl z1t(0h?V#jED|?l)5cK`wp8@i_(%02q$!14nF~mm!+|49TRhxp{!;e|vL0^iQLQlqF zH@WLwOk3b~dPt^S?%k_@EPaN9pJSXs78OrdE>tErls+L92hg==^5o1<{+}Ck!CjEbPo!Y4fj1 zzkOSrNTwEh(M#pTc%dMIg6qD_4L1+9vjKd^=mM9^l?y0##(q-+l^qF?Dg_-upFaQ$ zh5&(0>w?uB%o^ZL`@ybTZCxx4VZ@Q$Si-GVC=Egww3-V72JF z0(x3pFc8zZAo3+UfJ${v*L&;jKxYza(hkzmj~FEUwX_JJ1R&7h$lCh=2=NpIPyW^3 z6a~VyK!z=7#sQGPscUHb+8PW1Q^~1HV~IAvOF|6bf}x2!Q&^IkY;gG8moJ~pf^GYF zzt_UT(@AHz$z&OuGr!A`^wj$~GMI|xOv9SJO1T!8jJS^xsX*c>zd 
zqwLy^fxi+o*nhB7HnIPfyqw>Q!ghTTp;kqJ@}-(DFTOz|d_`*acf^joiM}(jLPqp$ z)ck9z544<%q;gJG2ko(+7}4VO)xvdX-$h{ye!>fGRlbdF9TAIvi@Ux}V_ob>lWS@F z<<>j24?gZLhNJ9iU*SD0&Mv88jEE0VV!e7Jjgbq2sU`NXd1bZ3rdVD(jEArsoNA0w z#MqDA_WacxwH?o%JpAQl@-(kX8{r%HE2;@2clh4uqASphef1x<-*~1P)~11RRXAgepr)dVpX# z1kB+q-F~-rbWlP+L9jU^!5gHoj`o08Evy|h6dK8P0YwuYd;?h095Ba(x)RlRw*dwX zHCHaS2P&Nh9chRo*QF8oV74TKIIhvw&$EG*s7C>d7KdR}=xX*|Y{Kz6%B;3}IO4m; z+oVd)N}u&0q}Pqz0uFsNtPTTzGS4hDhJ$qdH#nt`P#&6OkeMOMYTbqrKfgO?Q|ZeR z#Ydt@_m3`ihAW8RamII>jI?XueYt-*84F9n*ozy;&bh$QHNBVC5UiOZ)((4?PP;|$ zj`oW&>}bubZQz*>wul6*iv+hpMW`jc($8&hT&p^0W-MP9P=3e9Vj@u-`+-?u?}Cku z<~f>A#`v0W*z4-JG4REfX*tVsrKfqexFsvvC1( zEvEJg@aWLQQl}Ypd8i(`8{kN3$>sq7TYzd^peSabEbAI#9AM3!ZO?t!L%X13<$T8UV+>uP9(0Hq}fv>^!mzNkF?pzs4=hop;5)a}}#X+I#w5fflZ@@w^g zWiSVcK;58o5evE$8N%ZrQP~Mx(`HF&1*+itg@#!0M;fD&P6|q(<5ob|%(`01m2aIAn-AeOhZ$hPe^SI(*6AGZWjfC6yK4iWD!j7 z^>{SA8|zrDIMdZOZLkT#)ak-G$Vj2+opY!k#9A%LwxK7_#d*PQ>)yu6_BM&0DfO-7 z*DRELuM>O6E3dTCl$Y-tWRN|x(J}bcCk>erxp1x6QKswh7F*N7VE}Td=t!nJT{+y% zAN}>^1!VFZw!l#ma!f?FE$^*;=4{y@+*O?!jdrqHg=BEWHA%#r4BNKp0;5H3g%DoWmn9% zr0ChZuR=3&XY7bYoHVnD?IHqokvvi9;w5(`xnMr(X~kmwFo=9_s*kVjDr>K<-)Dw$ zTZa5h@`56~MVG>Wv1icq>uvRMS6yX${4W3M}( z#h@o}knKx)v{?VD)%^(Ssp2Xe3q};?AieG8v=t*w}QQ6)ng1jSwe z(rGcE7y~NTw^?29a2_oC8tk^V`e2ar$POfXKD5gWP)>f!Ck(>od5VfRk(yG36$uTj zGB3K%CR_1&Pl72-Ury@XDU~D{_y-X)&TKB!ZA`dPVM2pb|5ObrmovXgN@!iBR3iGM zlgTyiURw&C3RiyW*kV(026;@0>J2Kq0w>whfHnqqVCeuv>XZb|_V?VztcZA!Bu><# z`zv)>AZED5PTA%=?iu~17TE1d&W*L!3lq+!I}VGKr)K7aMUM?6E)9Be!g&P3ABL}` zH~NHE7q$0_q$%~q=P~D$c=S12P)Q4irYV61zXz!2$nQEp z!-|y0YW&aoCeSM(LY*(h0Yc6|QTTh2`1@=OnVZ|kt2E6&X{{_+bT{pla4$-uc_(u&#}J!WKW<8si@;^k z{`{_Bi1xBXxHwMx!+W?ar64SR8VU%n{-aA`_i$15wT8Lr2h+HkoN6ny`I}k7wu{C$ z@f9qRP8PtPW)-hl)}rcfle<{3r-^a+KL)Yr5OkHOx;J6Yq0-3lv+7wtGHGJW4|NG> zLN?F}!c@P%js7q)HM~CPu+=c5Wd{q{+UcWHJAJl3jxsR?8BX9uf3!}cY;2KHffAh*p(@TRW}&4#4V4WU?hRd0hA90VUvuyNGOog z#LNeodw;OemtS3jycC#<_A5Q;Zvq3^kJ>iD(2W_KP2$4Ph~%hgN($1ttgWr*3ywK6 
zR9_^&&IXkm@K~L1a(V$(C4=D)tcK;!S9XSQuNRLZA95PXxbQv_NCvJo)n@#p)Y^6M z^usS~Le2=lq={LBnI%q0l=C|x6uc0+qUu1f^QK$RQ%qC$K$aT8nFuyn&e(CJ+h>#$ z+20$pndD0D@e!*{q~o(Z-Dk(_sW}X$?w-f`iT{!3742yT=iB_**D}QQpL#s<_|+BW z$|4lW#KhJ|zjpN32$kYQf6ZyC5qZ6M=~|BxDcXfg8m91#i3K~QpfhsfWb^{gh01cM z)+CBl3R%&>Tt}Me7v1+&YDqAGvHJWy8c~{Jwb9<vby zAX}`#hYD)40h+&IP&^&%-b@@E4(I832vqegHN$E0)J?#$H}tAb1l34WWWUAu0!|SAh6|rnN(zFo1qK67(mC z&OCj;-Yi!Er31j&h@id+fKUWd<|@O?lRwQoP+lG!e}w>r?l99YH|rf3yeWvwH1qp6 zH0BIye02txuC;Nbr&x#1dzjn)WTuKgiOp@uDaIeTyW_sCj4us z!=TL;4{}!)3<@HHfg$WdphB-Q;?N(CI~LktlP{-q2tV_SxngqK2d6qGWCTX=8RV_= zzWjv#3;0y*eGlK?73j@{==pEMpWcu;UM-w=QwY9ZrpS9IpGz-Rs0GlEV z0K;K|<{y3M?+U}{KYt9MX5n>p_&|Lv3*5mc!4m~dn2VIj(VeIT*VNqH-1_$fDl;_& zA?_dmw3&Y#_`NW(vdRF1v&9BGmU?k0ZUJSridv@L9=qkJxxmM;O`s%j-wY%TC(cAvO^R;6Wyj%g=u z2gCnrlz7n}=_)50#FwdBkc`ycOhmpBe|puN(PJ?v=JAV?&=D_1%#MS!f!E~Mh;5m# zmDq3JDg=yK6gsq#_;&`8!8NKwEOrs^jkrFFtXI&*kFY`N>Y3pYPJ}0u=7kFSR~lxR9Jva9UK5$raHUVfrUsl!&%`^0GXsi~hKNKqeYN+n96 z+_kDeJ@AhjU%5lR$qh$$?E|mOP|6bWzbh{6#;)%=aily8YqeS=e&iUV+RwSuu_GUP zbVD*;V!yeUI1w*6{G~9LKc$u?_o_T3RpoO2+)k|I_iXk-D}KrE{P>bu9Hp%n-{&av z?MA=o&ma!>IB#tc09xC0p#cqnF7b7h&riwZH(0di7X!iyj>Qi6i*7F8W3ZXA<5xYt zhh)KM$&Z>idhF)Vnccs;C;W+@9Ap{qV{ks99{NIBDzM5`^Ps)3!Bz~CmB$%*J}nK> z{rb$R4f&O5N{Vu#n8BAt*`Q!7!lrm3%Focsmx6iJR3FwRLao(8?0i&sZ!A?2YpRLB z60OWqx3}r$?Zr?1cLQu+@dkQ=?fOjs*Al%;tF9KM>F1Mo(2(W6N%&CJK+3XOE>)6^ zsM>VM$}@*2pdR!BW3vMu(BW& zxf^ab|3pSY3AJsUBk0>sP@5#IGi1=o#&92>#_)~fG^`}LG=S=z8gD-n(4-8>I;9!( zIYS*XXus%(%G5xr(+xnYh!yA81tiby-^kfe=GoUbiQ!yg)wRm+8TOgz>)B--L)FwV zDG?$={aewwzaY8X214$Ta7;+MHS3Z2#a4=;EG9f-`EOGOdAe3L+fj}k0YmH*M|RzLJlTdp zKY}QRBsUAB^O!E2i@U@15&~Nj%u&#)%&U$BfoI6*Rkt+MI06<{Wap{V;*}0RHLKjL zIx0+ZWR~`%H_}uj#s;4-ec$5-KF;@aG*dTLaH~5&* z5M(mot`dcV{f1V>hB^cB3tYr>mg@)tO5EnyldZ<(fM~~$Cu$!#RJ0A)VE#8HK?0X0 zi@{;nhzVm3g1H$F1M$k)l8{_$`F9t@|H=*GD_ZYwhNN%O{nz=5QZPoNUdAkZt_cva#lA=+FD93da%{mIy#XnQ@<9VXS{x|T$ z#RVN9j185G-gixO_HiS~`H4C5*~~BYgX~9L*#0oD>E%x$q1LkhebvBVp(_n*w1)1I 
z2>@*OC(9&M?8X>^HZu>WV&_VWFMqT##?AU}E&MGeG?)FU%?ClxdTSphu~`;^a4vr6 zvwe^8?Vy@fz(Y#se68VP$4W&a?8^_P$@GLPV^BeDQDOVYzFFfI-ke-fg@7SwP0m~!k1b7b&xWN7W$eaIg)HO|p~TnyBp_EOrJk1IqW#zu zlg@2+|KcS5K>&qDx{u0MYqZ_{?HoQVD=TN}_sM_62C^Zfcwyy3o~PgWBWySfFyQDo zw*KG%hQB>La!V}Y%v zok8rwv^(W1R8pl6Qw!kV}J#L8ZdWVsXq!210zzQo~(ULMB--Wu4(dsOFo~G4kN?77W597D;Cw+?O?J4sbfBr-J zmIIFHbI{ROA!Vr$UH{sOI4Ql%kHyJkSBl*C)+iUg;~lt92T9}>0tZSc7nbCAiPCqH z_gBTQ^{DYB={9-9Wso)O5>39NimTCCP$~tN3-4cRVdi?6(kV=gQd4{zaTG`DHW9UR z>kU|$xf(m{fR$uoSIDxu#vGcMnCqFPrE&T1%2^U94Gv#dhYJY#vdz>G3Eyiz!Dxfr zdd@)-7ybM1b?G@X_gxh7;uJ2LJ#(pm$ z!9CqaULG6>iqFcl1m96hw>tYL9QO-JhYc1V^{v8+{TXXze!D4GCZ%;j%&i^#FpEOD zSAaH^j~PfDL!B{A&xLwh_o5pT=55vkzhqj+EVlM-h7U!d1;<9RhjrQ&^)+7Bmul*l zN;Foo=L>ka^KbX6$x)7dGdi%^F0^nk7A29hynhML%soLQ*SdaCsJ}G`1-1@bJ3E#$ zFaNs=s`H_#opuM?EluAteOHE>Q{~h53tF#^Ljvy>nP(oVjM5a6F|y>l)v^Mmy(v$^hfl3S=@>BSQ!vnXOOB&)h{MJofR8sYU(x!)&`ugP0d zzp6s~BVkpk$T)2n7yNRLvOgk*52?a|CZBDxZ+cNcIe=zxeDD?ij5Ib$U~C;%LMei@ zb(dx*lC=ML@|Xf8A<-P?p@hid02nHamMy9ro`ca^`zH7U%oYU5y+A>166bJFKvE=dG|j-Jhsh z%!06WB$bMUzUQfKr_xBQ-Bs@q;hT7p`SHf50w=BClWxi}adzpO2tD0m{uBun#4n9h@{$@gFcNO+OVkb%>A7exf5$AknWd+%e9&aiW) zw)~^=H*|Uw%x_oHO^QD+>-8Zp7b~?CBd?XzK>9j3VK9AU`=2_?RyWH87LkhS<{}r| zpQNwJ8zv4~G~jGa)p0N95bW8@p%510McsoVSC-0s^6uf0dOKbB8|px%K$_m{cPHkA zRZQpK%_-U20xsg);e6TBhgYG$YyRP#I+uU%r$-`2Y!5>k@``&kqMKOw;9Vi=7%XKH zG=N7cLoM`lmo|3&!HeJkpZ5{wOR$Pe$-cIS6eqX%Tv5D#HZ~0V*BLQuIo-G%J`x5k z8Sm8kX_@$eZ*M0ct=7@JSjJ;*+WSSb6M>>!`PpAm&4U|^3Mz5-0_inBoCLu=J1Gjx znD(Ylz(D%Foc=ai$>S3cWM^ejQ@@V`2#$yzm^gx=^I}6nLV~r)f0jZZkbRU4+GPT- zNDhz&08jkAb_W4X{9Rax|9llLn00kUo$; zZ@20^o$ibk>V{Ltpp;8c9B06{(_`A_F-@vmzHgQ8)y;3q=AI?BJw`w3Vj}A6&xRf? 
z=_0fdjGnReLg2oX+4&ojLOl+(ASS}1D;4X09hJl%6*ii0rY^yPHzt{zPlbQ!!s4+} zq5=@?+zl(7a5R}RJb5zIy``9%*k9FkmL zHhIQsUQfdMk_aBf#Z7tW4?@88EZLlQGfEdFMbwnfNd?H@kuo*S#^<`f3q>ZEsjqbd z6yYY&hx9RyQxW#H0eKkCzVZPcsSzX3f3*G`Jhw0Y9VOA1THS>X#&j-z@nk9 zsM-|~gomrk!|K}G<7_eI!_Qb(TDIlu74Vq+1}0Rn&fu$?c5jSum!r!^e1D(Ub_+3ev8lgi>e=Tx+DOrXULOQ;*!Td0k<B+E%_mHmRBIR|1{Ce z+x&OqOMPwPY>R`CIBLO_>h$=E)FFq1g|bTI#g>FgvUji^f~cdWj-IVWaD;EmivlSA z?8}c$^RT?**OpdtCp4P4`tZgvVWQJn+o^I&PZNvKkcrGHkO;bcN+opFp$N4Dhf))zM)7y;io6=Jy z1n^yRa4B=v7SB&Q^uogz*{GczyM7SNm+CWuPg@bkMO8JB$o-HSSU9ak%@!It8Ni zY@#vb{QC9?F5+5MfLrt1pTSvoAT67jg4G~)ODe~(Y7 zu=uedj328lvECQUSg!AlRZxADYtte`R3!h4QLw1l6#G)+4cdA{Om!DG%xQ`qj&9RV zaAc@oDgk3r?T1rM^J{2OSgIfeFUBdt;-*HFW=8 zvLudF$3aFVzc;D0GF@u%t=Gz{`FT0b*ImC<;>APQ6|h`g5N5njNvPhLW#X(fDRzs$ zJV-s|WqQB(^7afnwQGfFrL?*#>w|NXQ3y{qsV%?x7|}`a^h;z&#^R2-NoM<0gPne{ z;?U^mtMO-tlX3qSA{8Q#%#99Ap?g~S6b!~-imu2Ic&{_1CzpFRLTuvdMCz0^$+x^O zV(T%lEo)oPyE?A5XYJzt&L_y9U&CO;HSzjn2i`50xQy@YOfR6g<3R%7(K5NB6*g#R zv$%G&v>Z4{JWa7W{}`ND^=4$Z`gL>jXxzUZaxyr+p6l2~>p2<~yDGefj(rFl_oU}uv=fFW#{p;P1(l0%=SxFgRz~n{(q|j zv#z*}$DKwTs-w9w(N{T!D$;e)105#31c!$eoSrkTMV+K8m8^%!)-q+h{R6M8Qr9ou z9DimY=#3$|HoStd^^m3Ad37fzmzijR%J-(rq@QrMdi+r>i^~?(e6MLMZR^<>pH`naJ{Q@Y|Tf3=9}NQ zw7d=XLa&gA?M@183tuKn72C`$hS&LrOW)S8iwhi#%;2j9oJ~1w?rmPv?tZ$QC71Bs zw10TSTKzQlLp1LG`s7+ZE~Wkzn?4~Nq>WoDTOtl;Dl>6hXmDUauCrFn|DpAhj-g>> z=HrF{SM!L|!@~2x)`RRHuLsMr3J3Sn)`BY&!BH^-Koh;(wmtj97I)8bYq;*BI_s2} zU;gUw)m1-wP|*SU2;}Wi;KjR*UuvQHzS)ZwL$W^O)_O$+1_X1Mm2L!y1B?aX1P4`) zKgV#JmSBqSo8l&B3h%zDu!{)ib2DwXgjtiOObH$2N^ConEVf{N$t$%cUMV#*G;GkO z*kQ^OSBo?Lu8Gts3#I}mD!9s<;x!g8ev`kNa<`X*FW7+DB|e}%5EX_GUAUkX_kWf! 
zbH1acefzOu`(|Ilm*eR!meJ|{>Ij(VNEvsvwYM)*3E853gMk<|XJM-w5C4s$b_>dG zOMbOze`F8dnmZNI=Lb>tiO(AzCyzel;t&2GPgYxJNp_jCPMx-ZRGuQQ_L zb_ee*K9I;9JpCxvsp8K^&5ig?Wi>2Il#}y=*5lpPFUK(949kntxMSUQbefkEoy8V%mUmy?^rVy}g2hf<>0}AMNb=L21VqwqOqn zIem>3#?P4~g0@_K@4fKW&BKc2>GG??pvXx8VeNv2@73QnAWs5%hTv;zyqOIeSC)*N-!oQ-|CQUWVN1_?Qk|LOgxtagwq=4$J@uFUn4)OcdF!@5y1e1zEoZ-?bVoU0m&>J|K0@EJ zr^zU3sM_|3WphwWZZ-a`h4^)LtCvjU1T%j_N0a>!KB_8tj0#9vqmMm}UV15?9yP z=2ljPl}gySkU$3mHEXoA7qq{B|1PO)bRqgZH)rnS!_x8ZCgi5SawU4R*2drm)!@m> zqs3oJ(U@hCN^l>wo!|@p)~>n@yd1TS_%f_@owh-&+F6h$uQ4f-?mi)FC|y4peZ{L< zaZ;;;r7iF5jE|RIS$N)1t}yyTw79+2U0shNpoFMb2*WTkr=+-usJ>Y6WVHcLC3i5< z{D($aQg_*3Qmv!OytBNmir&ebc!5HG%%l^$v;O3BvItD|FB*6p;j{-8?RL6tVxf_L zB#sU6*$SBsk*7}uqcftcv8;Y|c4p@bjWwre=F(F$w>iD?f7_(QIzgXwM=0W4`~LJA2P&k!9Wu zPgANv-xT9X|NbtO-1xsCoazqJvw__9o8iw!pCho2uVL2L)~JCY7eQKD+G9DZ(eG_! z$c-Fg7zY}foe9`?P9o~;92^GMyZzg4FT5V^jtbAZ2M4(_#=!lMce>i}DswpLAMehc z2WC+$rH9e9QwHcHN*1bnL~YB`&h!}N!lN52>kpo8la`+_idpR`D|&X-xg2L13w0g@ zcp%PxNd*GA|&aD1FP9 zRp}F6EIKknB!Q0M>EcmW`nMtZ-;Lh^V!hj78R4^^n;QwyVCKN^?u41M`;0{4Z1N{Y z504FpfKyKJnn?WtX&&~g9m{ict|BO8WMq%eREAaduY#)}5Z~P3p3$BjmBG&~Syi>Q zc&)9iXM5|*o_AA&XPGCW(NKoh){)dybe-VW8FJTs{pKC#7N?kqNLd*mvurvS zt04Y#?x?}O*mIT5z;QUOLG`cLpL#?#03;!*6Rcm!Cp*tY+B9jdxLxk+&+&hFI(E9e zZY(p`s6{>r-^8FuIk<~*dd|*P&BLJdYb<4Akx1P%n^kL{`77L~7LySL&@n}Rp#K-zJ-ilG1*xj{s*!paBoP`M4-0_hvks0x~7Kk!-ur$nud-~=-cG=_}ji^ zX9=Y&uT{S+-t7!U4^FDe7`-i3GihnM2iFSSb4TqwwAR>Z(!fiFgY^ zrG$7)#C4?zadKmOd%ORq#hsmhrK!RnQ~7lp>MC^Ji+*f{ga-**HF|{X`}CD=GUL< zdzOy8LUhhQm(vJnh!FyuAFR?aplrC#@fC zLIfUaZKN=ZpzQQ{1H|xSg|}X;#b}tzM;jYC&p(d&Gr&WTG0?xo9TzvT;poB_XaZuD;9qK3ZmQPoX*1yApf0h6gUc&@@gv*F{@*xo|lC{o;HG|$p}*@ zSTh_TLd8wINv#)Rm4+gi`; zOi2`NE!1ia{72&M6fyAd5J|P07jUmg$QLOh*wO1USv+t@meFoBLc^WkW$SNopurr) zNs*v~Ke(1=ax`%}s`2ZjBcpSY?dILPclVQOh>`r7Nm9A!$icA2vN}wMD2v#RzjjA6 z39um$sr6?PV%i)mbUPSVnV23a{by>rQ>}-L2GjL|H8uaX;*c+a5C}4Az?yW~!=IYT zsxl@$g|)*wGCL0-=v=7_LLl&Am0Y}a^2vJAumQ)M zXb@_qHZwH7Xg&z!X0zNA1p@wM7zhL#|9|xZNj328@CXoa4gbG?@V~r-|9t!Z@)Drm 
k{(sBu_&;?mSr-BF{mUlx9{F(KKL=5iRh6lbHVOWJ0FcE=GXMYp literal 0 HcmV?d00001 diff --git a/docs/compute/_supported_methods_block_storage.rst b/docs/compute/_supported_methods_block_storage.rst index 1776d0aa5d..bc7f9e9725 100644 --- a/docs/compute/_supported_methods_block_storage.rst +++ b/docs/compute/_supported_methods_block_storage.rst @@ -62,6 +62,7 @@ Provider list volumes create volume destroy volume `vCloud`_ no no no no no no no `Voxel VoxCLOUD`_ no no no no no no no `vps.net`_ no no no no no no no +`VMware vSphere`_ no no no no no no no ===================================== ============ ============= ============== ============= ============= ============== =============== .. _`Abiquo`: http://www.abiquo.com/ @@ -125,3 +126,4 @@ Provider list volumes create volume destroy volume .. _`vCloud`: http://www.vmware.com/products/vcloud/ .. _`Voxel VoxCLOUD`: http://www.voxel.net/ .. _`vps.net`: http://vps.net/ +.. _`VMware vSphere`: http://www.vmware.com/products/vsphere/ diff --git a/docs/compute/_supported_methods_image_management.rst b/docs/compute/_supported_methods_image_management.rst index 07343740cb..03d104b580 100644 --- a/docs/compute/_supported_methods_image_management.rst +++ b/docs/compute/_supported_methods_image_management.rst @@ -62,6 +62,7 @@ Provider list images get image create image delete `vCloud`_ yes no no no no `Voxel VoxCLOUD`_ yes no no no no `vps.net`_ yes no no no no +`VMware vSphere`_ no no no no no ===================================== =========== ========= ============ ============ ========== .. _`Abiquo`: http://www.abiquo.com/ @@ -125,3 +126,4 @@ Provider list images get image create image delete .. _`vCloud`: http://www.vmware.com/products/vcloud/ .. _`Voxel VoxCLOUD`: http://www.voxel.net/ .. _`vps.net`: http://vps.net/ +.. 
_`VMware vSphere`: http://www.vmware.com/products/vsphere/ diff --git a/docs/compute/_supported_methods_key_pair_management.rst b/docs/compute/_supported_methods_key_pair_management.rst index f474959c7e..8941a8b700 100644 --- a/docs/compute/_supported_methods_key_pair_management.rst +++ b/docs/compute/_supported_methods_key_pair_management.rst @@ -62,6 +62,7 @@ Provider list key pairs get key pair create key pai `vCloud`_ no no no no no no `Voxel VoxCLOUD`_ no no no no no no `vps.net`_ no no no no no no +`VMware vSphere`_ no no no no no no ===================================== ============== ============ =============== ============================= =========================== =============== .. _`Abiquo`: http://www.abiquo.com/ @@ -125,3 +126,4 @@ Provider list key pairs get key pair create key pai .. _`vCloud`: http://www.vmware.com/products/vcloud/ .. _`Voxel VoxCLOUD`: http://www.voxel.net/ .. _`vps.net`: http://vps.net/ +.. _`VMware vSphere`: http://www.vmware.com/products/vsphere/ diff --git a/docs/compute/_supported_methods_main.rst b/docs/compute/_supported_methods_main.rst index 5fab3493b0..216e1bb9fb 100644 --- a/docs/compute/_supported_methods_main.rst +++ b/docs/compute/_supported_methods_main.rst @@ -62,6 +62,7 @@ Provider list nodes create node reboot node destroy `vCloud`_ yes yes yes yes yes yes yes `Voxel VoxCLOUD`_ yes yes yes yes yes yes no `vps.net`_ yes yes yes yes yes yes no +`VMware vSphere`_ yes no yes yes no no no ===================================== ========== =========== =========== ============ =========== ========== =========== .. _`Abiquo`: http://www.abiquo.com/ @@ -125,3 +126,4 @@ Provider list nodes create node reboot node destroy .. _`vCloud`: http://www.vmware.com/products/vcloud/ .. _`Voxel VoxCLOUD`: http://www.voxel.net/ .. _`vps.net`: http://vps.net/ +.. 
_`VMware vSphere`: http://www.vmware.com/products/vsphere/ diff --git a/docs/compute/_supported_providers.rst b/docs/compute/_supported_providers.rst index 53c7099dc7..fa24076584 100644 --- a/docs/compute/_supported_providers.rst +++ b/docs/compute/_supported_providers.rst @@ -62,6 +62,7 @@ Provider Documentation `vCloud`_ :doc:`Click ` VCLOUD :mod:`libcloud.compute.drivers.vcloud` :class:`VCloudNodeDriver` `Voxel VoxCLOUD`_ VOXEL :mod:`libcloud.compute.drivers.voxel` :class:`VoxelNodeDriver` `vps.net`_ VPSNET :mod:`libcloud.compute.drivers.vpsnet` :class:`VPSNetNodeDriver` +`VMware vSphere`_ :doc:`Click ` VSPHERE :mod:`libcloud.compute.drivers.vsphere` :class:`VSphereNodeDriver` ===================================== ============================================ =================== ============================================== ==================================== .. _`Abiquo`: http://www.abiquo.com/ @@ -125,3 +126,4 @@ Provider Documentation .. _`vCloud`: http://www.vmware.com/products/vcloud/ .. _`Voxel VoxCLOUD`: http://www.voxel.net/ .. _`vps.net`: http://vps.net/ +.. _`VMware vSphere`: http://www.vmware.com/products/vsphere/ diff --git a/docs/compute/drivers/vsphere.rst b/docs/compute/drivers/vsphere.rst new file mode 100644 index 0000000000..9f423166db --- /dev/null +++ b/docs/compute/drivers/vsphere.rst @@ -0,0 +1,76 @@ +VMware vSphere Compute Driver Documentation +=========================================== + +`VMware vSphere`_ is VMware's cloud computing operating system which allows +you to run your own private cloud. + +.. figure:: /_static/images/provider_logos/vmware_vsphere.png + :align: center + :width: 200 + :target: http://www.vmware.com/products/vsphere/ + +Requirements +------------ + +VMware vSphere driver depends on the `pysphere`_ Python library which needs to +be installed for the driver to work. + +This library can be installed using pip as shown bellow: + +.. 
sourcecode:: bash + + pip install pysphere + +Connecting to vSphere installation +---------------------------------- + +To connect to the vSphere installation you need to pass the following arguments +to the driver constructor + +* ``host`` - hostname or IP address of your vSphere installation. Note: if your + installation is using or accessible via a different port, you should use the + ``url`` argument which is described bellow instead. +* ``url`` - full URL to your vSphere installation client endpoint - e.g. + ``https:///sdk/``. Note: This argument is mutually exclusive with + ``host`` argument which means you need to provide either ``host`` or ``url`` + argument, but not both. +* ``username`` - username used to log in +* ``password`` - password used to log in + +Examples +-------- + +1. Connect by specfiying a host +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. literalinclude:: /examples/compute/vsphere/connect_host.py + :language: python + +2. Connect by specifying a url +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. literalinclude:: /examples/compute/vsphere/connect_url.py + :language: python + +3. Connect by specifying a url (custom port) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. literalinclude:: /examples/compute/vsphere/connect_url_custom_port.py + :language: python + +Troubleshooting +--------------- + +How do I know if I'm connecting to the correct URL? +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +If you are connecting by provider ``url`` argument and you get the +``Response is "text", not "text/xml"`` or similar error back, this most likely +means you have specified an invalid URL (e.g. you forgot to specify a path). + +You can test if the url you are using is valid by adding ``/vimService.wsdl`` +to it (e.g. ``https:///sdk/vimService.wsdl``). When you visit this page, +you should get an XML response back. + +.. _`VMware vSphere`: http://www.vmware.com/products/vsphere/ +.. 
_`pysphere`: https://pypi.python.org/pypi/pysphere diff --git a/docs/examples/compute/vsphere/connect_host.py b/docs/examples/compute/vsphere/connect_host.py new file mode 100644 index 0000000000..da587b7187 --- /dev/null +++ b/docs/examples/compute/vsphere/connect_host.py @@ -0,0 +1,8 @@ +from libcloud.compute.types import Provider +from libcloud.compute.providers import get_driver + +cls = get_driver(Provider.VSPHERE) +driver = cls(host='192.168.1.100', + username='admin', password='admin') +print(driver.list_nodes()) +# ... diff --git a/docs/examples/compute/vsphere/connect_url.py b/docs/examples/compute/vsphere/connect_url.py new file mode 100644 index 0000000000..de48298de5 --- /dev/null +++ b/docs/examples/compute/vsphere/connect_url.py @@ -0,0 +1,8 @@ +from libcloud.compute.types import Provider +from libcloud.compute.providers import get_driver + +cls = get_driver(Provider.VSPHERE) +driver = cls(url='https://192.168.1.100/sdk/', + username='admin', password='admin') +print(driver.list_nodes()) +# ... diff --git a/docs/examples/compute/vsphere/connect_url_custom_port.py b/docs/examples/compute/vsphere/connect_url_custom_port.py new file mode 100644 index 0000000000..0f5a0dc83d --- /dev/null +++ b/docs/examples/compute/vsphere/connect_url_custom_port.py @@ -0,0 +1,8 @@ +from libcloud.compute.types import Provider +from libcloud.compute.providers import get_driver + +cls = get_driver(Provider.VSPHERE) +driver = cls(url='https://192.168.1.100:8080/sdk/', + username='admin', password='admin') +print(driver.list_nodes()) +# ... diff --git a/libcloud/compute/drivers/vsphere.py b/libcloud/compute/drivers/vsphere.py new file mode 100644 index 0000000000..aa9ddd515d --- /dev/null +++ b/libcloud/compute/drivers/vsphere.py @@ -0,0 +1,349 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +VMware vSphere driver supporting vSphere v5.5. + +Note: This driver requires pysphere package +(https://pypi.python.org/pypi/pysphere) which can be installed using pip. For +more information, please refer to the official documentation. +""" + +import os +import sys +import atexit + +try: + import pysphere + pysphere +except ImportError: + raise ImportError('Missing "pysphere" dependency. You can install it ' + 'using pip - pip install pysphere') + +from pysphere import VIServer +from pysphere.vi_task import VITask +from pysphere.resources import VimService_services as VI + +from libcloud.utils.decorators import wrap_non_libcloud_exceptions +from libcloud.common.base import ConnectionUserAndKey +from libcloud.common.types import LibcloudError +from libcloud.common.types import InvalidCredsError +from libcloud.compute.base import NodeDriver +from libcloud.compute.base import NodeLocation +from libcloud.compute.base import Node +from libcloud.compute.types import NodeState, Provider +from libcloud.utils.networking import is_public_subnet + +__all__ = [ + 'VSphereNodeDriver', + 'VSphere_5_5_NodeDriver' +] + +DEFAULT_API_VERSION = '5.5' +DEFAULT_CONNECTION_TIMEOUT = 5 # default connection timeout in seconds + + +class VSphereConnection(ConnectionUserAndKey): + def __init__(self, user_id, key, secure=True, + host=None, port=None, url=None, timeout=None): + if host and url: + raise ValueError('host 
and url arguments are mutally exclusive') + + if host: + host_or_url = host + elif url: + host_or_url = url + else: + raise ValueError('Either "host" or "url" argument must be ' + 'provided') + + self.host_or_url = host_or_url + self.client = None + super(VSphereConnection, self).__init__(user_id=user_id, + key=key, secure=secure, + host=host, port=port, + url=url, timeout=timeout) + + def connect(self): + self.client = VIServer() + + trace_file = os.environ.get('LIBCLOUD_DEBUG', None) + + try: + self.client.connect(host=self.host_or_url, user=self.user_id, + password=self.key, + sock_timeout=DEFAULT_CONNECTION_TIMEOUT, + trace_file=trace_file) + except Exception: + e = sys.exc_info()[1] + message = e.message + fault = getattr(e, 'fault', None) + + if fault == 'InvalidLoginFault': + raise InvalidCredsError(message) + + raise LibcloudError(value=message, driver=self.driver) + + atexit.register(self.disconnect) + + def disconnect(self): + if not self.client: + return + + try: + self.client.disconnect() + except Exception: + # Ignore all the disconnect errors + pass + + def run_client_method(self, method_name, **method_kwargs): + method = getattr(self.client, method_name, None) + return method(**method_kwargs) + + +class VSphereNodeDriver(NodeDriver): + name = 'VMware vSphere' + website = 'http://www.vmware.com/products/vsphere/' + type = Provider.VSPHERE + connectionCls = VSphereConnection + + NODE_STATE_MAP = { + 'POWERED ON': NodeState.RUNNING, + 'POWERED OFF': NodeState.STOPPED, + 'SUSPENDED': NodeState.SUSPENDED, + 'POWERING ON': NodeState.PENDING, + 'POWERING OFF': NodeState.PENDING, + 'SUSPENDING': NodeState.PENDING, + 'RESETTING': NodeState.PENDING, + 'BLOCKED ON MSG': NodeState.ERROR, + 'REVERTING TO SNAPSHOT': NodeState.PENDING + } + + def __new__(cls, username, password, secure=True, host=None, port=None, + url=None, api_version=DEFAULT_API_VERSION, **kwargs): + if cls is VSphereNodeDriver: + if api_version == '5.5': + cls = VSphere_5_5_NodeDriver + else: + 
raise NotImplementedError('Unsupported API version: %s' % + (api_version)) + return super(VSphereNodeDriver, cls).__new__(cls) + + def __init__(self, username, password, secure=True, + host=None, port=None, url=None, timeout=None): + self.url = url + super(VSphereNodeDriver, self).__init__(key=username, secret=password, + secure=secure, host=host, + port=port, url=url) + + @wrap_non_libcloud_exceptions + def list_locations(self): + """ + List available locations. + + In vSphere case, a location represents a datacenter. + """ + datacenters = self.connection.client.get_datacenters() + + locations = [] + for id, name in datacenters.items(): + location = NodeLocation(id=id, name=name, country=None, + driver=self) + locations.append(location) + + return locations + + @wrap_non_libcloud_exceptions + def list_nodes(self): + vm_paths = self.connection.client.get_registered_vms() + nodes = self._to_nodes(vm_paths=vm_paths) + + return nodes + + @wrap_non_libcloud_exceptions + def reboot_node(self, node): + vm = self._get_vm_for_node(node=node) + vm.reset() + + return True + + @wrap_non_libcloud_exceptions + def destroy_node(self, node, ex_remove_files=True): + """ + :param ex_remove_files: Remove all the files from the datastore. 
+ :type ex_remove_files: ``bool`` + """ + ex_remove_files = False + vm = self._get_vm_for_node(node=node) + + server = self.connection.client + + # Based on code from + # https://pypi.python.org/pypi/pyxenter + if ex_remove_files: + request = VI.Destroy_TaskRequestMsg() + + _this = request.new__this(vm._mor) + _this.set_attribute_type(vm._mor.get_attribute_type()) + request.set_element__this(_this) + ret = server._proxy.Destroy_Task(request)._returnval + task = VITask(ret, server) + + # Wait for the task to finish + status = task.wait_for_state([task.STATE_SUCCESS, + task.STATE_ERROR]) + + if status == task.STATE_ERROR: + raise LibcloudError('Error destroying node: %s' % + (task.get_error_message())) + else: + request = VI.UnregisterVMRequestMsg() + + _this = request.new__this(vm._mor) + _this.set_attribute_type(vm._mor.get_attribute_type()) + request.set_element__this(_this) + ret = server._proxy.UnregisterVM(request) + task = VITask(ret, server) + + return True + + @wrap_non_libcloud_exceptions + def ex_stop_node(self, node): + vm = self._get_vm_for_node(node=node) + vm.power_off() + + return True + + @wrap_non_libcloud_exceptions + def ex_start_node(self, node): + vm = self._get_vm_for_node(node=node) + vm.power_on() + + return True + + @wrap_non_libcloud_exceptions + def ex_suspend_node(self, node): + vm = self._get_vm_for_node(node=node) + vm.suspend() + + return True + + @wrap_non_libcloud_exceptions + def ex_get_node_by_path(self, path): + """ + Retrieve Node object for a VM with a provided path. + + :type path: ``str`` + :rtype: :class:`Node` + """ + node = self._to_node(vm_path=path) + return node + + @wrap_non_libcloud_exceptions + def ex_get_server_type(self): + """ + Return VMware installation type. + + :rtype: ``str`` + """ + return self.connection.client.get_server_type() + + @wrap_non_libcloud_exceptions + def ex_get_api_version(self): + """ + Return API version of the vmware provider. 
+ + :rtype: ``str`` + """ + return self.connection.client.get_api_version() + + def _to_nodes(self, vm_paths): + nodes = [] + for vm_path in vm_paths: + node = self._to_node(vm_path=vm_path) + nodes.append(node) + + return nodes + + def _to_node(self, vm_path): + vm = self.connection.client.get_vm_by_path(vm_path) + + properties = vm.get_properties() + status = vm.get_status() + + id = properties['path'] + name = properties['name'] + public_ips = [] + private_ips = [] + + state = self.NODE_STATE_MAP.get(status, NodeState.UNKNOWN) + ip_address = properties.get('ip_address', None) + net = properties.get('net', []) + + extra = { + 'path': properties['path'], + 'hostname': properties.get('hostname', None), + 'guest_id': properties['guest_id'], + 'devices': properties.get('devices', {}), + 'disks': properties.get('disks', []), + 'net': net + } + + # Add primary IP + if ip_address: + if is_public_subnet(ip_address): + public_ips.append(ip_address) + else: + private_ips.append(ip_address) + + # Add other IP addresses + for nic in net: + ip_addresses = nic['ip_addresses'] + for ip_address in ip_addresses: + try: + is_public = is_public_subnet(ip_address) + except Exception: + # TODO: Better support for IPv6 + is_public = False + + if is_public: + public_ips.append(ip_address) + else: + private_ips.append(ip_address) + + # Remove duplicate IPs + public_ips = list(set(public_ips)) + private_ips = list(set(private_ips)) + + node = Node(id=id, name=name, state=state, public_ips=public_ips, + private_ips=private_ips, driver=self, extra=extra) + return node + + def _get_vm_for_node(self, node): + vm_path = node.id + vm = self.connection.client.get_vm_by_path(vm_path) + + return vm + + def _ex_connection_class_kwargs(self): + kwargs = { + 'url': self.url + } + + return kwargs + + +class VSphere_5_5_NodeDriver(VSphereNodeDriver): + name = 'VMware vSphere v5.5' diff --git a/libcloud/compute/providers.py b/libcloud/compute/providers.py index 87b3f321d0..5c19f61e50 100644 --- 
a/libcloud/compute/providers.py +++ b/libcloud/compute/providers.py @@ -147,6 +147,8 @@ ('libcloud.compute.drivers.ec2', 'OutscaleSASNodeDriver'), Provider.OUTSCALE_INC: ('libcloud.compute.drivers.ec2', 'OutscaleINCNodeDriver'), + Provider.VSPHERE: + ('libcloud.compute.drivers.vsphere', 'VSphereNodeDriver'), # Deprecated Provider.CLOUDSIGMA_US: diff --git a/libcloud/compute/types.py b/libcloud/compute/types.py index 602d8e849b..e2961726db 100644 --- a/libcloud/compute/types.py +++ b/libcloud/compute/types.py @@ -121,6 +121,7 @@ class Provider(object): IKOULA = 'ikoula' OUTSCALE_SAS = 'outscale_sas' OUTSCALE_INC = 'outscale_inc' + VSPHERE = 'vsphere' # OpenStack based providers HPCLOUD = 'hpcloud' diff --git a/libcloud/utils/decorators.py b/libcloud/utils/decorators.py new file mode 100644 index 0000000000..3c395118de --- /dev/null +++ b/libcloud/utils/decorators.py @@ -0,0 +1,57 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import sys +from functools import wraps + +from libcloud.common.types import LibcloudError + +__all__ = [ + 'wrap_non_libcloud_exceptions' +] + + +def wrap_non_libcloud_exceptions(func): + """ + Decorators function which catches non LibcloudError exceptions, wraps them + in LibcloudError class and re-throws the wrapped exception. + + Note: This function should only be used to wrap methods on the driver + classes. + """ + @wraps(func) + def decorated_function(*args, **kwargs): + try: + return func(*args, **kwargs) + except Exception: + e = sys.exc_info()[1] + + if isinstance(e, LibcloudError): + raise e + + if len(args) >= 1: + driver = args[0] + else: + driver = None + + fault = getattr(e, 'fault', None) + + if fault and getattr(fault, 'string', None): + message = fault.string + else: + message = e.message + + raise LibcloudError(value=message, driver=driver) + return decorated_function From 4e741049ac418c7d6a32facf7c8d5439149fc97e Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Tue, 29 Jul 2014 16:27:16 +0200 Subject: [PATCH 122/315] Add ex_get_resource_pool_name and ex_get_resource_pools method to the vSphere driver. --- libcloud/compute/drivers/vsphere.py | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/libcloud/compute/drivers/vsphere.py b/libcloud/compute/drivers/vsphere.py index aa9ddd515d..29e863e038 100644 --- a/libcloud/compute/drivers/vsphere.py +++ b/libcloud/compute/drivers/vsphere.py @@ -240,6 +240,26 @@ def ex_suspend_node(self, node): return True + @wrap_non_libcloud_exceptions + def ex_get_resource_pools(self): + """ + Return all the available resource pools. + + :rtype: ``dict`` + """ + result = self.connection.client.get_resource_pools() + return result + + @wrap_non_libcloud_exceptions + def ex_get_resource_pool_name(self, node): + """ + Retrieve resource pool name for the provided node. 
+ + :rtype: ``str`` + """ + vm = self._get_vm_for_node(node=node) + return vm.get_resource_pool_name() + @wrap_non_libcloud_exceptions def ex_get_node_by_path(self, path): """ From 9af583c98cfe1de4c1fb89dc520a4558440e0f5b Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Tue, 29 Jul 2014 19:57:32 +0200 Subject: [PATCH 123/315] Update vSphere driver to use VM UUID as Node id instead of using VM's path. --- libcloud/compute/drivers/vsphere.py | 77 +++++++++++++++++++++++++---- 1 file changed, 68 insertions(+), 9 deletions(-) diff --git a/libcloud/compute/drivers/vsphere.py b/libcloud/compute/drivers/vsphere.py index 29e863e038..9db15cb089 100644 --- a/libcloud/compute/drivers/vsphere.py +++ b/libcloud/compute/drivers/vsphere.py @@ -34,7 +34,9 @@ from pysphere import VIServer from pysphere.vi_task import VITask +from pysphere.vi_mor import VIMor, MORTypes from pysphere.resources import VimService_services as VI +from pysphere.vi_virtual_machine import VIVirtualMachine from libcloud.utils.decorators import wrap_non_libcloud_exceptions from libcloud.common.base import ConnectionUserAndKey @@ -268,7 +270,18 @@ def ex_get_node_by_path(self, path): :type path: ``str`` :rtype: :class:`Node` """ - node = self._to_node(vm_path=path) + vm = self.connection.client.get_vm_by_path(path) + node = self._to_node(vm=vm) + return node + + def ex_get_node_by_uuid(self, uuid): + """ + Retrieve Node object for a VM with a provided uuid. + + :type uuid: ``str`` + """ + vm = self._get_vm_for_uuid(uuid=uuid) + node = self._to_node(vm=vm) return node @wrap_non_libcloud_exceptions @@ -289,21 +302,64 @@ def ex_get_api_version(self): """ return self.connection.client.get_api_version() + def _get_vm_for_uuid(self, uuid, datacenter=None): + """ + Retrieve VM for the provided UUID. 
+ + :type uuid: ``str`` + """ + server = self.connection.client + + dc_list = [] + if datacenter and VIMor.is_mor(datacenter): + dc_list.append(datacenter) + else: + dc = server.get_datacenters() + if datacenter: + dc_list = [k for k, v in dc.iteritems() if v == datacenter] + else: + dc_list = list(dc.iterkeys()) + + for mor_dc in dc_list: + request = VI.FindByUuidRequestMsg() + search_index = server._do_service_content.SearchIndex + mor_search_index = request.new__this(search_index) + mor_search_index.set_attribute_type(MORTypes.SearchIndex) + request.set_element__this(mor_search_index) + + mor_datacenter = request.new_datacenter(mor_dc) + mor_datacenter.set_attribute_type(MORTypes.Datacenter) + request.set_element_datacenter(mor_datacenter) + + request.set_element_vmSearch(True) + request.set_element_uuid(uuid) + + try: + vm = server._proxy.FindByUuid(request)._returnval + except VI.ZSI.FaultException: + pass + else: + if vm: + return VIVirtualMachine(server, vm) + + return None + def _to_nodes(self, vm_paths): nodes = [] for vm_path in vm_paths: - node = self._to_node(vm_path=vm_path) + vm = self.connection.client.get_vm_by_path(vm_path) + node = self._to_node(vm=vm) nodes.append(node) return nodes - def _to_node(self, vm_path): - vm = self.connection.client.get_vm_by_path(vm_path) + def _to_node(self, vm): + assert(isinstance(vm, VIVirtualMachine)) properties = vm.get_properties() status = vm.get_status() - id = properties['path'] + id = vm.properties.config.uuid name = properties['name'] public_ips = [] private_ips = [] @@ -311,14 +367,18 @@ def _to_node(self, vm_path): state = self.NODE_STATE_MAP.get(status, NodeState.UNKNOWN) ip_address = properties.get('ip_address', None) net = properties.get('net', []) + resource_pool_id = vm.properties.resourcePool._obj extra = { + 'uuid': id, 'path': properties['path'], 'hostname': properties.get('hostname', None), 'guest_id': properties['guest_id'], 'devices': properties.get('devices', {}), 'disks': 
properties.get('disks', []), - 'net': net + 'net': net, + + 'resource_pool_id': resource_pool_id } # Add primary IP @@ -352,9 +412,8 @@ def _to_node(self, vm_path): return node def _get_vm_for_node(self, node): - vm_path = node.id - vm = self.connection.client.get_vm_by_path(vm_path) - + uuid = node.id + vm = self._get_vm_for_uuid(uuid=uuid) return vm def _ex_connection_class_kwargs(self): From ff05bdbaecf6e7fc129b55dcebf6dd93e70ebcd0 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Tue, 29 Jul 2014 22:52:40 +0200 Subject: [PATCH 124/315] Include more attributes in the extra dict of vSphere Node object. --- libcloud/compute/drivers/vsphere.py | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/libcloud/compute/drivers/vsphere.py b/libcloud/compute/drivers/vsphere.py index 9db15cb089..31086e5707 100644 --- a/libcloud/compute/drivers/vsphere.py +++ b/libcloud/compute/drivers/vsphere.py @@ -359,7 +359,10 @@ def _to_node(self, vm): properties = vm.get_properties() status = vm.get_status() - id = vm.properties.config.uuid + uuid = vm.properties.config.uuid + instance_uuid = vm.properties.config.instanceUuid + + id = uuid name = properties['name'] public_ips = [] private_ips = [] @@ -369,16 +372,27 @@ def _to_node(self, vm): net = properties.get('net', []) resource_pool_id = vm.properties.resourcePool._obj + try: + operating_system = vm.properties.summary.guest.guestFullName, + except Exception: + operating_system = 'unknown' + extra = { - 'uuid': id, + 'uuid': uuid, + 'instance_uuid': instance_uuid, 'path': properties['path'], + 'resource_pool_id': resource_pool_id, 'hostname': properties.get('hostname', None), 'guest_id': properties['guest_id'], 'devices': properties.get('devices', {}), 'disks': properties.get('disks', []), 'net': net, - 'resource_pool_id': resource_pool_id + 'overall_status': vm.properties.overallStatus, + 'operating_system': operating_system, + + 'cpus': vm.properties.config.hardware.numCPU, + 'memory_mb': 
vm.properties.config.hardware.memoryMB } # Add primary IP From eec3cc2b0edf93e547739534869f4f4c23244e4e Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Wed, 30 Jul 2014 23:16:11 +0200 Subject: [PATCH 125/315] Fix an issue with LIBCLOUD_DEBUG not working correctly with the Linode driver. Closes #342 --- CHANGES.rst | 5 +++++ libcloud/common/linode.py | 12 ++++++++++-- 2 files changed, 15 insertions(+), 2 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index 5282f0b703..6de450b376 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -7,6 +7,11 @@ Changes with Apache Libcloud in development Compute ~~~~~~~ +- Fix an issue with ``LIBCLOUD_DEBUG`` not working correctly with the + Linode driver. + [Tomaz Muraus, Juan Carlos Moreno] + (LIBCLOUD-598, GITHUB-342) + - Add new driver for VMware vSphere (http://www.vmware.com/products/vsphere/) based clouds. [Tomaz Muraus] diff --git a/libcloud/common/linode.py b/libcloud/common/linode.py index f7ee22bdd2..6ee9cc6c8d 100644 --- a/libcloud/common/linode.py +++ b/libcloud/common/linode.py @@ -87,8 +87,16 @@ def __init__(self, response, connection): self.error = response.reason self.status = response.status - self.body = self._decompress_response(body=response.read(), - headers=self.headers) + # This attribute is set when using LoggingConnection. + original_data = getattr(response, '_original_data', None) + + if original_data: + # LoggingConnection already decompresses data so it can log it + # which means we don't need to decompress it here. + self.body = response._original_data + else: + self.body = self._decompress_response(body=response.read(), + headers=self.headers) if PY3: self.body = b(self.body).decode('utf-8') From abdde154eb981e70d96219a7c1f1b70da8b64464 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Thu, 31 Jul 2014 00:05:24 +0200 Subject: [PATCH 126/315] docs: Update committer guide. 
--- docs/committer_guide.rst | 31 ++++++++++++++++++++----------- 1 file changed, 20 insertions(+), 11 deletions(-) diff --git a/docs/committer_guide.rst b/docs/committer_guide.rst index 5f49c56d34..81bd8156d3 100644 --- a/docs/committer_guide.rst +++ b/docs/committer_guide.rst @@ -86,7 +86,16 @@ preparing a release. * Remove the ``tox`` directory with ``rm -rf .tox`` * Remove the _secrets_ file with ``rm test/secrets.py`` -2. Creating release artifacts +2. Update JIRA +~~~~~~~~~~~~~~ + +* Create a new JIRA version for the release in question (if one doesn't exist + yet) +* Close all the corresponding JIRA tickets and set ``Fix Version/s`` field + to the current version +* Release the JIRA version + +3. Creating release artifacts ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We have a script that runs the required setup.py commands and then hashes @@ -107,7 +116,7 @@ are suitable to be uploaded for a release. Copy the artifacts in another directory, unpack one of them and test it with ``tox``. -3. Tagging a release +4. Tagging a release ~~~~~~~~~~~~~~~~~~~~ Tag the tentative release with a ``-tentative`` postfix. @@ -121,8 +130,8 @@ For example: .. sourcecode:: bash git tag v0.15.0-tentative 105b9610835f99704996d861d613c5a9a8b3f8b1 - -4. Upload the release artifacts and start a [VOTE] thread + +5. Upload the release artifacts and start a [VOTE] thread ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Upload all release artifacts including the ``whl`` files to your people.apache.org @@ -131,7 +140,7 @@ space. Then start a [VOTE] thread on the dev@libcloud.apache.org mailing list. Once the vote has passed tag the release with a new tag, removing the ``-tentative`` postfix. Upload the release artifacts to Apache servers and Pypi. -5. Uploading release artifacts to Apache servers +6. 
Uploading release artifacts to Apache servers ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * Add release artifacts to the dist SVN repository at @@ -146,7 +155,7 @@ Upload the release artifacts to Apache servers and Pypi. are automatically archived and available at https://dist.apache.org/repos/dist/release/libcloud/. -6. Publishing package to PyPi +7. Publishing package to PyPi ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ **For consistency and security reasons packages are always uploaded to PyPi @@ -172,7 +181,7 @@ screenshot below. :width: 700px :align: center -7. Verifying the release artifact check sums +8. Verifying the release artifact check sums ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To verify that nothing went wrong doing the release process, run the @@ -193,14 +202,14 @@ For example ./dist/verify_checksums.sh apache-libcloud-0.13.2 -8. Updating doap_libcloud.rdf file +9. Updating doap_libcloud.rdf file ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Add information about the new release to the ``doap_libcloud.rdf`` file in the root of the main code repository. -9. Updating website -~~~~~~~~~~~~~~~~~~~ +10. Updating website +~~~~~~~~~~~~~~~~~~~~ Check out the website using SVN: ``svn co https://svn.apache.org/repos/asf/libcloud/site/trunk`` @@ -210,7 +219,7 @@ Check out the website using SVN: ``svn co https://svn.apache.org/repos/asf/libcl Build the site locally and make sure everything is correct. Check the ``README.md`` file. -10. Sending announcements +11. Sending announcements ~~~~~~~~~~~~~~~~~~~~~~~~~ * Send a release announcement to {dev,users}@libcloud.apache.org. If it's a From 106846bf67ce8ccb04befdeaf90408b425852c18 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Thu, 31 Jul 2014 00:29:44 +0200 Subject: [PATCH 127/315] Add ex_clone_node method to vSphere driver. 
--- libcloud/compute/drivers/vsphere.py | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/libcloud/compute/drivers/vsphere.py b/libcloud/compute/drivers/vsphere.py index 31086e5707..da40ba90ab 100644 --- a/libcloud/compute/drivers/vsphere.py +++ b/libcloud/compute/drivers/vsphere.py @@ -174,6 +174,33 @@ def list_nodes(self): return nodes + @wrap_non_libcloud_exceptions + def ex_clone_node(self, node, name, power_on=True, template=False): + """ + Clone the provided node. + + :param node: Node to clone. + :type node: :class:`Node` + + :param name: Name of the new node. + :type name: ``str`` + + :param power_on: Power the new node on after being created. + :type power_on: ``bool`` + + :param template: Specifies whether or not the new virtual machine + should be marked as a template. + :type template: ``bool`` + + :return: New node. + :rtype: :class:`Node` + """ + vm = self._get_vm_for_node(node=node) + new_vm = vm.clone(name=name, power_on=power_on, template=template) + new_node = self._to_node(vm=new_vm) + + return new_node + @wrap_non_libcloud_exceptions def reboot_node(self, node): vm = self._get_vm_for_node(node=node) From a8346b056ca8930f681880204775fc486be6eca0 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Thu, 31 Jul 2014 12:58:05 +0200 Subject: [PATCH 128/315] Add ex_migrate_node method to the vSphere driver. 
--- libcloud/compute/drivers/vsphere.py | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/libcloud/compute/drivers/vsphere.py b/libcloud/compute/drivers/vsphere.py index da40ba90ab..abf449f1d9 100644 --- a/libcloud/compute/drivers/vsphere.py +++ b/libcloud/compute/drivers/vsphere.py @@ -174,6 +174,7 @@ def list_nodes(self): return nodes + @wrap_non_libcloud_exceptions @wrap_non_libcloud_exceptions def ex_clone_node(self, node, name, power_on=True, template=False): """ @@ -201,6 +202,34 @@ def ex_clone_node(self, node, name, power_on=True, template=False): return new_node + @wrap_non_libcloud_exceptions + def ex_migrate_node(self, node, resource_pool=None, host=None, + priority='default'): + """ + Migrate provided node to a new host or resource pool. + + :param node: Node to clone. + :type node: :class:`Node` + + :param resource_pool: ID of the target resource pool to migrate the + node into. + :type resource_pool: ``str`` + + :param host: Target host to migrate the host to. + :type host: ``str`` + + :param priority: Migration task priority. Possible values: default, + high, low. + :type priority: ``str`` + + :return: True on success. + :rtype: ``bool`` + """ + vm = self._get_vm_for_node(node=node) + vm.migrate(priority=priority, resource_pool=resource_pool, host=host) + + return True + @wrap_non_libcloud_exceptions def reboot_node(self, node): vm = self._get_vm_for_node(node=node) From 52e01d5ba72dc8786310dc00eb16e57bd1fdeed5 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Thu, 31 Jul 2014 13:01:17 +0200 Subject: [PATCH 129/315] Update docstrings. 
--- libcloud/compute/drivers/vsphere.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/libcloud/compute/drivers/vsphere.py b/libcloud/compute/drivers/vsphere.py index abf449f1d9..65079596f8 100644 --- a/libcloud/compute/drivers/vsphere.py +++ b/libcloud/compute/drivers/vsphere.py @@ -181,7 +181,7 @@ def ex_clone_node(self, node, name, power_on=True, template=False): Clone the provided node. :param node: Node to clone. - :type node: :class:`Node` + :type node: :class:`libcloud.compute.base.Node` :param name: Name of the new node. :type name: ``str`` @@ -194,7 +194,7 @@ def ex_clone_node(self, node, name, power_on=True, template=False): :type template: ``bool`` :return: New node. - :rtype: :class:`Node` + :rtype: :class:`libcloud.compute.base.Node` """ vm = self._get_vm_for_node(node=node) new_vm = vm.clone(name=name, power_on=power_on, template=template) @@ -209,7 +209,7 @@ def ex_migrate_node(self, node, resource_pool=None, host=None, Migrate provided node to a new host or resource pool. :param node: Node to clone. - :type node: :class:`Node` + :type node: :class:`libcloud.compute.base.Node` :param resource_pool: ID of the target resource pool to migrate the node into. @@ -324,7 +324,7 @@ def ex_get_node_by_path(self, path): Retrieve Node object for a VM with a provided path. :type path: ``str`` - :rtype: :class:`Node` + :rtype: :class:`libcloud.compute.base.Node` """ vm = self.connection.client.get_vm_by_path(path) node = self._to_node(vm=vm) From 081f26543d4ba60c206cfe2be713ccd68d752d16 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Thu, 31 Jul 2014 13:01:26 +0200 Subject: [PATCH 130/315] Update vSphere docs. 
--- docs/compute/drivers/vsphere.rst | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/docs/compute/drivers/vsphere.rst b/docs/compute/drivers/vsphere.rst index 9f423166db..56013c88d4 100644 --- a/docs/compute/drivers/vsphere.rst +++ b/docs/compute/drivers/vsphere.rst @@ -72,5 +72,15 @@ You can test if the url you are using is valid by adding ``/vimService.wsdl`` to it (e.g. ``https:///sdk/vimService.wsdl``). When you visit this page, you should get an XML response back. +API Docs +-------- + +VMware vSphere v5.5 +~~~~~~~~~~~~~~~~~~~ + +.. autoclass:: libcloud.compute.drivers.vsphere.VSphere_5_5_NodeDriver + :members: + :inherited-members: + .. _`VMware vSphere`: http://www.vmware.com/products/vsphere/ .. _`pysphere`: https://pypi.python.org/pypi/pysphere From 9f0178984209b573b3c7281384c1467fd9354a5f Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Thu, 31 Jul 2014 13:09:12 +0200 Subject: [PATCH 131/315] Update tox docs target dependencies. --- tox.ini | 1 + 1 file changed, 1 insertion(+) diff --git a/tox.ini b/tox.ini index 7dee77e58e..0e13e6116e 100644 --- a/tox.ini +++ b/tox.ini @@ -46,6 +46,7 @@ deps = mock [testenv:docs] deps = sphinx + pysphere basepython = python2.7 changedir = docs commands = python ../contrib/generate_provider_feature_matrix_table.py From a0150609d13f13a14bb3a5bfd029e581e6a0ae31 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Thu, 31 Jul 2014 13:24:35 +0200 Subject: [PATCH 132/315] Add more extension methods to the vSphere driver. 
--- libcloud/compute/drivers/vsphere.py | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/libcloud/compute/drivers/vsphere.py b/libcloud/compute/drivers/vsphere.py index 65079596f8..c862b8a311 100644 --- a/libcloud/compute/drivers/vsphere.py +++ b/libcloud/compute/drivers/vsphere.py @@ -318,6 +318,26 @@ def ex_get_resource_pool_name(self, node): vm = self._get_vm_for_node(node=node) return vm.get_resource_pool_name() + @wrap_non_libcloud_exceptions + def ex_get_hosts(self): + """ + Retrurn all the available hosts. + + :rtype: ``dict`` + """ + result = self.connection.client.get_hosts() + return result + + @wrap_non_libcloud_exceptions + def ex_get_datastores(self): + """ + Return all the available datastores. + + :rtype: ``dict`` + """ + result = self.connection.client.get_datastores() + return result + @wrap_non_libcloud_exceptions def ex_get_node_by_path(self, path): """ From 48f0eebc5cd01399937ea439677d252ba20a9f97 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Thu, 31 Jul 2014 13:36:35 +0200 Subject: [PATCH 133/315] Add list_images method to the vSphere driver. --- libcloud/compute/drivers/vsphere.py | 34 +++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/libcloud/compute/drivers/vsphere.py b/libcloud/compute/drivers/vsphere.py index c862b8a311..ffa84f6833 100644 --- a/libcloud/compute/drivers/vsphere.py +++ b/libcloud/compute/drivers/vsphere.py @@ -44,6 +44,7 @@ from libcloud.common.types import InvalidCredsError from libcloud.compute.base import NodeDriver from libcloud.compute.base import NodeLocation +from libcloud.compute.base import NodeImage from libcloud.compute.base import Node from libcloud.compute.types import NodeState, Provider from libcloud.utils.networking import is_public_subnet @@ -167,6 +168,39 @@ def list_locations(self): return locations + @wrap_non_libcloud_exceptions + def list_images(self): + """ + List available images (templates). 
+ """ + server = self.connection.client + + names = ['name', 'config.uuid', 'config.template'] + properties = server._retrieve_properties_traversal( + property_names=names, + from_node=None, + obj_type=MORTypes.VirtualMachine) + + images = [] + for prop in properties: + id = None + name = None + is_template = False + + for item in prop.PropSet: + if item.Name == 'config.uuid': + id = item.Val + if item.Name == 'name': + name = item.Val + elif item.Name == 'config.template': + is_template = item.Val + + if is_template: + image = NodeImage(id=id, name=name, driver=self) + images.append(image) + + return images + @wrap_non_libcloud_exceptions def list_nodes(self): vm_paths = self.connection.client.get_registered_vms() From 63b8045b0fd7ad7bfe0569b7344eee846c262758 Mon Sep 17 00:00:00 2001 From: Carlos Valiente Date: Sat, 2 Aug 2014 15:23:49 +0100 Subject: [PATCH 134/315] Fix typo Closes #344 Signed-off-by: Tomaz Muraus --- docs/testing.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/testing.rst b/docs/testing.rst index 3cef350818..f8a18854f2 100644 --- a/docs/testing.rst +++ b/docs/testing.rst @@ -19,7 +19,7 @@ dependencies installed: use tox to run the tests with all the supported Python versions * ``mock`` (``pip install mock``) * ``lockfile`` (``pip install lockfile``) - only used in the local storage - storage driver + driver * ``coverage`` (``pip install coverage``) - you only need this library if you want to generate a test coverage report From 7132cb2234f4e90664e2808f6ddd16fa23bcdb8c Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Mon, 4 Aug 2014 18:56:40 +0200 Subject: [PATCH 135/315] Add utility get_regions and get_service_names function to the OpenStackServiceCatalog class. 
--- CHANGES.rst | 4 +++ libcloud/common/openstack.py | 44 +++++++++++++++++++++++++ libcloud/test/compute/test_openstack.py | 32 ++++++++++++++---- 3 files changed, 74 insertions(+), 6 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index 6de450b376..92980a83f2 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -33,6 +33,10 @@ Compute (GITHUB-339) [Eric Johnson] +- Add utility ``get_regions`` and ``get_service_names`` methods to the + ``OpenStackServiceCatalog`` class. + [Andrew Mann, Tomaz Muraus] + Loadbalancer ~~~~~~~~~~~~ diff --git a/libcloud/common/openstack.py b/libcloud/common/openstack.py index 26bf6920b5..73bd27e082 100644 --- a/libcloud/common/openstack.py +++ b/libcloud/common/openstack.py @@ -378,6 +378,50 @@ def get_endpoint(self, service_type=None, name=None, region=None): else: return {} + def get_regions(self): + """ + Retrieve a list of all the available regions. + + :rtype: ``list`` of ``str`` + """ + regions = set() + + catalog_items = self._service_catalog.items() + + if '2.0' in self._auth_version: + for service_type, services_by_name in catalog_items: + items = services_by_name.items() + for service_name, endpoints_by_region in items: + for region in endpoints_by_region.keys(): + if region: + regions.add(region) + elif ('1.1' in self._auth_version) or ('1.0' in self._auth_version): + for service_name, endpoints_by_region in catalog_items: + for region in endpoints_by_region.keys(): + if region: + regions.add(region) + + return list(regions) + + def get_service_names(self, service_type, region=None): + """ + Retrieve list of service names that match service type and region + + :rtype: ``list`` of ``str`` + """ + names = set() + + if '2.0' in self._auth_version: + named_entries = self._service_catalog.get(service_type, {}) + for (name, region_entries) in named_entries.items(): + # Support None for region to return the first found + if region is None or region in region_entries.keys(): + names.add(name) + else: + raise ValueError('Unsupported 
version: %s' % (self._auth_version)) + + return list(names) + def _parse_auth_v1(self, service_catalog): for service, endpoints in service_catalog.items(): diff --git a/libcloud/test/compute/test_openstack.py b/libcloud/test/compute/test_openstack.py index 6f5af3c3cd..e0971282d4 100644 --- a/libcloud/test/compute/test_openstack.py +++ b/libcloud/test/compute/test_openstack.py @@ -92,26 +92,46 @@ def setUp(self): OpenStackBaseConnection.conn_classes = (OpenStackMockHttp, OpenStackMockHttp) - def test_connection_get_service_catalog(self): connection = OpenStackBaseConnection(*OPENSTACK_PARAMS) connection.auth_url = "https://auth.api.example.com" connection._ex_force_base_url = "https://www.foo.com" connection.driver = OpenStack_1_0_NodeDriver(*OPENSTACK_PARAMS) - result = connection.get_service_catalog() - catalog = result.get_catalog() - endpoints = result.get_endpoints('cloudFilesCDN', 'cloudFilesCDN') - public_urls = result.get_public_urls('cloudFilesCDN', 'cloudFilesCDN') + self.service_catalog = connection.get_service_catalog() + self.catalog = self.service_catalog.get_catalog() + + def test_connection_get_service_catalog(self): + endpoints = self.service_catalog.get_endpoints('cloudFilesCDN', 'cloudFilesCDN') + public_urls = self.service_catalog.get_public_urls('cloudFilesCDN', 'cloudFilesCDN') expected_urls = [ 'https://cdn2.clouddrive.com/v1/MossoCloudFS', 'https://cdn2.clouddrive.com/v1/MossoCloudFS' ] - self.assertTrue('cloudFilesCDN' in catalog) + self.assertTrue('cloudFilesCDN' in self.catalog) self.assertEqual(len(endpoints), 2) self.assertEqual(public_urls, expected_urls) + def test_get_regions(self): + regions = self.service_catalog.get_regions() + self.assertEqual(regions, ['ORD', 'LON']) + + def test_get_service_names(self): + OpenStackBaseConnection.conn_classes = (OpenStack_2_0_MockHttp, + OpenStack_2_0_MockHttp) + OpenStackBaseConnection._auth_version = '2.0' + + connection = OpenStackBaseConnection(*OPENSTACK_PARAMS) + connection.auth_url = 
"https://auth.api.example.com" + connection._ex_force_base_url = "https://www.foo.com" + connection.driver = OpenStack_1_0_NodeDriver(*OPENSTACK_PARAMS) + + service_catalog = connection.get_service_catalog() + + service_names = service_catalog.get_service_names(service_type='object-store') + self.assertEqual(service_names, ['cloudFiles']) + class OpenStackAuthConnectionTests(unittest.TestCase): # TODO refactor and move into libcloud/test/common From d7917ef5a2d27ce459cd797c37505eae0b387b4c Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Tue, 5 Aug 2014 12:20:23 +0200 Subject: [PATCH 136/315] Moved shared OpenStackException and OpenStackResponse class from libcloud.compute.drivers.openstack to libcloud.common.openstack module. --- libcloud/common/openstack.py | 82 +++++++++++++++++++++++++ libcloud/compute/drivers/openstack.py | 86 +++------------------------ 2 files changed, 89 insertions(+), 79 deletions(-) diff --git a/libcloud/common/openstack.py b/libcloud/common/openstack.py index 73bd27e082..768f88b7e1 100644 --- a/libcloud/common/openstack.py +++ b/libcloud/common/openstack.py @@ -16,15 +16,23 @@ """ Common utilities for OpenStack """ + import sys import datetime +try: + from lxml import etree as ET +except ImportError: + from xml.etree import ElementTree as ET + from libcloud.utils.py3 import httplib from libcloud.utils.iso8601 import parse_date from libcloud.common.base import ConnectionUserAndKey, Response +from libcloud.common.types import ProviderError from libcloud.compute.types import (LibcloudError, InvalidCredsError, MalformedResponseError) +from libcloud.compute.types import KeyPairDoesNotExistError try: import simplejson as json @@ -51,6 +59,8 @@ 'OpenStackBaseConnection', 'OpenStackAuthConnection', 'OpenStackServiceCatalog', + 'OpenStackResponse', + 'OpenStackException', 'OpenStackDriverMixin', 'AUTH_TOKEN_EXPIRES_GRACE_SECONDS' @@ -658,6 +668,78 @@ def _populate_hosts_and_request_paths(self): self._set_up_connection_info(url=url) +class 
OpenStackException(ProviderError): + pass + + +class OpenStackResponse(Response): + node_driver = None + + def success(self): + i = int(self.status) + return i >= 200 and i <= 299 + + def has_content_type(self, content_type): + content_type_value = self.headers.get('content-type') or '' + content_type_value = content_type_value.lower() + return content_type_value.find(content_type.lower()) > -1 + + def parse_body(self): + if self.status == httplib.NO_CONTENT or not self.body: + return None + + if self.has_content_type('application/xml'): + try: + return ET.XML(self.body) + except: + raise MalformedResponseError( + 'Failed to parse XML', + body=self.body, + driver=self.node_driver) + + elif self.has_content_type('application/json'): + try: + return json.loads(self.body) + except: + raise MalformedResponseError( + 'Failed to parse JSON', + body=self.body, + driver=self.node_driver) + else: + return self.body + + def parse_error(self): + text = None + body = self.parse_body() + + if self.has_content_type('application/xml'): + text = '; '.join([err.text or '' for err in body.getiterator() + if err.text]) + elif self.has_content_type('application/json'): + values = list(body.values()) + + context = self.connection.context + driver = self.connection.driver + key_pair_name = context.get('key_pair_name', None) + + if len(values) > 0 and values[0]['code'] == 404 and key_pair_name: + raise KeyPairDoesNotExistError(name=key_pair_name, + driver=driver) + elif len(values) > 0 and 'message' in values[0]: + text = ';'.join([fault_data['message'] for fault_data + in values]) + else: + text = body + else: + # while we hope a response is always one of xml or json, we have + # seen html or text in the past, its not clear we can really do + # something to make it more readable here, so we will just pass + # it along as the whole response body in the text variable. 
+ text = body + + return '%s %s %s' % (self.status, self.error, text) + + class OpenStackDriverMixin(object): def __init__(self, *args, **kwargs): diff --git a/libcloud/compute/drivers/openstack.py b/libcloud/compute/drivers/openstack.py index c09cee6e0c..edad98a31b 100644 --- a/libcloud/compute/drivers/openstack.py +++ b/libcloud/compute/drivers/openstack.py @@ -21,6 +21,11 @@ except ImportError: import json +try: + from lxml import etree as ET +except ImportError: + from xml.etree import ElementTree as ET + import warnings import base64 @@ -29,23 +34,18 @@ from libcloud.utils.py3 import next from libcloud.utils.py3 import urlparse -try: - from lxml import etree as ET -except ImportError: - from xml.etree import ElementTree as ET from libcloud.common.openstack import OpenStackBaseConnection from libcloud.common.openstack import OpenStackDriverMixin -from libcloud.common.types import MalformedResponseError, ProviderError +from libcloud.common.openstack import OpenStackException +from libcloud.common.openstack import OpenStackResponse from libcloud.utils.networking import is_private_subnet from libcloud.compute.base import NodeSize, NodeImage from libcloud.compute.base import (NodeDriver, Node, NodeLocation, StorageVolume, VolumeSnapshot) from libcloud.compute.base import KeyPair from libcloud.compute.types import NodeState, Provider -from libcloud.compute.types import KeyPairDoesNotExistError from libcloud.pricing import get_size_price -from libcloud.common.base import Response from libcloud.utils.xml import findall __all__ = [ @@ -67,78 +67,6 @@ DEFAULT_API_VERSION = '1.1' -class OpenStackException(ProviderError): - pass - - -class OpenStackResponse(Response): - node_driver = None - - def success(self): - i = int(self.status) - return i >= 200 and i <= 299 - - def has_content_type(self, content_type): - content_type_value = self.headers.get('content-type') or '' - content_type_value = content_type_value.lower() - return 
content_type_value.find(content_type.lower()) > -1 - - def parse_body(self): - if self.status == httplib.NO_CONTENT or not self.body: - return None - - if self.has_content_type('application/xml'): - try: - return ET.XML(self.body) - except: - raise MalformedResponseError( - 'Failed to parse XML', - body=self.body, - driver=self.node_driver) - - elif self.has_content_type('application/json'): - try: - return json.loads(self.body) - except: - raise MalformedResponseError( - 'Failed to parse JSON', - body=self.body, - driver=self.node_driver) - else: - return self.body - - def parse_error(self): - text = None - body = self.parse_body() - - if self.has_content_type('application/xml'): - text = '; '.join([err.text or '' for err in body.getiterator() - if err.text]) - elif self.has_content_type('application/json'): - values = list(body.values()) - - context = self.connection.context - driver = self.connection.driver - key_pair_name = context.get('key_pair_name', None) - - if len(values) > 0 and values[0]['code'] == 404 and key_pair_name: - raise KeyPairDoesNotExistError(name=key_pair_name, - driver=driver) - elif len(values) > 0 and 'message' in values[0]: - text = ';'.join([fault_data['message'] for fault_data - in values]) - else: - text = body - else: - # while we hope a response is always one of xml or json, we have - # seen html or text in the past, its not clear we can really do - # something to make it more readable here, so we will just pass - # it along as the whole response body in the text variable. - text = body - - return '%s %s %s' % (self.status, self.error, text) - - class OpenStackComputeConnection(OpenStackBaseConnection): # default config for http://devstack.org/ service_type = 'compute' From a2694af1a52b5377bebeeaf378c3b9bfbb05a21f Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Tue, 5 Aug 2014 22:40:24 +0200 Subject: [PATCH 137/315] [LIBCLOUD-577] Add an example to OpenStack driver documentation which shows how to specify a tenant name. 
--- docs/compute/drivers/openstack.rst | 14 ++++++++++++-- docs/examples/compute/openstack/tenant_name.py | 9 +++++++++ 2 files changed, 21 insertions(+), 2 deletions(-) create mode 100644 docs/examples/compute/openstack/tenant_name.py diff --git a/docs/compute/drivers/openstack.rst b/docs/compute/drivers/openstack.rst index 0d46fb79f0..ecf57d8c63 100644 --- a/docs/compute/drivers/openstack.rst +++ b/docs/compute/drivers/openstack.rst @@ -48,6 +48,7 @@ Available arguments: Unless you are working with a very old version of OpenStack you will either want to use ``2.0_apikey`` or ``2.0_password``. +* ``ex_tenant_name`` - tenant / project name * ``ex_force_auth_token`` - token which is used for authentication. If this argument is provided, normal authentication flow is skipped and the OpenStack API endpoint is directly hit with the provided token. @@ -104,8 +105,17 @@ token before the currently used one is about to expire. .. literalinclude:: /examples/compute/openstack/force_auth_token.py :language: python -5. HP Cloud (www.hpcloud.com) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +5. Connecting and specifying a tenant +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +This example shows how to connect to OpenStack installation which requires you +to specify a tenant (``ex_tenant_name`` argument). + +.. literalinclude:: /examples/compute/openstack/tenant_name.py + :language: python + +6. HP Cloud (www.hpcloud.com) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Connecting to HP Cloud US West and US East (OpenStack Havana). 
diff --git a/docs/examples/compute/openstack/tenant_name.py b/docs/examples/compute/openstack/tenant_name.py new file mode 100644 index 0000000000..bc5d7f4c91 --- /dev/null +++ b/docs/examples/compute/openstack/tenant_name.py @@ -0,0 +1,9 @@ +from libcloud.compute.types import Provider +from libcloud.compute.providers import get_driver + + +OpenStack = get_driver(Provider.OPENSTACK) +driver = OpenStack('your_auth_username', 'your_auth_password', + ex_tenant_name='mytenant', + ex_force_auth_url='http://192.168.1.101:5000', + ex_force_auth_version='2.0_password') From aa0879d93866754a734cafb4725df63258ecf65c Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Tue, 5 Aug 2014 22:55:40 +0200 Subject: [PATCH 138/315] Sort the regions for a consistent result. --- libcloud/test/compute/test_openstack.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libcloud/test/compute/test_openstack.py b/libcloud/test/compute/test_openstack.py index e0971282d4..06e79ba57c 100644 --- a/libcloud/test/compute/test_openstack.py +++ b/libcloud/test/compute/test_openstack.py @@ -115,7 +115,7 @@ def test_connection_get_service_catalog(self): def test_get_regions(self): regions = self.service_catalog.get_regions() - self.assertEqual(regions, ['ORD', 'LON']) + self.assertEqual(sorted(regions), ['LON', 'ORD']) def test_get_service_names(self): OpenStackBaseConnection.conn_classes = (OpenStack_2_0_MockHttp, From e63adedf059a098959fb81def0a84ec648f9ca37 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Tue, 5 Aug 2014 22:55:52 +0200 Subject: [PATCH 139/315] Re-generate supported methods file. 
--- docs/compute/_supported_methods_image_management.rst | 2 +- docs/compute/_supported_methods_main.rst | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/compute/_supported_methods_image_management.rst b/docs/compute/_supported_methods_image_management.rst index 03d104b580..dcbc1d0863 100644 --- a/docs/compute/_supported_methods_image_management.rst +++ b/docs/compute/_supported_methods_image_management.rst @@ -62,7 +62,7 @@ Provider list images get image create image delete `vCloud`_ yes no no no no `Voxel VoxCLOUD`_ yes no no no no `vps.net`_ yes no no no no -`VMware vSphere`_ no no no no no +`VMware vSphere`_ yes no no no no ===================================== =========== ========= ============ ============ ========== .. _`Abiquo`: http://www.abiquo.com/ diff --git a/docs/compute/_supported_methods_main.rst b/docs/compute/_supported_methods_main.rst index 216e1bb9fb..fbf8feb223 100644 --- a/docs/compute/_supported_methods_main.rst +++ b/docs/compute/_supported_methods_main.rst @@ -62,7 +62,7 @@ Provider list nodes create node reboot node destroy `vCloud`_ yes yes yes yes yes yes yes `Voxel VoxCLOUD`_ yes yes yes yes yes yes no `vps.net`_ yes yes yes yes yes yes no -`VMware vSphere`_ yes no yes yes no no no +`VMware vSphere`_ yes no yes yes yes no no ===================================== ========== =========== =========== ============ =========== ========== =========== .. _`Abiquo`: http://www.abiquo.com/ From ff191dfee89d2786a7c1e5d9a3c3b380263b01b1 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Wed, 6 Aug 2014 13:22:18 +0200 Subject: [PATCH 140/315] Add utility join_ipv4_segments and increment_ipv4_segments functions to libcloud.utils.networking module. Patch by Andrew Mann, Tomaz Muraus. 
--- libcloud/test/test_utils.py | 25 +++++++++++++++++++ libcloud/utils/networking.py | 47 +++++++++++++++++++++++++++++++++++- 2 files changed, 71 insertions(+), 1 deletion(-) diff --git a/libcloud/test/test_utils.py b/libcloud/test/test_utils.py index 58b350662e..4c1b6c0524 100644 --- a/libcloud/test/test_utils.py +++ b/libcloud/test/test_utils.py @@ -42,6 +42,8 @@ from libcloud.utils.networking import is_public_subnet from libcloud.utils.networking import is_private_subnet from libcloud.utils.networking import is_valid_ip_address +from libcloud.utils.networking import join_ipv4_segments +from libcloud.utils.networking import increment_ipv4_segments from libcloud.storage.drivers.dummy import DummyIterator @@ -355,6 +357,29 @@ def test_is_valid_ip_address(self): family=socket.AF_INET6) self.assertFalse(status) + def test_join_ipv4_segments(self): + values = [ + (('127', '0', '0', '1'), '127.0.0.1'), + (('255', '255', '255', '0'), '255.255.255.0'), + ] + + for segments, joined_ip in values: + result = join_ipv4_segments(segments=segments) + self.assertEqual(result, joined_ip) + + def test_increment_ipv4_segments(self): + values = [ + (('127', '0', '0', '1'), '127.0.0.2'), + (('255', '255', '255', '0'), '255.255.255.1'), + (('254', '255', '255', '255'), '255.0.0.0'), + (('100', '1', '0', '255'), '100.1.1.0'), + ] + + for segments, incremented_ip in values: + result = increment_ipv4_segments(segments=segments) + result = join_ipv4_segments(segments=result) + self.assertEqual(result, incremented_ip) + if __name__ == '__main__': sys.exit(unittest.main()) diff --git a/libcloud/utils/networking.py b/libcloud/utils/networking.py index f7dca9bbc5..d508d24dca 100644 --- a/libcloud/utils/networking.py +++ b/libcloud/utils/networking.py @@ -19,7 +19,9 @@ __all__ = [ 'is_private_subnet', 'is_public_subnet', - 'is_valid_ip_address' + 'is_valid_ip_address', + 'join_ipv4_segments', + 'increment_ipv4_segments' ] @@ -78,3 +80,46 @@ def is_valid_ip_address(address, 
family=socket.AF_INET): return False return True + + +def join_ipv4_segments(segments): + """ + Helper method to join ip numeric segment pieces back into a full + ip address. + + :param segments: IPv4 segments to join. + :type segments: ``list`` or ``tuple`` + + :return: IPv4 address. + :rtype: ``str`` + """ + return '.'.join([str(s) for s in segments]) + + +def increment_ipv4_segments(segments): + """ + Increment an ip address given in quad segments based on ipv4 rules + + :param segments: IPv4 segments to increment. + :type segments: ``list`` or ``tuple`` + + :return: Incremented segments. + :rtype: ``list`` + """ + segments = [int(segment) for segment in segments] + + segments[3] += 1 + + if segments[3] == 256: + segments[3] = 0 + segments[2] += 1 + + if segments[2] == 256: + segments[2] = 0 + segments[1] += 1 + + if segments[1] == 256: + segments[1] = 0 + segments[0] += 1 + + return segments From 89d23bc9c76095327da2f354b2fc8700f3027ef1 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Wed, 6 Aug 2014 14:17:42 +0200 Subject: [PATCH 141/315] Make sure resource_pool_id is a string. --- libcloud/compute/drivers/vsphere.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libcloud/compute/drivers/vsphere.py b/libcloud/compute/drivers/vsphere.py index ffa84f6833..5a51007f78 100644 --- a/libcloud/compute/drivers/vsphere.py +++ b/libcloud/compute/drivers/vsphere.py @@ -480,7 +480,7 @@ def _to_node(self, vm): state = self.NODE_STATE_MAP.get(status, NodeState.UNKNOWN) ip_address = properties.get('ip_address', None) net = properties.get('net', []) - resource_pool_id = vm.properties.resourcePool._obj + resource_pool_id = str(vm.properties.resourcePool._obj) try: operating_system = vm.properties.summary.guest.guestFullName, From a9eebae822da02694fcf327e396f7d278e7f57e6 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Wed, 6 Aug 2014 14:36:14 +0200 Subject: [PATCH 142/315] Remove unnecessary wrapper function. 
--- libcloud/common/openstack.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/libcloud/common/openstack.py b/libcloud/common/openstack.py index 768f88b7e1..ac5d5ac597 100644 --- a/libcloud/common/openstack.py +++ b/libcloud/common/openstack.py @@ -627,9 +627,6 @@ def morph_action_hook(self, action): self._populate_hosts_and_request_paths() return super(OpenStackBaseConnection, self).morph_action_hook(action) - def request(self, **kwargs): - return super(OpenStackBaseConnection, self).request(**kwargs) - def _set_up_connection_info(self, url): result = self._tuple_from_url(url) (self.host, self.port, self.secure, self.request_path) = result From 763236f36848ccefb4aa6e5b14b5582135112669 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Wed, 6 Aug 2014 16:25:07 +0200 Subject: [PATCH 143/315] Fix a bug in ex_get_console_output in the EC2 driver which would cause an exception to be thrown if there was no console output for a particular node. Reported by Chris DeRamus. --- CHANGES.rst | 7 +++++++ libcloud/compute/drivers/ec2.py | 7 ++++++- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index 92980a83f2..4ac10f0ec1 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -37,6 +37,13 @@ Compute ``OpenStackServiceCatalog`` class. [Andrew Mann, Tomaz Muraus] +- Fix a bug in ``ex_get_console_output`` in the EC2 driver which would cause + an exception to be thrown if there was no console output for a particular + node. + + Reported by Chris DeRamus. 
+ [Tomaz Muraus] + Loadbalancer ~~~~~~~~~~~~ diff --git a/libcloud/compute/drivers/ec2.py b/libcloud/compute/drivers/ec2.py index c17f072a8c..af13a3d500 100644 --- a/libcloud/compute/drivers/ec2.py +++ b/libcloud/compute/drivers/ec2.py @@ -3733,7 +3733,12 @@ def ex_get_console_output(self, node): namespace=NAMESPACE) timestamp = parse_date(timestamp) - output = base64.b64decode(b(encoded_string)).decode('utf-8') + + if encoded_string: + output = base64.b64decode(b(encoded_string)).decode('utf-8') + else: + # No console output + output = None return {'instance_id': node.id, 'timestamp': timestamp, From b016fa224750fb70f2c0a18bffa4fc61ded96666 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Thu, 7 Aug 2014 11:22:56 +0200 Subject: [PATCH 144/315] Override request method in the OpenStackBaseConnection and send a default "Content-Type" header if "default_content_type" attribute is set. This way, all the connection classes which inherit from this class don't need to duplicate the logic and override request themselves. 
--- libcloud/common/openstack.py | 17 +++++++++++++++++ libcloud/compute/drivers/openstack.py | 15 --------------- 2 files changed, 17 insertions(+), 15 deletions(-) diff --git a/libcloud/common/openstack.py b/libcloud/common/openstack.py index ac5d5ac597..2d3b9d27a8 100644 --- a/libcloud/common/openstack.py +++ b/libcloud/common/openstack.py @@ -575,6 +575,23 @@ def __init__(self, user_id, key, secure=True, timeout=self.timeout) self._osa = osa + def request(self, action, params=None, data='', headers=None, + method='GET', raw=False): + headers = headers or {} + params = params or {} + + # Include default content-type for POST and PUT request (if available) + default_content_type = getattr(self, 'default_content_type', None) + if method.upper() in ['POST', 'PUT'] and default_content_type: + headers = {'Content-Type': default_content_type} + + return super(OpenStackBaseConnection, self).request(action=action, + params=params, + data=data, + method=method, + headers=headers, + raw=raw) + def _get_auth_url(self): """ Retrieve auth url for this instance using either "ex_force_auth_url" diff --git a/libcloud/compute/drivers/openstack.py b/libcloud/compute/drivers/openstack.py index edad98a31b..93834dc2b0 100644 --- a/libcloud/compute/drivers/openstack.py +++ b/libcloud/compute/drivers/openstack.py @@ -73,21 +73,6 @@ class OpenStackComputeConnection(OpenStackBaseConnection): service_name = 'nova' service_region = 'RegionOne' - def request(self, action, params=None, data='', headers=None, - method='GET'): - if not headers: - headers = {} - if not params: - params = {} - - if method in ("POST", "PUT"): - headers = {'Content-Type': self.default_content_type} - - return super(OpenStackComputeConnection, self).request( - action=action, - params=params, data=data, - method=method, headers=headers) - class OpenStackNodeDriver(NodeDriver, OpenStackDriverMixin): """ From ebb8f098caf8087e6255856f4bd86fe6e9c70e42 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Thu, 7 Aug 2014 
16:53:06 +0200 Subject: [PATCH 145/315] Add experimental and hacky support for authenticating against Keystone API v3 via OpenStack based drivers. Notes: 1. Domains are not supported yet 2. Projects are supported by specifying "ex_tenant_name" argument 3. Internal interface is subject to change and will change in the near future --- libcloud/common/openstack.py | 122 ++++++++++++++++++++++++++++++++++- 1 file changed, 119 insertions(+), 3 deletions(-) diff --git a/libcloud/common/openstack.py b/libcloud/common/openstack.py index 2d3b9d27a8..65b4306632 100644 --- a/libcloud/common/openstack.py +++ b/libcloud/common/openstack.py @@ -46,7 +46,8 @@ '1.1', '2.0', '2.0_apikey', - '2.0_password' + '2.0_password', + '3.x_password' ] # How many seconds to substract from the auth token expiration time before @@ -165,6 +166,8 @@ def authenticate(self, force=False): return self.authenticate_2_0_with_apikey() elif self.auth_version == "2.0_password": return self.authenticate_2_0_with_password() + elif self.auth_version == '3.x_password': + return self.authenticate_3_x_with_password() else: raise LibcloudError('Unsupported Auth Version requested') @@ -294,6 +297,81 @@ def authenticate_2_0_with_body(self, reqbody): return self + def authenticate_3_x_with_password(self): + # TODO: Support for custom domain + # TODO: Refactor and add a class per API version + domain = 'Default' + + data = { + 'auth': { + 'identity': { + 'methods': ['password'], + 'password': { + 'user': { + 'domain': { + 'name': domain + }, + 'name': self.user_id, + 'password': self.key + } + } + }, + 'scope': { + 'project': { + 'domain': { + 'name': domain + }, + 'name': self.tenant_name + } + } + } + } + + if self.tenant_name: + data['auth']['scope'] = { + 'project': { + 'domain': { + 'name': domain + }, + 'name': self.tenant_name + } + } + + data = json.dumps(data) + response = self.request('/v3/auth/tokens', data=data, + headers={'Content-Type': 'application/json'}, + method='POST') + + if response.status == 
httplib.UNAUTHORIZED: + # Invalid credentials + raise InvalidCredsError() + elif response.status in [httplib.OK, httplib.CREATED]: + headers = response.headers + + try: + body = json.loads(response.body) + except Exception: + e = sys.exc_info()[1] + raise MalformedResponseError('Failed to parse JSON', e) + + try: + expires = body['token']['expires_at'] + + self.auth_token = headers['x-subject-token'] + self.auth_token_expires = parse_date(expires) + self.urls = body['token']['catalog'] + self.auth_user_info = None + except KeyError: + e = sys.exc_info()[1] + raise MalformedResponseError('Auth JSON response is \ + missing required elements', e) + body = 'code: %s body:%s' % (response.status, response.body) + else: + raise MalformedResponseError('Malformed response', body=body, + driver=self.driver) + + return self + def is_token_valid(self): """ Return True if the current auth token is already cached and hasn't @@ -339,7 +417,9 @@ def __init__(self, service_catalog, ex_force_auth_version=None): # Check this way because there are a couple of different 2.0_* # auth types. 
- if '2.0' in self._auth_version: + if '3.x' in self._auth_version: + self._parse_auth_v3(service_catalog) + elif '2.0' in self._auth_version: self._parse_auth_v2(service_catalog) elif ('1.1' in self._auth_version) or ('1.0' in self._auth_version): self._parse_auth_v1(service_catalog) @@ -376,7 +456,16 @@ def get_endpoints(self, service_type=None, name=None): return eps def get_endpoint(self, service_type=None, name=None, region=None): - if '2.0' in self._auth_version: + if '3.x' in self._auth_version: + endpoints = self._service_catalog.get(service_type, {}) \ + .get(region, []) + + endpoint = [] + for _endpoint in endpoints: + if _endpoint['type'] == 'public': + endpoint = [_endpoint] + break + elif '2.0' in self._auth_version: endpoint = self._service_catalog.get(service_type, {}) \ .get(name, {}).get(region, []) elif ('1.1' in self._auth_version) or ('1.0' in self._auth_version): @@ -465,6 +554,28 @@ def _parse_auth_v2(self, service_catalog): catalog[region].append(endpoint) + def _parse_auth_v3(self, service_catalog): + for entry in service_catalog: + service_type = entry['type'] + + # TODO: use defaultdict + if service_type not in self._service_catalog: + self._service_catalog[service_type] = {} + + for endpoint in entry['endpoints']: + region = endpoint.get('region', None) + + # TODO: Normalize entries for each version + catalog = self._service_catalog[service_type] + if region not in catalog: + catalog[region] = [] + + region_entry = { + 'url': endpoint['url'], + 'type': endpoint['interface'] # public / private + } + catalog[region].append(region_entry) + class OpenStackBaseConnection(ConnectionUserAndKey): @@ -630,8 +741,13 @@ def get_endpoint(self): ep = self.service_catalog.get_endpoint(service_type=service_type, name=service_name, region=service_region) + + # TODO: Normalize keys for different auth versions and use an object if 'publicURL' in ep: return ep['publicURL'] + elif 'url' in ep: + # v3 + return ep['url'] raise LibcloudError('Could not find 
specified endpoint') From 1e28abe049e3466e03acfc048ef9d82c6505cbb2 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Thu, 7 Aug 2014 17:39:15 +0200 Subject: [PATCH 146/315] Make get_regions work with v3 auth. --- libcloud/common/openstack.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/libcloud/common/openstack.py b/libcloud/common/openstack.py index 65b4306632..3d4786bb26 100644 --- a/libcloud/common/openstack.py +++ b/libcloud/common/openstack.py @@ -487,7 +487,11 @@ def get_regions(self): catalog_items = self._service_catalog.items() - if '2.0' in self._auth_version: + if '3.x' in self._auth_version: + for service_type, values in catalog_items: + for region in values.keys(): + regions.add(region) + elif '2.0' in self._auth_version: for service_type, services_by_name in catalog_items: items = services_by_name.items() for service_name, endpoints_by_region in items: From 13fe413d7af4c11b836cbda0a3aa2a2f94ee03d5 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Thu, 7 Aug 2014 17:48:29 +0200 Subject: [PATCH 147/315] Add utility get_service_types method to OpenStackServiceCatalog class. --- libcloud/common/openstack.py | 21 +++++++++++++++++++++ libcloud/test/compute/test_openstack.py | 8 ++++++++ 2 files changed, 29 insertions(+) diff --git a/libcloud/common/openstack.py b/libcloud/common/openstack.py index 3d4786bb26..61d16c18f7 100644 --- a/libcloud/common/openstack.py +++ b/libcloud/common/openstack.py @@ -506,10 +506,31 @@ def get_regions(self): return list(regions) + def get_service_types(self, region=None): + """ + Retrieve all the available service types. + + :param region: Optional region to retrieve service types for. 
+ :type region: ``str`` + + :rtype: ``list`` of ``str`` + """ + service_types = set() + + for service_type, values in self._service_catalog.items(): + regions = values.keys() + if not region or region in regions: + service_types.add(service_type) + + return list(service_types) + def get_service_names(self, service_type, region=None): """ Retrieve list of service names that match service type and region + :type service_type: ``str`` + :type region: ``str`` + :rtype: ``list`` of ``str`` """ names = set() diff --git a/libcloud/test/compute/test_openstack.py b/libcloud/test/compute/test_openstack.py index 06e79ba57c..77a38e9fbb 100644 --- a/libcloud/test/compute/test_openstack.py +++ b/libcloud/test/compute/test_openstack.py @@ -117,6 +117,14 @@ def test_get_regions(self): regions = self.service_catalog.get_regions() self.assertEqual(sorted(regions), ['LON', 'ORD']) + def test_get_service_types(self): + service_types = self.service_catalog.get_service_types() + self.assertEqual(sorted(service_types), ['compute', 'object-store', + 'rax:object-cdn']) + + service_types = self.service_catalog.get_service_types(region='invalid') + self.assertEqual(sorted(service_types), []) + def test_get_service_names(self): OpenStackBaseConnection.conn_classes = (OpenStack_2_0_MockHttp, OpenStack_2_0_MockHttp) From 6deb49e0811011ee569d0ebca7cc70877dac8593 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Fri, 8 Aug 2014 15:42:33 +0200 Subject: [PATCH 148/315] Add two missing super calls. 
--- libcloud/common/types.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/libcloud/common/types.py b/libcloud/common/types.py index 98250c1ba2..6020440a2d 100644 --- a/libcloud/common/types.py +++ b/libcloud/common/types.py @@ -29,6 +29,7 @@ class LibcloudError(Exception): """The base class for other libcloud exceptions""" def __init__(self, value, driver=None): + super(LibcloudError, self).__init__(value) self.value = value self.driver = driver @@ -74,11 +75,9 @@ class ProviderError(LibcloudError): HTTP 404 : NodeNotFoundError, ContainerDoesNotExistError """ - def __init__(self, value, http_code, - driver=None): - self.value = value + def __init__(self, value, http_code, driver=None): + super(ProviderError, self).__init__(value=value, driver=driver) self.http_code = http_code - self.driver = driver def __str__(self): return self.__repr__() From 529a9cf59395e005cbd7c2d9383ce69a369306d3 Mon Sep 17 00:00:00 2001 From: Roeland Kuipers Date: Fri, 8 Aug 2014 13:03:00 +0200 Subject: [PATCH 149/315] Cloudstack - create_node: add param to specify specific ip_address for default nic plus flake8 & docstring fix closes #346 Signed-off-by: Sebastien Goasguen --- CHANGES.rst | 4 ++++ libcloud/compute/drivers/cloudstack.py | 7 ++++++ .../deployVirtualMachine_deployip.json | 1 + .../cloudstack/listNetworks_deployip.json | 1 + .../listServiceOfferings_deployip.json | 1 + .../cloudstack/listTemplates_deployip.json | 1 + .../cloudstack/listZones_deployip.json | 1 + .../queryAsyncJobResult_deployvmwithid.json | 1 + libcloud/test/compute/test_cloudstack.py | 22 +++++++++++++++++++ 9 files changed, 39 insertions(+) create mode 100644 libcloud/test/compute/fixtures/cloudstack/deployVirtualMachine_deployip.json create mode 100644 libcloud/test/compute/fixtures/cloudstack/listNetworks_deployip.json create mode 100644 libcloud/test/compute/fixtures/cloudstack/listServiceOfferings_deployip.json create mode 100644 
libcloud/test/compute/fixtures/cloudstack/listTemplates_deployip.json create mode 100644 libcloud/test/compute/fixtures/cloudstack/listZones_deployip.json create mode 100644 libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_deployvmwithid.json diff --git a/CHANGES.rst b/CHANGES.rst index 4ac10f0ec1..6588b72287 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -44,6 +44,10 @@ Compute Reported by Chris DeRamus. [Tomaz Muraus] +- Add ip_address parameter in CloudStack driver ``create_node`` method. + (GITHUB-346) + [Roeland Kuipers] + Loadbalancer ~~~~~~~~~~~~ diff --git a/libcloud/compute/drivers/cloudstack.py b/libcloud/compute/drivers/cloudstack.py index 81b8cb4be9..21d2fefe7b 100644 --- a/libcloud/compute/drivers/cloudstack.py +++ b/libcloud/compute/drivers/cloudstack.py @@ -810,6 +810,9 @@ def create_node(self, **kwargs): :keyword ex_displayname: String containing instance display name :type ex_displayname: ``str`` + :keyword ex_ip_address: String with ipaddress for the default nic + :type ex_ip_address: ``str`` + :rtype: :class:`.CloudStackNode` """ @@ -836,6 +839,7 @@ def _create_args_to_params(self, node, **kwargs): ex_user_data = kwargs.get('ex_userdata', None) ex_security_groups = kwargs.get('ex_security_groups', None) ex_displayname = kwargs.get('ex_displayname', None) + ex_ip_address = kwargs.get('ex_ip_address', None) if name: server_params['name'] = name @@ -876,6 +880,9 @@ def _create_args_to_params(self, node, **kwargs): ex_security_groups = ','.join(ex_security_groups) server_params['securitygroupnames'] = ex_security_groups + if ex_ip_address: + server_params['ipaddress'] = ex_ip_address + return server_params def destroy_node(self, node): diff --git a/libcloud/test/compute/fixtures/cloudstack/deployVirtualMachine_deployip.json b/libcloud/test/compute/fixtures/cloudstack/deployVirtualMachine_deployip.json new file mode 100644 index 0000000000..b099e8f1a3 --- /dev/null +++ 
b/libcloud/test/compute/fixtures/cloudstack/deployVirtualMachine_deployip.json @@ -0,0 +1 @@ +{ "deployvirtualmachineresponse" : {"jobid":"deployvmwithid","id":65385} } diff --git a/libcloud/test/compute/fixtures/cloudstack/listNetworks_deployip.json b/libcloud/test/compute/fixtures/cloudstack/listNetworks_deployip.json new file mode 100644 index 0000000000..ad0bb8095a --- /dev/null +++ b/libcloud/test/compute/fixtures/cloudstack/listNetworks_deployip.json @@ -0,0 +1 @@ +{"listnetworksresponse": { "count": 3, "network": [ { "broadcastdomaintype": "Vlan", "broadcasturi": "vlan://1002", "displaytext": "network:192.168.2.0/24", "dns1": "8.8.8.8", "dns2": "8.8.8.8", "domain": "ROOT", "domainid": 1623, "endip": "192.168.2.233", "gateway": "192.168.2.254", "id": 1823, "isdefault": false, "isshared": true, "issystem": false, "name": "ROOT", "netmask": "255.255.255.0", "networkdomain": "cs1cloud.internal", "networkofferingavailability": "Optional", "networkofferingdisplaytext": "Direct", "networkofferingid": 7, "networkofferingname": "DefaultDirectNetworkOffering", "related": 1823, "securitygroupenabled": false, "service": [ { "name": "UserData" }, { "capability": [ { "name": "AllowDnsSuffixModification", "value": "true" } ], "name": "Dns" }, { "capability": [ { "name": "SupportedProtocols", "value": "tcp, udp" }, { "name": "SupportedLbAlgorithms", "value": "roundrobin,leastconn,source" } ], "name": "Lb" }, { "name": "Dhcp" } ], "startip": "192.168.2.1", "state": "Setup", "traffictype": "Guest", "type": "Direct", "vlan": "1002", "zoneid": 1 }, { "account": "testuser", "broadcastdomaintype": "Vlan", "broadcasturi": "vlan://2909", "displaytext": "testuser-network", "dns1": "8.8.8.8", "dns2": "8.8.8.8", "domain": "ROOT", "domainid": 1623, "id": 1547, "isdefault": true, "isshared": false, "issystem": false, "name": "testuser-network", "networkdomain": "cs586cloud.internal", "networkofferingavailability": "Optional", "networkofferingdisplaytext": "Virtual Vlan", 
"networkofferingid": 6, "networkofferingname": "DefaultVirtualizedNetworkOffering", "related": 1547, "securitygroupenabled": false, "service": [ { "name": "UserData" }, { "capability": [ { "name": "AllowDnsSuffixModification", "value": "true" } ], "name": "Dns" }, { "capability": [ { "name": "SupportedProtocols", "value": "tcp, udp" }, { "name": "SupportedLbAlgorithms", "value": "roundrobin,leastconn,source" } ], "name": "Lb" }, { "name": "Gateway" }, { "name": "Dhcp" }, { "capability": [ { "name": "SupportedVpnTypes", "value": "pptp,l2tp,ipsec" } ], "name": "Vpn" }, { "capability": [ { "name": "MultipleIps", "value": "true" }, { "name": "SupportedSourceNatTypes", "value": "per account" }, { "name": "SupportedProtocols", "value": "tcp,udp,icmp" }, { "name": "TrafficStatistics", "value": "per public ip" }, { "name": "PortForwarding", "value": "true" }, { "name": "StaticNat", "value": "true" } ], "name": "Firewall" } ], "state": "Implemented", "traffictype": "Guest", "type": "Virtual", "zoneid": 2 }, { "account": "testuser", "broadcastdomaintype": "Vlan", "broadcasturi": "vlan://3564", "displaytext": "testuser-network", "dns1": "8.8.8.8", "dns2": "8.8.8.8", "domain": "ROOT", "domainid": 1623, "id": 1374, "isdefault": true, "isshared": false, "issystem": false, "name": "testuser-network", "networkdomain": "cs586cloud.internal", "networkofferingavailability": "Optional", "networkofferingdisplaytext": "Virtual Vlan", "networkofferingid": 6, "networkofferingname": "DefaultVirtualizedNetworkOffering", "related": 1374, "securitygroupenabled": false, "service": [ { "name": "UserData" }, { "capability": [ { "name": "AllowDnsSuffixModification", "value": "true" } ], "name": "Dns" }, { "capability": [ { "name": "SupportedProtocols", "value": "tcp, udp" }, { "name": "SupportedLbAlgorithms", "value": "roundrobin,leastconn,source" } ], "name": "Lb" }, { "name": "Gateway" }, { "name": "Dhcp" }, { "capability": [ { "name": "SupportedVpnTypes", "value": "pptp,l2tp,ipsec" } ], 
"name": "Vpn" }, { "capability": [ { "name": "MultipleIps", "value": "true" }, { "name": "SupportedSourceNatTypes", "value": "per account" }, { "name": "SupportedProtocols", "value": "tcp,udp,icmp" }, { "name": "TrafficStatistics", "value": "per public ip" }, { "name": "PortForwarding", "value": "true" }, { "name": "StaticNat", "value": "true" } ], "name": "Firewall" } ], "state": "Implemented", "traffictype": "Guest", "type": "Virtual", "zoneid": 1 } ] } } diff --git a/libcloud/test/compute/fixtures/cloudstack/listServiceOfferings_deployip.json b/libcloud/test/compute/fixtures/cloudstack/listServiceOfferings_deployip.json new file mode 100644 index 0000000000..944a6f2919 --- /dev/null +++ b/libcloud/test/compute/fixtures/cloudstack/listServiceOfferings_deployip.json @@ -0,0 +1 @@ +{ "listserviceofferingsresponse" : {"count": 3, "serviceoffering": [ {"cpunumber": 2, "cpuspeed": 1600, "created": "2011-09-09T13:14:19+0900", "defaultuse": false, "displaytext": "M4", "id": 21, "issystem": false, "limitcpuuse": true, "memory": 4096, "name": "M4", "networkrate": 500, "offerha": false, "storagetype": "shared", "tags": "Shared.auto"}, {"cpunumber": 1, "cpuspeed": 800, "created": "2011-09-09T13:17:52+0900", "defaultuse": false, "displaytext": "XS", "id": 24, "issystem": false, "limitcpuuse": true, "memory": 512, "name": "XS", "networkrate": 100, "offerha": false, "storagetype": "shared", "tags": "Shared.auto"}, {"cpunumber": 1, "cpuspeed": 1600, "created": "2011-09-14T22:51:23+0900", "defaultuse": false, "displaytext": "S2", "id": 30, "issystem": false, "limitcpuuse": true, "memory": 2048, "name": "S2", "networkrate": 500, "offerha": false, "storagetype": "shared", "tags": "Shared.auto"}]}} diff --git a/libcloud/test/compute/fixtures/cloudstack/listTemplates_deployip.json b/libcloud/test/compute/fixtures/cloudstack/listTemplates_deployip.json new file mode 100644 index 0000000000..a9c778c738 --- /dev/null +++ 
b/libcloud/test/compute/fixtures/cloudstack/listTemplates_deployip.json @@ -0,0 +1 @@ +{ "listtemplatesresponse" : {"count": 2, "template": [ {"account": "admin", "created": "2014-06-06T20:08:49+0900", "crossZones": false, "displaytext": "CentOS 6.5", "domain": "ROOT", "domainid": 1, "format": "OVA", "hypervisor": "VMware", "id": 8028, "isextractable": true, "isfeatured": true, "ispublic": true, "isready": true, "name": "CentOS 6.5 64-bit", "ostypeid": 112, "ostypename": "CentOS 5.5 (64-bit)", "passwordenabled": true, "size": 16106127360, "status": "Download Complete", "templatetype": "USER", "zoneid": 2, "zonename": "zone2"}, {"account": "admin", "created": "2014-06-06T20:08:48+0900", "crossZones": false, "displaytext": "CentOS 6.5", "domain": "ROOT", "domainid": 1, "format": "OVA", "hypervisor": "VMware", "id": 8028, "isextractable": true, "isfeatured": true, "ispublic": true, "isready": true, "name": "CentOS 6.5 64-bit", "ostypeid": 112, "ostypename": "CentOS 5.5 (64-bit)", "passwordenabled": true, "size": 16106127360, "status": "Download Complete", "templatetype": "USER", "zoneid": 1, "zonename": "zone1"} ]} } diff --git a/libcloud/test/compute/fixtures/cloudstack/listZones_deployip.json b/libcloud/test/compute/fixtures/cloudstack/listZones_deployip.json new file mode 100644 index 0000000000..b072749410 --- /dev/null +++ b/libcloud/test/compute/fixtures/cloudstack/listZones_deployip.json @@ -0,0 +1 @@ +{ "listzonesresponse" : { "count":2 ,"zone" : [ {"id":1,"name":"zone1","networktype":"Advanced","securitygroupsenabled":false,"allocationstate":"Enabled","zonetoken":"6a3bfa26-67cd-3ff2-867e-20e86b211bb1","dhcpprovider":"VirtualRouter"}, {"id":2,"name":"zone2","networktype":"Advanced","securitygroupsenabled":false,"allocationstate":"Enabled","zonetoken":"8366e550-542d-373d-88e3-ca7c90bc8e6c","dhcpprovider":"VirtualRouter"} ] } } diff --git a/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_deployvmwithid.json 
b/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_deployvmwithid.json new file mode 100644 index 0000000000..6b6409ee2c --- /dev/null +++ b/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_deployvmwithid.json @@ -0,0 +1 @@ +{ "queryasyncjobresultresponse" : {"jobid":"deployvmwithip", "jobprocstatus": 0, "jobresult": {"virtualmachine": {"account": "testuser", "cpunumber": 2, "cpuspeed": 1600, "created": "2014-07-06T16:40:39+0900", "displayname": "deployip", "domain": "ROOT", "domainid": 1623, "guestosid": 112, "haenable": false, "hypervisor": "VMware", "id": 65385, "memory": 4096, "name": "deployip", "nic": [{"gateway": "10.1.0.1", "id": 87320, "ipaddress": "10.1.0.128", "isdefault": true, "macaddress": "02:00:78:4a:01:9e", "netmask": "255.255.252.0", "networkid": 1374, "traffictype": "Guest", "type": "Virtual"}, {"gateway": "192.168.2.254", "id": 87319, "ipaddress": "192.168.2.55", "isdefault": false, "macaddress": "06:e6:50:00:70:0e", "netmask": "255.255.255.0", "networkid": 1823, "traffictype": "Guest", "type": "Direct"}], "password": "password", "passwordenabled": true, "rootdeviceid": 0, "rootdevicetype": "VMFS", "securitygroup": [], "serviceofferingid": 21, "serviceofferingname": "M4", "state": "Running" , "templatedisplaytext": "CentOS 6.5", "templateid": 8028, "templatename": "CentOS 6.5 64-bit", "zoneid": 1, "zonename": "zone1"}}, "jobresultcode": 0, "jobresulttype": "object", "jobstatus": 1} } diff --git a/libcloud/test/compute/test_cloudstack.py b/libcloud/test/compute/test_cloudstack.py index aade378bda..3393c0cef4 100644 --- a/libcloud/test/compute/test_cloudstack.py +++ b/libcloud/test/compute/test_cloudstack.py @@ -127,6 +127,28 @@ def test_create_node_ex_networks(self): self.assertEqual(node.extra['image_id'], image.id) self.assertEqual(len(node.private_ips), 2) + def test_create_node_ex_ipaddress(self): + CloudStackMockHttp.fixture_tag = 'deployip' + size = self.driver.list_sizes()[0] + image = self.driver.list_images()[0] 
+ location = self.driver.list_locations()[0] + ipaddress = '10.1.0.128' + + networks = [nw for nw in self.driver.ex_list_networks() + if str(nw.zoneid) == str(location.id)] + + node = self.driver.create_node(name='deployip', + location=location, + image=image, + size=size, + networks=networks, + ex_ip_address=ipaddress) + self.assertEqual(node.name, 'deployip') + self.assertEqual(node.extra['size_id'], size.id) + self.assertEqual(node.extra['zone_id'], location.id) + self.assertEqual(node.extra['image_id'], image.id) + self.assertEqual(node.private_ips[0], ipaddress) + def test_create_node_ex_security_groups(self): size = self.driver.list_sizes()[0] image = self.driver.list_images()[0] From cd1671941951d262e901a098d32693302330166e Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Sun, 10 Aug 2014 18:18:36 +0200 Subject: [PATCH 150/315] Refactor OpenStack identity (auth) code and classes, make it more flexible, re-usable and maintainableL * Add new module which handles identity related logic * (libcloud.common.openstack_identity) * Add new classes for each identity API version. * Add additional functionality to class for Keystone API v3 * Add new OpenStackServiceCatalogEntry and OpenStackServiceCatalogEntryEndpoint class * Modify OpenStackServiceCatalog class to store entries in a structured format (OpenStackServiceCatalogEntry instances) instead of storing it in an unstructured dictionary * Update all the affected code to use the new classes and methods. * Fix a bug with the CDN requests in the CloudFiles driver. Also a add new class for each identity API version and add additional functionality to class for Keystone API v3. Backward incompatible changes: * OpenStackAuthConnection class has been removed and replaced with version specific classes and "get_class_for_auth_version" class retrieval function. 
--- CHANGES.rst | 13 + docs/upgrade_notes.rst | 36 + libcloud/common/openstack.py | 605 +------- libcloud/common/openstack_identity.py | 1225 +++++++++++++++++ libcloud/compute/drivers/hpcloud.py | 2 +- libcloud/compute/drivers/kili.py | 2 +- libcloud/compute/drivers/rackspace.py | 6 +- libcloud/dns/drivers/rackspace.py | 2 +- libcloud/storage/drivers/cloudfiles.py | 35 +- libcloud/storage/drivers/ktucloud.py | 10 +- libcloud/test/common/test_openstack.py | 1 - .../test/common/test_openstack_identity.py | 504 +++++++ .../compute/fixtures/openstack/_v3__auth.json | 182 +++ .../openstack_identity/v3_create_user.json | 12 + .../openstack_identity/v3_domains.json | 18 + .../v3_domains_default.json | 11 + .../v3_domains_default_users_a_roles.json | 16 + .../openstack_identity/v3_projects.json | 49 + .../fixtures/openstack_identity/v3_roles.json | 25 + .../fixtures/openstack_identity/v3_users.json | 131 ++ .../v3_users_a_projects.json | 8 + libcloud/test/compute/test_openstack.py | 249 +--- libcloud/test/storage/test_cloudfiles.py | 10 - 23 files changed, 2306 insertions(+), 846 deletions(-) create mode 100644 libcloud/common/openstack_identity.py create mode 100644 libcloud/test/common/test_openstack_identity.py create mode 100644 libcloud/test/compute/fixtures/openstack/_v3__auth.json create mode 100644 libcloud/test/compute/fixtures/openstack_identity/v3_create_user.json create mode 100644 libcloud/test/compute/fixtures/openstack_identity/v3_domains.json create mode 100644 libcloud/test/compute/fixtures/openstack_identity/v3_domains_default.json create mode 100644 libcloud/test/compute/fixtures/openstack_identity/v3_domains_default_users_a_roles.json create mode 100644 libcloud/test/compute/fixtures/openstack_identity/v3_projects.json create mode 100644 libcloud/test/compute/fixtures/openstack_identity/v3_roles.json create mode 100644 libcloud/test/compute/fixtures/openstack_identity/v3_users.json create mode 100644 
libcloud/test/compute/fixtures/openstack_identity/v3_users_a_projects.json diff --git a/CHANGES.rst b/CHANGES.rst index 4ac10f0ec1..0cf0d4769a 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -4,6 +4,13 @@ Changelog Changes with Apache Libcloud in development ------------------------------------------- +General +~~~~~~~ + +- Add new ``OpenStackIdentity_3_0_Connection`` class for working with + OpenStack Identity (Keystone) service API v3. + [Tomaz Muraus] + Compute ~~~~~~~ @@ -44,6 +51,12 @@ Compute Reported by Chris DeRamus. [Tomaz Muraus] +Storage +~~~~~~~ + +- Fix a bug with CDN requests in the CloudFiles driver. + [Tomaz Muraus] + Loadbalancer ~~~~~~~~~~~~ diff --git a/docs/upgrade_notes.rst b/docs/upgrade_notes.rst index 93d920d73a..ef54b38faf 100644 --- a/docs/upgrade_notes.rst +++ b/docs/upgrade_notes.rst @@ -5,6 +5,42 @@ This page describes how to upgrade from a previous version to a new version which contains backward incompatible or semi-incompatible changes and how to preserve the old behavior when this is possible. +Libcloud in development +----------------------- + +Changes in the OpenStack authentication and service catalog classes +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. note:: + If you are only working with the driver classes and have never dorectly + touched the classes mentioned bellow, then you aren't affected and those + changes are fully backward compatible. + + +To make OpenStack authentication and identity related classes more extensible, +easier to main and easier to use, those classes have been refactored. All of +the changes are described bellow. + +* New ``libcloud.common.openstack_identity`` module has been added. This module + contains code for working with OpenStack Identity (Keystone) service. 
+* ``OpenStackAuthConnection`` class has been removed and replaced with one + connection class per Keystone API version + (``OpenStackIdentity_1_0_Connection``, ``OpenStackIdentity_2_0_Connection``, + ``OpenStackIdentity_3_0_Connection``). +* New ``get_auth_class`` method has been added to ``OpenStackBaseConnection`` + class. This method allows you to retrieve an instance of the authentication + class which is used with the current connection. +* ``OpenStackServiceCatalog`` class has been refactored to store parsed catalog + entries in a structured format (``OpenStackServiceCatalogEntry`` and + ``OpenStackServiceCatalogEntryEndpoint`` class). Previously entries were + stored in an unstructured form in a dictionary. All the catalog entries can + be retrieved by using ``OpenStackServiceCatalog.get_entris`` method. +* ``ex_force_auth_version`` argument in ``OpenStackServiceCatalog`` constructor + method has been renamed to ``auth_version`` +* ``get_regions``, ``get_service_types`` and ``get_service_names`` methods on + the ``OpenStackServiceCatalog`` class have been modified to always return the + result in the same order (result values are sorted beforehand). 
+ Libcloud 0.14.1 --------------- diff --git a/libcloud/common/openstack.py b/libcloud/common/openstack.py index 61d16c18f7..80ca08f134 100644 --- a/libcloud/common/openstack.py +++ b/libcloud/common/openstack.py @@ -17,23 +17,23 @@ Common utilities for OpenStack """ -import sys -import datetime - try: from lxml import etree as ET except ImportError: from xml.etree import ElementTree as ET from libcloud.utils.py3 import httplib -from libcloud.utils.iso8601 import parse_date from libcloud.common.base import ConnectionUserAndKey, Response from libcloud.common.types import ProviderError -from libcloud.compute.types import (LibcloudError, InvalidCredsError, - MalformedResponseError) +from libcloud.compute.types import (LibcloudError, MalformedResponseError) from libcloud.compute.types import KeyPairDoesNotExistError +# Imports for backward compatibility reasons +from libcloud.common.openstack_identity import get_class_for_auth_version +from libcloud.common.openstack_identity import OpenStackServiceCatalog + + try: import simplejson as json except ImportError: @@ -58,8 +58,6 @@ __all__ = [ 'OpenStackBaseConnection', - 'OpenStackAuthConnection', - 'OpenStackServiceCatalog', 'OpenStackResponse', 'OpenStackException', 'OpenStackDriverMixin', @@ -68,540 +66,6 @@ ] -# @TODO: Refactor for re-use by other openstack drivers -class OpenStackAuthResponse(Response): - def success(self): - return True - - def parse_body(self): - if not self.body: - return None - - if 'content-type' in self.headers: - key = 'content-type' - elif 'Content-Type' in self.headers: - key = 'Content-Type' - else: - raise LibcloudError('Missing content-type header', - driver=OpenStackAuthConnection) - - content_type = self.headers[key] - if content_type.find(';') != -1: - content_type = content_type.split(';')[0] - - if content_type == 'application/json': - try: - data = json.loads(self.body) - except: - raise MalformedResponseError('Failed to parse JSON', - body=self.body, - 
driver=OpenStackAuthConnection) - elif content_type == 'text/plain': - data = self.body - else: - data = self.body - - return data - - -class OpenStackAuthConnection(ConnectionUserAndKey): - - responseCls = OpenStackAuthResponse - name = 'OpenStack Auth' - timeout = None - - def __init__(self, parent_conn, auth_url, auth_version, user_id, key, - tenant_name=None, timeout=None): - self.parent_conn = parent_conn - # enable tests to use the same mock connection classes. - self.conn_classes = parent_conn.conn_classes - - super(OpenStackAuthConnection, self).__init__( - user_id, key, url=auth_url, timeout=timeout) - - self.auth_version = auth_version - self.auth_url = auth_url - self.driver = self.parent_conn.driver - self.tenant_name = tenant_name - self.timeout = timeout - - self.urls = {} - self.auth_token = None - self.auth_token_expires = None - self.auth_user_info = None - - def morph_action_hook(self, action): - (_, _, _, request_path) = self._tuple_from_url(self.auth_url) - - if request_path == '': - # No path is provided in the auth_url, use action passed to this - # method. - return action - - return request_path - - def add_default_headers(self, headers): - headers['Accept'] = 'application/json' - headers['Content-Type'] = 'application/json; charset=UTF-8' - return headers - - def authenticate(self, force=False): - """ - Authenticate against the keystone api. - - :param force: Forcefully update the token even if it's already cached - and still valid. 
- :type force: ``bool`` - """ - if not force and self.auth_version in AUTH_VERSIONS_WITH_EXPIRES \ - and self.is_token_valid(): - # If token is still valid, there is no need to re-authenticate - return self - - if self.auth_version == "1.0": - return self.authenticate_1_0() - elif self.auth_version == "1.1": - return self.authenticate_1_1() - elif self.auth_version == "2.0" or self.auth_version == "2.0_apikey": - return self.authenticate_2_0_with_apikey() - elif self.auth_version == "2.0_password": - return self.authenticate_2_0_with_password() - elif self.auth_version == '3.x_password': - return self.authenticate_3_x_with_password() - else: - raise LibcloudError('Unsupported Auth Version requested') - - def authenticate_1_0(self): - headers = { - 'X-Auth-User': self.user_id, - 'X-Auth-Key': self.key, - } - - resp = self.request('/v1.0', headers=headers, method='GET') - - if resp.status == httplib.UNAUTHORIZED: - # HTTP UNAUTHORIZED (401): auth failed - raise InvalidCredsError() - elif resp.status not in [httplib.NO_CONTENT, httplib.OK]: - body = 'code: %s body:%s headers:%s' % (resp.status, - resp.body, - resp.headers) - raise MalformedResponseError('Malformed response', body=body, - driver=self.driver) - else: - headers = resp.headers - # emulate the auth 1.1 URL list - self.urls = {} - self.urls['cloudServers'] = \ - [{'publicURL': headers.get('x-server-management-url', None)}] - self.urls['cloudFilesCDN'] = \ - [{'publicURL': headers.get('x-cdn-management-url', None)}] - self.urls['cloudFiles'] = \ - [{'publicURL': headers.get('x-storage-url', None)}] - self.auth_token = headers.get('x-auth-token', None) - self.auth_user_info = None - - if not self.auth_token: - raise MalformedResponseError('Missing X-Auth-Token in \ - response headers') - - return self - - def authenticate_1_1(self): - reqbody = json.dumps({'credentials': {'username': self.user_id, - 'key': self.key}}) - resp = self.request('/v1.1/auth', data=reqbody, headers={}, - method='POST') - - if 
resp.status == httplib.UNAUTHORIZED: - # HTTP UNAUTHORIZED (401): auth failed - raise InvalidCredsError() - elif resp.status != httplib.OK: - body = 'code: %s body:%s' % (resp.status, resp.body) - raise MalformedResponseError('Malformed response', body=body, - driver=self.driver) - else: - try: - body = json.loads(resp.body) - except Exception: - e = sys.exc_info()[1] - raise MalformedResponseError('Failed to parse JSON', e) - - try: - expires = body['auth']['token']['expires'] - - self.auth_token = body['auth']['token']['id'] - self.auth_token_expires = parse_date(expires) - self.urls = body['auth']['serviceCatalog'] - self.auth_user_info = None - except KeyError: - e = sys.exc_info()[1] - raise MalformedResponseError('Auth JSON response is \ - missing required elements', e) - - return self - - def authenticate_2_0_with_apikey(self): - # API Key based authentication uses the RAX-KSKEY extension. - # http://s.apache.org/oAi - data = {'auth': - {'RAX-KSKEY:apiKeyCredentials': - {'username': self.user_id, 'apiKey': self.key}}} - if self.tenant_name: - data['auth']['tenantName'] = self.tenant_name - reqbody = json.dumps(data) - return self.authenticate_2_0_with_body(reqbody) - - def authenticate_2_0_with_password(self): - # Password based authentication is the only 'core' authentication - # method in Keystone at this time. 
- # 'keystone' - http://s.apache.org/e8h - data = {'auth': - {'passwordCredentials': - {'username': self.user_id, 'password': self.key}}} - if self.tenant_name: - data['auth']['tenantName'] = self.tenant_name - reqbody = json.dumps(data) - return self.authenticate_2_0_with_body(reqbody) - - def authenticate_2_0_with_body(self, reqbody): - resp = self.request('/v2.0/tokens', data=reqbody, - headers={'Content-Type': 'application/json'}, - method='POST') - if resp.status == httplib.UNAUTHORIZED: - raise InvalidCredsError() - elif resp.status not in [httplib.OK, - httplib.NON_AUTHORITATIVE_INFORMATION]: - body = 'code: %s body: %s' % (resp.status, resp.body) - raise MalformedResponseError('Malformed response', body=body, - driver=self.driver) - else: - try: - body = json.loads(resp.body) - except Exception: - e = sys.exc_info()[1] - raise MalformedResponseError('Failed to parse JSON', e) - - try: - access = body['access'] - expires = access['token']['expires'] - - self.auth_token = access['token']['id'] - self.auth_token_expires = parse_date(expires) - self.urls = access['serviceCatalog'] - self.auth_user_info = access.get('user', {}) - except KeyError: - e = sys.exc_info()[1] - raise MalformedResponseError('Auth JSON response is \ - missing required elements', e) - - return self - - def authenticate_3_x_with_password(self): - # TODO: Support for custom domain - # TODO: Refactor and add a class per API version - domain = 'Default' - - data = { - 'auth': { - 'identity': { - 'methods': ['password'], - 'password': { - 'user': { - 'domain': { - 'name': domain - }, - 'name': self.user_id, - 'password': self.key - } - } - }, - 'scope': { - 'project': { - 'domain': { - 'name': domain - }, - 'name': self.tenant_name - } - } - } - } - - if self.tenant_name: - data['auth']['scope'] = { - 'project': { - 'domain': { - 'name': domain - }, - 'name': self.tenant_name - } - } - - data = json.dumps(data) - response = self.request('/v3/auth/tokens', data=data, - headers={'Content-Type': 
'application/json'}, - method='POST') - - if response.status == httplib.UNAUTHORIZED: - # Invalid credentials - raise InvalidCredsError() - elif response.status in [httplib.OK, httplib.CREATED]: - headers = response.headers - - try: - body = json.loads(response.body) - except Exception: - e = sys.exc_info()[1] - raise MalformedResponseError('Failed to parse JSON', e) - - try: - expires = body['token']['expires_at'] - - self.auth_token = headers['x-subject-token'] - self.auth_token_expires = parse_date(expires) - self.urls = body['token']['catalog'] - self.auth_user_info = None - except KeyError: - e = sys.exc_info()[1] - raise MalformedResponseError('Auth JSON response is \ - missing required elements', e) - body = 'code: %s body:%s' % (response.status, response.body) - else: - raise MalformedResponseError('Malformed response', body=body, - driver=self.driver) - - return self - - def is_token_valid(self): - """ - Return True if the current auth token is already cached and hasn't - expired yet. - - :return: ``True`` if the token is still valid, ``False`` otherwise. - :rtype: ``bool`` - """ - if not self.auth_token: - return False - - if not self.auth_token_expires: - return False - - expires = self.auth_token_expires - \ - datetime.timedelta(seconds=AUTH_TOKEN_EXPIRES_GRACE_SECONDS) - - time_tuple_expires = expires.utctimetuple() - time_tuple_now = datetime.datetime.utcnow().utctimetuple() - - if time_tuple_now < time_tuple_expires: - return True - - return False - - -class OpenStackServiceCatalog(object): - """ - http://docs.openstack.org/api/openstack-identity-service/2.0/content/ - - This class should be instanciated with the contents of the - 'serviceCatalog' in the auth response. 
This will do the work of figuring - out which services actually exist in the catalog as well as split them up - by type, name, and region if available - """ - - _auth_version = None - _service_catalog = None - - def __init__(self, service_catalog, ex_force_auth_version=None): - self._auth_version = ex_force_auth_version or AUTH_API_VERSION - self._service_catalog = {} - - # Check this way because there are a couple of different 2.0_* - # auth types. - if '3.x' in self._auth_version: - self._parse_auth_v3(service_catalog) - elif '2.0' in self._auth_version: - self._parse_auth_v2(service_catalog) - elif ('1.1' in self._auth_version) or ('1.0' in self._auth_version): - self._parse_auth_v1(service_catalog) - else: - raise LibcloudError('auth version "%s" not supported' - % (self._auth_version)) - - def get_catalog(self): - return self._service_catalog - - def get_public_urls(self, service_type=None, name=None): - endpoints = self.get_endpoints(service_type=service_type, - name=name) - - result = [] - for endpoint in endpoints: - if 'publicURL' in endpoint: - result.append(endpoint['publicURL']) - - return result - - def get_endpoints(self, service_type=None, name=None): - eps = [] - - if '2.0' in self._auth_version: - endpoints = self._service_catalog.get(service_type, {}) \ - .get(name, {}) - elif ('1.1' in self._auth_version) or ('1.0' in self._auth_version): - endpoints = self._service_catalog.get(name, {}) - - for regionName, values in endpoints.items(): - eps.append(values[0]) - - return eps - - def get_endpoint(self, service_type=None, name=None, region=None): - if '3.x' in self._auth_version: - endpoints = self._service_catalog.get(service_type, {}) \ - .get(region, []) - - endpoint = [] - for _endpoint in endpoints: - if _endpoint['type'] == 'public': - endpoint = [_endpoint] - break - elif '2.0' in self._auth_version: - endpoint = self._service_catalog.get(service_type, {}) \ - .get(name, {}).get(region, []) - elif ('1.1' in self._auth_version) or ('1.0' in 
self._auth_version): - endpoint = self._service_catalog.get(name, {}).get(region, []) - - # ideally an endpoint either isn't found or only one match is found. - if len(endpoint) == 1: - return endpoint[0] - else: - return {} - - def get_regions(self): - """ - Retrieve a list of all the available regions. - - :rtype: ``list`` of ``str`` - """ - regions = set() - - catalog_items = self._service_catalog.items() - - if '3.x' in self._auth_version: - for service_type, values in catalog_items: - for region in values.keys(): - regions.add(region) - elif '2.0' in self._auth_version: - for service_type, services_by_name in catalog_items: - items = services_by_name.items() - for service_name, endpoints_by_region in items: - for region in endpoints_by_region.keys(): - if region: - regions.add(region) - elif ('1.1' in self._auth_version) or ('1.0' in self._auth_version): - for service_name, endpoints_by_region in catalog_items: - for region in endpoints_by_region.keys(): - if region: - regions.add(region) - - return list(regions) - - def get_service_types(self, region=None): - """ - Retrieve all the available service types. - - :param region: Optional region to retrieve service types for. 
- :type region: ``str`` - - :rtype: ``list`` of ``str`` - """ - service_types = set() - - for service_type, values in self._service_catalog.items(): - regions = values.keys() - if not region or region in regions: - service_types.add(service_type) - - return list(service_types) - - def get_service_names(self, service_type, region=None): - """ - Retrieve list of service names that match service type and region - - :type service_type: ``str`` - :type region: ``str`` - - :rtype: ``list`` of ``str`` - """ - names = set() - - if '2.0' in self._auth_version: - named_entries = self._service_catalog.get(service_type, {}) - for (name, region_entries) in named_entries.items(): - # Support None for region to return the first found - if region is None or region in region_entries.keys(): - names.add(name) - else: - raise ValueError('Unsupported version: %s' % (self._auth_version)) - - return list(names) - - def _parse_auth_v1(self, service_catalog): - for service, endpoints in service_catalog.items(): - - self._service_catalog[service] = {} - - for endpoint in endpoints: - region = endpoint.get('region') - - if region not in self._service_catalog[service]: - self._service_catalog[service][region] = [] - - self._service_catalog[service][region].append(endpoint) - - def _parse_auth_v2(self, service_catalog): - for service in service_catalog: - service_type = service['type'] - service_name = service.get('name', None) - - if service_type not in self._service_catalog: - self._service_catalog[service_type] = {} - - if service_name not in self._service_catalog[service_type]: - self._service_catalog[service_type][service_name] = {} - - for endpoint in service.get('endpoints', []): - region = endpoint.get('region', None) - - catalog = self._service_catalog[service_type][service_name] - if region not in catalog: - catalog[region] = [] - - catalog[region].append(endpoint) - - def _parse_auth_v3(self, service_catalog): - for entry in service_catalog: - service_type = entry['type'] - - # 
TODO: use defaultdict - if service_type not in self._service_catalog: - self._service_catalog[service_type] = {} - - for endpoint in entry['endpoints']: - region = endpoint.get('region', None) - - # TODO: Normalize entries for each version - catalog = self._service_catalog[service_type] - if region not in catalog: - catalog[region] = [] - - region_entry = { - 'url': endpoint['url'], - 'type': endpoint['interface'] # public / private - } - catalog[region].append(region_entry) - - class OpenStackBaseConnection(ConnectionUserAndKey): """ @@ -687,6 +151,7 @@ def __init__(self, user_id, key, secure=True, self._ex_force_service_type = ex_force_service_type self._ex_force_service_name = ex_force_service_name self._ex_force_service_region = ex_force_service_region + self._osa = None if ex_force_auth_token and not ex_force_base_url: raise LibcloudError( @@ -705,11 +170,24 @@ def __init__(self, user_id, key, secure=True, raise LibcloudError('OpenStack instance must ' + 'have auth_url set') - osa = OpenStackAuthConnection(self, auth_url, self._auth_version, - self.user_id, self.key, - tenant_name=self._ex_tenant_name, - timeout=self.timeout) - self._osa = osa + def get_auth_class(self): + """ + Retrieve identity / authentication class instance. 
+ + :rtype: :class:`OpenStackIdentityConnection` + """ + if not self._osa: + auth_url = self._get_auth_url() + + cls = get_class_for_auth_version(auth_version=self._auth_version) + self._osa = cls(auth_url=auth_url, + user_id=self.user_id, + key=self.key, + tenant_name=self._ex_tenant_name, + timeout=self.timeout, + parent_conn=self) + + return self._osa def request(self, action, params=None, data='', headers=None, method='GET', raw=False): @@ -756,6 +234,7 @@ def get_endpoint(self): service_type = self.service_type service_name = self.service_name service_region = self.service_region + if self._ex_force_service_type: service_type = self._ex_force_service_type if self._ex_force_service_name: @@ -763,18 +242,17 @@ def get_endpoint(self): if self._ex_force_service_region: service_region = self._ex_force_service_region - ep = self.service_catalog.get_endpoint(service_type=service_type, - name=service_name, - region=service_region) + endpoint = self.service_catalog.get_endpoint(service_type=service_type, + name=service_name, + region=service_region) # TODO: Normalize keys for different auth versions and use an object - if 'publicURL' in ep: - return ep['publicURL'] - elif 'url' in ep: - # v3 - return ep['url'] + url = endpoint.url - raise LibcloudError('Could not find specified endpoint') + if not url: + raise LibcloudError('Could not find specified endpoint') + + return url def add_default_headers(self, headers): headers['X-Auth-Token'] = self.auth_token @@ -794,7 +272,7 @@ def _populate_hosts_and_request_paths(self): OpenStack uses a separate host for API calls which is only provided after an initial authentication request. """ - osa = self._osa + osa = self.get_auth_class() if self._ex_force_auth_token: # If ex_force_auth_token is provided we always hit the api directly @@ -808,15 +286,22 @@ def _populate_hosts_and_request_paths(self): if not osa.is_token_valid(): # Token is not available or it has expired. Need to retrieve a # new one. 
- osa.authenticate() # may throw InvalidCreds + if self._auth_version == '2.0_apikey': + kwargs = {'auth_type': 'api_key'} + elif self._auth_version == '2.0_password': + kwargs = {'auth_type': 'password'} + else: + kwargs = {} + + osa = osa.authenticate(**kwargs) # may throw InvalidCreds self.auth_token = osa.auth_token self.auth_token_expires = osa.auth_token_expires self.auth_user_info = osa.auth_user_info # Pull out and parse the service catalog - osc = OpenStackServiceCatalog( - osa.urls, ex_force_auth_version=self._auth_version) + osc = OpenStackServiceCatalog(service_catalog=osa.urls, + auth_version=self._auth_version) self.service_catalog = osc url = self._ex_force_base_url or self.get_endpoint() diff --git a/libcloud/common/openstack_identity.py b/libcloud/common/openstack_identity.py new file mode 100644 index 0000000000..5fdd1868aa --- /dev/null +++ b/libcloud/common/openstack_identity.py @@ -0,0 +1,1225 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Common / shared code for handling authentication against OpenStack identity +service (Keystone). 
+""" + +import sys +import datetime + +from libcloud.utils.py3 import httplib +from libcloud.utils.iso8601 import parse_date + +from libcloud.common.base import ConnectionUserAndKey, Response +from libcloud.compute.types import (LibcloudError, InvalidCredsError, + MalformedResponseError) + +try: + import simplejson as json +except ImportError: + import json + +AUTH_API_VERSION = '1.1' + +# Auth versions which contain token expiration information. +AUTH_VERSIONS_WITH_EXPIRES = [ + '1.1', + '2.0', + '2.0_apikey', + '2.0_password', + '3.0', + '3.x_password' +] + +# How many seconds to substract from the auth token expiration time before +# testing if the token is still valid. +# The time is subtracted to account for the HTTP request latency and prevent +# user from getting "InvalidCredsError" if token is about to expire. +AUTH_TOKEN_EXPIRES_GRACE_SECONDS = 5 + + +__all__ = [ + 'OpenStackIdentityDomain', + 'OpenStackIdentityProject', + 'OpenStackIdentityUser', + 'OpenStackIdentityRole', + + 'OpenStackServiceCatalog', + 'OpenStackServiceCatalogEntry', + 'OpenStackServiceCatalogEntryEndpoint', + + 'OpenStackIdentityConnection', + 'OpenStackIdentity_1_0_Connection', + 'OpenStackIdentity_1_1_Connection', + 'OpenStackIdentity_2_0_Connection', + 'OpenStackIdentity_3_0_Connection', + + 'get_class_for_auth_version' +] + + +class OpenStackIdentityDomain(object): + def __init__(self, id, name, enabled): + self.id = id + self.name = name + self.enabled = enabled + + def __repr__(self): + return (('' % + (self.id, self.name, self.enabled))) + + +class OpenStackIdentityProject(object): + def __init__(self, id, domain_id, name, description, enabled): + self.id = id + self.domain_id = domain_id + self.name = name + self.description = description + self.enabled = enabled + + def __repr__(self): + return (('' % + (self.id, self.domain_id, self.name, self.enabled))) + + +class OpenStackIdentityRole(object): + def __init__(self, id, name, description, enabled): + self.id = id + self.name 
= name + self.description = description + self.enabled = enabled + + def __repr__(self): + return (('' % (self.id, self.name, self.description, + self.enabled))) + + +class OpenStackIdentityUser(object): + def __init__(self, id, domain_id, name, email, description, enabled): + self.id = id + self.domain_id = domain_id + self.name = name + self.email = email + self.description = description + self.enabled = enabled + + def __repr__(self): + return ((' 1: + raise ValueError('Found more than 1 matching endpoint') + else: + raise LibcloudError('Could not find specified endpoint') + + def get_regions(self, service_type=None): + """ + Retrieve a list of all the available regions. + + :param service_type: If specified, only return regions for this + service type. + :type service_type: ``str`` + + :rtype: ``list`` of ``str`` + """ + regions = set() + + for entry in self._entries: + if service_type and entry.service_type != service_type: + continue + + for endpoint in entry.endpoints: + if endpoint.region: + regions.add(endpoint.region) + + return sorted(list(regions)) + + def get_service_types(self, region=None): + """ + Retrieve all the available service types. + + :param region: Optional region to retrieve service types for. + :type region: ``str`` + + :rtype: ``list`` of ``str`` + """ + service_types = set() + + for entry in self._entries: + include = True + + for endpoint in entry.endpoints: + if region and endpoint.region != region: + include = False + break + + if include: + service_types.add(entry.service_type) + + return sorted(list(service_types)) + + def get_service_names(self, service_type=None, region=None): + """ + Retrieve list of service names that match service type and region. 
+ + :type service_type: ``str`` + :type region: ``str`` + + :rtype: ``list`` of ``str`` + """ + names = set() + + if '2.0' not in self._auth_version: + raise ValueError('Unsupported version: %s' % (self._auth_version)) + + for entry in self._entries: + if service_type and entry.service_type != service_type: + continue + + include = True + for endpoint in entry.endpoints: + if region and endpoint.region != region: + include = False + break + + if include and entry.service_name: + names.add(entry.service_name) + + return sorted(list(names)) + + def _parse_service_catalog_auth_v1(self, service_catalog): + entries = [] + + for service, endpoints in service_catalog.items(): + entry_endpoints = [] + for endpoint in endpoints: + region = endpoint.get('region', None) + + public_url = endpoint.get('publicURL', None) + private_url = endpoint.get('internalURL', None) + + if public_url: + entry_endpoint = OpenStackServiceCatalogEntryEndpoint( + region=region, url=public_url, + endpoint_type='external') + entry_endpoints.append(entry_endpoint) + + if private_url: + entry_endpoint = OpenStackServiceCatalogEntryEndpoint( + region=region, url=private_url, + endpoint_type='internal') + entry_endpoints.append(entry_endpoint) + + entry = OpenStackServiceCatalogEntry(service_type=service, + endpoints=entry_endpoints) + entries.append(entry) + + return entries + + def _parse_service_catalog_auth_v2(self, service_catalog): + entries = [] + + for service in service_catalog: + service_type = service['type'] + service_name = service.get('name', None) + + entry_endpoints = [] + for endpoint in service.get('endpoints', []): + region = endpoint.get('region', None) + + public_url = endpoint.get('publicURL', None) + private_url = endpoint.get('internalURL', None) + + if public_url: + entry_endpoint = OpenStackServiceCatalogEntryEndpoint( + region=region, url=public_url, + endpoint_type='external') + entry_endpoints.append(entry_endpoint) + + if private_url: + entry_endpoint = 
OpenStackServiceCatalogEntryEndpoint( + region=region, url=private_url, + endpoint_type='internal') + entry_endpoints.append(entry_endpoint) + + entry = OpenStackServiceCatalogEntry(service_type=service_type, + endpoints=entry_endpoints, + service_name=service_name) + entries.append(entry) + + return entries + + def _parse_service_catalog_auth_v3(self, service_catalog): + entries = [] + + for item in service_catalog: + service_type = item['type'] + + entry_endpoints = [] + for endpoint in item['endpoints']: + region = endpoint.get('region', None) + url = endpoint['url'] + endpoint_type = endpoint['interface'] + + if endpoint_type == 'internal': + endpoint_type = 'internal' + elif endpoint_type == 'public': + endpoint_type = 'external' + elif endpoint_type == 'admin': + endpoint_type = 'admin' + + entry_endpoint = OpenStackServiceCatalogEntryEndpoint( + region=region, url=url, endpoint_type=endpoint_type) + entry_endpoints.append(entry_endpoint) + + entry = OpenStackServiceCatalogEntry(service_type=service_type, + endpoints=entry_endpoints) + entries.append(entry) + + return entries + + +class OpenStackServiceCatalogEntry(object): + def __init__(self, service_type, endpoints=None, service_name=None): + """ + :param service_type: Service type. + :type service_type: ``str`` + + :param endpoints: Endpoints belonging to this entry. + :type endpoints: ``list`` + + :param service_name: Optional service name. 
+ :type service_name: ``str`` + """ + self.service_type = service_type + self.endpoints = endpoints or [] + self.service_name = service_name + + # For consistency, sort the endpoints + self.endpoints = sorted(self.endpoints, key=lambda x: x.url or '') + + def __eq__(self, other): + return (self.service_type == other.service_type and + self.endpoints == other.endpoints and + other.service_name == self.service_name) + + def __ne__(self, other): + return not self.__eq__(other=other) + + def __repr__(self): + return (('= 1) + self.assertTrue(osa.auth_token is not None) + + if auth_version in ['1.1', '2.0', '2.0_apikey', '2.0_password']: + self.assertTrue(osa.auth_token_expires is not None) + + if auth_version in ['2.0', '2.0_apikey', '2.0_password']: + self.assertTrue(osa.auth_user_info is not None) + + def test_token_expiration_and_force_reauthentication(self): + user_id = OPENSTACK_PARAMS[0] + key = OPENSTACK_PARAMS[1] + + connection = self._get_mock_connection(OpenStack_2_0_MockHttp) + auth_url = connection.auth_url + + yesterday = datetime.datetime.today() - datetime.timedelta(1) + tomorrow = datetime.datetime.today() + datetime.timedelta(1) + + osa = OpenStackIdentity_2_0_Connection(auth_url=auth_url, + user_id=user_id, + key=key, + parent_conn=connection) + + mocked_auth_method = Mock(wraps=osa._authenticate_2_0_with_body) + osa._authenticate_2_0_with_body = mocked_auth_method + + # Force re-auth, expired token + osa.auth_token = None + osa.auth_token_expires = yesterday + count = 5 + + for i in range(0, count): + osa.authenticate(force=True) + + self.assertEqual(mocked_auth_method.call_count, count) + + # No force reauth, expired token + osa.auth_token = None + osa.auth_token_expires = yesterday + + mocked_auth_method.call_count = 0 + self.assertEqual(mocked_auth_method.call_count, 0) + + for i in range(0, count): + osa.authenticate(force=False) + + self.assertEqual(mocked_auth_method.call_count, 1) + + # No force reauth, valid / non-expired token + 
osa.auth_token = None + + mocked_auth_method.call_count = 0 + self.assertEqual(mocked_auth_method.call_count, 0) + + for i in range(0, count): + osa.authenticate(force=False) + + if i == 0: + osa.auth_token_expires = tomorrow + + self.assertEqual(mocked_auth_method.call_count, 1) + + # No force reauth, valid / non-expired token which is about to expire in + # less than AUTH_TOKEN_EXPIRES_GRACE_SECONDS + soon = datetime.datetime.utcnow() + \ + datetime.timedelta(seconds=AUTH_TOKEN_EXPIRES_GRACE_SECONDS - 1) + osa.auth_token = None + + mocked_auth_method.call_count = 0 + self.assertEqual(mocked_auth_method.call_count, 0) + + for i in range(0, count): + if i == 0: + osa.auth_token_expires = soon + + osa.authenticate(force=False) + + self.assertEqual(mocked_auth_method.call_count, 1) + + def _get_mock_connection(self, mock_http_class, auth_url=None): + OpenStackBaseConnection.conn_classes = (mock_http_class, + mock_http_class) + + if auth_url is None: + auth_url = "https://auth.api.example.com" + + OpenStackBaseConnection.auth_url = auth_url + connection = OpenStackBaseConnection(*OPENSTACK_PARAMS) + + connection._ex_force_base_url = "https://www.foo.com" + connection.driver = OpenStack_1_0_NodeDriver(*OPENSTACK_PARAMS) + + return connection + + +class OpenStackIdentity_3_0_ConnectionTests(unittest.TestCase): + def setUp(self): + mock_cls = OpenStackIdentity_3_0_MockHttp + OpenStackIdentity_3_0_Connection.conn_classes = (mock_cls, mock_cls) + + self.auth_instance = OpenStackIdentity_3_0_Connection(auth_url='http://none', + user_id='test', + key='test') + self.auth_instance.auth_token = 'mock' + + def test_list_domains(self): + domains = self.auth_instance.list_domains() + self.assertEqual(len(domains), 1) + self.assertEqual(domains[0].id, 'default') + self.assertEqual(domains[0].name, 'Default') + self.assertTrue(domains[0].enabled) + + def test_list_projects(self): + projects = self.auth_instance.list_projects() + self.assertEqual(len(projects), 4) + 
self.assertEqual(projects[0].id, 'a') + self.assertEqual(projects[0].domain_id, 'default') + self.assertTrue(projects[0].enabled) + self.assertEqual(projects[0].description, 'Test project') + + def test_list_users(self): + users = self.auth_instance.list_users() + self.assertEqual(len(users), 12) + self.assertEqual(users[0].id, 'a') + self.assertEqual(users[0].domain_id, 'default') + self.assertEqual(users[0].enabled, True) + self.assertEqual(users[0].email, 'openstack-test@localhost') + + def test_list_roles(self): + roles = self.auth_instance.list_roles() + self.assertEqual(len(roles), 2) + self.assertEqual(roles[1].id, 'b') + self.assertEqual(roles[1].name, 'admin') + + def test_list_user_projects(self): + user = self.auth_instance.list_users()[0] + projects = self.auth_instance.list_user_projects(user=user) + self.assertEqual(len(projects), 0) + + def test_list_user_domain_roles(self): + user = self.auth_instance.list_users()[0] + domain = self.auth_instance.list_domains()[0] + roles = self.auth_instance.list_user_domain_roles(domain=domain, + user=user) + self.assertEqual(len(roles), 1) + self.assertEqual(roles[0].name, 'admin') + + def test_get_domain(self): + domain = self.auth_instance.get_domain(domain_id='default') + self.assertEqual(domain.name, 'Default') + + def test_create_user(self): + user = self.auth_instance.create_user(email='test2@localhost', password='test1', + name='test2', domain_id='default') + + self.assertEqual(user.id, 'c') + self.assertEqual(user.name, 'test2') + + def test_grant_role_to_user(self): + domain = self.auth_instance.list_domains()[0] + role = self.auth_instance.list_roles()[0] + user = self.auth_instance.list_users()[0] + + result = self.auth_instance.grant_role_to_user(domain=domain, + role=role, + user=user) + self.assertTrue(result) + + def test_revoke_role_from_user(self): + domain = self.auth_instance.list_domains()[0] + role = self.auth_instance.list_roles()[0] + user = self.auth_instance.list_users()[0] + + result = 
self.auth_instance.revoke_role_from_user(domain=domain, + role=role, + user=user) + self.assertTrue(result) + + +class OpenStackServiceCatalogTestCase(unittest.TestCase): + fixtures = ComputeFileFixtures('openstack') + + def test_parsing_auth_v1_1(self): + data = self.fixtures.load('_v1_1__auth.json') + data = json.loads(data) + service_catalog = data['auth']['serviceCatalog'] + + catalog = OpenStackServiceCatalog(service_catalog=service_catalog, + auth_version='1.0') + entries = catalog.get_entries() + self.assertEqual(len(entries), 3) + + entry = [e for e in entries if e.service_type == 'cloudFilesCDN'][0] + self.assertEqual(entry.service_type, 'cloudFilesCDN') + self.assertEqual(entry.service_name, None) + self.assertEqual(len(entry.endpoints), 2) + self.assertEqual(entry.endpoints[0].region, 'ORD') + self.assertEqual(entry.endpoints[0].url, + 'https://cdn2.clouddrive.com/v1/MossoCloudFS') + self.assertEqual(entry.endpoints[0].endpoint_type, 'external') + self.assertEqual(entry.endpoints[1].region, 'LON') + self.assertEqual(entry.endpoints[1].endpoint_type, 'external') + + def test_parsing_auth_v2(self): + data = self.fixtures.load('_v2_0__auth.json') + data = json.loads(data) + service_catalog = data['access']['serviceCatalog'] + + catalog = OpenStackServiceCatalog(service_catalog=service_catalog, + auth_version='2.0') + entries = catalog.get_entries() + self.assertEqual(len(entries), 6) + + entry = [e for e in entries if e.service_name == 'cloudServers'][0] + self.assertEqual(entry.service_type, 'compute') + self.assertEqual(entry.service_name, 'cloudServers') + self.assertEqual(len(entry.endpoints), 1) + self.assertEqual(entry.endpoints[0].region, None) + self.assertEqual(entry.endpoints[0].url, + 'https://servers.api.rackspacecloud.com/v1.0/1337') + self.assertEqual(entry.endpoints[0].endpoint_type, 'external') + + def test_parsing_auth_v3(self): + data = self.fixtures.load('_v3__auth.json') + data = json.loads(data) + service_catalog = 
data['token']['catalog'] + + catalog = OpenStackServiceCatalog(service_catalog=service_catalog, + auth_version='3.x') + entries = catalog.get_entries() + self.assertEqual(len(entries), 6) + entry = [e for e in entries if e.service_type == 'volume'][0] + self.assertEqual(entry.service_type, 'volume') + self.assertEqual(entry.service_name, None) + self.assertEqual(len(entry.endpoints), 3) + self.assertEqual(entry.endpoints[0].region, 'regionOne') + self.assertEqual(entry.endpoints[0].endpoint_type, 'external') + self.assertEqual(entry.endpoints[1].region, 'regionOne') + self.assertEqual(entry.endpoints[1].endpoint_type, 'admin') + self.assertEqual(entry.endpoints[2].region, 'regionOne') + self.assertEqual(entry.endpoints[2].endpoint_type, 'internal') + + def test_get_public_urls(self): + data = self.fixtures.load('_v2_0__auth.json') + data = json.loads(data) + service_catalog = data['access']['serviceCatalog'] + + catalog = OpenStackServiceCatalog(service_catalog=service_catalog, + auth_version='2.0') + + public_urls = catalog.get_public_urls(service_type='object-store') + expected_urls = ['https://storage101.lon1.clouddrive.com/v1/MossoCloudFS_11111-111111111-1111111111-1111111', + 'https://storage101.ord1.clouddrive.com/v1/MossoCloudFS_11111-111111111-1111111111-1111111'] + self.assertEqual(public_urls, expected_urls) + + def test_get_regions(self): + data = self.fixtures.load('_v2_0__auth.json') + data = json.loads(data) + service_catalog = data['access']['serviceCatalog'] + + catalog = OpenStackServiceCatalog(service_catalog=service_catalog, + auth_version='2.0') + + regions = catalog.get_regions(service_type='object-store') + self.assertEqual(regions, ['LON', 'ORD']) + + regions = catalog.get_regions(service_type='invalid') + self.assertEqual(regions, []) + + def test_get_service_types(self): + data = self.fixtures.load('_v2_0__auth.json') + data = json.loads(data) + service_catalog = data['access']['serviceCatalog'] + + catalog = 
OpenStackServiceCatalog(service_catalog=service_catalog, + auth_version='2.0') + service_types = catalog.get_service_types() + self.assertEqual(service_types, ['compute', 'object-store', + 'rax:object-cdn']) + + service_types = catalog.get_service_types(region='ORD') + self.assertEqual(service_types, ['rax:object-cdn']) + + def test_get_service_names(self): + data = self.fixtures.load('_v2_0__auth.json') + data = json.loads(data) + service_catalog = data['access']['serviceCatalog'] + + catalog = OpenStackServiceCatalog(service_catalog=service_catalog, + auth_version='2.0') + + service_names = catalog.get_service_names() + self.assertEqual(service_names, ['cloudFiles', 'cloudFilesCDN', + 'cloudServers', + 'cloudServersOpenStack', + 'cloudServersPreprod', + 'nova']) + + service_names = catalog.get_service_names(service_type='compute') + self.assertEqual(service_names, ['cloudServers', + 'cloudServersOpenStack', + 'cloudServersPreprod', + 'nova']) + + +class OpenStackIdentity_3_0_MockHttp(MockHttp): + fixtures = ComputeFileFixtures('openstack_identity') + json_content_headers = {'content-type': 'application/json; charset=UTF-8'} + + def _v3_domains(self, method, url, body, headers): + if method == 'GET': + body = self.fixtures.load('v3_domains.json') + return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) + raise NotImplementedError() + + def _v3_projects(self, method, url, body, headers): + if method == 'GET': + body = self.fixtures.load('v3_projects.json') + return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) + raise NotImplementedError() + + def _v3_users(self, method, url, body, headers): + if method == 'GET': + # list users + body = self.fixtures.load('v3_users.json') + return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) + elif method == 'POST': + # create user + body = self.fixtures.load('v3_create_user.json') + return (httplib.CREATED, body, self.json_content_headers, 
+ httplib.responses[httplib.CREATED]) + raise NotImplementedError() + + def _v3_roles(self, method, url, body, headers): + if method == 'GET': + body = self.fixtures.load('v3_roles.json') + return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) + raise NotImplementedError() + + def _v3_domains_default_users_a_roles_a(self, method, url, body, headers): + if method == 'PUT': + # grant role + body = '' + return (httplib.NO_CONTENT, body, self.json_content_headers, + httplib.responses[httplib.NO_CONTENT]) + elif method == 'DELETE': + # revoke role + body = '' + return (httplib.NO_CONTENT, body, self.json_content_headers, + httplib.responses[httplib.NO_CONTENT]) + raise NotImplementedError() + + def _v3_domains_default(self, method, url, body, headers): + if method == 'GET': + # get domain + body = self.fixtures.load('v3_domains_default.json') + return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) + raise NotImplementedError() + + def _v3_users_a_projects(self, method, url, body, headers): + if method == 'GET': + # get user projects + body = self.fixtures.load('v3_users_a_projects.json') + return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) + raise NotImplementedError() + + def _v3_domains_default_users_a_roles(self, method, url, body, headers): + if method == 'GET': + # get user domain roles + body = self.fixtures.load('v3_domains_default_users_a_roles.json') + return (httplib.OK, body, self.json_content_headers, httplib.responses[httplib.OK]) + raise NotImplementedError() + + +if __name__ == '__main__': + sys.exit(unittest.main()) diff --git a/libcloud/test/compute/fixtures/openstack/_v3__auth.json b/libcloud/test/compute/fixtures/openstack/_v3__auth.json new file mode 100644 index 0000000000..de78703139 --- /dev/null +++ b/libcloud/test/compute/fixtures/openstack/_v3__auth.json @@ -0,0 +1,182 @@ +{ + "token": { + "methods": [ + "password" + ], + "roles": [ + { + "id": 
"9fe2ff9ee4384b1894a90878d3e92bab", + "name": "_member_" + }, + { + "id": "b258b68172db4403892320f784c4d503", + "name": "admin" + } + ], + "expires_at": "2014-08-10T19:15:57.096078Z", + "project": { + "domain": { + "id": "default", + "name": "Default" + }, + "id": "9c4693dce56b493b9b83197d900f7fba", + "name": "admin" + }, + "catalog": [ + { + "endpoints": [ + { + "url": "http://controller:8774/v2/9c4693dce56b493b9b83197d900f7fba", + "region": "regionOne", + "interface": "internal", + "id": "b3bfb29033ff4add9c97e523e1022794" + }, + { + "url": "http://192.168.18.100:8774/v2/9c4693dce56b493b9b83197d900f7fba", + "region": "regionOne", + "interface": "admin", + "id": "b52ee215ded7473f94a46512cb94dbf1" + }, + { + "url": "http://192.168.18.100:8774/v2/9c4693dce56b493b9b83197d900f7fba", + "region": "regionOne", + "interface": "public", + "id": "ca8a6e39b9334300bf036c0c4226a173" + } + ], + "type": "compute", + "id": "03f123b2253e4852a86b994f86489c0a" + }, + { + "endpoints": [ + { + "url": "http://192.168.18.100:8776/v1/9c4693dce56b493b9b83197d900f7fba", + "region": "regionOne", + "interface": "public", + "id": "20bf617f334c4bcf82746820f5006599" + }, + { + "url": "http://192.168.18.100:8776/v1/9c4693dce56b493b9b83197d900f7fba", + "region": "regionOne", + "interface": "admin", + "id": "2da639c26463424fa9775e0bf4e9f29e" + }, + { + "url": "http://controller:8776/v2/9c4693dce56b493b9b83197d900f7fba", + "region": "regionOne", + "interface": "internal", + "id": "d568ed6f8c5a4649a6e68b7bcb86694b" + } + ], + "type": "volume", + "id": "47f77ba8f3864a03b66024e910ad7247" + }, + { + "endpoints": [ + { + "url": "http://192.168.18.100:9696", + "region": "regionOne", + "interface": "admin", + "id": "720303f92f81404aa80caa32cd9c7d23" + }, + { + "url": "http://192.168.18.100:9696", + "region": "regionOne", + "interface": "public", + "id": "8823b9edba354bb6bdc944a6b3bb5404" + }, + { + "url": "http://controller:9696", + "region": "regionOne", + "interface": "internal", + "id": 
"c2a522538037492dbec2173f271ecb32" + } + ], + "type": "network", + "id": "9bd61e09d372427f81eca9328f33c510" + }, + { + "endpoints": [ + { + "url": "http://controller:5000/v2.0", + "region": "regionOne", + "interface": "internal", + "id": "802622da0a874cac8fe2ec7a02d87c44" + }, + { + "url": "http://192.168.18.100:35357/v2.0", + "region": "regionOne", + "interface": "admin", + "id": "8a4eed85ddc748b18cc2b92e64291eb5" + }, + { + "url": "http://192.168.18.100:5000/v2.0", + "region": "regionOne", + "interface": "public", + "id": "9ef69c1600a944b9904f34efb6dc67eb" + } + ], + "type": "identity", + "id": "aef833a14f4240d0bbb699f0154add8e" + }, + { + "endpoints": [ + { + "url": "http://192.168.18.100:9292", + "region": "regionOne", + "interface": "public", + "id": "1aa84aebd3e2467e898e3c18428e3feb" + }, + { + "url": "http://192.168.18.100:9292", + "region": "regionOne", + "interface": "admin", + "id": "3f6aa4ffd0ec47d2862eee1648993bef" + }, + { + "url": "http://192.168.200.1:9292", + "region": "regionOne", + "interface": "internal", + "id": "9f66f90af36949479a6365680afabe12" + } + ], + "type": "image", + "id": "c0be10ea61e240f99567f328b9adf3d6" + }, + { + "endpoints": [ + { + "url": "http://192.168.18.100:8776/v2/9c4693dce56b493b9b83197d900f7fba", + "region": "regionOne", + "interface": "public", + "id": "6c6b0990ccf84f1890e404fddad7b6e5" + }, + { + "url": "http://192.168.18.100:8776/v2/9c4693dce56b493b9b83197d900f7fba", + "region": "regionOne", + "interface": "admin", + "id": "ab0f0bd770494d4399036867935c52ea" + }, + { + "url": "http://controller:8776/v2/9c4693dce56b493b9b83197d900f7fba", + "region": "regionOne", + "interface": "internal", + "id": "fc7c82deda034e52a544da7d00cd28de" + } + ], + "type": "volumev2", + "id": "e097b64d701e4ce29f2a69eed4e69856" + } + ], + "extras": {}, + "user": { + "domain": { + "id": "default", + "name": "Default" + }, + "id": "55fba80f022b4855acfc700ae13b2b24", + "name": "admin" + }, + "issued_at": "2014-08-10T18:15:57.096107Z" + } +} diff 
--git a/libcloud/test/compute/fixtures/openstack_identity/v3_create_user.json b/libcloud/test/compute/fixtures/openstack_identity/v3_create_user.json new file mode 100644 index 0000000000..82445dc16b --- /dev/null +++ b/libcloud/test/compute/fixtures/openstack_identity/v3_create_user.json @@ -0,0 +1,12 @@ +{ + "user": { + "name": "test2", + "links": { + "self": "http://192.168.18.100:5000/v3/users/c" + }, + "domain_id": "default", + "enabled": true, + "email": "test2@localhost", + "id": "c" + } +} diff --git a/libcloud/test/compute/fixtures/openstack_identity/v3_domains.json b/libcloud/test/compute/fixtures/openstack_identity/v3_domains.json new file mode 100644 index 0000000000..7813acf2cb --- /dev/null +++ b/libcloud/test/compute/fixtures/openstack_identity/v3_domains.json @@ -0,0 +1,18 @@ +{ + "domains": [ + { + "links": { + "self": "http://192.168.18.100:5000/v3/domains/default" + }, + "enabled": true, + "description": "Owns users and tenants (i.e. projects) available on Identity API v2.", + "name": "Default", + "id": "default" + } + ], + "links": { + "self": "http://192.168.18.100:5000/v3/domains", + "previous": null, + "next": null + } +} diff --git a/libcloud/test/compute/fixtures/openstack_identity/v3_domains_default.json b/libcloud/test/compute/fixtures/openstack_identity/v3_domains_default.json new file mode 100644 index 0000000000..f00230fe43 --- /dev/null +++ b/libcloud/test/compute/fixtures/openstack_identity/v3_domains_default.json @@ -0,0 +1,11 @@ +{ + "domain": { + "links": { + "self": "http://192.168.18.100:5000/v3/domains/default" + }, + "enabled": true, + "description": "Owns users and tenants (i.e. 
projects) available on Identity API v2.", + "name": "Default", + "id": "default" + } +} diff --git a/libcloud/test/compute/fixtures/openstack_identity/v3_domains_default_users_a_roles.json b/libcloud/test/compute/fixtures/openstack_identity/v3_domains_default_users_a_roles.json new file mode 100644 index 0000000000..03120727cc --- /dev/null +++ b/libcloud/test/compute/fixtures/openstack_identity/v3_domains_default_users_a_roles.json @@ -0,0 +1,16 @@ +{ + "links": { + "self": "http://192.168.18.100:5000/v3/domains/default/users/a/roles", + "previous": null, + "next": null + }, + "roles": [ + { + "id": "d", + "links": { + "self": "http://192.168.18.100:5000/v3/roles/d" + }, + "name": "admin" + } + ] +} diff --git a/libcloud/test/compute/fixtures/openstack_identity/v3_projects.json b/libcloud/test/compute/fixtures/openstack_identity/v3_projects.json new file mode 100644 index 0000000000..16acbce989 --- /dev/null +++ b/libcloud/test/compute/fixtures/openstack_identity/v3_projects.json @@ -0,0 +1,49 @@ +{ + "links": { + "self": "http://192.168.18.100:5000/v3/projects", + "previous": null, + "next": null + }, + "projects": [ + { + "description": "Test project", + "links": { + "self": "http://192.168.18.100:5000/v3/projects/a" + }, + "enabled": true, + "id": "a", + "domain_id": "default", + "name": "divvy" + }, + { + "description": "Admin Tenant", + "links": { + "self": "http://192.168.18.100:5000/v3/projects/b" + }, + "enabled": true, + "id": "b", + "domain_id": "default", + "name": "admin" + }, + { + "description": "Initial tenant", + "links": { + "self": "http://192.168.18.100:5000/v3/projects/c" + }, + "enabled": true, + "id": "c", + "domain_id": "default", + "name": "first-tenant" + }, + { + "description": "Service Tenant", + "links": { + "self": "http://192.168.18.100:5000/v3/projects/d" + }, + "enabled": true, + "id": "d", + "domain_id": "default", + "name": "service" + } + ] +} diff --git a/libcloud/test/compute/fixtures/openstack_identity/v3_roles.json 
b/libcloud/test/compute/fixtures/openstack_identity/v3_roles.json new file mode 100644 index 0000000000..57854845fd --- /dev/null +++ b/libcloud/test/compute/fixtures/openstack_identity/v3_roles.json @@ -0,0 +1,25 @@ +{ + "links": { + "self": "http://192.168.18.100:5000/v3/roles", + "previous": null, + "next": null + }, + "roles": [ + { + "links": { + "self": "http://192.168.18.100:5000/v3/roles/a" + }, + "enabled": "True", + "description": "Default role for project membership", + "name": "_member_", + "id": "a" + }, + { + "id": "b", + "links": { + "self": "http://192.168.18.100:5000/v3/roles/b" + }, + "name": "admin" + } + ] +} diff --git a/libcloud/test/compute/fixtures/openstack_identity/v3_users.json b/libcloud/test/compute/fixtures/openstack_identity/v3_users.json new file mode 100644 index 0000000000..d4763eaf2f --- /dev/null +++ b/libcloud/test/compute/fixtures/openstack_identity/v3_users.json @@ -0,0 +1,131 @@ +{ + "users": [ + { + "name": "cloud", + "links": { + "self": "http://192.168.18.100:5000/v3/users/a" + }, + "domain_id": "default", + "enabled": true, + "email": "openstack-test@localhost", + "id": "a" + }, + { + "name": "trove", + "links": { + "self": "http://192.168.18.100:5000/v3/users/1ec8cface8614a2786c99c87c7116f09" + }, + "domain_id": "default", + "enabled": true, + "email": "trove@localhost", + "id": "1ec8cface8614a2786c99c87c7116f09" + }, + { + "domain_id": "default", + "name": "tomaz", + "links": { + "self": "http://192.168.18.100:5000/v3/users/458f20357227462e8a17355628984515" + }, + "id": "458f20357227462e8a17355628984515", + "enabled": true, + "email": "tomaz@tomaz.me", + "default_project_id": "3130562cafe147f289bbb3b557f2e7ed" + }, + { + "name": "admin", + "links": { + "self": "http://192.168.18.100:5000/v3/users/55fba80f022b4855acfc700ae13b2b24" + }, + "domain_id": "default", + "enabled": true, + "email": "openstack-test@localhost", + "id": "55fba80f022b4855acfc700ae13b2b24" + }, + { + "name": "nova", + "links": { + "self": 
"http://192.168.18.100:5000/v3/users/679c69c6cfd049ebb3ad85734dfea61a" + }, + "domain_id": "default", + "enabled": true, + "email": "nova@localhost", + "id": "679c69c6cfd049ebb3ad85734dfea61a" + }, + { + "name": "glance", + "links": { + "self": "http://192.168.18.100:5000/v3/users/6f10443170ad4daf81bff251944da9d7" + }, + "domain_id": "default", + "enabled": true, + "email": "glance@localhost", + "id": "6f10443170ad4daf81bff251944da9d7" + }, + { + "name": "ceilometer", + "links": { + "self": "http://192.168.18.100:5000/v3/users/747cf6a2bf75453f847f307823e8eac9" + }, + "domain_id": "default", + "enabled": true, + "email": "ceilometer@localhost", + "id": "747cf6a2bf75453f847f307823e8eac9" + }, + { + "name": "swift", + "links": { + "self": "http://192.168.18.100:5000/v3/users/a0081ee2f9674458bcd7731b6dcbf5f9" + }, + "domain_id": "default", + "enabled": true, + "email": "swift@localhost", + "id": "a0081ee2f9674458bcd7731b6dcbf5f9" + }, + { + "domain_id": "default", + "name": "amann", + "links": { + "self": "http://192.168.18.100:5000/v3/users/aafb942e4c3d4fe1ba33088f3e7c5996" + }, + "id": "aafb942e4c3d4fe1ba33088f3e7c5996", + "enabled": true, + "email": "andrew@divvycloud.com", + "default_project_id": "3130562cafe147f289bbb3b557f2e7ed" + }, + { + "name": "heat", + "links": { + "self": "http://192.168.18.100:5000/v3/users/d16cad10e4f648e0bd9ab5a0359f6736" + }, + "domain_id": "default", + "enabled": true, + "email": "heat@localhost", + "id": "d16cad10e4f648e0bd9ab5a0359f6736" + }, + { + "name": "cinder", + "links": { + "self": "http://192.168.18.100:5000/v3/users/f3a4590b0d66497894ee9a3d73c09a95" + }, + "domain_id": "default", + "enabled": true, + "email": "cinder@localhost", + "id": "f3a4590b0d66497894ee9a3d73c09a95" + }, + { + "name": "neutron", + "links": { + "self": "http://192.168.18.100:5000/v3/users/fa9a7fedeb2844ff89e37d70a2519441" + }, + "domain_id": "default", + "enabled": true, + "email": "neutron@localhost", + "id": "fa9a7fedeb2844ff89e37d70a2519441" + } + ], 
+ "links": { + "self": "http://192.168.18.100:5000/v3/users", + "previous": null, + "next": null + } +} diff --git a/libcloud/test/compute/fixtures/openstack_identity/v3_users_a_projects.json b/libcloud/test/compute/fixtures/openstack_identity/v3_users_a_projects.json new file mode 100644 index 0000000000..e117a8bc40 --- /dev/null +++ b/libcloud/test/compute/fixtures/openstack_identity/v3_users_a_projects.json @@ -0,0 +1,8 @@ +{ + "links": { + "self": "http://192.168.18.100:5000/v3/users/a/projects", + "previous": null, + "next": null + }, + "projects": [] +} diff --git a/libcloud/test/compute/test_openstack.py b/libcloud/test/compute/test_openstack.py index 77a38e9fbb..642b7a85b9 100644 --- a/libcloud/test/compute/test_openstack.py +++ b/libcloud/test/compute/test_openstack.py @@ -25,7 +25,7 @@ except ImportError: import json -from mock import Mock +from mock import Mock, patch from libcloud.utils.py3 import httplib from libcloud.utils.py3 import method_type @@ -33,9 +33,6 @@ from libcloud.common.types import InvalidCredsError, MalformedResponseError, \ LibcloudError -from libcloud.common.openstack import OpenStackBaseConnection -from libcloud.common.openstack import OpenStackAuthConnection -from libcloud.common.openstack import AUTH_TOKEN_EXPIRES_GRACE_SECONDS from libcloud.compute.types import Provider, KeyPairDoesNotExistError from libcloud.compute.providers import get_driver from libcloud.compute.drivers.openstack import ( @@ -85,243 +82,6 @@ def test_non_xml_content_type_handling(self): body, RESPONSE_BODY, "Non-XML body should be returned as is") -class OpenStackServiceCatalogTests(unittest.TestCase): - # TODO refactor and move into libcloud/test/common - - def setUp(self): - OpenStackBaseConnection.conn_classes = (OpenStackMockHttp, - OpenStackMockHttp) - - connection = OpenStackBaseConnection(*OPENSTACK_PARAMS) - connection.auth_url = "https://auth.api.example.com" - connection._ex_force_base_url = "https://www.foo.com" - connection.driver = 
OpenStack_1_0_NodeDriver(*OPENSTACK_PARAMS) - - self.service_catalog = connection.get_service_catalog() - self.catalog = self.service_catalog.get_catalog() - - def test_connection_get_service_catalog(self): - endpoints = self.service_catalog.get_endpoints('cloudFilesCDN', 'cloudFilesCDN') - public_urls = self.service_catalog.get_public_urls('cloudFilesCDN', 'cloudFilesCDN') - - expected_urls = [ - 'https://cdn2.clouddrive.com/v1/MossoCloudFS', - 'https://cdn2.clouddrive.com/v1/MossoCloudFS' - ] - - self.assertTrue('cloudFilesCDN' in self.catalog) - self.assertEqual(len(endpoints), 2) - self.assertEqual(public_urls, expected_urls) - - def test_get_regions(self): - regions = self.service_catalog.get_regions() - self.assertEqual(sorted(regions), ['LON', 'ORD']) - - def test_get_service_types(self): - service_types = self.service_catalog.get_service_types() - self.assertEqual(sorted(service_types), ['compute', 'object-store', - 'rax:object-cdn']) - - service_types = self.service_catalog.get_service_types(region='invalid') - self.assertEqual(sorted(service_types), []) - - def test_get_service_names(self): - OpenStackBaseConnection.conn_classes = (OpenStack_2_0_MockHttp, - OpenStack_2_0_MockHttp) - OpenStackBaseConnection._auth_version = '2.0' - - connection = OpenStackBaseConnection(*OPENSTACK_PARAMS) - connection.auth_url = "https://auth.api.example.com" - connection._ex_force_base_url = "https://www.foo.com" - connection.driver = OpenStack_1_0_NodeDriver(*OPENSTACK_PARAMS) - - service_catalog = connection.get_service_catalog() - - service_names = service_catalog.get_service_names(service_type='object-store') - self.assertEqual(service_names, ['cloudFiles']) - - -class OpenStackAuthConnectionTests(unittest.TestCase): - # TODO refactor and move into libcloud/test/common - - def setUp(self): - OpenStackBaseConnection.auth_url = None - OpenStackBaseConnection.conn_classes = (OpenStackMockHttp, - OpenStackMockHttp) - - def test_auth_url_is_correctly_assembled(self): - 
tuples = [ - ('1.0', OpenStackMockHttp), - ('1.1', OpenStackMockHttp), - ('2.0', OpenStack_2_0_MockHttp), - ('2.0_apikey', OpenStack_2_0_MockHttp), - ('2.0_password', OpenStack_2_0_MockHttp) - ] - - APPEND = 0 - NOTAPPEND = 1 - - auth_urls = [ - ('https://auth.api.example.com', APPEND, ''), - ('https://auth.api.example.com/', NOTAPPEND, '/'), - ('https://auth.api.example.com/foo/bar', NOTAPPEND, '/foo/bar'), - ('https://auth.api.example.com/foo/bar/', NOTAPPEND, '/foo/bar/') - ] - - actions = { - '1.0': '/v1.0', - '1.1': '/v1.1/auth', - '2.0': '/v2.0/tokens', - '2.0_apikey': '/v2.0/tokens', - '2.0_password': '/v2.0/tokens' - } - - user_id = OPENSTACK_PARAMS[0] - key = OPENSTACK_PARAMS[1] - - for (auth_version, mock_http_class) in tuples: - for (url, should_append_default_path, expected_path) in auth_urls: - connection = \ - self._get_mock_connection(mock_http_class=mock_http_class, - auth_url=url) - - auth_url = connection.auth_url - - osa = OpenStackAuthConnection(connection, - auth_url, - auth_version, - user_id, key) - - try: - osa = osa.authenticate() - except: - pass - - if (should_append_default_path == APPEND): - expected_path = actions[auth_version] - - self.assertEqual(osa.action, expected_path) - - def test_basic_authentication(self): - tuples = [ - ('1.0', OpenStackMockHttp), - ('1.1', OpenStackMockHttp), - ('2.0', OpenStack_2_0_MockHttp), - ('2.0_apikey', OpenStack_2_0_MockHttp), - ('2.0_password', OpenStack_2_0_MockHttp) - ] - - user_id = OPENSTACK_PARAMS[0] - key = OPENSTACK_PARAMS[1] - - for (auth_version, mock_http_class) in tuples: - connection = \ - self._get_mock_connection(mock_http_class=mock_http_class) - auth_url = connection.auth_url - - osa = OpenStackAuthConnection(connection, auth_url, auth_version, - user_id, key) - - self.assertEqual(osa.urls, {}) - self.assertEqual(osa.auth_token, None) - self.assertEqual(osa.auth_user_info, None) - osa = osa.authenticate() - - self.assertTrue(len(osa.urls) >= 1) - self.assertTrue(osa.auth_token is not 
None) - - if auth_version in ['1.1', '2.0', '2.0_apikey', '2.0_password']: - self.assertTrue(osa.auth_token_expires is not None) - - if auth_version in ['2.0', '2.0_apikey', '2.0_password']: - self.assertTrue(osa.auth_user_info is not None) - - def test_token_expiration_and_force_reauthentication(self): - user_id = OPENSTACK_PARAMS[0] - key = OPENSTACK_PARAMS[1] - - connection = self._get_mock_connection(OpenStack_2_0_MockHttp) - auth_url = connection.auth_url - auth_version = '2.0' - - yesterday = datetime.datetime.today() - datetime.timedelta(1) - tomorrow = datetime.datetime.today() + datetime.timedelta(1) - - osa = OpenStackAuthConnection(connection, auth_url, auth_version, - user_id, key) - - mocked_auth_method = Mock(wraps=osa.authenticate_2_0_with_body) - osa.authenticate_2_0_with_body = mocked_auth_method - - # Force re-auth, expired token - osa.auth_token = None - osa.auth_token_expires = yesterday - count = 5 - - for i in range(0, count): - osa.authenticate(force=True) - - self.assertEqual(mocked_auth_method.call_count, count) - - # No force reauth, expired token - osa.auth_token = None - osa.auth_token_expires = yesterday - - mocked_auth_method.call_count = 0 - self.assertEqual(mocked_auth_method.call_count, 0) - - for i in range(0, count): - osa.authenticate(force=False) - - self.assertEqual(mocked_auth_method.call_count, 1) - - # No force reauth, valid / non-expired token - osa.auth_token = None - - mocked_auth_method.call_count = 0 - self.assertEqual(mocked_auth_method.call_count, 0) - - for i in range(0, count): - osa.authenticate(force=False) - - if i == 0: - osa.auth_token_expires = tomorrow - - self.assertEqual(mocked_auth_method.call_count, 1) - - # No force reauth, valid / non-expired token which is about to expire in - # less than AUTH_TOKEN_EXPIRES_GRACE_SECONDS - soon = datetime.datetime.utcnow() + \ - datetime.timedelta(seconds=AUTH_TOKEN_EXPIRES_GRACE_SECONDS - 1) - osa.auth_token = None - - mocked_auth_method.call_count = 0 - 
self.assertEqual(mocked_auth_method.call_count, 0) - - for i in range(0, count): - if i == 0: - osa.auth_token_expires = soon - - osa.authenticate(force=False) - - self.assertEqual(mocked_auth_method.call_count, 1) - - def _get_mock_connection(self, mock_http_class, auth_url=None): - OpenStackBaseConnection.conn_classes = (mock_http_class, - mock_http_class) - - if auth_url is None: - auth_url = "https://auth.api.example.com" - - OpenStackBaseConnection.auth_url = auth_url - connection = OpenStackBaseConnection(*OPENSTACK_PARAMS) - - connection._ex_force_base_url = "https://www.foo.com" - connection.driver = OpenStack_1_0_NodeDriver(*OPENSTACK_PARAMS) - - return connection - - class OpenStack_1_0_Tests(unittest.TestCase, TestCaseMixin): should_list_locations = False should_list_volumes = False @@ -356,7 +116,8 @@ def get_endpoint(*args, **kwargs): self.driver.connection._populate_hosts_and_request_paths() clear_pricing_data() - def test_populate_hosts_and_requests_path(self): + @patch('libcloud.common.openstack.OpenStackServiceCatalog') + def test_populate_hosts_and_requests_path(self, _): tomorrow = datetime.datetime.today() + datetime.timedelta(1) cls = self.driver_klass.connectionCls @@ -364,7 +125,7 @@ def test_populate_hosts_and_requests_path(self): # Test authentication and token re-use con = cls('username', 'key') - osa = con._osa + osa = con.get_auth_class() mocked_auth_method = Mock() osa.authenticate = mocked_auth_method @@ -385,7 +146,7 @@ def test_populate_hosts_and_requests_path(self): # ex_force_auth_token provided, authenticate should never be called con = cls('username', 'key', ex_force_base_url='http://ponies', ex_force_auth_token='1234') - osa = con._osa + osa = con.get_auth_class() mocked_auth_method = Mock() osa.authenticate = mocked_auth_method diff --git a/libcloud/test/storage/test_cloudfiles.py b/libcloud/test/storage/test_cloudfiles.py index 84ec615931..70e52c80bc 100644 --- a/libcloud/test/storage/test_cloudfiles.py +++ 
b/libcloud/test/storage/test_cloudfiles.py @@ -138,16 +138,6 @@ def test_service_catalog(self): self.driver.connection.get_endpoint()) self.driver.connection.cdn_request = False - def test_endpoint_pointer(self): - kwargs = {'use_internal_url': False} - driver = CloudFilesStorageDriver('driver', 'dummy', **kwargs) - self.assertEquals(driver.connection._get_endpoint_key(), libcloud.storage.drivers.cloudfiles.PUBLIC_ENDPOINT_KEY) - kwargs = {'use_internal_url': True} - driver = CloudFilesStorageDriver('driver', 'dummy', **kwargs) - self.assertEquals(driver.connection._get_endpoint_key(), libcloud.storage.drivers.cloudfiles.INTERNAL_ENDPOINT_KEY) - driver.connection.cdn_request = True - self.assertEquals(driver.connection._get_endpoint_key(), libcloud.storage.drivers.cloudfiles.PUBLIC_ENDPOINT_KEY) - def test_list_containers(self): CloudFilesMockHttp.type = 'EMPTY' containers = self.driver.list_containers() From 087da806768a1f950c888a416256b2f6489b76a6 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Wed, 13 Aug 2014 15:03:56 +0200 Subject: [PATCH 151/315] Simplify code, remove outdated comments. --- libcloud/storage/drivers/cloudfiles.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/libcloud/storage/drivers/cloudfiles.py b/libcloud/storage/drivers/cloudfiles.py index d1f88a3550..b0694acfef 100644 --- a/libcloud/storage/drivers/cloudfiles.py +++ b/libcloud/storage/drivers/cloudfiles.py @@ -210,10 +210,7 @@ def get_endpoint(self): if self.cdn_request: ep = cdn_ep - if not ep: - raise LibcloudError('Could not find specified endpoint') - - if not ep.url: + if not ep or not ep.url: raise LibcloudError('Could not find specified endpoint') return ep.url From 44945ed403530af5df20dec9e43698db036153d8 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Wed, 13 Aug 2014 16:22:50 +0200 Subject: [PATCH 152/315] Add list_supported_versions to base OpenStackIdentityConnection class and parse roles which are assigned to the user which is used to authenticate. 
--- libcloud/common/openstack.py | 3 +- libcloud/common/openstack_identity.py | 59 +++++++++++++++++++ .../test/common/test_openstack_identity.py | 19 ++++++ .../openstack_identity/v3_versions.json | 58 ++++++++++++++++++ 4 files changed, 137 insertions(+), 2 deletions(-) create mode 100644 libcloud/test/compute/fixtures/openstack_identity/v3_versions.json diff --git a/libcloud/common/openstack.py b/libcloud/common/openstack.py index 80ca08f134..0a11d4400b 100644 --- a/libcloud/common/openstack.py +++ b/libcloud/common/openstack.py @@ -28,9 +28,9 @@ from libcloud.common.types import ProviderError from libcloud.compute.types import (LibcloudError, MalformedResponseError) from libcloud.compute.types import KeyPairDoesNotExistError +from libcloud.common.openstack_identity import get_class_for_auth_version # Imports for backward compatibility reasons -from libcloud.common.openstack_identity import get_class_for_auth_version from libcloud.common.openstack_identity import OpenStackServiceCatalog @@ -246,7 +246,6 @@ def get_endpoint(self): name=service_name, region=service_region) - # TODO: Normalize keys for different auth versions and use an object url = endpoint.url if not url: diff --git a/libcloud/common/openstack_identity.py b/libcloud/common/openstack_identity.py index 5fdd1868aa..0e11a0be00 100644 --- a/libcloud/common/openstack_identity.py +++ b/libcloud/common/openstack_identity.py @@ -53,6 +53,7 @@ __all__ = [ + 'OpenStackIdentityVersion', 'OpenStackIdentityDomain', 'OpenStackIdentityProject', 'OpenStackIdentityUser', @@ -72,6 +73,19 @@ ] +class OpenStackIdentityVersion(object): + def __init__(self, version, status, updated, url): + self.version = version + self.status = status + self.updated = updated + self.url = url + + def __repr__(self): + return ((' Date: Wed, 13 Aug 2014 16:30:20 +0200 Subject: [PATCH 153/315] Remove now unused and not needed OpenStackAuthConnection class. 
--- libcloud/common/openstack_identity.py | 81 --------------------------- 1 file changed, 81 deletions(-) diff --git a/libcloud/common/openstack_identity.py b/libcloud/common/openstack_identity.py index 0e11a0be00..52e17e00df 100644 --- a/libcloud/common/openstack_identity.py +++ b/libcloud/common/openstack_identity.py @@ -683,87 +683,6 @@ def _is_authentication_needed(self, force=False): return True -class OpenStackAuthConnection(OpenStackIdentityConnection): - """ - Note: This class is only here for backward compatibility reasons. - """ - responseCls = OpenStackAuthResponse - name = 'OpenStack Auth' - timeout = None - - def __init__(self, parent_conn, auth_url, auth_version, user_id, key, - tenant_name=None, timeout=None): - super(OpenStackAuthConnection, self).__init__(auth_url=auth_url, - user_id=user_id, - key=key, - tenant_name=tenant_name, - timeout=timeout, - parent_conn=parent_conn) - self.auth_version = auth_version - self._instance_cache = {} - - def _get_cls_for_auth_version(self, auth_version): - if auth_version == '1.0': - cls = OpenStackIdentity_1_0_Connection - elif auth_version == '1.1': - cls = OpenStackIdentity_1_1_Connection - elif auth_version == '2.0' or auth_version == '2.0_apikey': - cls = OpenStackIdentity_2_0_Connection - elif auth_version == '2.0_password': - cls = OpenStackIdentity_2_0_Connection - elif auth_version == '3.x_password': - cls = OpenStackIdentity_3_0_Connection - else: - raise LibcloudError('Unsupported Auth Version requested') - - return cls - - def _get_instance_for_auth_version(self, auth_version): - """ - Retrieve instance for the provided auth version for the local cache (if - exists). 
- """ - # TODO: Just delegate to the new classes - kwargs = {'auth_url': self.auth_url, 'user_id': self.user_id, - 'key': self.key, 'tenant_name': self.tenant_name, - 'timeout': self.timeout, 'parent_conn': self.parent_conn} - - cls = self._get_cls_for_auth_version(auth_version=auth_version) - - if auth_version not in self._instance_cache: - obj = cls(**kwargs) - self._instance_cache[auth_version] = obj - - return self._instance_cache[auth_version] - - def authenticate(self, force=False): - """ - Authenticate against the keystone api. - - :param force: Forcefully update the token even if it's already cached - and still valid. - :type force: ``bool`` - """ - if not self._is_authentication_needed(force=force): - return self - - obj = self._get_instance_for_auth_version( - auth_version=self.auth_version) - - try: - obj.authenticate() - finally: - self.action = obj.action - - # For backward compatibility, re-assign attributes to this class - self.auth_token = obj.auth_token - self.auth_token_expires = obj.auth_token_expires - self.urls = obj.urls - self.auth_user_info = obj.auth_user_info - - return self - - class OpenStackIdentity_1_0_Connection(OpenStackIdentityConnection): """ Connection class for Keystone API v1.0. From 2871ea83d4a27ca29ec6898400678bc289c27da4 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Wed, 13 Aug 2014 17:39:21 +0200 Subject: [PATCH 154/315] Default variable to None. 
--- libcloud/common/openstack_identity.py | 1 + 1 file changed, 1 insertion(+) diff --git a/libcloud/common/openstack_identity.py b/libcloud/common/openstack_identity.py index 52e17e00df..ddd1f8dc6a 100644 --- a/libcloud/common/openstack_identity.py +++ b/libcloud/common/openstack_identity.py @@ -871,6 +871,7 @@ def __init__(self, auth_url, user_id, key, tenant_name=None, timeout=None, tenant_name=tenant_name, timeout=timeout, parent_conn=parent_conn) + self.auth_user_roles = None def authenticate(self, force=False): """ From 5af1bf3635435ea84becc4365d2ceba7d8462939 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Wed, 13 Aug 2014 19:13:08 +0200 Subject: [PATCH 155/315] Use an enum class for identity endpoint type. --- libcloud/common/openstack_identity.py | 37 +++++++++++++++++++-------- 1 file changed, 27 insertions(+), 10 deletions(-) diff --git a/libcloud/common/openstack_identity.py b/libcloud/common/openstack_identity.py index ddd1f8dc6a..b9723fca1b 100644 --- a/libcloud/common/openstack_identity.py +++ b/libcloud/common/openstack_identity.py @@ -62,6 +62,7 @@ 'OpenStackServiceCatalog', 'OpenStackServiceCatalogEntry', 'OpenStackServiceCatalogEntryEndpoint', + 'OpenStackIdentityEndpointType', 'OpenStackIdentityConnection', 'OpenStackIdentity_1_0_Connection', @@ -73,6 +74,15 @@ ] +class OpenStackIdentityEndpointType(object): + """ + Enum class for openstack identity endpoint type. 
+ """ + INTERNAL = 'internal' + EXTERNAL = 'external' + ADMIN = 'admin' + + class OpenStackIdentityVersion(object): def __init__(self, version, status, updated, url): self.version = version @@ -199,7 +209,8 @@ def get_public_urls(self, service_type=None, name=None): result = [] for endpoint in endpoints: - if endpoint.endpoint_type == 'external': + endpoint_type = endpoint.endpoint_type + if endpoint_type == OpenStackIdentityEndpointType.EXTERNAL: result.append(endpoint.url) return result @@ -225,7 +236,7 @@ def get_endpoints(self, service_type=None, name=None): return endpoints def get_endpoint(self, service_type=None, name=None, region=None, - endpoint_type='external'): + endpoint_type=OpenStackIdentityEndpointType.EXTERNAL): """ Retrieve a single endpoint using the provided criteria. @@ -346,13 +357,13 @@ def _parse_service_catalog_auth_v1(self, service_catalog): if public_url: entry_endpoint = OpenStackServiceCatalogEntryEndpoint( region=region, url=public_url, - endpoint_type='external') + endpoint_type=OpenStackIdentityEndpointType.EXTERNAL) entry_endpoints.append(entry_endpoint) if private_url: entry_endpoint = OpenStackServiceCatalogEntryEndpoint( region=region, url=private_url, - endpoint_type='internal') + endpoint_type=OpenStackIdentityEndpointType.INTERNAL) entry_endpoints.append(entry_endpoint) entry = OpenStackServiceCatalogEntry(service_type=service, @@ -378,13 +389,13 @@ def _parse_service_catalog_auth_v2(self, service_catalog): if public_url: entry_endpoint = OpenStackServiceCatalogEntryEndpoint( region=region, url=public_url, - endpoint_type='external') + endpoint_type=OpenStackIdentityEndpointType.EXTERNAL) entry_endpoints.append(entry_endpoint) if private_url: entry_endpoint = OpenStackServiceCatalogEntryEndpoint( region=region, url=private_url, - endpoint_type='internal') + endpoint_type=OpenStackIdentityEndpointType.INTERNAL) entry_endpoints.append(entry_endpoint) entry = OpenStackServiceCatalogEntry(service_type=service_type, @@ -407,11 
+418,11 @@ def _parse_service_catalog_auth_v3(self, service_catalog): endpoint_type = endpoint['interface'] if endpoint_type == 'internal': - endpoint_type = 'internal' + endpoint_type = OpenStackIdentityEndpointType.INTERNAL elif endpoint_type == 'public': - endpoint_type = 'external' + endpoint_type = OpenStackIdentityEndpointType.EXTERNAL elif endpoint_type == 'admin': - endpoint_type = 'admin' + endpoint_type = OpenStackIdentityEndpointType.ADMIN entry_endpoint = OpenStackServiceCatalogEntryEndpoint( region=region, url=url, endpoint_type=endpoint_type) @@ -458,6 +469,12 @@ def __repr__(self): class OpenStackServiceCatalogEntryEndpoint(object): + VALID_ENDPOINT_TYPES = [ + OpenStackIdentityEndpointType.INTERNAL, + OpenStackIdentityEndpointType.EXTERNAL, + OpenStackIdentityEndpointType.ADMIN, + ] + def __init__(self, region, url, endpoint_type='external'): """ :param region: Endpoint region. @@ -469,7 +486,7 @@ def __init__(self, region, url, endpoint_type='external'): :param endpoint_type: Endpoint type (external / internal / admin). :type endpoint_type: ``str`` """ - if endpoint_type not in ['internal', 'external', 'admin']: + if endpoint_type not in self.VALID_ENDPOINT_TYPES: raise ValueError('Invalid type: %s' % (endpoint_type)) # TODO: Normalize / lowercase all the region names From 9910bd5d85db29e79366e010db9685db61d1fa32 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Thu, 14 Aug 2014 11:06:44 +0200 Subject: [PATCH 156/315] Allow user to specify a default_project_id when creating a user. 
--- libcloud/common/openstack_identity.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/libcloud/common/openstack_identity.py b/libcloud/common/openstack_identity.py index b9723fca1b..dec8f431dd 100644 --- a/libcloud/common/openstack_identity.py +++ b/libcloud/common/openstack_identity.py @@ -1087,11 +1087,8 @@ def revoke_role_from_user(self, domain, user, role): response = self.authenticated_request(path, method='DELETE') return response.status == httplib.NO_CONTENT - def create_domain(self): - pass - def create_user(self, email, password, name, description=None, - domain_id=None, enabled=True): + domain_id=None, default_project_id=None, enabled=True): """ Create a new user account. @@ -1110,6 +1107,9 @@ def create_user(self, email, password, name, description=None, :param domain_id: ID of the domain to add the user to (optional). :type domain_id: ``str`` + :param default_project_id: ID of the default user project (optional). + :type default_project_id: ``str`` + :param enabled: True to enable user after creation. :type enabled: ``bool`` @@ -1129,6 +1129,9 @@ def create_user(self, email, password, name, description=None, if domain_id: data['domain_id'] = domain_id + if default_project_id: + data['default_project_id'] = default_project_id + data = json.dumps({'user': data}) response = self.authenticated_request('/v3/users', data=data, method='POST') From 23d4355a1c86cefdeb04936265a820723a3f5c15 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Thu, 14 Aug 2014 11:30:08 +0200 Subject: [PATCH 157/315] Allow user to scope a token to either project or a domain by passing "scope_to" (and "domain_name") argument to Identity v3 class constructor. 
--- libcloud/common/openstack_identity.py | 61 ++++++++++++++----- .../test/common/test_openstack_identity.py | 49 ++++++++++++++- 2 files changed, 92 insertions(+), 18 deletions(-) diff --git a/libcloud/common/openstack_identity.py b/libcloud/common/openstack_identity.py index dec8f431dd..f2082581f3 100644 --- a/libcloud/common/openstack_identity.py +++ b/libcloud/common/openstack_identity.py @@ -879,8 +879,24 @@ class OpenStackIdentity_3_0_Connection(OpenStackIdentityConnection): name = 'OpenStack Identity API v3.x' auth_version = '3.0' - def __init__(self, auth_url, user_id, key, tenant_name=None, timeout=None, - parent_conn=None): + def __init__(self, auth_url, user_id, key, tenant_name=None, + domain_name='Default', scope_to='project', + timeout=None, parent_conn=None): + """ + :param tenant_name: Name of the project this user belongs to. Note: + When scope_to is set to project, this argument + control to which project to scope the token to. + :type tenant_name: ``str`` + + :param domain_name: Domain the user belongs to. Note: Then scope_to + is set to token, this argument controls to which + domain to scope the token to. 
+ :type domain_name: ``str`` + + :param scope_to: Whether to scope a token to a "project" or a + "domain" + :type scope_to: ``str`` + """ super(OpenStackIdentity_3_0_Connection, self).__init__(auth_url=auth_url, user_id=user_id, @@ -888,6 +904,19 @@ def __init__(self, auth_url, user_id, key, tenant_name=None, timeout=None, tenant_name=tenant_name, timeout=timeout, parent_conn=parent_conn) + if scope_to not in ['project', 'domain']: + raise ValueError('Invalid value for "scope_to" argument: %s' % + (scope_to)) + + if scope_to == 'project' and (not tenant_name or not domain_name): + raise ValueError('Must provide tenant_name and domain_name ' + 'argument') + elif scope_to == 'domain' and not domain_name: + raise ValueError('Must provide domain_name argument') + + self.tenant_name = tenant_name + self.domain_name = domain_name + self.scope_to = scope_to self.auth_user_roles = None def authenticate(self, force=False): @@ -897,9 +926,6 @@ def authenticate(self, force=False): if not self._is_authentication_needed(force=force): return self - # TODO: Support for custom domain - domain = 'Default' - data = { 'auth': { 'identity': { @@ -907,33 +933,36 @@ def authenticate(self, force=False): 'password': { 'user': { 'domain': { - 'name': domain + 'name': self.domain_name }, 'name': self.user_id, 'password': self.key } } - }, - 'scope': { - 'project': { - 'domain': { - 'name': domain - }, - 'name': self.tenant_name - } } } } - if self.tenant_name: + if self.scope_to == 'project': + # Scope token to project (tenant) data['auth']['scope'] = { 'project': { 'domain': { - 'name': domain + 'name': self.domain_name }, 'name': self.tenant_name } } + elif self.domain_name: + # Scope token to domain + data['auth']['scope'] = { + 'domain': { + 'name': self.domain_name + } + } + else: + raise ValueError('Token needs to be scoped either to project or ' + 'a domain') data = json.dumps(data) response = self.request('/v3/auth/tokens', data=data, diff --git 
a/libcloud/test/common/test_openstack_identity.py b/libcloud/test/common/test_openstack_identity.py index e84222b2d3..6057401693 100644 --- a/libcloud/test/common/test_openstack_identity.py +++ b/libcloud/test/common/test_openstack_identity.py @@ -14,7 +14,6 @@ # limitations under the License. import sys -import unittest import datetime try: @@ -33,6 +32,7 @@ from libcloud.common.openstack_identity import OpenStackIdentity_3_0_Connection from libcloud.compute.drivers.openstack import OpenStack_1_0_NodeDriver +from libcloud.test import unittest from libcloud.test import MockHttp from libcloud.test.secrets import OPENSTACK_PARAMS from libcloud.test.file_fixtures import ComputeFileFixtures @@ -229,9 +229,54 @@ def setUp(self): self.auth_instance = OpenStackIdentity_3_0_Connection(auth_url='http://none', user_id='test', - key='test') + key='test', + tenant_name='test') self.auth_instance.auth_token = 'mock' + def test_scope_to_argument(self): + # Invalid scope_to value + expected_msg = 'Invalid value for "scope_to" argument: foo' + self.assertRaisesRegexp(ValueError, expected_msg, + OpenStackIdentity_3_0_Connection, + auth_url='http://none', + user_id='test', + key='test', + scope_to='foo') + + # Missing tenant_name + expected_msg = 'Must provide tenant_name and domain_name argument' + self.assertRaisesRegexp(ValueError, expected_msg, + OpenStackIdentity_3_0_Connection, + auth_url='http://none', + user_id='test', + key='test', + scope_to='project') + + # Missing domain_name + expected_msg = 'Must provide domain_name argument' + self.assertRaisesRegexp(ValueError, expected_msg, + OpenStackIdentity_3_0_Connection, + auth_url='http://none', + user_id='test', + key='test', + scope_to='domain', + domain_name=None) + + # Scope to project all ok + OpenStackIdentity_3_0_Connection(auth_url='http://none', + user_id='test', + key='test', + scope_to='project', + tenant_name='test', + domain_name='Default') + # Scope to domain + 
OpenStackIdentity_3_0_Connection(auth_url='http://none', + user_id='test', + key='test', + scope_to='domain', + tenant_name=None, + domain_name='Default') + def test_list_supported_versions(self): OpenStackIdentity_3_0_MockHttp.type = 'v3' From 591ffb06c66459e99dd7d1b51806105a7946f98e Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Thu, 14 Aug 2014 13:58:09 +0200 Subject: [PATCH 158/315] Update __all__ remove variables which have been moved to a different module. --- libcloud/common/openstack.py | 11 ++--------- libcloud/test/common/test_openstack_identity.py | 2 +- 2 files changed, 3 insertions(+), 10 deletions(-) diff --git a/libcloud/common/openstack.py b/libcloud/common/openstack.py index 0a11d4400b..e44d144789 100644 --- a/libcloud/common/openstack.py +++ b/libcloud/common/openstack.py @@ -47,22 +47,15 @@ '2.0', '2.0_apikey', '2.0_password', + '3.x', '3.x_password' ] -# How many seconds to substract from the auth token expiration time before -# testing if the token is still valid. -# The time is subtracted to account for the HTTP request latency and prevent -# user from getting "InvalidCredsError" if token is about to expire. 
-AUTH_TOKEN_EXPIRES_GRACE_SECONDS = 5 - __all__ = [ 'OpenStackBaseConnection', 'OpenStackResponse', 'OpenStackException', - 'OpenStackDriverMixin', - - 'AUTH_TOKEN_EXPIRES_GRACE_SECONDS' + 'OpenStackDriverMixin' ] diff --git a/libcloud/test/common/test_openstack_identity.py b/libcloud/test/common/test_openstack_identity.py index 6057401693..20ec116b26 100644 --- a/libcloud/test/common/test_openstack_identity.py +++ b/libcloud/test/common/test_openstack_identity.py @@ -25,7 +25,7 @@ from libcloud.utils.py3 import httplib from libcloud.common.openstack import OpenStackBaseConnection -from libcloud.common.openstack import AUTH_TOKEN_EXPIRES_GRACE_SECONDS +from libcloud.common.openstack_identity import AUTH_TOKEN_EXPIRES_GRACE_SECONDS from libcloud.common.openstack_identity import get_class_for_auth_version from libcloud.common.openstack_identity import OpenStackIdentity_2_0_Connection from libcloud.common.openstack_identity import OpenStackServiceCatalog From 266ade55730f6071b1853732a5d50db2220b71b2 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Thu, 14 Aug 2014 14:17:56 +0200 Subject: [PATCH 159/315] Add support for prettifying JSON response body which is printed to a file like object when using LIBCLOUD_DEBUG environment variable. This option can be enabled by setting LIBCLOUD_DEBUG_PRETTY_PRINT_JSON environment variable. --- CHANGES.rst | 6 ++++++ libcloud/common/base.py | 12 ++++++++++++ 2 files changed, 18 insertions(+) diff --git a/CHANGES.rst b/CHANGES.rst index 814672af9f..edc1711fbc 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -11,6 +11,12 @@ General OpenStack Identity (Keystone) service API v3. [Tomaz Muraus] +- Add support for prettifying JSON response body which is printed to a file + like object when using ``LIBCLOUD_DEBUG`` environment variable. + This option can be enabled by setting ``LIBCLOUD_DEBUG_PRETTY_PRINT_JSON`` + environment variable. 
+ [Tomaz Muraus] + Compute ~~~~~~~ diff --git a/libcloud/common/base.py b/libcloud/common/base.py index a42f228243..488f084202 100644 --- a/libcloud/common/base.py +++ b/libcloud/common/base.py @@ -300,17 +300,29 @@ def makefile(self, *args, **kwargs): headers = lowercase_keys(dict(r.getheaders())) encoding = headers.get('content-encoding', None) + content_type = headers.get('content-type', None) if encoding in ['zlib', 'deflate']: body = decompress_data('zlib', body) elif encoding in ['gzip', 'x-gzip']: body = decompress_data('gzip', body) + pretty_print_json = os.environ.get('LIBCLOUD_DEBUG_PRETTY_PRINT_JSON', + False) + if r.chunked: ht += "%x\r\n" % (len(body)) ht += u(body) ht += "\r\n0\r\n" else: + if pretty_print_json and content_type == 'application/json': + try: + body = json.loads(u(body)) + body = json.dumps(body, sort_keys=True, indent=4) + except: + # Invalid JSON or server is lying about content-type + pass + ht += u(body) if sys.version_info >= (2, 6) and sys.version_info < (2, 7): From b7a6c783d2bec762006fa475838d14507b61e61a Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Thu, 14 Aug 2014 14:55:42 +0200 Subject: [PATCH 160/315] Use a better variable name ("scope_to" -> "toke_scope"). --- libcloud/common/openstack_identity.py | 26 +++++++++---------- .../test/common/test_openstack_identity.py | 16 ++++++------ 2 files changed, 21 insertions(+), 21 deletions(-) diff --git a/libcloud/common/openstack_identity.py b/libcloud/common/openstack_identity.py index f2082581f3..d468e73f3c 100644 --- a/libcloud/common/openstack_identity.py +++ b/libcloud/common/openstack_identity.py @@ -880,22 +880,22 @@ class OpenStackIdentity_3_0_Connection(OpenStackIdentityConnection): auth_version = '3.0' def __init__(self, auth_url, user_id, key, tenant_name=None, - domain_name='Default', scope_to='project', + domain_name='Default', token_scope='project', timeout=None, parent_conn=None): """ :param tenant_name: Name of the project this user belongs to. 
Note: - When scope_to is set to project, this argument + When token_scope is set to project, this argument control to which project to scope the token to. :type tenant_name: ``str`` - :param domain_name: Domain the user belongs to. Note: Then scope_to + :param domain_name: Domain the user belongs to. Note: Then token_scope is set to token, this argument controls to which domain to scope the token to. :type domain_name: ``str`` - :param scope_to: Whether to scope a token to a "project" or a + :param token_scope: Whether to scope a token to a "project" or a "domain" - :type scope_to: ``str`` + :type token_scope: ``str`` """ super(OpenStackIdentity_3_0_Connection, self).__init__(auth_url=auth_url, @@ -904,19 +904,19 @@ def __init__(self, auth_url, user_id, key, tenant_name=None, tenant_name=tenant_name, timeout=timeout, parent_conn=parent_conn) - if scope_to not in ['project', 'domain']: - raise ValueError('Invalid value for "scope_to" argument: %s' % - (scope_to)) + if token_scope not in ['project', 'domain']: + raise ValueError('Invalid value for "token_scope" argument: %s' % + (token_scope)) - if scope_to == 'project' and (not tenant_name or not domain_name): + if token_scope == 'project' and (not tenant_name or not domain_name): raise ValueError('Must provide tenant_name and domain_name ' 'argument') - elif scope_to == 'domain' and not domain_name: + elif token_scope == 'domain' and not domain_name: raise ValueError('Must provide domain_name argument') self.tenant_name = tenant_name self.domain_name = domain_name - self.scope_to = scope_to + self.token_scope = token_scope self.auth_user_roles = None def authenticate(self, force=False): @@ -943,7 +943,7 @@ def authenticate(self, force=False): } } - if self.scope_to == 'project': + if self.token_scope == 'project': # Scope token to project (tenant) data['auth']['scope'] = { 'project': { @@ -953,7 +953,7 @@ def authenticate(self, force=False): 'name': self.tenant_name } } - elif self.domain_name: + elif 
self.token_scope == 'domain': # Scope token to domain data['auth']['scope'] = { 'domain': { diff --git a/libcloud/test/common/test_openstack_identity.py b/libcloud/test/common/test_openstack_identity.py index 20ec116b26..3cbab1520d 100644 --- a/libcloud/test/common/test_openstack_identity.py +++ b/libcloud/test/common/test_openstack_identity.py @@ -233,15 +233,15 @@ def setUp(self): tenant_name='test') self.auth_instance.auth_token = 'mock' - def test_scope_to_argument(self): - # Invalid scope_to value - expected_msg = 'Invalid value for "scope_to" argument: foo' + def test_token_scope_argument(self): + # Invalid token_scope value + expected_msg = 'Invalid value for "token_scope" argument: foo' self.assertRaisesRegexp(ValueError, expected_msg, OpenStackIdentity_3_0_Connection, auth_url='http://none', user_id='test', key='test', - scope_to='foo') + token_scope='foo') # Missing tenant_name expected_msg = 'Must provide tenant_name and domain_name argument' @@ -250,7 +250,7 @@ def test_scope_to_argument(self): auth_url='http://none', user_id='test', key='test', - scope_to='project') + token_scope='project') # Missing domain_name expected_msg = 'Must provide domain_name argument' @@ -259,21 +259,21 @@ def test_scope_to_argument(self): auth_url='http://none', user_id='test', key='test', - scope_to='domain', + token_scope='domain', domain_name=None) # Scope to project all ok OpenStackIdentity_3_0_Connection(auth_url='http://none', user_id='test', key='test', - scope_to='project', + token_scope='project', tenant_name='test', domain_name='Default') # Scope to domain OpenStackIdentity_3_0_Connection(auth_url='http://none', user_id='test', key='test', - scope_to='domain', + token_scope='domain', tenant_name=None, domain_name='Default') From 988b1392a7aa061b220318f2fd85f727db2e9f61 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Thu, 14 Aug 2014 19:16:35 +0200 Subject: [PATCH 161/315] Rename grant_role_to_user to grant_domain_role_to_user and revoke_role_from user to 
revoke_domain_role_from_user and also add methods for granting and removing project roles. --- libcloud/common/openstack_identity.py | 69 +++++++++++++++++-- .../test/common/test_openstack_identity.py | 53 +++++++++++--- 2 files changed, 107 insertions(+), 15 deletions(-) diff --git a/libcloud/common/openstack_identity.py b/libcloud/common/openstack_identity.py index d468e73f3c..c0a3697f03 100644 --- a/libcloud/common/openstack_identity.py +++ b/libcloud/common/openstack_identity.py @@ -1087,11 +1087,14 @@ def list_user_domain_roles(self, domain, user): result = self._to_roles(data=response.object['roles']) return result - def grant_role_to_user(self, domain, role, user): + def grant_domain_role_to_user(self, domain, role, user): """ - Grant role to the domain user. + Grant domain role to a user. - Note: This function appeats to be idempodent. + Note: This function appears to be idempodent. + + :param domain: Domain to grant the role to. + :type domain: :class:`.OpenStackIdentityDomain` :param role: Role to grant. :type role: :class:`.OpenStackIdentityRole` @@ -1107,15 +1110,71 @@ def grant_role_to_user(self, domain, role, user): response = self.authenticated_request(path, method='PUT') return response.status == httplib.NO_CONTENT - def revoke_role_from_user(self, domain, user, role): + def revoke_domain_role_from_user(self, domain, user, role): """ - Revoke role from a domain user. + Revoke domain role from a user. + + :param domain: Domain to revoke the role from. + :type domain: :class:`.OpenStackIdentityDomain` + + :param role: Role to revoke. + :type role: :class:`.OpenStackIdentityRole` + + :param user: User to revoke the role from. + :type user: :class:`.OpenStackIdentityUser` + + :return: ``True`` on success. 
+ :rtype: ``bool`` """ path = ('/v3/domains/%s/users/%s/roles/%s' % (domain.id, user.id, role.id)) response = self.authenticated_request(path, method='DELETE') return response.status == httplib.NO_CONTENT + def grant_project_role_to_user(self, project, role, user): + """ + Grant project role to a user. + + Note: This function appeats to be idempodent. + + :param project: Project to grant the role to. + :type project: :class:`.OpenStackIdentityDomain` + + :param role: Role to grant. + :type role: :class:`.OpenStackIdentityRole` + + :param user: User to grant the role to. + :type user: :class:`.OpenStackIdentityUser` + + :return: ``True`` on success. + :rtype: ``bool`` + """ + path = ('/v3/projects/%s/users/%s/roles/%s' % + (project.id, user.id, role.id)) + response = self.authenticated_request(path, method='PUT') + return response.status == httplib.NO_CONTENT + + def revoke_project_role_from_user(self, project, role, user): + """ + Revoke project role from a user. + + :param project: Project to revoke the role from. + :type project: :class:`.OpenStackIdentityDomain` + + :param role: Role to revoke. + :type role: :class:`.OpenStackIdentityRole` + + :param user: User to revoke the role from. + :type user: :class:`.OpenStackIdentityUser` + + :return: ``True`` on success. 
+ :rtype: ``bool`` + """ + path = ('/v3/projects/%s/users/%s/roles/%s' % + (project.id, user.id, role.id)) + response = self.authenticated_request(path, method='DELETE') + return response.status == httplib.NO_CONTENT + def create_user(self, email, password, name, description=None, domain_id=None, default_project_id=None, enabled=True): """ diff --git a/libcloud/test/common/test_openstack_identity.py b/libcloud/test/common/test_openstack_identity.py index 3cbab1520d..1c1320ee5c 100644 --- a/libcloud/test/common/test_openstack_identity.py +++ b/libcloud/test/common/test_openstack_identity.py @@ -342,24 +342,44 @@ def test_create_user(self): self.assertEqual(user.id, 'c') self.assertEqual(user.name, 'test2') - def test_grant_role_to_user(self): + def test_grant_domain_role_to_user(self): domain = self.auth_instance.list_domains()[0] role = self.auth_instance.list_roles()[0] user = self.auth_instance.list_users()[0] - result = self.auth_instance.grant_role_to_user(domain=domain, - role=role, - user=user) + result = self.auth_instance.grant_domain_role_to_user(domain=domain, + role=role, + user=user) self.assertTrue(result) - def test_revoke_role_from_user(self): + def test_revoke_domain_role_from_user(self): domain = self.auth_instance.list_domains()[0] role = self.auth_instance.list_roles()[0] user = self.auth_instance.list_users()[0] - result = self.auth_instance.revoke_role_from_user(domain=domain, - role=role, - user=user) + result = self.auth_instance.revoke_domain_role_from_user(domain=domain, + role=role, + user=user) + self.assertTrue(result) + + def test_grant_project_role_to_user(self): + project = self.auth_instance.list_projects()[0] + role = self.auth_instance.list_roles()[0] + user = self.auth_instance.list_users()[0] + + result = self.auth_instance.grant_project_role_to_user(project=project, + role=role, + user=user) + self.assertTrue(result) + + def test_revoke_project_role_from_user(self): + project = self.auth_instance.list_projects()[0] + role = 
self.auth_instance.list_roles()[0] + user = self.auth_instance.list_users()[0] + + result = self.auth_instance.revoke_project_role_from_user(project=project, + role=role, + user=user) self.assertTrue(result) @@ -531,12 +551,25 @@ def _v3_roles(self, method, url, body, headers): def _v3_domains_default_users_a_roles_a(self, method, url, body, headers): if method == 'PUT': - # grant role + # grant domain role + body = '' + return (httplib.NO_CONTENT, body, self.json_content_headers, + httplib.responses[httplib.NO_CONTENT]) + elif method == 'DELETE': + # revoke domain role + body = '' + return (httplib.NO_CONTENT, body, self.json_content_headers, + httplib.responses[httplib.NO_CONTENT]) + raise NotImplementedError() + + def _v3_projects_a_users_a_roles_a(self, method, url, body, headers): + if method == 'PUT': + # grant project role body = '' return (httplib.NO_CONTENT, body, self.json_content_headers, httplib.responses[httplib.NO_CONTENT]) elif method == 'DELETE': - # revoke role + # revoke project role body = '' return (httplib.NO_CONTENT, body, self.json_content_headers, httplib.responses[httplib.NO_CONTENT]) From 8a3e80809a1fdd9df1553b37e5391aeb4c145278 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Thu, 14 Aug 2014 19:23:10 +0200 Subject: [PATCH 162/315] Add and use OpenStackIdentityTokenScope enum class. --- libcloud/common/openstack_identity.py | 28 +++++++++++++++++++++------ 1 file changed, 22 insertions(+), 6 deletions(-) diff --git a/libcloud/common/openstack_identity.py b/libcloud/common/openstack_identity.py index c0a3697f03..d30716fd5d 100644 --- a/libcloud/common/openstack_identity.py +++ b/libcloud/common/openstack_identity.py @@ -83,6 +83,14 @@ class OpenStackIdentityEndpointType(object): ADMIN = 'admin' +class OpenStackIdentityTokenScope(object): + """ + Enum class for openstack identity token scope. 
+ """ + PROJECT = 'project' + DOMAIN = 'domain' + + class OpenStackIdentityVersion(object): def __init__(self, version, status, updated, url): self.version = version @@ -879,8 +887,14 @@ class OpenStackIdentity_3_0_Connection(OpenStackIdentityConnection): name = 'OpenStack Identity API v3.x' auth_version = '3.0' + VALID_TOKEN_SCOPES = [ + OpenStackIdentityTokenScope.PROJECT, + OpenStackIdentityTokenScope.DOMAIN + ] + def __init__(self, auth_url, user_id, key, tenant_name=None, - domain_name='Default', token_scope='project', + domain_name='Default', + token_scope=OpenStackIdentityTokenScope.PROJECT, timeout=None, parent_conn=None): """ :param tenant_name: Name of the project this user belongs to. Note: @@ -904,14 +918,16 @@ def __init__(self, auth_url, user_id, key, tenant_name=None, tenant_name=tenant_name, timeout=timeout, parent_conn=parent_conn) - if token_scope not in ['project', 'domain']: + if token_scope not in self.VALID_TOKEN_SCOPES: raise ValueError('Invalid value for "token_scope" argument: %s' % (token_scope)) - if token_scope == 'project' and (not tenant_name or not domain_name): + if (token_scope == OpenStackIdentityTokenScope.PROJECT and + (not tenant_name or not domain_name)): raise ValueError('Must provide tenant_name and domain_name ' 'argument') - elif token_scope == 'domain' and not domain_name: + elif (token_scope == OpenStackIdentityTokenScope.DOMAIN and + not domain_name): raise ValueError('Must provide domain_name argument') self.tenant_name = tenant_name @@ -943,7 +959,7 @@ def authenticate(self, force=False): } } - if self.token_scope == 'project': + if self.token_scope == OpenStackIdentityTokenScope.PROJECT: # Scope token to project (tenant) data['auth']['scope'] = { 'project': { @@ -953,7 +969,7 @@ def authenticate(self, force=False): 'name': self.tenant_name } } - elif self.token_scope == 'domain': + elif self.token_scope == OpenStackIdentityTokenScope.DOMAIN: # Scope token to domain data['auth']['scope'] = { 'domain': { From 
9b6110e3519272b91f9d51ca58a09ba25fd4aa3a Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Thu, 14 Aug 2014 19:51:23 +0200 Subject: [PATCH 163/315] Fix typos in __repr__. --- libcloud/common/openstack_identity.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/libcloud/common/openstack_identity.py b/libcloud/common/openstack_identity.py index d30716fd5d..d13975e0ed 100644 --- a/libcloud/common/openstack_identity.py +++ b/libcloud/common/openstack_identity.py @@ -100,7 +100,7 @@ def __init__(self, version, status, updated, url): def __repr__(self): return (('' % (self.version, self.status, self.updated, self.url))) @@ -153,8 +153,8 @@ def __init__(self, id, domain_id, name, email, description, enabled): def __repr__(self): return (('' % (self.id, self.domain_id, self.name, + self.email, self.enabled))) class OpenStackServiceCatalog(object): From 3f8fc51e99a88e9d96895cfb68d92e2a97c152e3 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Fri, 15 Aug 2014 15:05:47 +0200 Subject: [PATCH 164/315] Allow user to use perform authentication against Keystone API v3 without specifying a scope. Note: Depending on the OpenStack policies configuratiion, unscoped tokens are usually of a limited use. 
--- libcloud/common/openstack_identity.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/libcloud/common/openstack_identity.py b/libcloud/common/openstack_identity.py index d13975e0ed..a3492f15cc 100644 --- a/libcloud/common/openstack_identity.py +++ b/libcloud/common/openstack_identity.py @@ -89,6 +89,7 @@ class OpenStackIdentityTokenScope(object): """ PROJECT = 'project' DOMAIN = 'domain' + UNSCOPED = 'unscoped' class OpenStackIdentityVersion(object): @@ -889,7 +890,8 @@ class OpenStackIdentity_3_0_Connection(OpenStackIdentityConnection): VALID_TOKEN_SCOPES = [ OpenStackIdentityTokenScope.PROJECT, - OpenStackIdentityTokenScope.DOMAIN + OpenStackIdentityTokenScope.DOMAIN, + OpenStackIdentityTokenScope.UNSCOPED ] def __init__(self, auth_url, user_id, key, tenant_name=None, @@ -976,6 +978,8 @@ def authenticate(self, force=False): 'name': self.domain_name } } + elif self.token_scope == OpenStackIdentityTokenScope.UNSCOPED: + pass else: raise ValueError('Token needs to be scoped either to project or ' 'a domain') @@ -1008,7 +1012,8 @@ def authenticate(self, force=False): self.auth_token = headers['x-subject-token'] self.auth_token_expires = parse_date(expires) - self.urls = body['token']['catalog'] + # Note: catalog is not returned for unscoped tokens + self.urls = body['token'].get('catalog', None) self.auth_user_info = None self.auth_user_roles = roles except KeyError: From 7557d7b332dbef6688ea167fde6cd2997f0e2ffa Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Fri, 15 Aug 2014 15:52:03 +0200 Subject: [PATCH 165/315] Add enable_user and disable_user method to Identity API v3 class. 
--- libcloud/common/openstack_identity.py | 46 +++++++++++++++++++ .../test/common/test_openstack_identity.py | 18 ++++++++ 2 files changed, 64 insertions(+) diff --git a/libcloud/common/openstack_identity.py b/libcloud/common/openstack_identity.py index a3492f15cc..3363aac7fc 100644 --- a/libcloud/common/openstack_identity.py +++ b/libcloud/common/openstack_identity.py @@ -1248,6 +1248,52 @@ def create_user(self, email, password, name, description=None, user = self._to_user(data=response.object['user']) return user + def enable_user(self, user): + """ + Enable user account. + + Note: This operation appears to be idempotent. + + :param user: User to enable. + :type user: :class:`.OpenStackIdentityUser` + + :return: User account which has been enabled. + :rtype: :class:`.OpenStackIdentityUser` + """ + data = { + 'enabled': True + } + data = json.dumps({'user': data}) + response = self.authenticated_request('/v3/users/%s' % (user.id), + data=data, + method='PATCH') + + user = self._to_user(data=response.object['user']) + return user + + def disable_user(self, user): + """ + Disable user account. + + Note: This operation appears to be idempotent. + + :param user: User to disable. + :type user: :class:`.OpenStackIdentityUser` + + :return: User account which has been disabled. 
+ :rtype: :class:`.OpenStackIdentityUser` + """ + data = { + 'enabled': False + } + data = json.dumps({'user': data}) + response = self.authenticated_request('/v3/users/%s' % (user.id), + data=data, + method='PATCH') + + user = self._to_user(data=response.object['user']) + return user + def _to_domains(self, data): result = [] for item in data: diff --git a/libcloud/test/common/test_openstack_identity.py b/libcloud/test/common/test_openstack_identity.py index 1c1320ee5c..9ebd5990fd 100644 --- a/libcloud/test/common/test_openstack_identity.py +++ b/libcloud/test/common/test_openstack_identity.py @@ -30,6 +30,7 @@ from libcloud.common.openstack_identity import OpenStackIdentity_2_0_Connection from libcloud.common.openstack_identity import OpenStackServiceCatalog from libcloud.common.openstack_identity import OpenStackIdentity_3_0_Connection +from libcloud.common.openstack_identity import OpenStackIdentityUser from libcloud.compute.drivers.openstack import OpenStack_1_0_NodeDriver from libcloud.test import unittest @@ -342,6 +343,16 @@ def test_create_user(self): self.assertEqual(user.id, 'c') self.assertEqual(user.name, 'test2') + def test_enable_user(self): + user = self.auth_instance.list_users()[0] + result = self.auth_instance.enable_user(user=user) + self.assertTrue(isinstance(result, OpenStackIdentityUser)) + + def test_disable_user(self): + user = self.auth_instance.list_users()[0] + result = self.auth_instance.disable_user(user=user) + self.assertTrue(isinstance(result, OpenStackIdentityUser)) + def test_grant_domain_role_to_user(self): domain = self.auth_instance.list_domains()[0] role = self.auth_instance.list_roles()[0] @@ -543,6 +554,13 @@ def _v3_users(self, method, url, body, headers): httplib.responses[httplib.CREATED]) raise NotImplementedError() + def _v3_users_a(self, method, url, body, headers): + if method == 'PATCH': + # enable / disable user + body = self.fixtures.load('v3_users_a.json') + return (httplib.OK, body, self.json_content_headers, 
httplib.responses[httplib.OK]) + raise NotImplementedError() + def _v3_roles(self, method, url, body, headers): if method == 'GET': body = self.fixtures.load('v3_roles.json') From cbd2ae659e5a5bb675355c039880b19fd641dbd0 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Fri, 15 Aug 2014 15:57:05 +0200 Subject: [PATCH 166/315] Update doap file. --- doap_libcloud.rdf | 42 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/doap_libcloud.rdf b/doap_libcloud.rdf index 2bfca577cd..c6c52501de 100644 --- a/doap_libcloud.rdf +++ b/doap_libcloud.rdf @@ -195,6 +195,48 @@ 2013-09-18 0.13.2 + + + + 0.13.3 + 2013-12-31 + 0.13.3 + + + + + 0.14.0-beta3 + 2013-11-21 + 0.14.0-beta3 + + + + + 0.14.0 + 2014-01-22 + 0.14.0 + + + + + 0.15.0 + 2014-06-26 + 0.15.0 + + + + + 0.15.1 + 2014-07-10 + 0.15.1 + + + + + 0.14.1 + 2014-02-08 + 0.14.1 + From 8b8b67cf78b42a861b67b289d9ca9d0f812e7f93 Mon Sep 17 00:00:00 2001 From: Eddy Reyes Date: Thu, 14 Aug 2014 13:11:28 -0500 Subject: [PATCH 167/315] Fix ParamikoSSHClient.run() so that it works in Python 3.x by decoding incoming bytes into strings using the bytes decode method. Closes #347 Signed-off-by: Tomaz Muraus --- CHANGES.rst | 5 +++++ libcloud/compute/ssh.py | 5 +++-- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index edc1711fbc..a7943bbc5f 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -61,6 +61,11 @@ Compute (GITHUB-346) [Roeland Kuipers] +- Fix ``ParamikoSSHClient.run`` and ``deploy_node`` method to work correctly + under Python 3. 
+ (GITHUB-347) + [Eddy Reyes] + Storage ~~~~~~~ diff --git a/libcloud/compute/ssh.py b/libcloud/compute/ssh.py index af9c43a529..1a88e9f38f 100644 --- a/libcloud/compute/ssh.py +++ b/libcloud/compute/ssh.py @@ -40,6 +40,7 @@ from libcloud.utils.logging import ExtraLogFormatter from libcloud.utils.py3 import StringIO +from libcloud.utils.py3 import b __all__ = [ 'BaseSSHClient', @@ -364,7 +365,7 @@ def run(self, cmd, timeout=None): data = chan.recv(CHUNK_SIZE) while data: - stdout.write(data) + stdout.write(b(data).decode('utf-8')) ready = chan.recv_ready() if not ready: @@ -376,7 +377,7 @@ def run(self, cmd, timeout=None): data = chan.recv_stderr(CHUNK_SIZE) while data: - stderr.write(data) + stderr.write(b(data).decode('utf-8')) ready = chan.recv_stderr_ready() if not ready: From 950f972c3bcd05ef29afdf2eaba8fafadfa02d03 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Fri, 15 Aug 2014 16:17:53 +0200 Subject: [PATCH 168/315] Add missing fixture. --- .../openstack_identity/v3_users_a.json | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) create mode 100644 libcloud/test/compute/fixtures/openstack_identity/v3_users_a.json diff --git a/libcloud/test/compute/fixtures/openstack_identity/v3_users_a.json b/libcloud/test/compute/fixtures/openstack_identity/v3_users_a.json new file mode 100644 index 0000000000..4d0a359151 --- /dev/null +++ b/libcloud/test/compute/fixtures/openstack_identity/v3_users_a.json @@ -0,0 +1,18 @@ +{ + "user": + { + "name": "cloud", + "links": { + "self": "http://192.168.18.100:5000/v3/users/a" + }, + "domain_id": "default", + "enabled": true, + "email": "openstack-test@localhost", + "id": "a" + }, + "links": { + "self": "http://192.168.18.100:5000/v3/users", + "previous": null, + "next": null + } +} From c849b95af822ca32c2cd75fc280682b81426116a Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Mon, 18 Aug 2014 12:25:21 +0200 Subject: [PATCH 169/315] Add list_projects / list_tenants method to OpenStackIdentity_2_0 class. 
Also re-organize fixtures in directories based on the API version. --- libcloud/common/openstack_identity.py | 45 +++++++++++-------- .../test/common/test_openstack_identity.py | 36 ++++++++++++++- .../openstack_identity/v2/v2_0_tenants.json | 17 +++++++ .../{ => v3}/v3_create_user.json | 0 .../{ => v3}/v3_domains.json | 0 .../{ => v3}/v3_domains_default.json | 0 .../v3_domains_default_users_a_roles.json | 0 .../{ => v3}/v3_projects.json | 0 .../openstack_identity/{ => v3}/v3_roles.json | 0 .../openstack_identity/{ => v3}/v3_users.json | 0 .../{ => v3}/v3_users_a.json | 0 .../{ => v3}/v3_users_a_projects.json | 0 .../{ => v3}/v3_versions.json | 0 13 files changed, 78 insertions(+), 20 deletions(-) create mode 100644 libcloud/test/compute/fixtures/openstack_identity/v2/v2_0_tenants.json rename libcloud/test/compute/fixtures/openstack_identity/{ => v3}/v3_create_user.json (100%) rename libcloud/test/compute/fixtures/openstack_identity/{ => v3}/v3_domains.json (100%) rename libcloud/test/compute/fixtures/openstack_identity/{ => v3}/v3_domains_default.json (100%) rename libcloud/test/compute/fixtures/openstack_identity/{ => v3}/v3_domains_default_users_a_roles.json (100%) rename libcloud/test/compute/fixtures/openstack_identity/{ => v3}/v3_projects.json (100%) rename libcloud/test/compute/fixtures/openstack_identity/{ => v3}/v3_roles.json (100%) rename libcloud/test/compute/fixtures/openstack_identity/{ => v3}/v3_users.json (100%) rename libcloud/test/compute/fixtures/openstack_identity/{ => v3}/v3_users_a.json (100%) rename libcloud/test/compute/fixtures/openstack_identity/{ => v3}/v3_users_a_projects.json (100%) rename libcloud/test/compute/fixtures/openstack_identity/{ => v3}/v3_versions.json (100%) diff --git a/libcloud/common/openstack_identity.py b/libcloud/common/openstack_identity.py index 3363aac7fc..3eb0d14c53 100644 --- a/libcloud/common/openstack_identity.py +++ b/libcloud/common/openstack_identity.py @@ -117,12 +117,12 @@ def __repr__(self): class 
OpenStackIdentityProject(object): - def __init__(self, id, domain_id, name, description, enabled): + def __init__(self, id, name, description, enabled, domain_id=None): self.id = id - self.domain_id = domain_id self.name = name self.description = description self.enabled = enabled + self.domain_id = domain_id def __repr__(self): return ((' Date: Tue, 19 Aug 2014 13:43:47 +0200 Subject: [PATCH 170/315] Update OpenStack driver to map more node states to states recognized by Libcloud. Patch by Chris DeRamus. --- CHANGES.rst | 4 ++++ libcloud/compute/drivers/openstack.py | 4 +++- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index a7943bbc5f..4a547aa402 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -66,6 +66,10 @@ Compute (GITHUB-347) [Eddy Reyes] +- Update OpenStack driver to map more node states to states recognized by + Libcloud. + [Chris DeRamus] + Storage ~~~~~~~ diff --git a/libcloud/compute/drivers/openstack.py b/libcloud/compute/drivers/openstack.py index 93834dc2b0..af306e0c1d 100644 --- a/libcloud/compute/drivers/openstack.py +++ b/libcloud/compute/drivers/openstack.py @@ -86,7 +86,8 @@ class OpenStackNodeDriver(NodeDriver, OpenStackDriverMixin): 'BUILD': NodeState.PENDING, 'REBUILD': NodeState.PENDING, 'ACTIVE': NodeState.RUNNING, - 'SUSPENDED': NodeState.TERMINATED, + 'SUSPENDED': NodeState.STOPPED, + 'SHUTOFF': NodeState.STOPPED, 'DELETED': NodeState.TERMINATED, 'QUEUE_RESIZE': NodeState.PENDING, 'PREP_RESIZE': NodeState.PENDING, @@ -98,6 +99,7 @@ class OpenStackNodeDriver(NodeDriver, OpenStackDriverMixin): 'SHARE_IP': NodeState.PENDING, 'SHARE_IP_NO_CONFIG': NodeState.PENDING, 'DELETE_IP': NodeState.PENDING, + 'ERROR': NodeState.ERROR, 'UNKNOWN': NodeState.UNKNOWN } From f3f600028384bc19c0f7ff051306a401d57afd35 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Tue, 19 Aug 2014 16:29:54 +0200 Subject: [PATCH 171/315] Add support for HTTP proxy to LibcloudHTTPConnection and LibcloudHTTPSConnection class. 
User can specify which HTTP proxy to use using one of the following approaches: * by setting "proxy_url" environment variable (global / process wide) * by passing "proxy_url" argument to the Connection class constructor (per connection instance) * by calling "set_http_proxy" method on the Connection class (per connection instance) --- .../http_proxy/constructor_argument.py | 6 + .../http_proxy/set_http_proxy_method.py | 12 ++ docs/other/using-http-proxy.rst | 52 +++++++ libcloud/common/base.py | 15 +- libcloud/httplib_ssl.py | 129 +++++++++++++++++- libcloud/test/test_connection.py | 53 +++++++ 6 files changed, 262 insertions(+), 5 deletions(-) create mode 100644 docs/examples/http_proxy/constructor_argument.py create mode 100644 docs/examples/http_proxy/set_http_proxy_method.py create mode 100644 docs/other/using-http-proxy.rst diff --git a/docs/examples/http_proxy/constructor_argument.py b/docs/examples/http_proxy/constructor_argument.py new file mode 100644 index 0000000000..ae5494562e --- /dev/null +++ b/docs/examples/http_proxy/constructor_argument.py @@ -0,0 +1,6 @@ +from libcloud.compute.drivers.dreamhost import DreamhostConnection + +PROXY_URL = 'http://:' + +conn = DreamhostConnection(host='dreamhost.com', port=443, + timeout=None, proxy_url=PROXY_URL) diff --git a/docs/examples/http_proxy/set_http_proxy_method.py b/docs/examples/http_proxy/set_http_proxy_method.py new file mode 100644 index 0000000000..d8c5e83f7a --- /dev/null +++ b/docs/examples/http_proxy/set_http_proxy_method.py @@ -0,0 +1,12 @@ +from pprint import pprint + +from libcloud.compute.types import Provider +from libcloud.compute.providers import get_driver + +PROXY_URL = 'http://:' + +cls = get_driver(Provider.RACKSPACE) +driver = cls('username', 'api key', region='ord') +driver.set_http_proxy(proxy_url=PROXY_URL) + +pprint(driver.list_nodes()) diff --git a/docs/other/using-http-proxy.rst b/docs/other/using-http-proxy.rst new file mode 100644 index 0000000000..8c12dc5adc --- /dev/null +++ 
b/docs/other/using-http-proxy.rst @@ -0,0 +1,52 @@ +Using an HTTP proxy +=================== + +.. note:: + + Support for HTTP proxies is only available in Libcloud trunk and higher. + +Libcloud supports using an HTTP proxy for outgoing HTTP and HTTPS requests. At +the moment, using a proxy is only supported if you are using Python 2.7 or +above (it has been tested with 2.7, PyPy, 3.1, 3.3, 3.3, 3.4). + +You can specify which HTTP proxy to use using one of the approaches described +bellow: + +* By setting ``http_proxy`` environment variable (this setting is system / + process wide) +* By passing ``http_proxy`` argument to the + :class:`libcloud.common.base.LibcloudHTTPConnection` class constructor (this + setting is local to the connection instance) +* By calling :meth:`libcloud.common.base.LibcloudHTTPConnection.set_http_proxy` + method (this setting is local to the connection instance) + +Known limitations +----------------- + +* HTTP proxies which require authentication are not supported +* Python 2.6 is not supported + +Examples +-------- + +This section includes some code examples which show how to use an HTTP proxy +with Libcloud. + +1. Using http_proxy environment variable +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. sourcecode:: python + + http_proxy=http://: python my_script.py + +2. Passing http_proxy argument to the connection class +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. literalinclude:: /examples/http_proxy/constructor_argument.py + :language: python + +3. Calling set_http_proxy method +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. 
literalinclude:: /examples/http_proxy/set_http_proxy_method.py + :language: python diff --git a/libcloud/common/base.py b/libcloud/common/base.py index 488f084202..daf04d2d06 100644 --- a/libcloud/common/base.py +++ b/libcloud/common/base.py @@ -46,10 +46,9 @@ from libcloud.utils.compression import decompress_data from libcloud.common.types import LibcloudError, MalformedResponseError +from libcloud.httplib_ssl import LibcloudHTTPConnection from libcloud.httplib_ssl import LibcloudHTTPSConnection -LibcloudHTTPConnection = httplib.HTTPConnection - class HTTPResponse(httplib.HTTPResponse): # On python 2.6 some calls can hang because HEAD isn't quite properly @@ -267,7 +266,9 @@ class LoggingConnection(): :cvar log: file-like object that logs entries are written to. """ + log = None + http_proxy_used = False def _log_response(self, r): rv = "# -------- begin %d:%d response ----------\n" % (id(self), id(r)) @@ -341,7 +342,15 @@ def makefile(self, *args, **kwargs): return (rr, rv) def _log_curl(self, method, url, body, headers): - cmd = ["curl", "-i"] + cmd = ["curl"] + + if self.http_proxy_used: + proxy_url = 'http://%s:%s' % (self.proxy_host, + self.proxy_port) + proxy_url = pquote(proxy_url) + cmd.extend(['--proxy', proxy_url]) + + cmd.extend(['-i']) if method.lower() == 'head': # HEAD method need special handling diff --git a/libcloud/httplib_ssl.py b/libcloud/httplib_ssl.py index 29136ef201..2024433c3b 100644 --- a/libcloud/httplib_ssl.py +++ b/libcloud/httplib_ssl.py @@ -19,14 +19,126 @@ import os import re import socket +import sys import ssl import warnings import libcloud.security from libcloud.utils.py3 import httplib +from libcloud.utils.py3 import urlparse +__all__ = [ + 'LibcloudBaseConnection', + 'LibcloudHTTPConnection', + 'LibcloudHTTPSConnection' +] -class LibcloudHTTPSConnection(httplib.HTTPSConnection): +HTTP_PROXY_ENV_VARIABLE_NAME = 'http_proxy' + + +class LibcloudBaseConnection(object): + """ + Base connection class to inherit from. 
+ + Note: This class should not be instantiated directly. + """ + + proxy_scheme = None + proxy_host = None + proxy_port = None + http_proxy_used = False + + def set_http_proxy(self, proxy_url): + """ + Set a HTTP proxy which will be used with this connection. + + :param proxy_url: Proxy URL (e.g. http://hostname:3128) + :type proxy_url: ``str`` + """ + if sys.version_info[:2] == (2, 6): + raise Exception('HTTP proxy support requires Python 2.7 or higher') + + scheme, host, port = self._parse_proxy_url(proxy_url=proxy_url) + + self.proxy_scheme = scheme + self.proxy_host = host + self.proxy_port = port + + self._setup_http_proxy() + + def _parse_proxy_url(self, proxy_url): + """ + Parse and validate a proxy URL. + + :param proxy_url: Proxy URL (e.g. http://hostname:3128) + :type proxy_url: ``str`` + + :rtype: ``tuple`` (``scheme``, ``hostname``, ``port``) + """ + parsed = urlparse.urlparse(proxy_url) + + if parsed.scheme != 'http': + raise ValueError('Only http proxies are supported') + + if not parsed.hostname or not parsed.port: + raise ValueError('proxy_url must be in the following format: ' + 'http://:') + + proxy_scheme = parsed.scheme + proxy_host, proxy_port = parsed.hostname, parsed.port + + return (proxy_scheme, proxy_host, proxy_port) + + def _setup_http_proxy(self): + """ + Set up HTTP proxy. + + :param proxy_url: Proxy URL (e.g. http://:3128) + :type proxy_url: ``str`` + """ + self.set_tunnel(host=self.host, port=self.port) + self._set_hostport(host=self.proxy_host, port=self.proxy_port) + + def _activate_http_proxy(self, sock): + self.sock = sock + self._tunnel() + + def _set_hostport(self, host, port): + """ + Backported from Python stdlib so Proxy support also works with + Python 3.4. + """ + if port is None: + i = host.rfind(':') + j = host.rfind(']') # ipv6 addresses have [...] 
+ if i > j: + try: + port = int(host[i+1:]) + except ValueError: + msg = "nonnumeric port: '%s'" % host[i+1:] + raise httplib.InvalidURL(msg) + host = host[:i] + else: + port = self.default_port + if host and host[0] == '[' and host[-1] == ']': + host = host[1:-1] + self.host = host + self.port = port + + +class LibcloudHTTPConnection(httplib.HTTPConnection, LibcloudBaseConnection): + def __init__(self, *args, **kwargs): + # Support for HTTP proxy + proxy_url_env = os.environ.get(HTTP_PROXY_ENV_VARIABLE_NAME, None) + proxy_url = kwargs.pop('proxy_url', proxy_url_env) + + super(LibcloudHTTPConnection, self).__init__(*args, **kwargs) + + if proxy_url: + self.set_http_proxy(proxy_url=proxy_url) + + +class LibcloudHTTPSConnection(httplib.HTTPSConnection, LibcloudBaseConnection): """ LibcloudHTTPSConnection @@ -41,7 +153,15 @@ def __init__(self, *args, **kwargs): Constructor """ self._setup_verify() - httplib.HTTPSConnection.__init__(self, *args, **kwargs) + + # Support for HTTP proxy + proxy_url_env = os.environ.get(HTTP_PROXY_ENV_VARIABLE_NAME, None) + proxy_url = kwargs.pop('proxy_url', proxy_url_env) + + super(LibcloudHTTPSConnection, self).__init__(*args, **kwargs) + + if proxy_url: + self.set_http_proxy(proxy_url=proxy_url) def _setup_verify(self): """ @@ -97,6 +217,11 @@ def connect(self): else: sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.connect((self.host, self.port)) + + # Activate the HTTP proxy + if self.http_proxy_used: + self._activate_http_proxy(sock=sock) + self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, diff --git a/libcloud/test/test_connection.py b/libcloud/test/test_connection.py index 5df79191a0..b5c2abbd12 100644 --- a/libcloud/test/test_connection.py +++ b/libcloud/test/test_connection.py @@ -14,6 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import os import sys import ssl @@ -22,6 +23,58 @@ from libcloud.test import unittest from libcloud.common.base import Connection from libcloud.common.base import LoggingConnection +from libcloud.httplib_ssl import LibcloudBaseConnection +from libcloud.httplib_ssl import LibcloudHTTPConnection + + +class BaseConnectionClassTestCase(unittest.TestCase): + def test_parse_proxy_url(self): + conn = LibcloudBaseConnection() + + proxy_url = 'http://127.0.0.1:3128' + result = conn._parse_proxy_url(proxy_url=proxy_url) + self.assertEqual(result[0], 'http') + self.assertEqual(result[1], '127.0.0.1') + self.assertEqual(result[2], 3128) + + proxy_url = 'https://127.0.0.1:3128' + expected_msg = 'Only http proxies are supported' + self.assertRaisesRegexp(ValueError, expected_msg, + conn._parse_proxy_url, + proxy_url=proxy_url) + + proxy_url = 'http://127.0.0.1' + expected_msg = 'proxy_url must be in the following format' + self.assertRaisesRegexp(ValueError, expected_msg, + conn._parse_proxy_url, + proxy_url=proxy_url) + + def test_constructor(self): + conn = LibcloudHTTPConnection(host='localhost', port=80) + self.assertEqual(conn.proxy_scheme, None) + self.assertEqual(conn.proxy_host, None) + self.assertEqual(conn.proxy_port, None) + + proxy_url = 'http://127.0.0.3:3128' + conn.set_http_proxy(proxy_url=proxy_url) + self.assertEqual(conn.proxy_scheme, 'http') + self.assertEqual(conn.proxy_host, '127.0.0.3') + self.assertEqual(conn.proxy_port, 3128) + + proxy_url = 'http://127.0.0.4:3128' + conn = LibcloudHTTPConnection(host='localhost', port=80, + proxy_url=proxy_url) + self.assertEqual(conn.proxy_scheme, 'http') + self.assertEqual(conn.proxy_host, '127.0.0.4') + self.assertEqual(conn.proxy_port, 3128) + + os.environ['http_proxy'] = proxy_url + proxy_url = 'http://127.0.0.5:3128' + conn = LibcloudHTTPConnection(host='localhost', port=80, + proxy_url=proxy_url) + self.assertEqual(conn.proxy_scheme, 'http') + self.assertEqual(conn.proxy_host, '127.0.0.5') + 
self.assertEqual(conn.proxy_port, 3128) class ConnectionClassTestCase(unittest.TestCase): From 2bd3344b803f9e43a03ada009a55db972be44001 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Tue, 19 Aug 2014 19:07:44 +0200 Subject: [PATCH 172/315] Update CHANGES. --- CHANGES.rst | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGES.rst b/CHANGES.rst index 4a547aa402..d52ab22e84 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -17,6 +17,9 @@ General environment variable. [Tomaz Muraus] +- Add support for using an HTTP proxy for outgoing HTTP and HTTPS requests. + [Tomaz Muraus, Philip Kershaw] + Compute ~~~~~~~ From 3eaa60bc232b05b01ce34c9a541374dbceb34942 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Tue, 19 Aug 2014 19:10:33 +0200 Subject: [PATCH 173/315] Fix broken test and invalid variable declarations. --- libcloud/test/test_connection.py | 7 +++++++ libcloud/utils/py3.py | 8 ++++++-- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/libcloud/test/test_connection.py b/libcloud/test/test_connection.py index b5c2abbd12..75d63fdb5d 100644 --- a/libcloud/test/test_connection.py +++ b/libcloud/test/test_connection.py @@ -25,10 +25,14 @@ from libcloud.common.base import LoggingConnection from libcloud.httplib_ssl import LibcloudBaseConnection from libcloud.httplib_ssl import LibcloudHTTPConnection +from libcloud.utils.py3 import PY26 class BaseConnectionClassTestCase(unittest.TestCase): def test_parse_proxy_url(self): + if PY26: + return + conn = LibcloudBaseConnection() proxy_url = 'http://127.0.0.1:3128' @@ -50,6 +54,9 @@ def test_parse_proxy_url(self): proxy_url=proxy_url) def test_constructor(self): + if PY26: + return + conn = LibcloudHTTPConnection(host='localhost', port=80) self.assertEqual(conn.proxy_scheme, None) self.assertEqual(conn.proxy_host, None) diff --git a/libcloud/utils/py3.py b/libcloud/utils/py3.py index 1f7d229ec6..2e4fc381e2 100644 --- a/libcloud/utils/py3.py +++ b/libcloud/utils/py3.py @@ -30,6 +30,7 @@ PY2 = False PY25 = False 
+PY26 = False PY27 = False PY3 = False PY32 = False @@ -37,10 +38,13 @@ if sys.version_info >= (2, 0) and sys.version_info < (3, 0): PY2 = True -if sys.version_info >= (2, 5) and sys.version_info <= (2, 6): +if sys.version_info >= (2, 5) and sys.version_info < (2, 6): PY25 = True -if sys.version_info >= (2, 7) and sys.version_info <= (2, 8): +if sys.version_info >= (2, 6) and sys.version_info < (2, 7): + PY26 = True + +if sys.version_info >= (2, 7) and sys.version_info < (2, 8): PY27 = True if sys.version_info >= (3, 0): From b968247c6acbaeafaf0ffcde62c5baab00b0e0bd Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Tue, 19 Aug 2014 20:39:42 +0200 Subject: [PATCH 174/315] Allow user to pass proxy_url argument to Connection class. --- libcloud/common/base.py | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/libcloud/common/base.py b/libcloud/common/base.py index daf04d2d06..ae0e699e52 100644 --- a/libcloud/common/base.py +++ b/libcloud/common/base.py @@ -444,7 +444,7 @@ class Connection(object): allow_insecure = True def __init__(self, secure=True, host=None, port=None, url=None, - timeout=None): + timeout=None, proxy_url=None): self.secure = secure and 1 or 0 self.ua = [] self.context = {} @@ -475,6 +475,8 @@ def __init__(self, secure=True, host=None, port=None, url=None, if timeout: self.timeout = timeout + self.proxy_url = proxy_url + def set_context(self, context): if not isinstance(context, dict): raise TypeError('context needs to be a dictionary') @@ -543,6 +545,9 @@ def connect(self, host=None, port=None, base_url=None): if self.timeout and not PY25: kwargs.update({'timeout': self.timeout}) + if self.proxy_url: + kwargs.update({'proxy_url': self.proxy_url}) + connection = self.conn_classes[secure](**kwargs) # You can uncoment this line, if you setup a reverse proxy server # which proxies to your endpoint, and lets you easily capture @@ -893,14 +898,15 @@ class ConnectionKey(Connection): Base connection class which accepts a 
single ``key`` argument. """ def __init__(self, key, secure=True, host=None, port=None, url=None, - timeout=None): + timeout=None, proxy_url=None): """ Initialize `user_id` and `key`; set `secure` to an ``int`` based on passed value. """ super(ConnectionKey, self).__init__(secure=secure, host=host, port=port, url=url, - timeout=timeout) + timeout=timeout, + proxy_url=proxy_url) self.key = key @@ -911,11 +917,12 @@ class ConnectionUserAndKey(ConnectionKey): user_id = None - def __init__(self, user_id, key, secure=True, - host=None, port=None, url=None, timeout=None): + def __init__(self, user_id, key, secure=True, host=None, port=None, + url=None, timeout=None, proxy_url=None): super(ConnectionUserAndKey, self).__init__(key, secure=secure, host=host, port=port, - url=url, timeout=timeout) + url=url, timeout=timeout, + proxy_url=proxy_url) self.user_id = user_id From 2ed6882a73d00df6baa465435980c2dbf3c2a835 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Tue, 19 Aug 2014 21:27:01 +0200 Subject: [PATCH 175/315] Add missing variable assignment. --- libcloud/httplib_ssl.py | 1 + 1 file changed, 1 insertion(+) diff --git a/libcloud/httplib_ssl.py b/libcloud/httplib_ssl.py index 2024433c3b..3783f48829 100644 --- a/libcloud/httplib_ssl.py +++ b/libcloud/httplib_ssl.py @@ -63,6 +63,7 @@ def set_http_proxy(self, proxy_url): self.proxy_scheme = scheme self.proxy_host = host self.proxy_port = port + self.http_proxy_used = True self._setup_http_proxy() From 3a8d7ff5f047c7b3476b8dcffa0e6850e952a645 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Wed, 20 Aug 2014 13:37:24 +0200 Subject: [PATCH 176/315] Fix a typo in the example. 
--- docs/examples/http_proxy/set_http_proxy_method.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/examples/http_proxy/set_http_proxy_method.py b/docs/examples/http_proxy/set_http_proxy_method.py index d8c5e83f7a..cb9b84a7f6 100644 --- a/docs/examples/http_proxy/set_http_proxy_method.py +++ b/docs/examples/http_proxy/set_http_proxy_method.py @@ -7,6 +7,6 @@ cls = get_driver(Provider.RACKSPACE) driver = cls('username', 'api key', region='ord') -driver.set_http_proxy(proxy_url=PROXY_URL) +driver.connection.set_http_proxy(proxy_url=PROXY_URL) pprint(driver.list_nodes()) From 298f5db6ef42d22338d60fc3582cc31cdd7c4114 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Wed, 20 Aug 2014 16:49:40 +0200 Subject: [PATCH 177/315] Add new base paused node state. --- libcloud/compute/types.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/libcloud/compute/types.py b/libcloud/compute/types.py index e2961726db..fbe9317731 100644 --- a/libcloud/compute/types.py +++ b/libcloud/compute/types.py @@ -189,6 +189,7 @@ class NodeState(object): :cvar SUSPENDED: Node is suspended. :cvar ERROR: Node is an error state. Usually no operations can be performed on the node once it ends up in the error state. + :cvar PAUSED: Node is paused. :cvar UNKNOWN: Node state is unknown. """ RUNNING = 0 @@ -199,6 +200,7 @@ class NodeState(object): STOPPED = 5 SUSPENDED = 6 ERROR = 7 + PAUSED = 8 class Architecture(object): From 5544e75ef2f9fce5d92fba7d04582699f69ba986 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Wed, 20 Aug 2014 17:02:07 +0200 Subject: [PATCH 178/315] Update Node.__repr__ to display a friendly name for the state attribute. 
--- libcloud/compute/base.py | 4 +++- libcloud/compute/types.py | 14 ++++++++++++++ 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/libcloud/compute/base.py b/libcloud/compute/base.py index a585128b8d..c043706cfc 100644 --- a/libcloud/compute/base.py +++ b/libcloud/compute/base.py @@ -251,9 +251,11 @@ def destroy(self): return self.driver.destroy_node(self) def __repr__(self): + state = NodeState.tostring(self.state) + return (('') - % (self.uuid, self.name, self.state, self.public_ips, + % (self.uuid, self.name, state, self.public_ips, self.private_ips, self.driver.name)) diff --git a/libcloud/compute/types.py b/libcloud/compute/types.py index fbe9317731..c7c7616c2a 100644 --- a/libcloud/compute/types.py +++ b/libcloud/compute/types.py @@ -202,6 +202,20 @@ class NodeState(object): ERROR = 7 PAUSED = 8 + @classmethod + def tostring(cls, value): + values = cls.__dict__ + values = dict([(key, string) for key, string in values.items() if + not key.startswith('__')]) + + for item_key, item_value in values.items(): + if value == item_value: + return item_key + + @classmethod + def fromstring(cls, value): + return getattr(cls, value.upper(), None) + class Architecture(object): """ From a03e9f31d3f3a11a6fb6c0a3bdd81a40dd4c9169 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Wed, 20 Aug 2014 17:45:29 +0200 Subject: [PATCH 179/315] Update HTTP proxy code so it supports basic auth authentication. 
--- .../http_proxy/constructor_argument.py | 5 +- .../http_proxy/set_http_proxy_method.py | 6 +-- docs/other/using-http-proxy.rst | 10 +++- libcloud/httplib_ssl.py | 49 +++++++++++++++++-- libcloud/test/test_connection.py | 10 ++++ 5 files changed, 70 insertions(+), 10 deletions(-) diff --git a/docs/examples/http_proxy/constructor_argument.py b/docs/examples/http_proxy/constructor_argument.py index ae5494562e..3542bebd48 100644 --- a/docs/examples/http_proxy/constructor_argument.py +++ b/docs/examples/http_proxy/constructor_argument.py @@ -1,6 +1,7 @@ from libcloud.compute.drivers.dreamhost import DreamhostConnection -PROXY_URL = 'http://:' +PROXY_URL_NO_AUTH = 'http://:' +PROXY_URL_BASIC_AUTH = 'http://:@:' conn = DreamhostConnection(host='dreamhost.com', port=443, - timeout=None, proxy_url=PROXY_URL) + timeout=None, proxy_url=PROXY_URL_NO_AUTH) diff --git a/docs/examples/http_proxy/set_http_proxy_method.py b/docs/examples/http_proxy/set_http_proxy_method.py index cb9b84a7f6..b9d5bdf03e 100644 --- a/docs/examples/http_proxy/set_http_proxy_method.py +++ b/docs/examples/http_proxy/set_http_proxy_method.py @@ -3,10 +3,10 @@ from libcloud.compute.types import Provider from libcloud.compute.providers import get_driver -PROXY_URL = 'http://:' +PROXY_URL_NO_AUTH = 'http://:' +PROXY_URL_BASIC_AUTH = 'http://:@:' cls = get_driver(Provider.RACKSPACE) driver = cls('username', 'api key', region='ord') -driver.connection.set_http_proxy(proxy_url=PROXY_URL) - +driver.connection.set_http_proxy(proxy_url=PROXY_URL_NO_AUTH) pprint(driver.list_nodes()) diff --git a/docs/other/using-http-proxy.rst b/docs/other/using-http-proxy.rst index 8c12dc5adc..4231bea5c0 100644 --- a/docs/other/using-http-proxy.rst +++ b/docs/other/using-http-proxy.rst @@ -23,8 +23,8 @@ bellow: Known limitations ----------------- -* HTTP proxies which require authentication are not supported * Python 2.6 is not supported +* Only HTTP basic authentication proxy authorization method is supported Examples -------- 
@@ -35,10 +35,18 @@ with Libcloud. 1. Using http_proxy environment variable ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Without authentication: + .. sourcecode:: python http_proxy=http://: python my_script.py +With basic auth authentication: + +.. sourcecode:: python + + http_proxy=http://:@: python my_script.py + 2. Passing http_proxy argument to the connection class ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/libcloud/httplib_ssl.py b/libcloud/httplib_ssl.py index 3783f48829..d74ad40542 100644 --- a/libcloud/httplib_ssl.py +++ b/libcloud/httplib_ssl.py @@ -21,11 +21,14 @@ import socket import sys import ssl +import base64 import warnings import libcloud.security +from libcloud.utils.py3 import b from libcloud.utils.py3 import httplib from libcloud.utils.py3 import urlparse +from libcloud.utils.py3 import urlunquote __all__ = [ 'LibcloudBaseConnection', @@ -46,23 +49,37 @@ class LibcloudBaseConnection(object): proxy_scheme = None proxy_host = None proxy_port = None + + proxy_username = None + proxy_password = None + http_proxy_used = False def set_http_proxy(self, proxy_url): """ Set a HTTP proxy which will be used with this connection. - :param proxy_url: Proxy URL (e.g. http://hostname:3128) + :param proxy_url: Proxy URL (e.g. http://: without + authentication and + http://:@: for + basic auth authentication information. 
:type proxy_url: ``str`` """ if sys.version_info[:2] == (2, 6): raise Exception('HTTP proxy support requires Python 2.7 or higher') - scheme, host, port = self._parse_proxy_url(proxy_url=proxy_url) + result = self._parse_proxy_url(proxy_url=proxy_url) + scheme = result[0] + host = result[1] + port = result[2] + username = result[3] + password = result[4] self.proxy_scheme = scheme self.proxy_host = host self.proxy_port = port + self.proxy_username = username + self.proxy_password = password self.http_proxy_used = True self._setup_http_proxy() @@ -88,7 +105,22 @@ def _parse_proxy_url(self, proxy_url): proxy_scheme = parsed.scheme proxy_host, proxy_port = parsed.hostname, parsed.port - return (proxy_scheme, proxy_host, proxy_port) + netloc = parsed.netloc + + if '@' in netloc: + username_password = netloc.split('@', 1)[0] + split = username_password.split(':', 1) + + if len(split) == 0: + raise ValueError('URL is in an invalid format') + + proxy_username, proxy_password = split[0], split[1] + else: + proxy_username = None + proxy_password = None + + return (proxy_scheme, proxy_host, proxy_port, proxy_username, + proxy_password) def _setup_http_proxy(self): """ @@ -97,7 +129,16 @@ def _setup_http_proxy(self): :param proxy_url: Proxy URL (e.g. 
http://:3128) :type proxy_url: ``str`` """ - self.set_tunnel(host=self.host, port=self.port) + headers = {} + + if self.proxy_username and self.proxy_password: + # Include authentication header + user_pass = '%s:%s' % (self.proxy_username, self.proxy_password) + encoded = base64.encodestring(b(urlunquote(user_pass))).strip() + auth_header = 'Basic %s' % (encoded.decode('utf-8')) + headers['Proxy-Authorization'] = auth_header + + self.set_tunnel(host=self.host, port=self.port, headers=headers) self._set_hostport(host=self.proxy_host, port=self.proxy_port) def _activate_http_proxy(self, sock): diff --git a/libcloud/test/test_connection.py b/libcloud/test/test_connection.py index 75d63fdb5d..aabbd1d85e 100644 --- a/libcloud/test/test_connection.py +++ b/libcloud/test/test_connection.py @@ -40,6 +40,16 @@ def test_parse_proxy_url(self): self.assertEqual(result[0], 'http') self.assertEqual(result[1], '127.0.0.1') self.assertEqual(result[2], 3128) + self.assertEqual(result[3], None) + self.assertEqual(result[4], None) + + proxy_url = 'http://user1:pass1@127.0.0.1:3128' + result = conn._parse_proxy_url(proxy_url=proxy_url) + self.assertEqual(result[0], 'http') + self.assertEqual(result[1], '127.0.0.1') + self.assertEqual(result[2], 3128) + self.assertEqual(result[3], 'user1') + self.assertEqual(result[4], 'pass1') proxy_url = 'https://127.0.0.1:3128' expected_msg = 'Only http proxies are supported' From 816f3705ef7b274844363e8315e56ccf1f1cb8c7 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Wed, 20 Aug 2014 20:14:49 +0200 Subject: [PATCH 180/315] Update cURL lines to include proxy basic auth information (if available). 
--- libcloud/common/base.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/libcloud/common/base.py b/libcloud/common/base.py index ae0e699e52..07647b2b46 100644 --- a/libcloud/common/base.py +++ b/libcloud/common/base.py @@ -345,8 +345,14 @@ def _log_curl(self, method, url, body, headers): cmd = ["curl"] if self.http_proxy_used: - proxy_url = 'http://%s:%s' % (self.proxy_host, - self.proxy_port) + if self.proxy_username and self.proxy_password: + proxy_url = 'http://%s:%s@%s:%s' % (self.proxy_username, + self.proxy_password, + self.proxy_host, + self.proxy_port) + else: + proxy_url = 'http://%s:%s' % (self.proxy_host, + self.proxy_port) proxy_url = pquote(proxy_url) cmd.extend(['--proxy', proxy_url]) From 06707ef0c8c52555f10ce9152388e6416a43fac4 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Thu, 21 Aug 2014 12:33:22 +0200 Subject: [PATCH 181/315] Update proxy support so it also works with Python 2.6. --- .../http_proxy/set_http_proxy_method.py | 11 ++++- docs/other/using-http-proxy.rst | 42 ++++++++++++++----- libcloud/httplib_ssl.py | 14 ++++--- libcloud/test/test_connection.py | 7 ---- 4 files changed, 49 insertions(+), 25 deletions(-) diff --git a/docs/examples/http_proxy/set_http_proxy_method.py b/docs/examples/http_proxy/set_http_proxy_method.py index b9d5bdf03e..9cebf971b3 100644 --- a/docs/examples/http_proxy/set_http_proxy_method.py +++ b/docs/examples/http_proxy/set_http_proxy_method.py @@ -3,10 +3,17 @@ from libcloud.compute.types import Provider from libcloud.compute.providers import get_driver -PROXY_URL_NO_AUTH = 'http://:' +PROXY_URL_NO_AUTH_1 = 'http://:' +PROXY_URL_NO_AUTH_2 = 'http://:' PROXY_URL_BASIC_AUTH = 'http://:@:' cls = get_driver(Provider.RACKSPACE) driver = cls('username', 'api key', region='ord') -driver.connection.set_http_proxy(proxy_url=PROXY_URL_NO_AUTH) + +# Use proxy 1 for this request +driver.connection.set_http_proxy(proxy_url=PROXY_URL_NO_AUTH_1) +pprint(driver.list_nodes()) + +# Use proxy 2 for 
this request +driver.connection.set_http_proxy(proxy_url=PROXY_URL_NO_AUTH_2) pprint(driver.list_nodes()) diff --git a/docs/other/using-http-proxy.rst b/docs/other/using-http-proxy.rst index 4231bea5c0..b59204b036 100644 --- a/docs/other/using-http-proxy.rst +++ b/docs/other/using-http-proxy.rst @@ -5,9 +5,16 @@ Using an HTTP proxy Support for HTTP proxies is only available in Libcloud trunk and higher. -Libcloud supports using an HTTP proxy for outgoing HTTP and HTTPS requests. At -the moment, using a proxy is only supported if you are using Python 2.7 or -above (it has been tested with 2.7, PyPy, 3.1, 3.3, 3.3, 3.4). +Libcloud supports using an HTTP proxy for outgoing HTTP and HTTPS requests. + +Proxy support has been tested with the following Python versions; + +* Python 2.6 +* Python 2.7 / PyPy +* Python 3.1 +* Python 3.2 +* Python 3.3 +* Python 3.4 You can specify which HTTP proxy to use using one of the approaches described bellow: @@ -23,8 +30,7 @@ bellow: Known limitations ----------------- -* Python 2.6 is not supported -* Only HTTP basic authentication proxy authorization method is supported +* Only HTTP basic access authentication proxy authorization method is supported Examples -------- @@ -32,8 +38,12 @@ Examples This section includes some code examples which show how to use an HTTP proxy with Libcloud. -1. Using http_proxy environment variable -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +1. Using ``http_proxy`` environment variable +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +By setting ``http_proxy`` environment variable you can specify which proxy to +use for all of the outgoing requests for a duration / life-time of the process +or a script. Without authentication: @@ -47,14 +57,24 @@ With basic auth authentication: http_proxy=http://:@: python my_script.py -2. Passing http_proxy argument to the connection class -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +2. 
Passing ``http_proxy`` argument to the connection class constructor +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +By passing ``http_proxy`` argument to the +:class:`libcloud.common.base.Connection` class constructor, you can specify +which proxy to use for a particular connection. .. literalinclude:: /examples/http_proxy/constructor_argument.py :language: python -3. Calling set_http_proxy method -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +3. Calling ``set_http_proxy`` method +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Calling ``set_http_proxy`` method allows you to specify which proxy to use +for all the outgoing requests which follow ``set_http_proxy`` method call. + +This method also allows you to use a different proxy for each request as shown +in the example bellow. .. literalinclude:: /examples/http_proxy/set_http_proxy_method.py :language: python diff --git a/libcloud/httplib_ssl.py b/libcloud/httplib_ssl.py index d74ad40542..4497da3942 100644 --- a/libcloud/httplib_ssl.py +++ b/libcloud/httplib_ssl.py @@ -19,7 +19,6 @@ import os import re import socket -import sys import ssl import base64 import warnings @@ -65,9 +64,6 @@ def set_http_proxy(self, proxy_url): basic auth authentication information. 
:type proxy_url: ``str`` """ - if sys.version_info[:2] == (2, 6): - raise Exception('HTTP proxy support requires Python 2.7 or higher') - result = self._parse_proxy_url(proxy_url=proxy_url) scheme = result[0] host = result[1] @@ -138,7 +134,15 @@ def _setup_http_proxy(self): auth_header = 'Basic %s' % (encoded.decode('utf-8')) headers['Proxy-Authorization'] = auth_header - self.set_tunnel(host=self.host, port=self.port, headers=headers) + if hasattr(self, 'set_tunnel'): + # Python 2.7 and higher + self.set_tunnel(host=self.host, port=self.port, headers=headers) + elif hasattr(self, '_set_tunnel'): + # Python 2.6 + self._set_tunnel(host=self.host, port=self.port, headers=headers) + else: + raise ValueError('Unsupported Python version') + self._set_hostport(host=self.proxy_host, port=self.proxy_port) def _activate_http_proxy(self, sock): diff --git a/libcloud/test/test_connection.py b/libcloud/test/test_connection.py index aabbd1d85e..c97cfb439a 100644 --- a/libcloud/test/test_connection.py +++ b/libcloud/test/test_connection.py @@ -25,14 +25,10 @@ from libcloud.common.base import LoggingConnection from libcloud.httplib_ssl import LibcloudBaseConnection from libcloud.httplib_ssl import LibcloudHTTPConnection -from libcloud.utils.py3 import PY26 class BaseConnectionClassTestCase(unittest.TestCase): def test_parse_proxy_url(self): - if PY26: - return - conn = LibcloudBaseConnection() proxy_url = 'http://127.0.0.1:3128' @@ -64,9 +60,6 @@ def test_parse_proxy_url(self): proxy_url=proxy_url) def test_constructor(self): - if PY26: - return - conn = LibcloudHTTPConnection(host='localhost', port=80) self.assertEqual(conn.proxy_scheme, None) self.assertEqual(conn.proxy_host, None) From a3a0959001d63e90a97bcc04916ec7e68fe44e43 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Thu, 21 Aug 2014 18:00:24 +0200 Subject: [PATCH 182/315] Add missing "set_http_proxy" method to the base Connection class --- libcloud/common/base.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff 
--git a/libcloud/common/base.py b/libcloud/common/base.py index 07647b2b46..78a9a1334f 100644 --- a/libcloud/common/base.py +++ b/libcloud/common/base.py @@ -483,6 +483,15 @@ def __init__(self, secure=True, host=None, port=None, url=None, self.proxy_url = proxy_url + def set_http_proxy(self, proxy_url): + """ + Set an HTTP proxy which will be used for all the outgoing HTTP requests. + + :param proxy_url: Proxy URL. + :type proxy_url: ``str`` + """ + self.proxy_url = proxy_url + def set_context(self, context): if not isinstance(context, dict): raise TypeError('context needs to be a dictionary') From bef5399b5bb9b539c40c3770ae28546468c6b6cc Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Thu, 21 Aug 2014 18:02:25 +0200 Subject: [PATCH 183/315] Improve URL validation, update docstring. --- libcloud/common/base.py | 7 +++++-- libcloud/httplib_ssl.py | 2 +- libcloud/test/test_connection.py | 12 ++++++++++++ 3 files changed, 18 insertions(+), 3 deletions(-) diff --git a/libcloud/common/base.py b/libcloud/common/base.py index 78a9a1334f..f559c22b17 100644 --- a/libcloud/common/base.py +++ b/libcloud/common/base.py @@ -485,9 +485,12 @@ def __init__(self, secure=True, host=None, port=None, url=None, def set_http_proxy(self, proxy_url): """ - Set an HTTP proxy which will be used for all the outgoing HTTP requests. + Set a HTTP proxy which will be used with this connection. - :param proxy_url: Proxy URL. + :param proxy_url: Proxy URL (e.g. http://: without + authentication and + http://:@: for + basic auth authentication information. 
:type proxy_url: ``str`` """ self.proxy_url = proxy_url diff --git a/libcloud/httplib_ssl.py b/libcloud/httplib_ssl.py index 4497da3942..104bf34335 100644 --- a/libcloud/httplib_ssl.py +++ b/libcloud/httplib_ssl.py @@ -107,7 +107,7 @@ def _parse_proxy_url(self, proxy_url): username_password = netloc.split('@', 1)[0] split = username_password.split(':', 1) - if len(split) == 0: + if len(split) < 2: raise ValueError('URL is in an invalid format') proxy_username, proxy_password = split[0], split[1] diff --git a/libcloud/test/test_connection.py b/libcloud/test/test_connection.py index c97cfb439a..821425d127 100644 --- a/libcloud/test/test_connection.py +++ b/libcloud/test/test_connection.py @@ -59,6 +59,18 @@ def test_parse_proxy_url(self): conn._parse_proxy_url, proxy_url=proxy_url) + proxy_url = 'http://@127.0.0.1:3128' + expected_msg = 'URL is in an invalid format' + self.assertRaisesRegexp(ValueError, expected_msg, + conn._parse_proxy_url, + proxy_url=proxy_url) + + proxy_url = 'http://user@127.0.0.1:3128' + expected_msg = 'URL is in an invalid format' + self.assertRaisesRegexp(ValueError, expected_msg, + conn._parse_proxy_url, + proxy_url=proxy_url) + def test_constructor(self): conn = LibcloudHTTPConnection(host='localhost', port=80) self.assertEqual(conn.proxy_scheme, None) From 77ce4470311e8f8094b8a06f21fded80dc5c57da Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Fri, 22 Aug 2014 18:13:47 +0200 Subject: [PATCH 184/315] Fix a typo. Spotted by Brian Johnson. --- libcloud/compute/drivers/openstack.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libcloud/compute/drivers/openstack.py b/libcloud/compute/drivers/openstack.py index af306e0c1d..c836109a65 100644 --- a/libcloud/compute/drivers/openstack.py +++ b/libcloud/compute/drivers/openstack.py @@ -1168,7 +1168,7 @@ def create_node(self, **kwargs): :keyword networks: The server is launched into a set of Networks. 
- :type networks: :class:`OpenStackNetwork` + :type networks: ``list`` of :class:`OpenStackNetwork` :keyword ex_disk_config: Name of the disk configuration. Can be either ``AUTO`` or ``MANUAL``. From 9e8fa1b850f3a378ef3788de070c11125d147e40 Mon Sep 17 00:00:00 2001 From: Jon Chen Date: Mon, 25 Aug 2014 16:26:18 -0400 Subject: [PATCH 185/315] update linode plans mapping Closes #351 Signed-off-by: Tomaz Muraus --- libcloud/common/linode.py | 19 +++--- .../fixtures/linode/_avail_linodeplans.json | 65 ++++++++++++++----- .../compute/fixtures/linode/_linode_list.json | 2 +- libcloud/test/compute/test_linode.py | 4 +- 4 files changed, 62 insertions(+), 28 deletions(-) diff --git a/libcloud/common/linode.py b/libcloud/common/linode.py index 6ee9cc6c8d..c547514344 100644 --- a/libcloud/common/linode.py +++ b/libcloud/common/linode.py @@ -31,15 +31,16 @@ API_HOST = 'api.linode.com' API_ROOT = '/' -# Constants that map a RAM figure to a PlanID (updated 4/25/14) -LINODE_PLAN_IDS = {2048: '1', - 4096: '3', - 8192: '5', - 16384: '6', - 32768: '7', - 49152: '8', - 65536: '9', - 98304: '11'} +# Constants that map a RAM figure to a PlanID (updated 2014-08-25) +LINODE_PLAN_IDS = {1024: '1', + 2048: '2', + 4096: '4', + 8192: '6', + 16384: '7', + 32768: '8', + 49152: '9', + 65536: '10', + 98304: '12'} class LinodeException(Exception): diff --git a/libcloud/test/compute/fixtures/linode/_avail_linodeplans.json b/libcloud/test/compute/fixtures/linode/_avail_linodeplans.json index c492e626a6..ac2488934f 100644 --- a/libcloud/test/compute/fixtures/linode/_avail_linodeplans.json +++ b/libcloud/test/compute/fixtures/linode/_avail_linodeplans.json @@ -1,10 +1,28 @@ { "ERRORARRAY": [], "DATA": [{ + "CORES": 1, + "PRICE": 10.00, + "RAM": 1024, + "XFER": 2000, + "PLANID": 1, + "LABEL": "Linode 1024", + "AVAIL": { + "3": 500, + "2": 500, + "7": 500, + "6": 500, + "4": 500, + "8": 500 + }, + "DISK": 24, + "HOURLY": 0.0150 + }, { + "CORES": 2, "PRICE": 20.00, "RAM": 2048, "XFER": 3000, - "PLANID": 
1, + "PLANID": 2, "LABEL": "Linode 2048", "AVAIL": { "3": 500, @@ -14,12 +32,14 @@ "4": 500, "8": 500 }, - "DISK": 48 + "DISK": 48, + "HOURLY": 0.0300 }, { + "CORES": 4, "PRICE": 40.00, "RAM": 4096, "XFER": 4000, - "PLANID": 3, + "PLANID": 4, "LABEL": "Linode 4096", "AVAIL": { "3": 500, @@ -29,12 +49,14 @@ "4": 500, "8": 500 }, - "DISK": 96 + "DISK": 96, + "HOURLY": 0.0600 }, { + "CORES": 6, "PRICE": 80.00, "RAM": 8192, "XFER": 8000, - "PLANID": 5, + "PLANID": 6, "LABEL": "Linode 8192", "AVAIL": { "3": 500, @@ -44,12 +66,14 @@ "4": 500, "8": 500 }, - "DISK": 192 + "DISK": 192, + "HOURLY": 0.1200 }, { + "CORES": 8, "PRICE": 160.00, "RAM": 16384, "XFER": 16000, - "PLANID": 6, + "PLANID": 7, "LABEL": "Linode 16384", "AVAIL": { "3": 500, @@ -59,12 +83,14 @@ "4": 500, "8": 500 }, - "DISK": 384 + "DISK": 384, + "HOURLY": 0.2400 }, { + "CORES": 12, "PRICE": 320.00, "RAM": 32768, "XFER": 20000, - "PLANID": 7, + "PLANID": 8, "LABEL": "Linode 32768", "AVAIL": { "3": 500, @@ -74,12 +100,14 @@ "4": 500, "8": 500 }, - "DISK": 768 + "DISK": 768, + "HOURLY": 0.4800 }, { + "CORES": 16, "PRICE": 480.00, "RAM": 49152, "XFER": 20000, - "PLANID": 8, + "PLANID": 9, "LABEL": "Linode 49152", "AVAIL": { "3": 500, @@ -89,12 +117,14 @@ "4": 500, "8": 500 }, - "DISK": 1152 + "DISK": 1152, + "HOURLY": 0.7200 }, { + "CORES": 20, "PRICE": 640.00, "RAM": 65536, "XFER": 20000, - "PLANID": 9, + "PLANID": 10, "LABEL": "Linode 65536", "AVAIL": { "3": 500, @@ -104,12 +134,14 @@ "4": 500, "8": 500 }, - "DISK": 1536 + "DISK": 1536, + "HOURLY": 0.9600 }, { + "CORES": 20, "PRICE": 960.00, "RAM": 98304, "XFER": 20000, - "PLANID": 11, + "PLANID": 12, "LABEL": "Linode 98304", "AVAIL": { "3": 500, @@ -119,7 +151,8 @@ "4": 500, "8": 500 }, - "DISK": 1920 + "DISK": 1920, + "HOURLY": 1.4400 }], "ACTION": "avail.linodeplans" } diff --git a/libcloud/test/compute/fixtures/linode/_linode_list.json b/libcloud/test/compute/fixtures/linode/_linode_list.json index 4baf897257..345f7cadbb 100644 --- 
a/libcloud/test/compute/fixtures/linode/_linode_list.json +++ b/libcloud/test/compute/fixtures/linode/_linode_list.json @@ -22,7 +22,7 @@ "ALERT_BWOUT_ENABLED": 1, "BACKUPSENABLED": 1, "ALERT_CPU_THRESHOLD": 90, - "PLANID": "1", + "PLANID": "2", "BACKUPWEEKLYDAY": 0, "LABEL": "api-node3", "LPM_DISPLAYGROUP": "test", diff --git a/libcloud/test/compute/test_linode.py b/libcloud/test/compute/test_linode.py index 4c7c80f10c..090e4d84bb 100644 --- a/libcloud/test/compute/test_linode.py +++ b/libcloud/test/compute/test_linode.py @@ -43,7 +43,7 @@ def test_list_nodes(self): node = nodes[0] self.assertEqual(node.id, "8098") self.assertEqual(node.name, 'api-node3') - self.assertEqual(node.extra['PLANID'], '1') + self.assertEqual(node.extra['PLANID'], '2') self.assertTrue('75.127.96.245' in node.public_ips) self.assertEqual(node.private_ips, []) @@ -77,7 +77,7 @@ def test_create_node_ssh_key_auth(self): def test_list_sizes(self): sizes = self.driver.list_sizes() - self.assertEqual(len(sizes), 8) + self.assertEqual(len(sizes), 9) for size in sizes: self.assertEqual(size.ram, int(size.name.split(" ")[1])) From 860674ac745a9c8b2e8b389d642257f3cce2329b Mon Sep 17 00:00:00 2001 From: rtheberge Date: Fri, 29 Aug 2014 09:41:14 -0400 Subject: [PATCH 186/315] Resolve a bug introduced by the original fix of the issue. 1- Add separated verifications for "None" and malformed metadata. A clear error will be raised upon malformed metadata. If undefined or already None, we assume None. 2- Provide comments explaining the odd GCE dictionary format. 3- Perform check on metadata dictionary structure. We expect one "items" key and a tuple of arbitrary values. 4- Prefix the keys/values provided by a simple dictionary by "items" if not found, enforcing point 3's structure. 
Closes #353 Signed-off-by: Tomaz Muraus --- CHANGES.rst | 5 +++++ libcloud/compute/drivers/gce.py | 20 +++++++++++++++++--- libcloud/test/compute/test_gce.py | 30 ++++++++++++++++++++++++++++++ 3 files changed, 52 insertions(+), 3 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index d52ab22e84..53345c8f30 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -73,6 +73,11 @@ Compute Libcloud. [Chris DeRamus] +- Fix a bug with ``ex_metadata`` argument handling in the Google Compute Engine + driver ``create_node`` method. + (LIBCLOUD-544, GITHUB-349, GITHUB-353) + [Raphael Theberge] + Storage ~~~~~~~ diff --git a/libcloud/compute/drivers/gce.py b/libcloud/compute/drivers/gce.py index a6f3c9f157..991432478a 100644 --- a/libcloud/compute/drivers/gce.py +++ b/libcloud/compute/drivers/gce.py @@ -1211,9 +1211,23 @@ def create_node(self, name, size, image, location=None, use_existing=use_existing_disk, ex_disk_type=ex_disk_type) - if ex_metadata is not None: - ex_metadata = {"items": [{"key": k, "value": v} - for k, v in ex_metadata.items()]} + if not ex_metadata: + ex_metadata = None + elif not isinstance(ex_metadata, dict): + raise ValueError('metadata field is not a dictionnary.') + else: + if 'items' not in ex_metadata: + # The expected GCE format is odd: + # items: [{'value': '1', 'key': 'one'}, + # {'value': '2', 'key': 'two'}, + # {'value': 'N', 'key': 'N'}] + # So the only real key is items, and the values are tuples + # Since arbitrary values are fine, we only check for the key. + # If missing, we prefix it to the items. 
+ items = [] + for k, v in ex_metadata.items(): + items.append({'key': k, 'value': v}) + ex_metadata = {'items': items} request, node_data = self._create_node_req(name, size, image, location, ex_network, diff --git a/libcloud/test/compute/test_gce.py b/libcloud/test/compute/test_gce.py index 7d3855e6f6..74cc8b64e5 100644 --- a/libcloud/test/compute/test_gce.py +++ b/libcloud/test/compute/test_gce.py @@ -19,6 +19,8 @@ import unittest import datetime +from mock import Mock + from libcloud.utils.py3 import httplib from libcloud.compute.drivers.gce import (GCENodeDriver, API_VERSION, timestamp_to_datetime, @@ -291,6 +293,34 @@ def test_create_node(self): self.assertTrue(isinstance(node, Node)) self.assertEqual(node.name, node_name) + def test_create_node_with_metadata(self): + node_name = 'node-name' + image = self.driver.ex_get_image('debian-7') + size = self.driver.ex_get_size('n1-standard-1') + + self.driver._create_node_req = Mock() + self.driver._create_node_req.return_value = (None, None) + self.driver.connection.async_request = Mock() + self.driver.ex_get_node = Mock() + + # ex_metadata doesn't contain "items" key + ex_metadata = {'key1': 'value1', 'key2': 'value2'} + self.driver.create_node(node_name, size, image, + ex_metadata=ex_metadata) + + actual = self.driver._create_node_req.call_args[0][6] + self.assertTrue('items' in actual) + self.assertEqual(len(actual['items']), 2) + + # ex_metadata contains "items" key + ex_metadata = {'items': [{'key0': 'value0'}]} + self.driver.create_node(node_name, size, image, + ex_metadata=ex_metadata) + actual = self.driver._create_node_req.call_args[0][6] + self.assertTrue('items' in actual) + self.assertEqual(len(actual['items']), 1) + self.assertEqual(actual['items'][0], {'key0': 'value0'}) + def test_create_node_existing(self): node_name = 'libcloud-demo-europe-np-node' image = self.driver.ex_get_image('debian-7') From f007edaafcae79afb2890e75cd2ba1f91aa26d7e Mon Sep 17 00:00:00 2001 From: Itxaka Serrano Date: Sun, 31 
Aug 2014 18:07:14 +0200 Subject: [PATCH 187/315] Adds the following methods for the Softlayer driver: - list_key_pairs() - get_key_pair() - import_key_pair_from_string() - delete_key_pair() - create_key_pair() (only if Pycripto is installed) Adds tests for all new methods. Modifies the test_create_node() to use an ssh key. Adds a new property on the Softlayer node "ex_keyname"which is the name of the key to be associated to the new node. Closes #354 Signed-off-by: Tomaz Muraus --- CHANGES.rst | 4 + libcloud/compute/drivers/softlayer.py | 96 ++++++++++++++++++- .../v3__SoftLayer_Account_getSshKeys.xml | 63 ++++++++++++ ...oftLayer_Security_Ssh_Key_createObject.xml | 39 ++++++++ ...oftLayer_Security_Ssh_Key_deleteObject.xml | 15 +++ ...__SoftLayer_Security_Ssh_Key_getObject.xml | 33 +++++++ libcloud/test/compute/test_softlayer.py | 64 ++++++++++++- 7 files changed, 308 insertions(+), 6 deletions(-) create mode 100644 libcloud/test/compute/fixtures/softlayer/v3__SoftLayer_Account_getSshKeys.xml create mode 100644 libcloud/test/compute/fixtures/softlayer/v3__SoftLayer_Security_Ssh_Key_createObject.xml create mode 100644 libcloud/test/compute/fixtures/softlayer/v3__SoftLayer_Security_Ssh_Key_deleteObject.xml create mode 100644 libcloud/test/compute/fixtures/softlayer/v3__SoftLayer_Security_Ssh_Key_getObject.xml diff --git a/CHANGES.rst b/CHANGES.rst index 53345c8f30..69ab3995ff 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -78,6 +78,10 @@ Compute (LIBCLOUD-544, GITHUB-349, GITHUB-353) [Raphael Theberge] +- Add SSH key pair management methods to the Softlayer driver. 
+ (GITHUB-321, GITHUB-354) + [Itxaka Serrano] + Storage ~~~~~~~ diff --git a/libcloud/compute/drivers/softlayer.py b/libcloud/compute/drivers/softlayer.py index 4fe8cdd2e3..61a1e1ac69 100644 --- a/libcloud/compute/drivers/softlayer.py +++ b/libcloud/compute/drivers/softlayer.py @@ -17,13 +17,19 @@ """ import time +try: + from Crypto.PublicKey import RSA + crypto = True +except ImportError: + crypto = False from libcloud.common.base import ConnectionUserAndKey from libcloud.common.xmlrpc import XMLRPCResponse, XMLRPCConnection from libcloud.common.types import InvalidCredsError, LibcloudError from libcloud.compute.types import Provider, NodeState from libcloud.compute.base import NodeDriver, Node, NodeLocation, NodeSize, \ - NodeImage + NodeImage, KeyPair +from libcloud.compute.types import KeyPairDoesNotExistError DEFAULT_DOMAIN = 'example.com' DEFAULT_CPU_SIZE = 1 @@ -158,7 +164,6 @@ def request(self, service, method, *args, **kwargs): args = ({'headers': headers}, ) + args endpoint = '%s/%s' % (self.endpoint, service) - return super(SoftLayerConnection, self).request(method, *args, **{'endpoint': endpoint}) @@ -204,7 +209,7 @@ class SoftLayerNodeDriver(NodeDriver): website = 'http://www.softlayer.com/' type = Provider.SOFTLAYER - features = {'create_node': ['generates_password']} + features = {'create_node': ['generates_password', 'ssh_key']} def _to_node(self, host): try: @@ -330,6 +335,8 @@ def create_node(self, **kwargs): :type ex_datacenter: ``str`` :keyword ex_os: e.g. 
UBUNTU_LATEST :type ex_os: ``str`` + :keyword ex_keyname: The name of the key pair + :type ex_keyname: ``str`` """ name = kwargs['name'] os = 'DEBIAN_LATEST' @@ -402,6 +409,9 @@ def create_node(self, **kwargs): if datacenter: newCCI['datacenter'] = {'name': datacenter} + if 'ex_keyname' in kwargs: + newCCI['sshKeys'] = [self._key_name_to_id(kwargs['ex_keyname'])] + res = self.connection.request( 'SoftLayer_Virtual_Guest', 'createObject', newCCI ).object @@ -411,6 +421,59 @@ def create_node(self, **kwargs): return self._to_node(raw_node) + def list_key_pairs(self): + result = self.connection.request( + 'SoftLayer_Account', 'getSshKeys' + ).object + elems = [x for x in result] + key_pairs = self._to_key_pairs(elems=elems) + return key_pairs + + def get_key_pair(self, name): + key_id = self._key_name_to_id(name=name) + result = self.connection.request( + 'SoftLayer_Security_Ssh_Key', 'getObject', id=key_id + ).object + return self._to_key_pair(result) + + # TODO: Check this with the libcloud guys, + # can we create new dependencies? 
+ def create_key_pair(self, name, ex_size=4096): + if crypto is False: + raise NotImplementedError('create_key_pair needs' + 'the pycrypto library') + key = RSA.generate(ex_size) + new_key = { + 'key': key.publickey().exportKey('OpenSSH'), + 'label': name, + 'notes': '', + } + result = self.connection.request( + 'SoftLayer_Security_Ssh_Key', 'createObject', new_key + ).object + result['private'] = key.exportKey('PEM') + return self._to_key_pair(result) + + def import_key_pair_from_string(self, name, key_material): + new_key = { + 'key': key_material, + 'label': name, + 'notes': '', + } + result = self.connection.request( + 'SoftLayer_Security_Ssh_Key', 'createObject', new_key + ).object + + key_pair = self._to_key_pair(result) + return key_pair + + def delete_key_pair(self, key_pair): + key = self._key_name_to_id(key_pair) + result = self.connection.request( + 'SoftLayer_Security_Ssh_Key', 'deleteObject', id=key + ).object + return result + def _to_image(self, img): return NodeImage( id=img['template']['operatingSystemReferenceCode'], @@ -467,8 +530,31 @@ def list_nodes(self): }, } res = self.connection.request( - "SoftLayer_Account", - "getVirtualGuests", + 'SoftLayer_Account', + 'getVirtualGuests', object_mask=mask ).object return [self._to_node(h) for h in res] + + def _to_key_pairs(self, elems): + key_pairs = [self._to_key_pair(elem=elem) for elem in elems] + return key_pairs + + def _to_key_pair(self, elem): + key_pair = KeyPair(name=elem['label'], + public_key=elem['key'], + fingerprint=elem['fingerprint'], + private_key=elem.get('private', None), + driver=self, + extra={'id': elem['id']}) + return key_pair + + def _key_name_to_id(self, name): + result = self.connection.request( + 'SoftLayer_Account', 'getSshKeys' + ).object + key_id = [x for x in result if x['label'] == name] + if len(key_id) == 0: + raise KeyPairDoesNotExistError(name, self) + else: + return int(key_id[0]['id']) diff --git 
a/libcloud/test/compute/fixtures/softlayer/v3__SoftLayer_Account_getSshKeys.xml b/libcloud/test/compute/fixtures/softlayer/v3__SoftLayer_Account_getSshKeys.xml new file mode 100644 index 0000000000..a06952c54c --- /dev/null +++ b/libcloud/test/compute/fixtures/softlayer/v3__SoftLayer_Account_getSshKeys.xml @@ -0,0 +1,63 @@ + + + + + + + id + + 1 + + + + key + + ssh-key + + + + label + + test1 + + + + fingerprint + + 00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00 + + + + + + + + + + id + + 2 + + + + key + + ssh-key + + + + label + + test2 + + + + fingerprint + + 00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00 + + + + + + diff --git a/libcloud/test/compute/fixtures/softlayer/v3__SoftLayer_Security_Ssh_Key_createObject.xml b/libcloud/test/compute/fixtures/softlayer/v3__SoftLayer_Security_Ssh_Key_createObject.xml new file mode 100644 index 0000000000..716832eca3 --- /dev/null +++ b/libcloud/test/compute/fixtures/softlayer/v3__SoftLayer_Security_Ssh_Key_createObject.xml @@ -0,0 +1,39 @@ + + + + + + + id + + 1 + + + + key + + ssh-key + + + + label + + my-key-pair + + + + label + + my-key-pair + + + + fingerprint + + 1f:51:ae:28:bf:89:e9:d8:1f:25:5d:37:2d:7d:b8:ca:9f:f5:f1:6f + + + + + + diff --git a/libcloud/test/compute/fixtures/softlayer/v3__SoftLayer_Security_Ssh_Key_deleteObject.xml b/libcloud/test/compute/fixtures/softlayer/v3__SoftLayer_Security_Ssh_Key_deleteObject.xml new file mode 100644 index 0000000000..119680e5dc --- /dev/null +++ b/libcloud/test/compute/fixtures/softlayer/v3__SoftLayer_Security_Ssh_Key_deleteObject.xml @@ -0,0 +1,15 @@ + + + + + + + status + + success + + + + + + diff --git a/libcloud/test/compute/fixtures/softlayer/v3__SoftLayer_Security_Ssh_Key_getObject.xml b/libcloud/test/compute/fixtures/softlayer/v3__SoftLayer_Security_Ssh_Key_getObject.xml new file mode 100644 index 0000000000..cf96523023 --- /dev/null +++ b/libcloud/test/compute/fixtures/softlayer/v3__SoftLayer_Security_Ssh_Key_getObject.xml @@ -0,0 +1,33 @@ 
+ + + + + + + id + + 1 + + + + key + + ssh-key + + + + label + + test1 + + + + fingerprint + + 00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00 + + + + + + diff --git a/libcloud/test/compute/test_softlayer.py b/libcloud/test/compute/test_softlayer.py index 74f4a3278b..8f4536b024 100644 --- a/libcloud/test/compute/test_softlayer.py +++ b/libcloud/test/compute/test_softlayer.py @@ -16,6 +16,13 @@ import unittest import sys +try: + import Crypto + Crypto + crypto = True +except ImportError: + crypto = False + from libcloud.common.types import InvalidCredsError from libcloud.utils.py3 import httplib @@ -25,12 +32,15 @@ from libcloud.compute.drivers.softlayer import SoftLayerNodeDriver as SoftLayer from libcloud.compute.drivers.softlayer import SoftLayerException, \ NODE_STATE_MAP -from libcloud.compute.types import NodeState +from libcloud.compute.types import NodeState, KeyPairDoesNotExistError from libcloud.test import MockHttp # pylint: disable-msg=E0611 from libcloud.test.file_fixtures import ComputeFileFixtures from libcloud.test.secrets import SOFTLAYER_PARAMS +null_fingerprint = '00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:' + \ + '00:00:00:00:00' + class SoftLayerTests(unittest.TestCase): @@ -119,6 +129,7 @@ def test_create_node_ex_options(self): ex_cpus=2, ex_ram=2048, ex_disk=100, + ex_key='test1', ex_bandwidth=10, ex_local_disk=False, ex_datacenter='Dal05', @@ -132,6 +143,37 @@ def test_destroy_node(self): node = self.driver.list_nodes()[0] self.driver.destroy_node(node) + def test_list_keypairs(self): + keypairs = self.driver.list_key_pairs() + self.assertEqual(len(keypairs), 2) + self.assertEqual(keypairs[0].name, 'test1') + self.assertEqual(keypairs[0].fingerprint, null_fingerprint) + + def test_get_key_pair(self): + key_pair = self.driver.get_key_pair(name='test1') + self.assertEqual(key_pair.name, 'test1') + + def test_get_key_pair_does_not_exist(self): + self.assertRaises(KeyPairDoesNotExistError, self.driver.get_key_pair, + 
name='test-key-pair') + + def test_create_key_pair(self): + if crypto: + key_pair = self.driver.create_key_pair(name='my-key-pair') + fingerprint = ('1f:51:ae:28:bf:89:e9:d8:1f:25:5d' + ':37:2d:7d:b8:ca:9f:f5:f1:6f') + + self.assertEqual(key_pair.name, 'my-key-pair') + self.assertEqual(key_pair.fingerprint, fingerprint) + self.assertTrue(key_pair.private_key is not None) + else: + self.assertRaises(NotImplementedError, self.driver.create_key_pair, + name='my-key-pair') + + def test_delete_key_pair(self): + success = self.driver.delete_key_pair('test1') + self.assertTrue(success) + class SoftLayerMockHttp(MockHttp): fixtures = ComputeFileFixtures('softlayer') @@ -188,6 +230,26 @@ def _xmlrpc_v3_SoftLayer_Virtual_Guest_deleteObject( body = self.fixtures.load('empty.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + def _xmlrpc_v3_SoftLayer_Account_getSshKeys( + self, method, url, body, headers): + body = self.fixtures.load('v3__SoftLayer_Account_getSshKeys.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc_v3_SoftLayer_Security_Ssh_Key_getObject( + self, method, url, body, headers): + body = self.fixtures.load('v3__SoftLayer_Security_Ssh_Key_getObject.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc_v3_SoftLayer_Security_Ssh_Key_createObject( + self, method, url, body, headers): + body = self.fixtures.load('v3__SoftLayer_Security_Ssh_Key_createObject.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _xmlrpc_v3_SoftLayer_Security_Ssh_Key_deleteObject( + self, method, url, body, headers): + body = self.fixtures.load('v3__SoftLayer_Security_Ssh_Key_deleteObject.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + if __name__ == '__main__': sys.exit(unittest.main()) From 65e0bfbc1de05f77ccdd659acafa5d19858bfb32 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Tue, 2 Sep 2014 15:45:49 +0200 Subject: [PATCH 188/315] Correctly categorize node 
public and private IP addresses when dealing with OpenStack floating IPs. Patch by Andrew Mann and Tomaz (tests). --- CHANGES.rst | 4 + libcloud/compute/drivers/openstack.py | 46 +++-- .../openstack_v1.1/_servers_detail.json | 163 +++++++++++++++++- libcloud/test/compute/test_openstack.py | 10 ++ 4 files changed, 207 insertions(+), 16 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index 69ab3995ff..e1253d4412 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -82,6 +82,10 @@ Compute (GITHUB-321, GITHUB-354) [Itxaka Serrano] +- Correctly categorize node IP addresses into public and private when dealing + with OpenStack floating IPs. + [Andrew Mann] + Storage ~~~~~~~ diff --git a/libcloud/compute/drivers/openstack.py b/libcloud/compute/drivers/openstack.py index c836109a65..01fadee2c7 100644 --- a/libcloud/compute/drivers/openstack.py +++ b/libcloud/compute/drivers/openstack.py @@ -39,7 +39,7 @@ from libcloud.common.openstack import OpenStackDriverMixin from libcloud.common.openstack import OpenStackException from libcloud.common.openstack import OpenStackResponse -from libcloud.utils.networking import is_private_subnet +from libcloud.utils.networking import is_public_subnet from libcloud.compute.base import NodeSize, NodeImage from libcloud.compute.base import (NodeDriver, Node, NodeLocation, StorageVolume, VolumeSnapshot) @@ -1942,20 +1942,36 @@ def _to_node(self, api_node): public_ips, private_ips = [], [] for label, values in api_node['addresses'].items(): - ips = [v['addr'] for v in values] - - if label in public_networks_labels: - public_ips.extend(ips) - else: - for ip in ips: - # is_private_subnet does not check for ipv6 - try: - if is_private_subnet(ip): - private_ips.append(ip) - else: - public_ips.append(ip) - except: - private_ips.append(ip) + for value in values: + ip = value['addr'] + + is_public_ip = False + + try: + public_subnet = is_public_subnet(ip) + except: + # IPv6 + public_subnet = False + + # Openstack Icehouse sets 'OS-EXT-IPS:type' to 
'floating' for + # public and 'fixed' for private + explicit_ip_type = value.get('OS-EXT-IPS:type', None) + + if explicit_ip_type == 'floating': + is_public_ip = True + elif explicit_ip_type == 'fixed': + is_public_ip = False + elif label in public_networks_labels: + # Try label next + is_public_ip = True + elif public_subnet: + # Check for public subnet + is_public_ip = True + + if is_public_ip: + public_ips.append(ip) + else: + private_ips.append(ip) # Sometimes 'image' attribute is not present if the node is in an error # state diff --git a/libcloud/test/compute/fixtures/openstack_v1.1/_servers_detail.json b/libcloud/test/compute/fixtures/openstack_v1.1/_servers_detail.json index 35d90858a1..b2dfe18bd6 100644 --- a/libcloud/test/compute/fixtures/openstack_v1.1/_servers_detail.json +++ b/libcloud/test/compute/fixtures/openstack_v1.1/_servers_detail.json @@ -1 +1,162 @@ -{"servers": [{"status": "BUILD", "updated": "2011-10-11T00:50:04Z", "hostId": "912566d83a13fbb357ea3f13c629363d9f7e1ba3f925b49f3d2ab725", "user_id": "rs-reach", "name": "lc-test-2", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/v1.1/rs-reach-project/servers/12065", "rel": "self"}, {"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/servers/12065", "rel": "bookmark"}], "addresses": {"public": [{"version": 4, "addr": "50.57.94.35"}, {"version": 6, "addr": "2001:4801:7808:52:16:3eff:fe47:788a"}], "private": [{"version": 4, "addr": "10.182.64.34"}, {"version": 6, "addr": "fec0:4801:7808:52:16:3eff:fe60:187d"}], "mynetwork": [{"version": 4, "addr": "12.16.18.28"}]}, "tenant_id": "rs-reach-project", "image": {"id": "7", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/images/7", "rel": "bookmark"}]}, "created": "2011-10-11T00:51:39Z", "uuid": "02786501-714e-40af-8342-9c17eccb166d", "accessIPv4": "", "accessIPv6": "", "key_name": null, "progress": 25, "flavor": {"id": "2", "links": [{"href": 
"http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/flavors/2", "rel": "bookmark"}]}, "config_drive": "", "id": 12065, "metadata": {}, "OS-DCF:diskConfig": "AUTO"}, {"status": "ACTIVE", "updated": "2011-10-11T00:44:20Z", "hostId": "a024053a6201e6c6c12660aab3d8fd879e332e663a5e1fdbc02a0307", "user_id": "rs-reach", "name": "lc-test", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/v1.1/rs-reach-project/servers/12064", "rel": "self"}, {"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/servers/12064", "rel": "bookmark"}], "addresses": {"public": [{"version": 4, "addr": "50.57.94.30"}, {"version": 6, "addr": "2001:4801:7808:52:16:3eff:fe77:32e3"}], "private": [{"version": 4, "addr": "10.182.64.29"}, {"version": 6, "addr": "fec0:4801:7808:52:16:3eff:fe6e:b7e2"}]}, "tenant_id": "rs-reach-project", "image": {"id": "7", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/images/7", "rel": "bookmark"}]}, "created": "2011-10-11T00:45:02Z", "uuid": "ec53630b-e4fb-442a-a748-c376f5c4345b", "accessIPv4": "", "accessIPv6": "", "key_name": null, "progress": 100, "flavor": {"id": "2", "links": [{"href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/flavors/2", "rel": "bookmark"}]}, "config_drive": "", "id": 12064, "metadata": {}, "OS-DCF:diskConfig": "AUTO"}]} +{ + "servers": [ + { + "status": "BUILD", + "updated": "2011-10-11T00:50:04Z", + "hostId": "912566d83a13fbb357ea3f13c629363d9f7e1ba3f925b49f3d2ab725", + "user_id": "rs-reach", + "name": "lc-test-2", + "links": [ + { + "href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/v1.1/rs-reach-project/servers/12065", + "rel": "self" + }, + { + "href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/servers/12065", + "rel": "bookmark" + } + ], + "addresses": { + "public": [ + { + "version": 4, + "addr": "50.57.94.35" + }, + { + "version": 6, + "addr": 
"2001:4801:7808:52:16:3eff:fe47:788a" + } + ], + "private": [ + { + "version": 4, + "addr": "10.182.64.34" + }, + { + "version": 6, + "addr": "fec0:4801:7808:52:16:3eff:fe60:187d" + } + ], + "mynetwork": [ + { + "version": 4, + "addr": "12.16.18.28" + } + ], + "mynetwork_public": [ + { + "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:79:90:aa", + "OS-EXT-IPS:type": "floating", + "addr": "192.168.3.3", + "version": 4 + } + ], + "mynetwork_private": [ + { + "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:79:90:aa", + "OS-EXT-IPS:type": "fixed", + "addr": "10.3.3.3", + "version": 4 + } + ] + }, + "tenant_id": "rs-reach-project", + "image": { + "id": "7", + "links": [ + { + "href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/images/7", + "rel": "bookmark" + } + ] + }, + "created": "2011-10-11T00:51:39Z", + "uuid": "02786501-714e-40af-8342-9c17eccb166d", + "accessIPv4": "", + "accessIPv6": "", + "key_name": null, + "progress": 25, + "flavor": { + "id": "2", + "links": [ + { + "href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/flavors/2", + "rel": "bookmark" + } + ] + }, + "config_drive": "", + "id": 12065, + "metadata": {}, + "OS-DCF:diskConfig": "AUTO" + }, + { + "status": "ACTIVE", + "updated": "2011-10-11T00:44:20Z", + "hostId": "a024053a6201e6c6c12660aab3d8fd879e332e663a5e1fdbc02a0307", + "user_id": "rs-reach", + "name": "lc-test", + "links": [ + { + "href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/v1.1/rs-reach-project/servers/12064", + "rel": "self" + }, + { + "href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/servers/12064", + "rel": "bookmark" + } + ], + "addresses": { + "public": [ + { + "version": 4, + "addr": "50.57.94.30" + }, + { + "version": 6, + "addr": "2001:4801:7808:52:16:3eff:fe77:32e3" + } + ], + "private": [ + { + "version": 4, + "addr": "10.182.64.29" + }, + { + "version": 6, + "addr": "fec0:4801:7808:52:16:3eff:fe6e:b7e2" + } + ] + }, + "tenant_id": "rs-reach-project", + 
"image": { + "id": "7", + "links": [ + { + "href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/images/7", + "rel": "bookmark" + } + ] + }, + "created": "2011-10-11T00:45:02Z", + "uuid": "ec53630b-e4fb-442a-a748-c376f5c4345b", + "accessIPv4": "", + "accessIPv6": "", + "key_name": null, + "progress": 100, + "flavor": { + "id": "2", + "links": [ + { + "href": "http://alpha.ord.servers.api.rackspacecloud.com:8774/rs-reach-project/flavors/2", + "rel": "bookmark" + } + ] + }, + "config_drive": "", + "id": 12064, + "metadata": {}, + "OS-DCF:diskConfig": "AUTO" + } + ] +} diff --git a/libcloud/test/compute/test_openstack.py b/libcloud/test/compute/test_openstack.py index 642b7a85b9..e52c6d9451 100644 --- a/libcloud/test/compute/test_openstack.py +++ b/libcloud/test/compute/test_openstack.py @@ -750,14 +750,24 @@ def test_list_nodes(self): node = nodes[0] self.assertEqual('12065', node.id) + # test public IPv4 self.assertTrue('12.16.18.28' in node.public_ips) self.assertTrue('50.57.94.35' in node.public_ips) + + # floating ip + self.assertTrue('192.168.3.3' in node.public_ips) + # test public IPv6 self.assertTrue( '2001:4801:7808:52:16:3eff:fe47:788a' in node.public_ips) + # test private IPv4 self.assertTrue('10.182.64.34' in node.private_ips) + + # floating ip + self.assertTrue('10.3.3.3' in node.private_ips) + # test private IPv6 self.assertTrue( 'fec0:4801:7808:52:16:3eff:fe60:187d' in node.private_ips) From bd37d5c6f3a813e8c4b6cf8133b7a65493bd886e Mon Sep 17 00:00:00 2001 From: Sherif Zain Date: Wed, 3 Sep 2014 12:35:51 +0200 Subject: [PATCH 189/315] Added image_id and size_id fields when calling DigitalOcean driver's list_nodes method Closes #355 Signed-off-by: Tomaz Muraus --- libcloud/compute/drivers/digitalocean.py | 2 +- libcloud/test/compute/test_digitalocean.py | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/libcloud/compute/drivers/digitalocean.py b/libcloud/compute/drivers/digitalocean.py index 
7dbed418c0..4ec6da1513 100644 --- a/libcloud/compute/drivers/digitalocean.py +++ b/libcloud/compute/drivers/digitalocean.py @@ -178,7 +178,7 @@ def ex_destroy_ssh_key(self, key_id): return res.status == httplib.OK def _to_node(self, data): - extra_keys = ['backups_active', 'region_id'] + extra_keys = ['backups_active', 'region_id', 'image_id', 'size_id'] if 'status' in data: state = self.NODE_STATE_MAP.get(data['status'], NodeState.UNKNOWN) else: diff --git a/libcloud/test/compute/test_digitalocean.py b/libcloud/test/compute/test_digitalocean.py index ee9ef555a5..ff4159b274 100644 --- a/libcloud/test/compute/test_digitalocean.py +++ b/libcloud/test/compute/test_digitalocean.py @@ -79,6 +79,8 @@ def test_list_nodes_success(self): self.assertEqual(len(nodes), 1) self.assertEqual(nodes[0].name, 'test-2') self.assertEqual(nodes[0].public_ips, []) + self.assertEqual(nodes[0].extra['image_id'], 1601) + self.assertEqual(nodes[0].extra['size_id'], 66) def test_create_node_invalid_size(self): image = NodeImage(id='invalid', name=None, driver=self.driver) From 7e9c607e1d47d8453e06f4bb5a845e9d778ef8ae Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Wed, 3 Sep 2014 13:45:10 +0200 Subject: [PATCH 190/315] Re-generate provider feature matrix table. 
--- docs/compute/_supported_methods_key_pair_management.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/compute/_supported_methods_key_pair_management.rst b/docs/compute/_supported_methods_key_pair_management.rst index 8941a8b700..5f98929cb1 100644 --- a/docs/compute/_supported_methods_key_pair_management.rst +++ b/docs/compute/_supported_methods_key_pair_management.rst @@ -56,7 +56,7 @@ Provider list key pairs get key pair create key pai `RimuHosting`_ no no no no no no `ServerLove`_ no no no no no no `skalicloud`_ no no no no no no -`SoftLayer`_ no no no no no no +`SoftLayer`_ yes yes yes yes no yes `vCloud`_ no no no no no no `VCL`_ no no no no no no `vCloud`_ no no no no no no From 37576f3d76843e1da79b608af2e53ee27619446b Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Sat, 6 Sep 2014 16:09:34 +0200 Subject: [PATCH 191/315] Update upgrade nodes. --- docs/upgrade_notes.rst | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/docs/upgrade_notes.rst b/docs/upgrade_notes.rst index ef54b38faf..656e3e1af2 100644 --- a/docs/upgrade_notes.rst +++ b/docs/upgrade_notes.rst @@ -16,7 +16,6 @@ Changes in the OpenStack authentication and service catalog classes touched the classes mentioned bellow, then you aren't affected and those changes are fully backward compatible. - To make OpenStack authentication and identity related classes more extensible, easier to main and easier to use, those classes have been refactored. All of the changes are described bellow. @@ -41,6 +40,9 @@ the changes are described bellow. the ``OpenStackServiceCatalog`` class have been modified to always return the result in the same order (result values are sorted beforehand). +For more information and examples, please refer to the +`Libcloud now supports OpenStack Identity (Keystone) API v3`_ blog post. 
+ Libcloud 0.14.1 --------------- @@ -808,3 +810,5 @@ For example: driver = Cls('key', 'secret', api_version='1.4') For a full list of changes, please see the `CHANGES file `__. + +.. _`Libcloud now supports OpenStack Identity (Keystone) API v3`: http://www.tomaz.me/2014/08/23/libcloud-now-supports-openstack-identity-keystone-api-v3.html From 8769b8a5d401fbf3a60c2a63a7ff04562d49d6c1 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Sat, 6 Sep 2014 16:18:01 +0200 Subject: [PATCH 192/315] Document LIBCLOUD_DEBUG_PRETTY_PRINT_JSON environment variable. --- docs/troubleshooting.rst | 122 ++++++++++++++++++++++++++++++++++++++- 1 file changed, 121 insertions(+), 1 deletion(-) diff --git a/docs/troubleshooting.rst b/docs/troubleshooting.rst index c100662516..5f206025a2 100644 --- a/docs/troubleshooting.rst +++ b/docs/troubleshooting.rst @@ -32,7 +32,14 @@ response body (if compressed) before logging it. To enable it, set ``LIBCLOUD_DEBUG`` environment variable and make it point to a file where the debug output should be saved. -For example if you want the output to be logged to the standard error (on +If the API returns a JSON which is not human friendly, you can also set +``LIBCLOUD_DEBUG_PRETTY_PRINT_JSON`` environment variable which will cause +the JSON to be beautified / formated so it's easier for humands to read it. + +Example 1 - Logging output to standard error +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +If you want the output to be logged to the standard error (on Linux) you can set it to ``/dev/stderr``: .. sourcecode:: bash @@ -60,3 +67,116 @@ Example output: 0 # -------- end 4431824872:4431825232 response ---------- + +Example 2 - Making JSON response human friendly +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Non-formatted response: + +.. sourcecode:: bash + + LIBCLOUD_DEBUG=/dev/stderr python my_script.py + +.. 
sourcecode:: bash + + # -------- begin 23125648:23160304 response ---------- + HTTP/1.1 200 OK + Content-Length: 1572 + X-Compute-Request-Id: req-79ab42d8-a959-44eb-8dec-bc9458b2f4b3 + Server: nginx/1.4.7 + Connection: keep-alive + Date: Sat, 06 Sep 2014 14:13:37 GMT + Content-Type: application/json + + {"servers": [{"status": "ACTIVE", "updated": "2014-09-06T14:13:32Z", "hostId": "561d56de25c177c422278d7ca5f8b210118348040b12afbad06f278a", "addresses": {"internet-routable": [{"OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:3f:c0:a1", "version": 4, "addr": "10.100.100.101", "OS-EXT-IPS:type": "fixed"}]}, "links": [{"href": "http://nova/v2/d3b31ebfd32744d19d848f3e9c351869/servers/deb35f96-be41-431e-b931-6e615ec720f4", "rel": "self"}, {"href": "http://nova/d3b31ebfd32744d19d848f3e9c351869/servers/deb35f96-be41-431e-b931-6e615ec720f4", "rel": "bookmark"}], "key_name": null, "image": {"id": "e9537ddd-6579-4473-9898-d211ab90f6d3", "links": [{"href": "http://nova/d3b31ebfd32744d19d848f3e9c351869/images/e9537ddd-6579-4473-9898-d211ab90f6d3", "rel": "bookmark"}]}, "OS-EXT-STS:task_state": null, "OS-EXT-STS:vm_state": "active", "OS-SRV-USG:launched_at": "2014-09-06T14:13:32.000000", "flavor": {"id": "90c2a137-611b-4dd2-9d65-d4a0b0858531", "links": [{"href": "http://nova/d3b31ebfd32744d19d848f3e9c351869/flavors/90c2a137-611b-4dd2-9d65-d4a0b0858531", "rel": "bookmark"}]}, "id": "deb35f96-be41-431e-b931-6e615ec720f4", "security_groups": [{"name": "default"}], "OS-SRV-USG:terminated_at": null, "OS-EXT-AZ:availability_zone": "nova", "user_id": "06dda7c06aa246c88d7775d02bc119ac", "name": "test lc 2", "created": "2014-09-06T14:13:12Z", "tenant_id": "d3b31ebfd32744d19d848f3e9c351869", "OS-DCF:diskConfig": "MANUAL", "os-extended-volumes:volumes_attached": [], "accessIPv4": "", "accessIPv6": "", "progress": 0, "OS-EXT-STS:power_state": 1, "config_drive": "", "metadata": {}}]} + # -------- end 23125648:23160304 response ---------- + +Human friendly formatted JSON response: + +.. 
sourcecode:: bash + + LIBCLOUD_DEBUG=/dev/stderr LIBCLOUD_DEBUG_PRETTY_PRINT_JSON=1 python my_script.py + +.. sourcecode:: bash + + # -------- begin 41102928:41133624 response ---------- + HTTP/1.1 200 OK + Content-Length: 1572 + X-Compute-Request-Id: req-3ce8b047-55cd-4e20-bfeb-b65619696aec + Server: nginx/1.4.7 + Connection: keep-alive + Date: Sat, 06 Sep 2014 14:14:38 GMT + Content-Type: application/json + + { + "servers": [ + { + "OS-DCF:diskConfig": "MANUAL", + "OS-EXT-AZ:availability_zone": "nova", + "OS-EXT-STS:power_state": 1, + "OS-EXT-STS:task_state": null, + "OS-EXT-STS:vm_state": "active", + "OS-SRV-USG:launched_at": "2014-09-06T14:13:32.000000", + "OS-SRV-USG:terminated_at": null, + "accessIPv4": "", + "accessIPv6": "", + "addresses": { + "internet-routable": [ + { + "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:3f:c0:a1", + "OS-EXT-IPS:type": "fixed", + "addr": "10.100.100.101", + "version": 4 + } + ] + }, + "config_drive": "", + "created": "2014-09-06T14:13:12Z", + "flavor": { + "id": "90c2a137-611b-4dd2-9d65-d4a0b0858531", + "links": [ + { + "href": "http://nova/d3b31ebfd32744d19d848f3e9c351869/flavors/90c2a137-611b-4dd2-9d65-d4a0b0858531", + "rel": "bookmark" + } + ] + }, + "hostId": "561d56de25c177c422278d7ca5f8b210118348040b12afbad06f278a", + "id": "deb35f96-be41-431e-b931-6e615ec720f4", + "image": { + "id": "e9537ddd-6579-4473-9898-d211ab90f6d3", + "links": [ + { + "href": "http://nova/d3b31ebfd32744d19d848f3e9c351869/images/e9537ddd-6579-4473-9898-d211ab90f6d3", + "rel": "bookmark" + } + ] + }, + "key_name": null, + "links": [ + { + "href": "http://nova/v2/d3b31ebfd32744d19d848f3e9c351869/servers/deb35f96-be41-431e-b931-6e615ec720f4", + "rel": "self" + }, + { + "href": "http://nova/d3b31ebfd32744d19d848f3e9c351869/servers/deb35f96-be41-431e-b931-6e615ec720f4", + "rel": "bookmark" + } + ], + "metadata": {}, + "name": "test lc 2", + "os-extended-volumes:volumes_attached": [], + "progress": 0, + "security_groups": [ + { + "name": "default" + } + ], + 
"status": "ACTIVE", + "tenant_id": "d3b31ebfd32744d19d848f3e9c351869", + "updated": "2014-09-06T14:13:32Z", + "user_id": "06dda7c06aa246c88d7775d02bc119ac" + } + ] + } + # -------- end 41102928:41133624 response ---------- From ef823b9552601b5012860858883ce5962180163f Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Sat, 6 Sep 2014 19:13:04 +0200 Subject: [PATCH 193/315] Add missing t2 instances to the EC2 driver. --- CHANGES.rst | 3 ++ contrib/scrape-ec2-prices.py | 8 ++++ libcloud/compute/drivers/ec2.py | 67 ++++++++++++++++++++++++++++--- libcloud/data/pricing.json | 26 +++++++++++- libcloud/test/compute/test_ec2.py | 8 ++-- 5 files changed, 101 insertions(+), 11 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index e1253d4412..de10bddb8d 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -86,6 +86,9 @@ Compute with OpenStack floating IPs. [Andrew Mann] +- Add new t2 instance types to the EC2 driver. + [Tomaz Muraus] + Storage ~~~~~~~ diff --git a/contrib/scrape-ec2-prices.py b/contrib/scrape-ec2-prices.py index 54a63e8b61..0013232ae9 100755 --- a/contrib/scrape-ec2-prices.py +++ b/contrib/scrape-ec2-prices.py @@ -68,6 +68,14 @@ 'i2.2xlarge', 'i2.4xlarge', 'i2.8xlarge', + 'r3.large', + 'r3.xlarge', + 'r3.2xlarge', + 'r3.4xlarge', + 'r3.8xlarge', + 't2.micro', + 't2.small', + 't2.medium' ] # Maps EC2 region name to region name used in the pricing file diff --git a/libcloud/compute/drivers/ec2.py b/libcloud/compute/drivers/ec2.py index af13a3d500..f26797b08b 100644 --- a/libcloud/compute/drivers/ec2.py +++ b/libcloud/compute/drivers/ec2.py @@ -335,6 +335,37 @@ 'ram': 244000, 'disk': 320, # x2 'bandwidth': None + }, + 't2.micro': { + 'id': 't2.micro', + 'name': 'Burstable Performance Micro Instance', + 'ram': 1024, + 'disk': 0, # EBS Only + 'bandwidth': None, + 'extra': { + 'cpu': 1 + } + }, + # Burstable Performance General Purpose + 't2.small': { + 'id': 't2.small', + 'name': 'Burstable Performance Small Instance', + 'ram': 2048, + 'disk': 0, # EBS Only + 
'bandwidth': None, + 'extra': { + 'cpu': 11 + } + }, + 't2.medium': { + 'id': 't2.medium', + 'name': 'Burstable Performance Medium Instance', + 'ram': 4028, + 'disk': 0, # EBS Only + 'bandwidth': None, + 'extra': { + 'cpu': 2 + } } } @@ -377,7 +408,10 @@ 'r3.xlarge', 'r3.2xlarge', 'r3.4xlarge', - 'r3.8xlarge' + 'r3.8xlarge', + 't2.micro', + 't2.small', + 't2.medium' ] }, # US West (Northern California) Region @@ -453,7 +487,10 @@ 'r3.xlarge', 'r3.2xlarge', 'r3.4xlarge', - 'r3.8xlarge' + 'r3.8xlarge', + 't2.micro', + 't2.small', + 't2.medium' ] }, # EU (Ireland) Region @@ -492,7 +529,10 @@ 'r3.xlarge', 'r3.2xlarge', 'r3.4xlarge', - 'r3.8xlarge' + 'r3.8xlarge', + 't2.micro', + 't2.small', + 't2.medium' ] }, # Asia Pacific (Singapore) Region @@ -525,6 +565,9 @@ 'i2.2xlarge', 'i2.4xlarge', 'i2.8xlarge', + 't2.micro', + 't2.small', + 't2.medium' ] }, # Asia Pacific (Tokyo) Region @@ -562,7 +605,10 @@ 'r3.xlarge', 'r3.2xlarge', 'r3.4xlarge', - 'r3.8xlarge' + 'r3.8xlarge', + 't2.micro', + 't2.small', + 't2.medium' ] }, # South America (Sao Paulo) Region @@ -584,7 +630,10 @@ 'm3.xlarge', 'm3.2xlarge', 'c1.medium', - 'c1.xlarge' + 'c1.xlarge', + 't2.micro', + 't2.small', + 't2.medium' ] }, # Asia Pacific (Sydney) Region @@ -621,7 +670,13 @@ 'r3.xlarge', 'r3.2xlarge', 'r3.4xlarge', - 'r3.8xlarge' + 'r3.8xlarge', + 't2.micro', + 't2.small', + 't2.medium' + 't2.micro', + 't2.small', + 't2.medium' ] }, 'nimbus': { diff --git a/libcloud/data/pricing.json b/libcloud/data/pricing.json index e8aa7bf3c2..3b4e8699cf 100644 --- a/libcloud/data/pricing.json +++ b/libcloud/data/pricing.json @@ -1,6 +1,9 @@ { "compute": { "ec2_us_west_oregon": { + "t2.micro": "0.013", + "t2.small": "0.026", + "t2.medium": "0.052", "m3.medium": "0.070", "m3.large": "0.140", "m3.xlarge": "0.280", @@ -36,6 +39,9 @@ "t1.micro": "0.020" }, "ec2_us_west": { + "t2.micro": "0.017", + "t2.small": "0.034", + "t2.medium": "0.068", "m3.medium": "0.077", "m3.large": "0.154", "m3.xlarge": "0.308", @@ -67,6 +73,9 @@ 
"t1.micro": "0.025" }, "ec2_eu_west": { + "t2.micro": "0.014", + "t2.small": "0.028", + "t2.medium": "0.056", "m3.medium": "0.077", "m3.large": "0.154", "m3.xlarge": "0.308", @@ -121,6 +130,9 @@ "8": 1.612 }, "ec2_ap_southeast_2": { + "t2.micro": "0.020", + "t2.small": "0.040", + "t2.medium": "0.080", "m3.medium": "0.098", "m3.large": "0.196", "m3.xlarge": "0.392", @@ -155,6 +167,9 @@ "1": 0.416 }, "ec2_us_east": { + "t2.micro": "0.013", + "t2.small": "0.026", + "t2.medium": "0.052", "m3.medium": "0.070", "m3.large": "0.140", "m3.xlarge": "0.280", @@ -209,6 +224,9 @@ "8": 1.2 }, "ec2_sa_east": { + "t2.micro": "0.027", + "t2.small": "0.054", + "t2.medium": "0.108", "m3.medium": "0.095", "m3.large": "0.190", "m3.xlarge": "0.381", @@ -254,6 +272,9 @@ "8": 1.44 }, "ec2_ap_northeast": { + "t2.micro": "0.020", + "t2.small": "0.040", + "t2.medium": "0.080", "m3.medium": "0.101", "m3.large": "0.203", "m3.xlarge": "0.405", @@ -374,6 +395,9 @@ "1gb": 0.15 }, "ec2_ap_southeast": { + "t2.micro": "0.020", + "t2.small": "0.040", + "t2.medium": "0.080", "m3.medium": "0.098", "m3.large": "0.196", "m3.xlarge": "0.392", @@ -562,5 +586,5 @@ } }, "storage": {}, - "updated": 1397154837 + "updated": 1410023366 } diff --git a/libcloud/test/compute/test_ec2.py b/libcloud/test/compute/test_ec2.py index 3f531cde29..02ab8a8c2e 100644 --- a/libcloud/test/compute/test_ec2.py +++ b/libcloud/test/compute/test_ec2.py @@ -373,7 +373,7 @@ def test_list_sizes(self): self.assertTrue('m2.4xlarge' in ids) if region_name == 'us-east-1': - self.assertEqual(len(sizes), 33) + self.assertEqual(len(sizes), 36) self.assertTrue('cg1.4xlarge' in ids) self.assertTrue('cc2.8xlarge' in ids) self.assertTrue('cr1.8xlarge' in ids) @@ -382,11 +382,11 @@ def test_list_sizes(self): if region_name == 'us-west-2': self.assertEqual(len(sizes), 29) elif region_name == 'ap-southeast-1': - self.assertEqual(len(sizes), 24) + self.assertEqual(len(sizes), 27) elif region_name == 'ap-southeast-2': - self.assertEqual(len(sizes), 
29) + self.assertEqual(len(sizes), 32) elif region_name == 'eu-west-1': - self.assertEqual(len(sizes), 31) + self.assertEqual(len(sizes), 34) self.driver.region_name = region_old From c20f5fe31ca9edf062d5bdff536617f57dbe10d1 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Sat, 6 Sep 2014 19:17:04 +0200 Subject: [PATCH 194/315] Add support for Amazon GovCloud to the EC2 driver (us-gov-west-1 region). Patch by Chris DeRamus. --- CHANGES.rst | 3 +++ libcloud/compute/drivers/ec2.py | 38 +++++++++++++++++++++++++++++++++ 2 files changed, 41 insertions(+) diff --git a/CHANGES.rst b/CHANGES.rst index de10bddb8d..a1da690b12 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -89,6 +89,9 @@ Compute - Add new t2 instance types to the EC2 driver. [Tomaz Muraus] +- Add support for Amazon GovCloud to the EC2 driver (us-gov-west-1 region). + [Chris DeRamus] + Storage ~~~~~~~ diff --git a/libcloud/compute/drivers/ec2.py b/libcloud/compute/drivers/ec2.py index f26797b08b..6a44d32222 100644 --- a/libcloud/compute/drivers/ec2.py +++ b/libcloud/compute/drivers/ec2.py @@ -674,6 +674,44 @@ 't2.micro', 't2.small', 't2.medium' + ] + }, + 'us-gov-west-1': { + 'endpoint': 'ec2.us-gov-west-1.amazonaws.com', + 'api_name': 'ec2_us_govwest', + 'country': 'US', + 'instance_types': [ + 't1.micro', + 'm1.small', + 'm1.medium', + 'm1.large', + 'm1.xlarge', + 'm2.xlarge', + 'm2.2xlarge', + 'm2.4xlarge', + 'm3.medium', + 'm3.large', + 'm3.xlarge', + 'm3.2xlarge', + 'c1.medium', + 'c1.xlarge', + 'g2.2xlarge', + 'c3.large', + 'c3.xlarge', + 'c3.2xlarge', + 'c3.4xlarge', + 'c3.8xlarge', + 'hs1.4xlarge', + 'hs1.8xlarge', + 'i2.xlarge', + 'i2.2xlarge', + 'i2.4xlarge', + 'i2.8xlarge', + 'r3.large', + 'r3.xlarge', + 'r3.2xlarge', + 'r3.4xlarge', + 'r3.8xlarge', 't2.micro', 't2.small', 't2.medium' From 51d0a7faf0b5a199dc034c9a2e30bc529ee3ad0c Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Sat, 6 Sep 2014 21:05:53 +0200 Subject: [PATCH 195/315] Update scrape ec2 pricing script so it can also handle new 
pricing format (JavaScript JSONP) and fix it so correctly (recursively) sorts the pricing data. --- contrib/scrape-ec2-prices.py | 63 +++++++++++++++++++++++++++--------- 1 file changed, 48 insertions(+), 15 deletions(-) diff --git a/contrib/scrape-ec2-prices.py b/contrib/scrape-ec2-prices.py index 0013232ae9..e8c2b999f6 100755 --- a/contrib/scrape-ec2-prices.py +++ b/contrib/scrape-ec2-prices.py @@ -18,13 +18,20 @@ # under the License. import os +import re import json import time from collections import defaultdict, OrderedDict import requests +import demjson -ON_DEMAND_LINUX_URL = 'http://aws.amazon.com/ec2/pricing/json/linux-od.json' +LINUX_PRICING_URLS = [ + # Deprecated instances, JSON format + 'http://aws.amazon.com/ec2/pricing/json/linux-od.json', + # Instancances JavaScript files + 'https://a0.awsstatic.com/pricing/1/ec2/linux-od.min.js' +] EC2_REGIONS = [ 'us-east-1', @@ -96,23 +103,34 @@ def scrape_ec2_pricing(): - response = requests.get(ON_DEMAND_LINUX_URL) - data = response.json() + result = defaultdict(OrderedDict) - regions = data['config']['regions'] + for url in LINUX_PRICING_URLS: + response = requests.get(url) - result = defaultdict(OrderedDict) - for region_data in regions: - region_name = region_data['region'] - libcloud_region_name = REGION_NAME_MAP[region_name] - instance_types = region_data['instanceTypes'] + if re.match('.*?\.json$', url): + data = response.json() + elif re.match('.*?\.js$', url): + data = response.content + match = re.match('^.*callback\((.*?)\);?$', data, + re.MULTILINE | re.DOTALL) + data = match.group(1) + # demjson supports non-strict mode and can parse unquoted objects + data = demjson.decode(data) + + regions = data['config']['regions'] + + for region_data in regions: + region_name = region_data['region'] + libcloud_region_name = REGION_NAME_MAP[region_name] + instance_types = region_data['instanceTypes'] - for instance_type in instance_types: - sizes = instance_type['sizes'] + for instance_type in instance_types: + 
sizes = instance_type['sizes'] - for size in sizes: - price = size['valueColumns'][0]['prices']['USD'] - result[libcloud_region_name][size['size']] = price + for size in sizes: + price = size['valueColumns'][0]['prices']['USD'] + result[libcloud_region_name][size['size']] = price return result @@ -126,7 +144,7 @@ def update_pricing_file(pricing_file_path, pricing_data): data['compute'].update(pricing_data) # Always sort the pricing info - data = OrderedDict(sorted(data.items())) + data = sort_nested_dict(data) content = json.dumps(data, indent=4) lines = content.splitlines() @@ -137,6 +155,21 @@ def update_pricing_file(pricing_file_path, pricing_data): fp.write(content) +def sort_nested_dict(value): + """ + Recursively sort a nested dict. + """ + result = OrderedDict() + + for key, value in sorted(value.items()): + if isinstance(value, (dict, OrderedDict)): + result[key] = sort_nested_dict(value) + else: + result[key] = value + + return result + + def main(): print('Scraping EC2 pricing data') From 783f936741cf26d14657411bc7ff2d93d9d1a596 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Sat, 6 Sep 2014 21:09:15 +0200 Subject: [PATCH 196/315] Add tox target for scraping ec2 pricing data. --- tox.ini | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tox.ini b/tox.ini index 0e13e6116e..00c7403aca 100644 --- a/tox.ini +++ b/tox.ini @@ -52,6 +52,12 @@ changedir = docs commands = python ../contrib/generate_provider_feature_matrix_table.py sphinx-build -W -b html -d {envtmpdir}/doctrees . _build/html +[testenv:scrape-ec2-prices] +deps = requests + demjson +basepython = python2.7 +commands = python contrib/scrape-ec2-prices.py + [testenv:lint] deps = flake8 commands = flake8 --exclude="test" libcloud/ From 743259ec424225729e8c0fee8bb399a296d613c0 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Sat, 6 Sep 2014 21:12:46 +0200 Subject: [PATCH 197/315] Update ec2 pricing scrape script so it also scrapes pricing data for previous generation instances. 
--- contrib/scrape-ec2-prices.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/contrib/scrape-ec2-prices.py b/contrib/scrape-ec2-prices.py index e8c2b999f6..762e96d8b3 100755 --- a/contrib/scrape-ec2-prices.py +++ b/contrib/scrape-ec2-prices.py @@ -27,9 +27,11 @@ import demjson LINUX_PRICING_URLS = [ - # Deprecated instances, JSON format + # Deprecated instances (JSON format) 'http://aws.amazon.com/ec2/pricing/json/linux-od.json', - # Instancances JavaScript files + # Previous generation instances (JavaScript file) + 'http://a0.awsstatic.com/pricing/1/ec2/previous-generation/linux-od.min.js', + # New generation instances (JavaScript file) 'https://a0.awsstatic.com/pricing/1/ec2/linux-od.min.js' ] From a10b9eef9c0437911c2da7b9e4b2af64695f75b9 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Sat, 6 Sep 2014 21:15:49 +0200 Subject: [PATCH 198/315] Ignore long lines in contrib/ --- tox.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index 00c7403aca..52b51f7d1b 100644 --- a/tox.ini +++ b/tox.ini @@ -64,5 +64,5 @@ commands = flake8 --exclude="test" libcloud/ flake8 --max-line-length=160 libcloud/test/ flake8 demos/ flake8 --ignore=E902 docs/examples/ - flake8 --ignore=E902 contrib/ + flake8 --ignore=E902 --max-line-length=160 contrib/ python -mjson.tool libcloud/data/pricing.json From 26118a33eff67a1344fddceb52c31cd43d8d97ca Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Sat, 6 Sep 2014 21:16:01 +0200 Subject: [PATCH 199/315] Update / re-generate pricing file. 
--- libcloud/data/pricing.json | 1009 ++++++++++++++++++------------------ 1 file changed, 505 insertions(+), 504 deletions(-) diff --git a/libcloud/data/pricing.json b/libcloud/data/pricing.json index 3b4e8699cf..9c0c0eed13 100644 --- a/libcloud/data/pricing.json +++ b/libcloud/data/pricing.json @@ -1,590 +1,591 @@ { "compute": { - "ec2_us_west_oregon": { - "t2.micro": "0.013", - "t2.small": "0.026", - "t2.medium": "0.052", - "m3.medium": "0.070", - "m3.large": "0.140", - "m3.xlarge": "0.280", - "m3.2xlarge": "0.560", - "m1.small": "0.044", - "m1.medium": "0.087", - "m1.large": "0.175", - "m1.xlarge": "0.350", - "c3.large": "0.105", - "c3.xlarge": "0.210", - "c3.2xlarge": "0.420", - "c3.4xlarge": "0.840", - "c3.8xlarge": "1.680", - "c1.medium": "0.130", - "c1.xlarge": "0.520", - "cc2.8xlarge": "2.000", - "g2.2xlarge": "0.650", - "r3.large": "0.175", - "r3.xlarge": "0.350", - "r3.2xlarge": "0.700", - "r3.4xlarge": "1.400", - "r3.8xlarge": "2.800", - "m2.xlarge": "0.245", - "m2.2xlarge": "0.490", - "m2.4xlarge": "0.980", - "cr1.8xlarge": "3.500", - "i2.xlarge": "0.853", - "i2.2xlarge": "1.705", - "i2.4xlarge": "3.410", - "i2.8xlarge": "6.820", - "hs1.8xlarge": "4.600", - "hi1.4xlarge": "3.100", - "t1.micro": "0.020" + "bluebox": { + "1gb": 0.15, + "2gb": 0.25, + "4gb": 0.35, + "8gb": 0.45 }, - "ec2_us_west": { - "t2.micro": "0.017", - "t2.small": "0.034", - "t2.medium": "0.068", - "m3.medium": "0.077", - "m3.large": "0.154", - "m3.xlarge": "0.308", - "m3.2xlarge": "0.616", - "m1.small": "0.047", - "m1.medium": "0.095", - "m1.large": "0.190", - "m1.xlarge": "0.379", - "c3.large": "0.120", - "c3.xlarge": "0.239", + "cloudsigma_lvs": { + "high-cpu-extra-large": 0.0, + "high-cpu-medium": 0.0, + "high-memory-double-extra-large": 0.0, + "high-memory-extra-large": 0.0, + "micro-high-cpu": 0.0, + "micro-regular": 0.0, + "standard-extra-large": 0.0, + "standard-large": 0.0, + "standard-small": 0.0 + }, + "cloudsigma_zrh": { + "high-cpu-extra-large": 0.78, + 
"high-cpu-medium": 0.211, + "high-memory-double-extra-large": 1.383, + "high-memory-extra-large": 0.642, + "micro-high-cpu": 0.381, + "micro-regular": 0.0548, + "standard-extra-large": 0.762, + "standard-large": 0.381, + "standard-small": 0.0796 + }, + "dreamhost": { + "default": 115, + "high": 150, + "low": 50, + "maximum": 200, + "minimum": 15 + }, + "ec2_ap_northeast": { + "c1.medium": "0.158", + "c1.xlarge": "0.632", + "c3.2xlarge": "0.511", + "c3.4xlarge": "1.021", + "c3.8xlarge": "2.043", + "c3.large": "0.128", + "c3.xlarge": "0.255", + "cc2.8xlarge": "2.349", + "cr1.8xlarge": "4.105", + "g2.2xlarge": "0.898", + "hi1.4xlarge": "3.276", + "hs1.8xlarge": "5.400", + "i2.2xlarge": "2.001", + "i2.4xlarge": "4.002", + "i2.8xlarge": "8.004", + "i2.xlarge": "1.001", + "m1.large": "0.243", + "m1.medium": "0.122", + "m1.small": "0.061", + "m1.xlarge": "0.486", + "m2.2xlarge": "0.575", + "m2.4xlarge": "1.150", + "m2.xlarge": "0.287", + "m3.2xlarge": "0.810", + "m3.large": "0.203", + "m3.medium": "0.101", + "m3.xlarge": "0.405", + "r3.2xlarge": "0.840", + "r3.4xlarge": "1.680", + "r3.8xlarge": "3.360", + "r3.large": "0.210", + "r3.xlarge": "0.420", + "t1.micro": "0.026", + "t2.medium": "0.080", + "t2.micro": "0.020", + "t2.small": "0.040" + }, + "ec2_ap_southeast": { + "c1.medium": "0.164", + "c1.xlarge": "0.655", + "c3.2xlarge": "0.529", + "c3.4xlarge": "1.058", + "c3.8xlarge": "2.117", + "c3.large": "0.132", + "c3.xlarge": "0.265", + "g2.2xlarge": "1.000", + "hs1.8xlarge": "5.570", + "i2.2xlarge": "2.035", + "i2.4xlarge": "4.070", + "i2.8xlarge": "8.140", + "i2.xlarge": "1.018", + "m1.large": "0.233", + "m1.medium": "0.117", + "m1.small": "0.058", + "m1.xlarge": "0.467", + "m2.2xlarge": "0.592", + "m2.4xlarge": "1.183", + "m2.xlarge": "0.296", + "m3.2xlarge": "0.784", + "m3.large": "0.196", + "m3.medium": "0.098", + "m3.xlarge": "0.392", + "r3.2xlarge": "0.840", + "r3.4xlarge": "1.680", + "r3.8xlarge": "3.360", + "r3.large": "0.210", + "r3.xlarge": "0.420", + 
"t1.micro": "0.020", + "t2.medium": "0.080", + "t2.micro": "0.020", + "t2.small": "0.040" + }, + "ec2_ap_southeast_2": { + "c1.medium": "0.164", + "c1.xlarge": "0.655", + "c3.2xlarge": "0.529", + "c3.4xlarge": "1.058", + "c3.8xlarge": "2.117", + "c3.large": "0.132", + "c3.xlarge": "0.265", + "g2.2xlarge": "0.898", + "hs1.8xlarge": "5.570", + "i2.2xlarge": "2.035", + "i2.4xlarge": "4.070", + "i2.8xlarge": "8.140", + "i2.xlarge": "1.018", + "m1.large": "0.233", + "m1.medium": "0.117", + "m1.small": "0.058", + "m1.xlarge": "0.467", + "m2.2xlarge": "0.592", + "m2.4xlarge": "1.183", + "m2.xlarge": "0.296", + "m3.2xlarge": "0.784", + "m3.large": "0.196", + "m3.medium": "0.098", + "m3.xlarge": "0.392", + "r3.2xlarge": "0.840", + "r3.4xlarge": "1.680", + "r3.8xlarge": "3.360", + "r3.large": "0.210", + "r3.xlarge": "0.420", + "t1.micro": "0.020", + "t2.medium": "0.080", + "t2.micro": "0.020", + "t2.small": "0.040" + }, + "ec2_eu_west": { + "c1.medium": "0.148", + "c1.xlarge": "0.592", "c3.2xlarge": "0.478", "c3.4xlarge": "0.956", "c3.8xlarge": "1.912", - "c1.medium": "0.148", - "c1.xlarge": "0.592", + "c3.large": "0.120", + "c3.xlarge": "0.239", + "cc2.8xlarge": "2.250", + "cg1.4xlarge": "2.360", + "cr1.8xlarge": "3.750", "g2.2xlarge": "0.702", - "r3.large": "0.195", - "r3.xlarge": "0.390", - "r3.2xlarge": "0.780", - "r3.4xlarge": "1.560", - "r3.8xlarge": "3.120", - "m2.xlarge": "0.275", - "m2.2xlarge": "0.550", - "m2.4xlarge": "1.100", - "i2.xlarge": "0.938", + "hi1.4xlarge": "3.100", + "hs1.8xlarge": "4.900", "i2.2xlarge": "1.876", "i2.4xlarge": "3.751", "i2.8xlarge": "7.502", - "t1.micro": "0.025" - }, - "ec2_eu_west": { - "t2.micro": "0.014", - "t2.small": "0.028", - "t2.medium": "0.056", - "m3.medium": "0.077", - "m3.large": "0.154", - "m3.xlarge": "0.308", - "m3.2xlarge": "0.616", - "m1.small": "0.047", - "m1.medium": "0.095", + "i2.xlarge": "0.938", "m1.large": "0.190", + "m1.medium": "0.095", + "m1.small": "0.047", "m1.xlarge": "0.379", - "c3.large": "0.120", - 
"c3.xlarge": "0.239", - "c3.2xlarge": "0.478", - "c3.4xlarge": "0.956", - "c3.8xlarge": "1.912", - "c1.medium": "0.148", - "c1.xlarge": "0.592", - "cc2.8xlarge": "2.250", - "g2.2xlarge": "0.702", - "cg1.4xlarge": "2.360", - "r3.large": "0.195", - "r3.xlarge": "0.390", + "m2.2xlarge": "0.550", + "m2.4xlarge": "1.100", + "m2.xlarge": "0.275", + "m3.2xlarge": "0.616", + "m3.large": "0.154", + "m3.medium": "0.077", + "m3.xlarge": "0.308", "r3.2xlarge": "0.780", "r3.4xlarge": "1.560", "r3.8xlarge": "3.120", - "m2.xlarge": "0.275", - "m2.2xlarge": "0.550", - "m2.4xlarge": "1.100", - "cr1.8xlarge": "3.750", - "i2.xlarge": "0.938", - "i2.2xlarge": "1.876", - "i2.4xlarge": "3.751", - "i2.8xlarge": "7.502", - "hs1.8xlarge": "4.900", - "hi1.4xlarge": "3.100", - "t1.micro": "0.020" - }, - "rackspacenovalon": { - "performance2-60": 2.72, - "performance2-120": 5.44, - "performance1-1": 0.04, - "performance2-15": 0.68, - "performance1-4": 0.16, - "performance2-30": 1.36, - "performance2-90": 4.08, - "3": 0.064, - "2": 0.032, - "performance1-2": 0.08, - "4": 0.129, - "7": 0.967, - "6": 0.516, - "5": 0.258, - "performance1-8": 0.32, - "8": 1.612 + "r3.large": "0.195", + "r3.xlarge": "0.390", + "t1.micro": "0.020", + "t2.medium": "0.056", + "t2.micro": "0.014", + "t2.small": "0.028" }, - "ec2_ap_southeast_2": { - "t2.micro": "0.020", - "t2.small": "0.040", - "t2.medium": "0.080", - "m3.medium": "0.098", - "m3.large": "0.196", - "m3.xlarge": "0.392", - "m3.2xlarge": "0.784", - "m1.small": "0.058", - "m1.medium": "0.117", + "ec2_sa_east": { + "c1.medium": "0.179", + "c1.xlarge": "0.718", + "c3.2xlarge": "0.650", + "c3.4xlarge": "1.300", + "c3.8xlarge": "2.600", + "c3.large": "0.163", + "c3.xlarge": "0.325", "m1.large": "0.233", + "m1.medium": "0.117", + "m1.small": "0.058", "m1.xlarge": "0.467", - "c3.large": "0.132", - "c3.xlarge": "0.265", - "c3.2xlarge": "0.529", - "c3.4xlarge": "1.058", - "c3.8xlarge": "2.117", - "c1.medium": "0.164", - "c1.xlarge": "0.655", - "r3.large": "0.210", 
- "r3.xlarge": "0.420", - "r3.2xlarge": "0.840", - "r3.4xlarge": "1.680", - "r3.8xlarge": "3.360", - "m2.xlarge": "0.296", - "m2.2xlarge": "0.592", - "m2.4xlarge": "1.183", - "i2.xlarge": "1.018", - "i2.2xlarge": "2.035", - "i2.4xlarge": "4.070", - "i2.8xlarge": "8.140", - "hs1.8xlarge": "5.570", - "t1.micro": "0.020" - }, - "vps_net": { - "1": 0.416 + "m2.2xlarge": "0.645", + "m2.4xlarge": "1.291", + "m2.xlarge": "0.323", + "m3.2xlarge": "0.761", + "m3.large": "0.190", + "m3.medium": "0.095", + "m3.xlarge": "0.381", + "t1.micro": "0.027", + "t2.medium": "0.108", + "t2.micro": "0.027", + "t2.small": "0.054" }, "ec2_us_east": { - "t2.micro": "0.013", - "t2.small": "0.026", - "t2.medium": "0.052", - "m3.medium": "0.070", - "m3.large": "0.140", - "m3.xlarge": "0.280", - "m3.2xlarge": "0.560", - "m1.small": "0.044", - "m1.medium": "0.087", - "m1.large": "0.175", - "m1.xlarge": "0.350", - "c3.large": "0.105", - "c3.xlarge": "0.210", + "c1.medium": "0.130", + "c1.xlarge": "0.520", "c3.2xlarge": "0.420", "c3.4xlarge": "0.840", "c3.8xlarge": "1.680", - "c1.medium": "0.130", - "c1.xlarge": "0.520", + "c3.large": "0.105", + "c3.xlarge": "0.210", "cc2.8xlarge": "2.000", - "g2.2xlarge": "0.650", "cg1.4xlarge": "2.100", - "r3.large": "0.175", - "r3.xlarge": "0.350", + "cr1.8xlarge": "3.500", + "g2.2xlarge": "0.650", + "hi1.4xlarge": "3.100", + "hs1.8xlarge": "4.600", + "i2.2xlarge": "1.705", + "i2.4xlarge": "3.410", + "i2.8xlarge": "6.820", + "i2.xlarge": "0.853", + "m1.large": "0.175", + "m1.medium": "0.087", + "m1.small": "0.044", + "m1.xlarge": "0.350", + "m2.2xlarge": "0.490", + "m2.4xlarge": "0.980", + "m2.xlarge": "0.245", + "m3.2xlarge": "0.560", + "m3.large": "0.140", + "m3.medium": "0.070", + "m3.xlarge": "0.280", "r3.2xlarge": "0.700", "r3.4xlarge": "1.400", "r3.8xlarge": "2.800", - "m2.xlarge": "0.245", - "m2.2xlarge": "0.490", - "m2.4xlarge": "0.980", + "r3.large": "0.175", + "r3.xlarge": "0.350", + "t1.micro": "0.020", + "t2.medium": "0.052", + "t2.micro": "0.013", 
+ "t2.small": "0.026" + }, + "ec2_us_west": { + "c1.medium": "0.148", + "c1.xlarge": "0.592", + "c3.2xlarge": "0.478", + "c3.4xlarge": "0.956", + "c3.8xlarge": "1.912", + "c3.large": "0.120", + "c3.xlarge": "0.239", + "g2.2xlarge": "0.702", + "i2.2xlarge": "1.876", + "i2.4xlarge": "3.751", + "i2.8xlarge": "7.502", + "i2.xlarge": "0.938", + "m1.large": "0.190", + "m1.medium": "0.095", + "m1.small": "0.047", + "m1.xlarge": "0.379", + "m2.2xlarge": "0.550", + "m2.4xlarge": "1.100", + "m2.xlarge": "0.275", + "m3.2xlarge": "0.616", + "m3.large": "0.154", + "m3.medium": "0.077", + "m3.xlarge": "0.308", + "r3.2xlarge": "0.780", + "r3.4xlarge": "1.560", + "r3.8xlarge": "3.120", + "r3.large": "0.195", + "r3.xlarge": "0.390", + "t1.micro": "0.025", + "t2.medium": "0.068", + "t2.micro": "0.017", + "t2.small": "0.034" + }, + "ec2_us_west_oregon": { + "c1.medium": "0.130", + "c1.xlarge": "0.520", + "c3.2xlarge": "0.420", + "c3.4xlarge": "0.840", + "c3.8xlarge": "1.680", + "c3.large": "0.105", + "c3.xlarge": "0.210", + "cc2.8xlarge": "2.000", "cr1.8xlarge": "3.500", - "i2.xlarge": "0.853", + "g2.2xlarge": "0.650", + "hi1.4xlarge": "3.100", + "hs1.8xlarge": "4.600", "i2.2xlarge": "1.705", "i2.4xlarge": "3.410", "i2.8xlarge": "6.820", - "hs1.8xlarge": "4.600", - "hi1.4xlarge": "3.100", - "t1.micro": "0.020" - }, - "rackspacenovaus": { - "performance2-60": 2.72, - "performance2-120": 5.44, - "performance1-1": 0.04, - "performance2-15": 0.68, - "performance1-4": 0.16, - "performance2-30": 1.36, - "performance2-90": 4.08, - "3": 0.06, - "2": 0.022, - "performance1-2": 0.08, - "4": 0.12, - "7": 0.96, - "6": 0.48, - "5": 0.24, - "performance1-8": 0.32, - "8": 1.2 - }, - "ec2_sa_east": { - "t2.micro": "0.027", - "t2.small": "0.054", - "t2.medium": "0.108", - "m3.medium": "0.095", - "m3.large": "0.190", - "m3.xlarge": "0.381", - "m3.2xlarge": "0.761", - "m1.small": "0.058", - "m1.medium": "0.117", - "m1.large": "0.233", - "m1.xlarge": "0.467", - "c1.medium": "0.179", - "c1.xlarge": 
"0.718", - "m2.xlarge": "0.323", - "m2.2xlarge": "0.645", - "m2.4xlarge": "1.291", - "t1.micro": "0.027" - }, - "cloudsigma_zrh": { - "high-cpu-medium": 0.211, - "standard-large": 0.381, - "micro-high-cpu": 0.381, - "standard-extra-large": 0.762, - "high-memory-double-extra-large": 1.383, - "micro-regular": 0.0548, - "standard-small": 0.0796, - "high-memory-extra-large": 0.642, - "high-cpu-extra-large": 0.78 + "i2.xlarge": "0.853", + "m1.large": "0.175", + "m1.medium": "0.087", + "m1.small": "0.044", + "m1.xlarge": "0.350", + "m2.2xlarge": "0.490", + "m2.4xlarge": "0.980", + "m2.xlarge": "0.245", + "m3.2xlarge": "0.560", + "m3.large": "0.140", + "m3.medium": "0.070", + "m3.xlarge": "0.280", + "r3.2xlarge": "0.700", + "r3.4xlarge": "1.400", + "r3.8xlarge": "2.800", + "r3.large": "0.175", + "r3.xlarge": "0.350", + "t1.micro": "0.020", + "t2.medium": "0.052", + "t2.micro": "0.013", + "t2.small": "0.026" }, - "rackspacenovasyd": { - "performance2-60": 2.72, - "performance2-120": 5.44, - "performance1-1": 0.04, - "performance2-15": 0.68, - "performance1-4": 0.16, - "performance2-30": 1.36, - "performance2-90": 4.08, - "3": 0.072, - "2": 0.026, - "performance1-2": 0.08, - "4": 0.144, - "7": 1.08, - "6": 0.576, - "5": 0.288, - "performance1-8": 0.32, - "8": 1.44 + "elastichosts": { + "extra-large": 0.579, + "high-cpu-extra-large": 0.77, + "high-cpu-medium": 0.18, + "large": 0.378, + "medium": 0.223, + "small": 0.1 }, - "ec2_ap_northeast": { - "t2.micro": "0.020", - "t2.small": "0.040", - "t2.medium": "0.080", - "m3.medium": "0.101", - "m3.large": "0.203", - "m3.xlarge": "0.405", - "m3.2xlarge": "0.810", - "m1.small": "0.061", - "m1.medium": "0.122", - "m1.large": "0.243", - "m1.xlarge": "0.486", - "c3.large": "0.128", - "c3.xlarge": "0.255", - "c3.2xlarge": "0.511", - "c3.4xlarge": "1.021", - "c3.8xlarge": "2.043", - "c1.medium": "0.158", - "c1.xlarge": "0.632", - "cc2.8xlarge": "2.349", - "g2.2xlarge": "0.898", - "r3.large": "0.210", - "r3.xlarge": "0.420", - 
"r3.2xlarge": "0.840", - "r3.4xlarge": "1.680", - "r3.8xlarge": "3.360", - "m2.xlarge": "0.287", - "m2.2xlarge": "0.575", - "m2.4xlarge": "1.150", - "cr1.8xlarge": "4.105", - "i2.xlarge": "1.001", - "i2.2xlarge": "2.001", - "i2.4xlarge": "4.002", - "i2.8xlarge": "8.004", - "hs1.8xlarge": "5.400", - "hi1.4xlarge": "3.276", - "t1.micro": "0.026" + "gandi": { + "1": 0.02, + "large": 0.06, + "medium": 0.03, + "small": 0.02, + "x-large": 0.12 }, "gogrid": { + "16GB": 3.04, + "1GB": 0.19, "24GB": 4.56, - "512MB": 0.095, - "8GB": 1.52, - "4GB": 0.76, "2GB": 0.38, - "1GB": 0.19, - "16GB": 3.04 - }, - "serverlove": { - "high-cpu-medium": 0.291, - "medium": 0.404, - "large": 0.534, - "small": 0.161, - "extra-large": 0.615, - "high-cpu-extra-large": 0.776 - }, - "elastichosts": { - "high-cpu-medium": 0.18, - "medium": 0.223, - "large": 0.378, - "small": 0.1, - "extra-large": 0.579, - "high-cpu-extra-large": 0.77 - }, - "rackspace": { - "performance2-60": 2.72, - "performance2-120": 5.44, - "performance1-1": 0.04, - "performance2-15": 0.68, - "performance1-4": 0.16, - "performance2-30": 1.36, - "1": 0.015, - "performance2-90": 4.08, - "3": 0.06, - "2": 0.03, - "performance1-2": 0.08, - "4": 0.12, - "7": 0.96, - "6": 0.48, - "5": 0.24, - "performance1-8": 0.32, - "8": 1.8 + "4GB": 0.76, + "512MB": 0.095, + "8GB": 1.52 }, "nephoscale": { + "1": 0.6, "11": 0.35, "27": 0.0, - "48": 0.15, + "3": 0.063, "46": 0.1, - "54": 0.938, - "56": 0.75, + "48": 0.15, + "5": 0.031, "50": 0.28, "52": 0.48, - "1": 0.6, - "3": 0.063, - "5": 0.031, + "54": 0.938, + "56": 0.75, "7": 0.125, "9": 0.188 }, "nimbus": { - "m1.xlarge": 0.0, + "m1.large": 0.0, "m1.small": 0.0, - "m1.large": 0.0 - }, - "gandi": { - "1": 0.02, - "small": 0.02, - "large": 0.06, - "medium": 0.03, - "x-large": 0.12 - }, - "skalicloud": { - "high-cpu-medium": 0.249, - "medium": 0.301, - "large": 0.505, - "small": 0.136, - "extra-large": 0.654, - "high-cpu-extra-large": 0.936 - }, - "bluebox": { - "4gb": 0.35, - "2gb": 0.25, - 
"8gb": 0.45, - "1gb": 0.15 - }, - "ec2_ap_southeast": { - "t2.micro": "0.020", - "t2.small": "0.040", - "t2.medium": "0.080", - "m3.medium": "0.098", - "m3.large": "0.196", - "m3.xlarge": "0.392", - "m3.2xlarge": "0.784", - "m1.small": "0.058", - "m1.medium": "0.117", - "m1.large": "0.233", - "m1.xlarge": "0.467", - "c3.large": "0.132", - "c3.xlarge": "0.265", - "c3.2xlarge": "0.529", - "c3.4xlarge": "1.058", - "c3.8xlarge": "2.117", - "c1.medium": "0.164", - "c1.xlarge": "0.655", - "r3.large": "0.210", - "r3.xlarge": "0.420", - "r3.2xlarge": "0.840", - "r3.4xlarge": "1.680", - "r3.8xlarge": "3.360", - "m2.xlarge": "0.296", - "m2.2xlarge": "0.592", - "m2.4xlarge": "1.183", - "i2.xlarge": "1.018", - "i2.2xlarge": "2.035", - "i2.4xlarge": "4.070", - "i2.8xlarge": "8.140", - "hs1.8xlarge": "5.570", - "t1.micro": "0.020" - }, - "cloudsigma_lvs": { - "high-cpu-medium": 0.0, - "standard-large": 0.0, - "micro-high-cpu": 0.0, - "standard-extra-large": 0.0, - "high-memory-double-extra-large": 0.0, - "micro-regular": 0.0, - "standard-small": 0.0, - "high-memory-extra-large": 0.0, - "high-cpu-extra-large": 0.0 + "m1.xlarge": 0.0 }, - "dreamhost": { - "default": 115, - "high": 150, - "minimum": 15, - "maximum": 200, - "low": 50 + "osc_inc_eu_west_1": { + "c1.medium": "0.230", + "c1.xlarge": "0.900", + "cc1.4xlarge": "1.300", + "cc2.8xlarge": "2.400", + "cr1.8xlarge": "3.500", + "m1.large": "0.360", + "m1.medium": "0.120", + "m1.small": "0.090", + "m1.xlarge": "0.730", + "m2.2xlarge": "0.820", + "m2.4xlarge": "1.640", + "m2.xlarge": "0.410", + "m3.2xlarge": "1.560", + "m3.xlarge": "0.780", + "nv1.large": "5.490", + "nv1.medium": "5.250", + "nv1.small": "5.220", + "nv1.xlarge": "5.610", + "os1.8xlarge": "4.310", + "t1.micro": "0.040" }, - "osc_sas_eu_west_3": { - "t1.micro": "0.040", - "m1.small": "0.090", - "m1.medium": "0.130", + "osc_inc_eu_west_3": { + "c1.medium": "0.230", + "c1.xlarge": "0.900", + "cc1.4xlarge": "1.300", + "cc2.8xlarge": "2.400", + "cr1.8xlarge": "3.500", 
"m1.large": "0.360", + "m1.medium": "0.120", + "m1.small": "0.090", + "m1.xlarge": "0.730", + "m2.2xlarge": "0.820", + "m2.4xlarge": "1.640", + "m2.xlarge": "0.410", + "m3.2xlarge": "1.560", + "m3.xlarge": "0.780", + "nv1.large": "5.490", + "nv1.medium": "5.250", + "nv1.small": "5.220", + "nv1.xlarge": "5.610", + "os1.8xlarge": "4.310", + "t1.micro": "0.040" + }, + "osc_inc_us_east_1": { + "c1.medium": "0.150", + "c1.xlarge": "0.580", + "cc1.4xlarge": "1.610", + "cc2.8xlarge": "2.400", + "cr1.8xlarge": "3.500", + "m1.large": "0.240", + "m1.medium": "0.180", + "m1.small": "0.060", "m1.xlarge": "0.730", + "m2.2xlarge": "1.020", + "m2.4xlarge": "2.040", + "m2.xlarge": "0.410", + "m3.2xlarge": "1.560", + "m3.xlarge": "0.500", + "nv1.large": "5.490", + "nv1.medium": "5.250", + "nv1.small": "5.190", + "nv1.xlarge": "5.610", + "os1.8xlarge": "6.400", + "t1.micro": "0.020" + }, + "osc_sas_eu_west_1": { "c1.medium": "0.230", "c1.xlarge": "0.900", - "m2.xlarge": "0.460", + "cc1.4xlarge": "1.460", + "cc2.8xlarge": "2.700", + "cr1.8xlarge": "3.750", + "m1.large": "0.360", + "m1.medium": "0.130", + "m1.small": "0.090", + "m1.xlarge": "0.730", "m2.2xlarge": "0.920", "m2.4xlarge": "1.840", - "nv1.small": "5.220", - "nv1.medium": "5.310", + "m2.xlarge": "0.460", + "m3.2xlarge": "1.560", + "m3.xlarge": "0.780", "nv1.large": "5.490", + "nv1.medium": "5.310", + "nv1.small": "5.220", "nv1.xlarge": "5.860", + "os1.8xlarge": "6.400", + "t1.micro": "0.040" + }, + "osc_sas_eu_west_3": { + "c1.medium": "0.230", + "c1.xlarge": "0.900", "cc1.4xlarge": "1.460", "cc2.8xlarge": "2.700", - "m3.xlarge": "0.780", - "m3.2xlarge": "1.560", "cr1.8xlarge": "3.750", - "os1.8xlarge": "6.400", - "os1.8xlarge": "6.400" - }, - "osc_sas_eu_west_1": { - "t1.micro": "0.040", - "m1.small": "0.090", - "m1.medium": "0.130", "m1.large": "0.360", + "m1.medium": "0.130", + "m1.small": "0.090", "m1.xlarge": "0.730", - "c1.medium": "0.230", - "c1.xlarge": "0.900", - "m2.xlarge": "0.460", "m2.2xlarge": "0.920", 
"m2.4xlarge": "1.840", - "nv1.small": "5.220", - "nv1.medium": "5.310", + "m2.xlarge": "0.460", + "m3.2xlarge": "1.560", + "m3.xlarge": "0.780", "nv1.large": "5.490", + "nv1.medium": "5.310", + "nv1.small": "5.220", "nv1.xlarge": "5.860", - "cc1.4xlarge": "1.460", - "cc2.8xlarge": "2.700", - "m3.xlarge": "0.780", - "m3.2xlarge": "1.560", - "cr1.8xlarge": "3.750", "os1.8xlarge": "6.400", - "os1.8xlarge": "6.400" + "t1.micro": "0.040" }, "osc_sas_us_east_1": { - "t1.micro": "0.020", - "m1.small": "0.070", - "m1.medium": "0.180", - "m1.large": "0.260", - "m1.xlarge": "0.730", "c1.medium": "0.170", "c1.xlarge": "0.660", - "m2.xlarge": "0.460", - "m2.2xlarge": "1.020", - "m2.4xlarge": "2.040", - "nv1.small": "5.220", - "nv1.medium": "5.310", - "nv1.large": "5.490", - "nv1.xlarge": "5.860", "cc1.4xlarge": "1.610", "cc2.8xlarge": "2.700", - "m3.xlarge": "0.550", - "m3.2xlarge": "1.560", "cr1.8xlarge": "3.750", - "os1.8xlarge": "6.400", - "os1.8xlarge": "6.400" - }, - "osc_inc_eu_west_1": { - "t1.micro": "0.040", - "m1.small": "0.090", - "m1.medium": "0.120", - "m1.large": "0.360", - "m1.xlarge": "0.730", - "c1.medium": "0.230", - "c1.xlarge": "0.900", - "m2.xlarge": "0.410", - "m2.2xlarge": "0.820", - "m2.4xlarge": "1.640", - "nv1.small": "5.220", - "nv1.medium": "5.250", - "nv1.large": "5.490", - "nv1.xlarge": "5.610", - "cc1.4xlarge": "1.300", - "cc2.8xlarge": "2.400", - "m3.xlarge": "0.780", - "m3.2xlarge": "1.560", - "cr1.8xlarge": "3.500", - "os1.8xlarge": "4.310", - "os1.8xlarge": "4.310" - }, - "osc_inc_eu_west_3": { - "t1.micro": "0.040", - "m1.small": "0.090", - "m1.medium": "0.120", - "m1.large": "0.360", - "m1.xlarge": "0.730", - "c1.medium": "0.230", - "c1.xlarge": "0.900", - "m2.xlarge": "0.410", - "m2.2xlarge": "0.820", - "m2.4xlarge": "1.640", - "nv1.small": "5.220", - "nv1.medium": "5.250", - "nv1.large": "5.490", - "nv1.xlarge": "5.610", - "cc1.4xlarge": "1.300", - "cc2.8xlarge": "2.400", - "m3.xlarge": "0.780", - "m3.2xlarge": "1.560", - "cr1.8xlarge": 
"3.500", - "os1.8xlarge": "4.310", - "os1.8xlarge": "4.310" - }, - "osc_inc_us_east_1": { - "t1.micro": "0.020", - "m1.small": "0.060", + "m1.large": "0.260", "m1.medium": "0.180", - "m1.large": "0.240", + "m1.small": "0.070", "m1.xlarge": "0.730", - "c1.medium": "0.150", - "c1.xlarge": "0.580", - "m2.xlarge": "0.410", "m2.2xlarge": "1.020", "m2.4xlarge": "2.040", - "nv1.small": "5.190", - "nv1.medium": "5.250", - "nv1.large": "5.490", - "nv1.xlarge": "5.610", - "cc1.4xlarge": "1.610", - "cc2.8xlarge": "2.400", - "m3.xlarge": "0.500", + "m2.xlarge": "0.460", "m3.2xlarge": "1.560", - "cr1.8xlarge": "3.500", + "m3.xlarge": "0.550", + "nv1.large": "5.490", + "nv1.medium": "5.310", + "nv1.small": "5.220", + "nv1.xlarge": "5.860", "os1.8xlarge": "6.400", - "os1.8xlarge": "6.400" + "t1.micro": "0.020" + }, + "rackspace": { + "1": 0.015, + "2": 0.03, + "3": 0.06, + "4": 0.12, + "5": 0.24, + "6": 0.48, + "7": 0.96, + "8": 1.8, + "performance1-1": 0.04, + "performance1-2": 0.08, + "performance1-4": 0.16, + "performance1-8": 0.32, + "performance2-120": 5.44, + "performance2-15": 0.68, + "performance2-30": 1.36, + "performance2-60": 2.72, + "performance2-90": 4.08 + }, + "rackspacenovalon": { + "2": 0.032, + "3": 0.064, + "4": 0.129, + "5": 0.258, + "6": 0.516, + "7": 0.967, + "8": 1.612, + "performance1-1": 0.04, + "performance1-2": 0.08, + "performance1-4": 0.16, + "performance1-8": 0.32, + "performance2-120": 5.44, + "performance2-15": 0.68, + "performance2-30": 1.36, + "performance2-60": 2.72, + "performance2-90": 4.08 + }, + "rackspacenovasyd": { + "2": 0.026, + "3": 0.072, + "4": 0.144, + "5": 0.288, + "6": 0.576, + "7": 1.08, + "8": 1.44, + "performance1-1": 0.04, + "performance1-2": 0.08, + "performance1-4": 0.16, + "performance1-8": 0.32, + "performance2-120": 5.44, + "performance2-15": 0.68, + "performance2-30": 1.36, + "performance2-60": 2.72, + "performance2-90": 4.08 + }, + "rackspacenovaus": { + "2": 0.022, + "3": 0.06, + "4": 0.12, + "5": 0.24, + "6": 0.48, + 
"7": 0.96, + "8": 1.2, + "performance1-1": 0.04, + "performance1-2": 0.08, + "performance1-4": 0.16, + "performance1-8": 0.32, + "performance2-120": 5.44, + "performance2-15": 0.68, + "performance2-30": 1.36, + "performance2-60": 2.72, + "performance2-90": 4.08 + }, + "serverlove": { + "extra-large": 0.615, + "high-cpu-extra-large": 0.776, + "high-cpu-medium": 0.291, + "large": 0.534, + "medium": 0.404, + "small": 0.161 + }, + "skalicloud": { + "extra-large": 0.654, + "high-cpu-extra-large": 0.936, + "high-cpu-medium": 0.249, + "large": 0.505, + "medium": 0.301, + "small": 0.136 + }, + "vps_net": { + "1": 0.416 } }, "storage": {}, - "updated": 1410023366 -} + "updated": 1410030750 +} \ No newline at end of file From 039ecec2036d2910a467f8fe717f8bbf9c5af777 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Sun, 7 Sep 2014 14:57:32 +0200 Subject: [PATCH 200/315] Add support for pretty-formatting XML response bodies when LIBCLOUD_DEBUG_PRETTY_PRINT_RESPONSE environment variable is set. Also fix issue with curl log lines under Python 3. --- CHANGES.rst | 8 ++--- docs/troubleshooting.rst | 69 ++++++++++++++++++++++++++++++++++++---- libcloud/common/base.py | 19 ++++++++--- 3 files changed, 80 insertions(+), 16 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index a1da690b12..42227ab07c 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -11,10 +11,10 @@ General OpenStack Identity (Keystone) service API v3. [Tomaz Muraus] -- Add support for prettifying JSON response body which is printed to a file - like object when using ``LIBCLOUD_DEBUG`` environment variable. - This option can be enabled by setting ``LIBCLOUD_DEBUG_PRETTY_PRINT_JSON`` - environment variable. +- Add support for prettifying JSON or XML response body which is printed to a + file like object when using ``LIBCLOUD_DEBUG`` environment variable. + This option can be enabled by setting + ``LIBCLOUD_DEBUG_PRETTY_PRINT_RESPONSE`` environment variable. 
[Tomaz Muraus] - Add support for using an HTTP proxy for outgoing HTTP and HTTPS requests. diff --git a/docs/troubleshooting.rst b/docs/troubleshooting.rst index 5f206025a2..8c121031bd 100644 --- a/docs/troubleshooting.rst +++ b/docs/troubleshooting.rst @@ -32,9 +32,11 @@ response body (if compressed) before logging it. To enable it, set ``LIBCLOUD_DEBUG`` environment variable and make it point to a file where the debug output should be saved. -If the API returns a JSON which is not human friendly, you can also set -``LIBCLOUD_DEBUG_PRETTY_PRINT_JSON`` environment variable which will cause -the JSON to be beautified / formated so it's easier for humands to read it. +If the API returns JSON or XML in the response body which is not human +friendly, you can also set ``LIBCLOUD_DEBUG_PRETTY_PRINT_RESPONSE`` +environment variable which will cause the JSON or XML to be beautified +/ formated so it's easier for humans to read it. Keep in mind that this +only works for non-chunked responses. Example 1 - Logging output to standard error ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -68,10 +70,10 @@ Example output: # -------- end 4431824872:4431825232 response ---------- -Example 2 - Making JSON response human friendly -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Example 2 - Making JSON / XML response human friendly +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Non-formatted response: +Non-formatted JSON response: .. sourcecode:: bash @@ -95,7 +97,7 @@ Human friendly formatted JSON response: .. sourcecode:: bash - LIBCLOUD_DEBUG=/dev/stderr LIBCLOUD_DEBUG_PRETTY_PRINT_JSON=1 python my_script.py + LIBCLOUD_DEBUG=/dev/stderr LIBCLOUD_DEBUG_PRETTY_PRINT_RESPONSE=1 python my_script.py .. sourcecode:: bash @@ -180,3 +182,56 @@ Human friendly formatted JSON response: ] } # -------- end 41102928:41133624 response ---------- + +Non-formatted XML response: + +.. sourcecode:: bash + + LIBCLOUD_DEBUG=/dev/stderr python my_script.py + +.. 
sourcecode:: bash + + # -------- begin 33145616:33126160 response ---------- + HTTP/1.1 200 OK + X-Amzn-Requestid: e84f62d0-368e-11e4-820b-8bf013dc269e + Date: Sun, 07 Sep 2014 13:00:13 GMT + Content-Length: 457 + Content-Type: text/xml + + + /hostedzone/Z14L0C73CHH1DNexample1.com.41747982-568E-0DFC-8C11-71C23757C740test9false100 + # -------- end 33145616:33126160 response ---------- + +Human friendly formatted XML response: + +.. sourcecode:: bash + + LIBCLOUD_DEBUG=/dev/stderr LIBCLOUD_DEBUG_PRETTY_PRINT_RESPONSE=1 python my_script.py + +.. sourcecode:: bash + + # -------- begin 19444496:19425040 response ---------- + HTTP/1.1 200 OK + X-Amzn-Requestid: 01c02441-368f-11e4-b616-9b9bd7509a8f + Date: Sun, 07 Sep 2014 13:00:56 GMT + Content-Length: 457 + Content-Type: text/xml + + + + + + /hostedzone/Z14L0C73CHH1DN + example1.com. + 41747982-568E-0DFC-8C11-71C23757C740 + + test + + 9 + + + false + 100 + + + # -------- end 19444496:19425040 response ---------- diff --git a/libcloud/common/base.py b/libcloud/common/base.py index f559c22b17..1d05b4b89f 100644 --- a/libcloud/common/base.py +++ b/libcloud/common/base.py @@ -20,6 +20,8 @@ import binascii import time +import xml.dom.minidom + try: from lxml import etree as ET except ImportError: @@ -308,21 +310,28 @@ def makefile(self, *args, **kwargs): elif encoding in ['gzip', 'x-gzip']: body = decompress_data('gzip', body) - pretty_print_json = os.environ.get('LIBCLOUD_DEBUG_PRETTY_PRINT_JSON', - False) + pretty_print = os.environ.get('LIBCLOUD_DEBUG_PRETTY_PRINT_RESPONSE', + False) if r.chunked: ht += "%x\r\n" % (len(body)) - ht += u(body) + ht += body.decode('utf-8') ht += "\r\n0\r\n" else: - if pretty_print_json and content_type == 'application/json': + if pretty_print and content_type == 'application/json': try: - body = json.loads(u(body)) + body = json.loads(body.decode('utf-8')) body = json.dumps(body, sort_keys=True, indent=4) except: # Invalid JSON or server is lying about content-type pass + elif pretty_print 
and content_type == 'text/xml': + try: + elem = xml.dom.minidom.parseString(body.decode('utf-8')) + body = elem.toprettyxml() + except Exception: + # Invalid XML + pass ht += u(body) From 49a1b016d46d624c3d6b559f710a076d9bbd5b82 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Wed, 10 Sep 2014 20:25:41 +0200 Subject: [PATCH 201/315] [LIBCLOUD-613] Allow user to pass "gp2" for "ex_volume_type" argument to the create_volume method in the EC2 driver. Reported by Xavier Barbosa. --- CHANGES.rst | 6 ++++++ libcloud/compute/drivers/ec2.py | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index 42227ab07c..c7e0a1e370 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -92,6 +92,12 @@ Compute - Add support for Amazon GovCloud to the EC2 driver (us-gov-west-1 region). [Chris DeRamus] +- Allow user to pass "gp2" for "ex_volume_type" argument to the create_volume + method in the EC2 driver. + + Reported by Xavier Barbosa. + [Tomaz Muraus, Xavier Barbosa] + Storage ~~~~~~~ diff --git a/libcloud/compute/drivers/ec2.py b/libcloud/compute/drivers/ec2.py index 6a44d32222..693e5c3e7c 100644 --- a/libcloud/compute/drivers/ec2.py +++ b/libcloud/compute/drivers/ec2.py @@ -2280,7 +2280,7 @@ def create_volume(self, size, name, location=None, snapshot=None, is io1. :type iops: ``int`` """ - valid_volume_types = ['standard', 'io1', 'g2'] + valid_volume_types = ['standard', 'io1', 'gp2'] params = { 'Action': 'CreateVolume', From ad43fc476c5d2c11963696400a1c7d7aa958e56b Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Thu, 11 Sep 2014 19:02:56 +0200 Subject: [PATCH 202/315] Fix some pylint violations. 
--- libcloud/common/linode.py | 6 +++++- libcloud/dns/base.py | 3 +++ libcloud/httplib_ssl.py | 6 ++++-- libcloud/pricing.py | 6 +++++- libcloud/storage/drivers/cloudfiles.py | 8 ++++++++ libcloud/storage/drivers/local.py | 6 +++--- libcloud/storage/drivers/nimbus.py | 6 +++--- libcloud/storage/drivers/s3.py | 3 +++ libcloud/utils/py3.py | 2 ++ 9 files changed, 36 insertions(+), 10 deletions(-) diff --git a/libcloud/common/linode.py b/libcloud/common/linode.py index c547514344..2d60500fcd 100644 --- a/libcloud/common/linode.py +++ b/libcloud/common/linode.py @@ -75,7 +75,11 @@ class LinodeResponse(JsonResponse): } libcloud does not take advantage of batching, so a response will always - reflect the above format. A few weird quirks are caught here as well.""" + reflect the above format. A few weird quirks are caught here as well. + """ + + objects = None + def __init__(self, response, connection): """Instantiate a LinodeResponse from the HTTP response diff --git a/libcloud/dns/base.py b/libcloud/dns/base.py index 0e3ac6fe85..f09ccc1a4d 100644 --- a/libcloud/dns/base.py +++ b/libcloud/dns/base.py @@ -154,6 +154,9 @@ class DNSDriver(BaseDriver): name = None website = None + # Map libcloud record type enum to provider record type name + RECORD_TYPE_MAP = {} + def __init__(self, key, secret=None, secure=True, host=None, port=None, **kwargs): """ diff --git a/libcloud/httplib_ssl.py b/libcloud/httplib_ssl.py index 104bf34335..7787e7b3d5 100644 --- a/libcloud/httplib_ssl.py +++ b/libcloud/httplib_ssl.py @@ -136,9 +136,11 @@ def _setup_http_proxy(self): if hasattr(self, 'set_tunnel'): # Python 2.7 and higher + # pylint: disable=no-member self.set_tunnel(host=self.host, port=self.port, headers=headers) elif hasattr(self, '_set_tunnel'): # Python 2.6 + # pylint: disable=no-member self._set_tunnel(host=self.host, port=self.port, headers=headers) else: raise ValueError('Unsupported Python version') @@ -147,7 +149,7 @@ def _setup_http_proxy(self): def _activate_http_proxy(self, 
sock): self.sock = sock - self._tunnel() + self._tunnel() # pylint: disable=no-member def _set_hostport(self, host, port): """ @@ -165,7 +167,7 @@ def _set_hostport(self, host, port): raise httplib.InvalidURL(msg) host = host[:i] else: - port = self.default_port + port = self.default_port # pylint: disable=no-member if host and host[0] == '[' and host[-1] == ']': host = host[1:-1] self.host = host diff --git a/libcloud/pricing.py b/libcloud/pricing.py index ebe99a8aa5..1dbdf204c4 100644 --- a/libcloud/pricing.py +++ b/libcloud/pricing.py @@ -23,8 +23,10 @@ try: import simplejson as json + JSONDecodeError = json.JSONDecodeError except ImportError: import json + JSONDecodeError = ValueError from libcloud.utils.connection import get_response_object @@ -95,6 +97,7 @@ def get_pricing(driver_type, driver_name, pricing_file_path=None): size_pricing = pricing_data[driver_type][driver_name] for driver_type in VALID_PRICING_DRIVER_TYPES: + # pylint: disable=maybe-no-member pricing = pricing_data.get(driver_type, None) if pricing: PRICING_DATA[driver_type] = pricing @@ -203,10 +206,11 @@ def download_pricing_file(file_url=DEFAULT_FILE_URL, # Verify pricing file is valid try: data = json.loads(body) - except json.decoder.JSONDecodeError: + except JSONDecodeError: msg = 'Provided URL doesn\'t contain valid pricing data' raise Exception(msg) + # pylint: disable=maybe-no-member if not data.get('updated', None): msg = 'Provided URL doesn\'t contain valid pricing data' raise Exception(msg) diff --git a/libcloud/storage/drivers/cloudfiles.py b/libcloud/storage/drivers/cloudfiles.py index b0694acfef..a2189c1c62 100644 --- a/libcloud/storage/drivers/cloudfiles.py +++ b/libcloud/storage/drivers/cloudfiles.py @@ -307,6 +307,7 @@ def get_object(self, container_name, object_name): raise LibcloudError('Unexpected status code: %s' % (response.status)) def get_container_cdn_url(self, container): + # pylint: disable=unexpected-keyword-arg container_name_encoded = 
self._encode_container_name(container.name) response = self.connection.request('/%s' % (container_name_encoded), method='HEAD', @@ -339,6 +340,7 @@ def enable_container_cdn(self, container, ex_ttl=None): if ex_ttl: headers['X-TTL'] = ex_ttl + # pylint: disable=unexpected-keyword-arg response = self.connection.request('/%s' % (container_name), method='PUT', headers=headers, @@ -467,6 +469,7 @@ def ex_purge_object_from_cdn(self, obj, email=None): object_name = self._encode_object_name(obj.name) headers = {'X-Purge-Email': email} if email else {} + # pylint: disable=unexpected-keyword-arg response = self.connection.request('/%s/%s' % (container_name, object_name), method='DELETE', @@ -538,6 +541,7 @@ def ex_enable_static_website(self, container, index_file='index.html'): container_name = container.name headers = {'X-Container-Meta-Web-Index': index_file} + # pylint: disable=unexpected-keyword-arg response = self.connection.request('/%s' % (container_name), method='POST', headers=headers, @@ -561,6 +565,7 @@ def ex_set_error_page(self, container, file_name='error.html'): container_name = container.name headers = {'X-Container-Meta-Web-Error': file_name} + # pylint: disable=unexpected-keyword-arg response = self.connection.request('/%s' % (container_name), method='POST', headers=headers, @@ -580,6 +585,7 @@ def ex_set_account_metadata_temp_url_key(self, key): """ headers = {'X-Account-Meta-Temp-URL-Key': key} + # pylint: disable=unexpected-keyword-arg response = self.connection.request('', method='POST', headers=headers, @@ -607,6 +613,7 @@ def ex_get_object_temp_url(self, obj, method='GET', timeout=60): :rtype: ``bool`` """ + # pylint: disable=no-member self.connection._populate_hosts_and_request_paths() expires = int(time() + timeout) path = '%s/%s/%s' % (self.connection.request_path, @@ -654,6 +661,7 @@ def _upload_object_manifest(self, container, object_name, extra=None, object_name_encoded = self._encode_object_name(object_name) request_path = '/%s/%s' % 
(container_name_encoded, object_name_encoded) + # pylint: disable=no-member headers = {'X-Auth-Token': self.connection.auth_token, 'X-Object-Manifest': '%s/%s/' % (container_name_encoded, diff --git a/libcloud/storage/drivers/local.py b/libcloud/storage/drivers/local.py index 5e326f902e..985243dd32 100644 --- a/libcloud/storage/drivers/local.py +++ b/libcloud/storage/drivers/local.py @@ -90,9 +90,9 @@ def __init__(self, key, secret=None, secure=True, host=None, port=None, if not os.path.isdir(self.base_path): raise LibcloudError('The base path is not a directory') - super(StorageDriver, self).__init__(key=key, secret=secret, - secure=secure, host=host, - port=port, **kwargs) + super(LocalStorageDriver, self).__init__(key=key, secret=secret, + secure=secure, host=host, + port=port, **kwargs) def _make_path(self, path, ignore_existing=True): """ diff --git a/libcloud/storage/drivers/nimbus.py b/libcloud/storage/drivers/nimbus.py index b649e8b306..583aefbb10 100644 --- a/libcloud/storage/drivers/nimbus.py +++ b/libcloud/storage/drivers/nimbus.py @@ -41,7 +41,7 @@ def parse_error(self): if self.status in [httplib.UNAUTHORIZED]: raise InvalidCredsError(self.body) raise LibcloudError('Unknown error. 
Status code: %d' % (self.status), - driver=self.driver) + driver=self.connection.driver) class NimbusConnection(ConnectionUserAndKey): @@ -89,13 +89,13 @@ def __init__(self, *args, **kwargs): def iterate_containers(self): response = self.connection.request('/customers/%s/collections' % - (self.connection.user_id)) + (self.user_id)) return self._to_containers(response.object) def create_container(self, container_name): params = {'action': 'create', 'name': container_name} response = self.connection.request('/customers/%s/collections' % - (self.connection.user_id), + (self.user_id), params=params, method='POST') return self._to_container(response.object) diff --git a/libcloud/storage/drivers/s3.py b/libcloud/storage/drivers/s3.py index 9577f98369..862f076f8b 100644 --- a/libcloud/storage/drivers/s3.py +++ b/libcloud/storage/drivers/s3.py @@ -602,6 +602,7 @@ def _commit_multipart(self, object_path, upload_id, chunks): if response.status != httplib.OK: element = response.object + # pylint: disable=maybe-no-member code, message = response._parse_error_details(element=element) msg = 'Error in multipart commit: %s (%s)' % (message, code) raise LibcloudError(msg, driver=self) @@ -730,6 +731,7 @@ def ex_iterate_multipart_uploads(self, container, prefix=None, driver=self) body = response.parse_body() + # pylint: disable=maybe-no-member for node in body.findall(fixxpath(xpath='Upload', namespace=self.namespace)): @@ -748,6 +750,7 @@ def ex_iterate_multipart_uploads(self, container, prefix=None, initiator, owner) # Check if this is the last entry in the listing + # pylint: disable=maybe-no-member is_truncated = body.findtext(fixxpath(xpath='IsTruncated', namespace=self.namespace)) diff --git a/libcloud/utils/py3.py b/libcloud/utils/py3.py index 2e4fc381e2..4e054197e3 100644 --- a/libcloud/utils/py3.py +++ b/libcloud/utils/py3.py @@ -58,6 +58,7 @@ from io import StringIO import urllib import urllib as urllib2 + # pylint: disable=no-name-in-module import urllib.parse as urlparse 
import xmlrpc.client as xmlrpclib @@ -180,6 +181,7 @@ def hexadigits(s): # Taken from http://jimmyg.org/work/code/barenecessities/index.html # (MIT license) + # pylint: disable=function-redefined def relpath(path, start=posixpath.curdir): # NOQA """Return a relative version of a path""" if not path: From 688ec75504d76ba370f96bbe06115762cc87b252 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Sat, 13 Sep 2014 17:52:43 +0200 Subject: [PATCH 203/315] Add pylint config and pylint tox target. --- .pylintrc | 35 +++++++++++++++++++++++++++++++++++ tox.ini | 4 ++++ 2 files changed, 39 insertions(+) create mode 100644 .pylintrc diff --git a/.pylintrc b/.pylintrc new file mode 100644 index 0000000000..56d02c2ccd --- /dev/null +++ b/.pylintrc @@ -0,0 +1,35 @@ +[MASTER] +# Add to the black list. It should be a base name, not a +# path. You may set this option multiple times. +ignore=test + + +# Pickle collected data for later comparisons. +persistent=yes + +# List of plugins (as comma separated values of python modules names) to load, +# usually to register additional checkers. +load-plugins= + + +[MESSAGES CONTROL] +disable=redefined-builtin,too-many-arguments,too-few-public-methods,missing-docstring,invalid-name,abstract-method,no-self-use + + +[TYPECHECK] +# List of members which are set dynamically and missed by pylint inference +# system, and so shouldn't trigger E0201 when accessed. Python regular +# expressions are accepted. +generated-members=async_request,objects + +[VARIABLES] + +# Tells wether we should check for unused import in __init__ files. +init-import=no + +# A regular expression matching names used for dummy variables (i.e. not used). +dummy-variables-rgx=_|dummy + +# List of additional names supposed to be defined in builtins. Remember that +# you should avoid to define new builtins when possible. 
+additional-builtins= diff --git a/tox.ini b/tox.ini index 52b51f7d1b..30516b0b47 100644 --- a/tox.ini +++ b/tox.ini @@ -58,6 +58,10 @@ deps = requests basepython = python2.7 commands = python contrib/scrape-ec2-prices.py +[testenv:pylint] +depds = pylint +commands = pylint --rcfile=.pylintrc -E libcloud/ + [testenv:lint] deps = flake8 commands = flake8 --exclude="test" libcloud/ From d1b6bb749af5d5162c7fed4c538adb1fe3f1e9f6 Mon Sep 17 00:00:00 2001 From: Stefan Friesel Date: Tue, 9 Sep 2014 18:23:34 +0200 Subject: [PATCH 204/315] Customize HTTP vendor prefix for Google Storage Google storage uses/expects custom headers like meta data and storage class to be prefixed with "x-goog" instead of "x-amz". This enables use of object tagging in the Google Storage provider. Closes #356 Signed-off-by: Tomaz Muraus --- CHANGES.rst | 5 +++++ libcloud/storage/drivers/google_storage.py | 1 + libcloud/storage/drivers/s3.py | 12 +++++++----- libcloud/test/storage/test_google_storage.py | 18 ++++++++++++++++++ 4 files changed, 31 insertions(+), 5 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index c7e0a1e370..1495ea7b7a 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -104,6 +104,11 @@ Storage - Fix a bug with CDN requests in the CloudFiles driver. [Tomaz Muraus] +- Fix a bug with not being able to specify meta_data / tags when uploading an + object using Google Storage driver. 
+ (LIBCLOUD-612, GITHUB-356) + [Stefan Friesel] + Loadbalancer ~~~~~~~~~~~~ diff --git a/libcloud/storage/drivers/google_storage.py b/libcloud/storage/drivers/google_storage.py index 9caca9b9ee..0bc9583148 100644 --- a/libcloud/storage/drivers/google_storage.py +++ b/libcloud/storage/drivers/google_storage.py @@ -134,3 +134,4 @@ class GoogleStorageDriver(BaseS3StorageDriver): namespace = NAMESPACE supports_chunked_encoding = False supports_s3_multipart_upload = False + http_vendor_prefix = 'x-goog' diff --git a/libcloud/storage/drivers/s3.py b/libcloud/storage/drivers/s3.py index 862f076f8b..cd7eb8a9db 100644 --- a/libcloud/storage/drivers/s3.py +++ b/libcloud/storage/drivers/s3.py @@ -227,6 +227,7 @@ class BaseS3StorageDriver(StorageDriver): supports_s3_multipart_upload = True ex_location_name = '' namespace = NAMESPACE + http_vendor_prefix = 'x-amz' def iterate_containers(self): response = self.connection.request('/') @@ -799,7 +800,8 @@ def _put_object(self, container, object_name, upload_func, raise ValueError( 'Invalid storage class value: %s' % (storage_class)) - headers['x-amz-storage-class'] = storage_class.upper() + key = self.http_vendor_prefix + '-storage-class' + headers[key] = storage_class.upper() content_type = extra.get('content_type', None) meta_data = extra.get('meta_data', None) @@ -807,11 +809,11 @@ def _put_object(self, container, object_name, upload_func, if meta_data: for key, value in list(meta_data.items()): - key = 'x-amz-meta-%s' % (key) + key = self.http_vendor_prefix + '-meta-%s' % (key) headers[key] = value if acl: - headers['x-amz-acl'] = acl + headers[self.http_vendor_prefix + '-acl'] = acl request_path = self._get_object_path(container, object_name) @@ -883,10 +885,10 @@ def _headers_to_object(self, object_name, container, headers): extra['last_modified'] = headers['last-modified'] for key, value in headers.items(): - if not key.lower().startswith('x-amz-meta-'): + if not key.lower().startswith(self.http_vendor_prefix + '-meta-'): 
continue - key = key.replace('x-amz-meta-', '') + key = key.replace(self.http_vendor_prefix + '-meta-', '') meta_data[key] = value obj = Object(name=object_name, size=headers['content-length'], diff --git a/libcloud/test/storage/test_google_storage.py b/libcloud/test/storage/test_google_storage.py index ed6dd57f91..bd335721cc 100644 --- a/libcloud/test/storage/test_google_storage.py +++ b/libcloud/test/storage/test_google_storage.py @@ -16,6 +16,8 @@ import sys import unittest +from libcloud.utils.py3 import httplib + from libcloud.storage.drivers.google_storage import GoogleStorageDriver from libcloud.test.storage.test_s3 import S3Tests, S3MockHttp @@ -26,6 +28,22 @@ class GoogleStorageMockHttp(S3MockHttp): fixtures = StorageFileFixtures('google_storage') + def _test2_test_get_object(self, method, url, body, headers): + # test_get_object + # Google uses a different HTTP header prefix for meta data + body = self.fixtures.load('list_containers.xml') + headers = {'content-type': 'application/zip', + 'etag': '"e31208wqsdoj329jd"', + 'x-goog-meta-rabbits': 'monkeys', + 'content-length': 12345, + 'last-modified': 'Thu, 13 Sep 2012 07:13:22 GMT' + } + + return (httplib.OK, + body, + headers, + httplib.responses[httplib.OK]) + class GoogleStorageTests(S3Tests): driver_type = GoogleStorageDriver From 252c2e6e729703394d2787016c4ddb21d7b6656a Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Sat, 13 Sep 2014 19:48:33 +0200 Subject: [PATCH 205/315] Fix link. --- docs/storage/drivers/google_storage.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/storage/drivers/google_storage.rst b/docs/storage/drivers/google_storage.rst index 4ac3e01e46..26630623e8 100644 --- a/docs/storage/drivers/google_storage.rst +++ b/docs/storage/drivers/google_storage.rst @@ -21,4 +21,4 @@ API Docs :inherited-members: .. _`XML API v1.0`: https://developers.google.com/storage/docs/reference-guide -.. 
_`official documentation`: https://developers.google.com/storage/docs/reference-guide +.. _`official documentation`: https://developers.google.com/storage/docs/migrating#migration-simple From 6869ebc1d5db2122461fa781852931423d573965 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Sat, 13 Sep 2014 19:52:05 +0200 Subject: [PATCH 206/315] Add a note about retrieving object meta data in the Google Storage driver. --- docs/storage/drivers/google_storage.rst | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/docs/storage/drivers/google_storage.rst b/docs/storage/drivers/google_storage.rst index 26630623e8..d21e95c9dc 100644 --- a/docs/storage/drivers/google_storage.rst +++ b/docs/storage/drivers/google_storage.rst @@ -13,6 +13,19 @@ For information on how to do that, please see the `official documentation`_. If you don't do that, you will get a message that the request is missing a project id header. +Known limitations +----------------- + +1. Meta data / tags aren't returned when using list_container_objects method +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Meta data / tags associated with an object are only returned when using +:meth:`libcloud.storage.base.StorageDriver.get_object` method and not when +listing all the objects in a container using +:meth:`libcloud.storage.base.StorageDriver.list_container_objects` method. + +This is a limitation of the Google Storage API v1.0. + API Docs -------- From 34cff3012c097b1f37b821bea60eb2c761d4fe15 Mon Sep 17 00:00:00 2001 From: Matt Baldwin Date: Tue, 26 Aug 2014 13:59:37 -0700 Subject: [PATCH 207/315] [Libcloud-589] Add a new compute driver for ProfitBricks provider. 
Closes #352 --- CHANGES.rst | 4 + libcloud/compute/drivers/profitbricks.py | 1485 +++++++++++++++++ libcloud/compute/providers.py | 2 + libcloud/compute/types.py | 2 + .../fixtures/profitbricks/attach_volume.xml | 12 + .../fixtures/profitbricks/create_node.xml | 13 + .../fixtures/profitbricks/create_volume.xml | 13 + .../fixtures/profitbricks/destroy_node.xml | 12 + .../fixtures/profitbricks/destroy_volume.xml | 12 + .../fixtures/profitbricks/detach_volume.xml | 12 + .../profitbricks/ex_clear_datacenter.xml | 12 + .../profitbricks/ex_create_datacenter.xml | 13 + .../ex_create_network_interface.xml | 13 + .../profitbricks/ex_describe_datacenter.xml | 15 + .../ex_describe_network_interface.xml | 26 + .../profitbricks/ex_describe_node.xml | 77 + .../profitbricks/ex_describe_volume.xml | 22 + .../profitbricks/ex_destroy_datacenter.xml | 10 + .../ex_destroy_network_interface.xml | 12 + .../profitbricks/ex_list_datacenters.xml | 19 + .../ex_list_network_interfaces.xml | 75 + .../fixtures/profitbricks/ex_start_node.xml | 10 + .../fixtures/profitbricks/ex_stop_node.xml | 10 + .../profitbricks/ex_update_datacenter.xml | 12 + .../ex_update_network_interface.xml | 12 + .../fixtures/profitbricks/ex_update_node.xml | 12 + .../profitbricks/ex_update_volume.xml | 12 + .../fixtures/profitbricks/list_images.xml | 43 + .../fixtures/profitbricks/list_nodes.xml | 172 ++ .../fixtures/profitbricks/list_volumes.xml | 66 + .../fixtures/profitbricks/reboot_node.xml | 10 + libcloud/test/compute/test_profitbricks.py | 509 ++++++ libcloud/test/secrets.py-dist | 1 + 33 files changed, 2730 insertions(+) create mode 100644 libcloud/compute/drivers/profitbricks.py create mode 100644 libcloud/test/compute/fixtures/profitbricks/attach_volume.xml create mode 100644 libcloud/test/compute/fixtures/profitbricks/create_node.xml create mode 100644 libcloud/test/compute/fixtures/profitbricks/create_volume.xml create mode 100644 libcloud/test/compute/fixtures/profitbricks/destroy_node.xml create mode 
100644 libcloud/test/compute/fixtures/profitbricks/destroy_volume.xml create mode 100644 libcloud/test/compute/fixtures/profitbricks/detach_volume.xml create mode 100644 libcloud/test/compute/fixtures/profitbricks/ex_clear_datacenter.xml create mode 100644 libcloud/test/compute/fixtures/profitbricks/ex_create_datacenter.xml create mode 100644 libcloud/test/compute/fixtures/profitbricks/ex_create_network_interface.xml create mode 100644 libcloud/test/compute/fixtures/profitbricks/ex_describe_datacenter.xml create mode 100644 libcloud/test/compute/fixtures/profitbricks/ex_describe_network_interface.xml create mode 100644 libcloud/test/compute/fixtures/profitbricks/ex_describe_node.xml create mode 100644 libcloud/test/compute/fixtures/profitbricks/ex_describe_volume.xml create mode 100644 libcloud/test/compute/fixtures/profitbricks/ex_destroy_datacenter.xml create mode 100644 libcloud/test/compute/fixtures/profitbricks/ex_destroy_network_interface.xml create mode 100644 libcloud/test/compute/fixtures/profitbricks/ex_list_datacenters.xml create mode 100644 libcloud/test/compute/fixtures/profitbricks/ex_list_network_interfaces.xml create mode 100644 libcloud/test/compute/fixtures/profitbricks/ex_start_node.xml create mode 100644 libcloud/test/compute/fixtures/profitbricks/ex_stop_node.xml create mode 100644 libcloud/test/compute/fixtures/profitbricks/ex_update_datacenter.xml create mode 100644 libcloud/test/compute/fixtures/profitbricks/ex_update_network_interface.xml create mode 100644 libcloud/test/compute/fixtures/profitbricks/ex_update_node.xml create mode 100644 libcloud/test/compute/fixtures/profitbricks/ex_update_volume.xml create mode 100644 libcloud/test/compute/fixtures/profitbricks/list_images.xml create mode 100644 libcloud/test/compute/fixtures/profitbricks/list_nodes.xml create mode 100644 libcloud/test/compute/fixtures/profitbricks/list_volumes.xml create mode 100644 libcloud/test/compute/fixtures/profitbricks/reboot_node.xml create mode 100644 
libcloud/test/compute/test_profitbricks.py diff --git a/CHANGES.rst b/CHANGES.rst index 1495ea7b7a..e9ac0fd9f6 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -98,6 +98,10 @@ Compute Reported by Xavier Barbosa. [Tomaz Muraus, Xavier Barbosa] +- Add new driver for ProfitBricks provider. + (LIBCLOUD-589, GITHUB-352) + [Matt Baldwin] + Storage ~~~~~~~ diff --git a/libcloud/compute/drivers/profitbricks.py b/libcloud/compute/drivers/profitbricks.py new file mode 100644 index 0000000000..0d4a5ea101 --- /dev/null +++ b/libcloud/compute/drivers/profitbricks.py @@ -0,0 +1,1485 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""ProfitBricks Compute driver +""" +import base64 + +import copy +import time + +try: + from lxml import etree as ET +except ImportError: + from xml.etree import ElementTree as ET + +from libcloud.utils.networking import is_private_subnet +from libcloud.utils.py3 import b +from libcloud.compute.providers import Provider +from libcloud.common.base import ConnectionUserAndKey, XmlResponse +from libcloud.compute.base import Node, NodeDriver, NodeLocation, NodeSize +from libcloud.compute.base import NodeImage, StorageVolume +from libcloud.compute.base import UuidMixin +from libcloud.compute.types import NodeState +from libcloud.common.types import LibcloudError, MalformedResponseError + +__all__ = [ + 'API_VERSION', + 'API_HOST', + 'ProfitBricksNodeDriver', + 'Datacenter', + 'ProfitBricksNetworkInterface', + 'ProfitBricksAvailabilityZone' +] + +API_HOST = 'api.profitbricks.com' +API_VERSION = '/1.3/' + + +class ProfitBricksResponse(XmlResponse): + """ + ProfitBricks response parsing. + """ + def parse_error(self): + try: + body = ET.XML(self.body) + except: + raise MalformedResponseError('Failed to parse XML', + body=self.body, + driver=ProfitBricksNodeDriver) + + for e in body.findall('.//detail'): + if ET.iselement(e[0].find('httpCode')): + http_code = e[0].find('httpCode').text + else: + http_code = None + if ET.iselement(e[0].find('faultCode')): + fault_code = e[0].find('faultCode').text + else: + fault_code = None + if ET.iselement(e[0].find('message')): + message = e[0].find('message').text + else: + message = None + + return LibcloudError('HTTP Code: %s, Fault Code: %s, Message: %s' % + (http_code, fault_code, message), driver=self) + + +class ProfitBricksConnection(ConnectionUserAndKey): + """ + Represents a single connection to the ProfitBricks endpoint. 
+ """ + host = API_HOST + api_prefix = API_VERSION + responseCls = ProfitBricksResponse + + def add_default_headers(self, headers): + headers['Content-Type'] = 'text/xml' + headers['Authorization'] = 'Basic %s' % (base64.b64encode( + b('%s:%s' % (self.user_id, self.key))).decode('utf-8')) + + return headers + + def encode_data(self, data): + soap_env = ET.Element('soapenv:Envelope', { + 'xmlns:soapenv': 'http://schemas.xmlsoap.org/soap/envelope/', + 'xmlns:ws': 'http://ws.api.profitbricks.com/' + }) + ET.SubElement(soap_env, 'soapenv:Header') + soap_body = ET.SubElement(soap_env, 'soapenv:Body') + soap_req_body = ET.SubElement(soap_body, 'ws:%s' % (data['action'])) + + if 'request' in data.keys(): + soap_req_body = ET.SubElement(soap_req_body, 'request') + for key, value in data.items(): + if key not in ['action', 'request']: + child = ET.SubElement(soap_req_body, key) + child.text = value + else: + for key, value in data.items(): + if key != 'action': + child = ET.SubElement(soap_req_body, key) + child.text = value + + soap_post = ET.tostring(soap_env) + + return soap_post + + def request(self, action, params=None, data=None, headers=None, + method='POST', raw=False): + action = self.api_prefix + action + + return super(ProfitBricksConnection, self).request(action=action, + params=params, + data=data, + headers=headers, + method=method, + raw=raw) + + +class Datacenter(UuidMixin): + """ + Class which stores information about ProfitBricks datacenter + instances. + + :param id: The datacenter ID. + :type id: ``str`` + + :param name: The datacenter name. + :type name: ``str`` + + :param version: Datacenter version. + :type version: ``str`` + + + Note: This class is ProfitBricks specific. 
+ """ + def __init__(self, id, name, version, driver, extra=None): + self.id = str(id) + self.name = name + self.version = version + self.driver = driver + self.extra = extra or {} + UuidMixin.__init__(self) + + def __repr__(self): + return (( + ' ...>') + % (self.id, self.name, self.version, + self.driver.name)) + + +class ProfitBricksNetworkInterface(object): + """ + Class which stores information about ProfitBricks network + interfaces. + + :param id: The network interface ID. + :type id: ``str`` + + :param name: The network interface name. + :type name: ``str`` + + :param state: The network interface name. + :type state: ``int`` + + Note: This class is ProfitBricks specific. + """ + def __init__(self, id, name, state, extra=None): + self.id = id + self.name = name + self.state = state + self.extra = extra or {} + + def __repr__(self): + return (('') + % (self.id, self.name)) + + +class ProfitBricksAvailabilityZone(object): + """ + Extension class which stores information about a ProfitBricks + availability zone. + + Note: This class is ProfitBricks specific. + """ + + def __init__(self, name): + self.name = name + + def __repr__(self): + return (('') + % (self.name)) + + +class ProfitBricksNodeDriver(NodeDriver): + """ + Base ProfitBricks node driver. 
+ """ + connectionCls = ProfitBricksConnection + name = 'ProfitBricks' + website = 'http://www.profitbricks.com' + type = Provider.PROFIT_BRICKS + + PROVISIONING_STATE = { + 'INACTIVE': NodeState.PENDING, + 'INPROCESS': NodeState.PENDING, + 'AVAILABLE': NodeState.RUNNING, + 'DELETED': NodeState.TERMINATED, + } + + NODE_STATE_MAP = { + 'NOSTATE': NodeState.UNKNOWN, + 'RUNNING': NodeState.RUNNING, + 'BLOCKED': NodeState.STOPPED, + 'PAUSE': NodeState.STOPPED, + 'SHUTDOWN': NodeState.PENDING, + 'SHUTOFF': NodeState.STOPPED, + 'CRASHED': NodeState.STOPPED, + } + + REGIONS = { + '1': {'region': 'us/las', 'country': 'USA'}, + '2': {'region': 'de/fra', 'country': 'DEU'}, + '3': {'region': 'de/fkb', 'country': 'DEU'}, + } + + AVAILABILITY_ZONE = { + '1': {'name': 'AUTO'}, + '2': {'name': 'ZONE_1'}, + '3': {'name': 'ZONE_2'}, + } + + """ + ProfitBricks is unique in that they allow the user to define all aspects + of the instance size, i.e. disk size, core size, and memory size. + + These are instance types that match up with what other providers support. + + You can configure disk size, core size, and memory size using the ex_ + parameters on the create_node method. 
+ """ + + PROFIT_BRICKS_GENERIC_SIZES = { + '1': { + 'id': '1', + 'name': 'Micro', + 'ram': 1024, + 'disk': 50, + 'cores': 1 + }, + '2': { + 'id': '2', + 'name': 'Small Instance', + 'ram': 2048, + 'disk': 50, + 'cores': 1 + }, + '3': { + 'id': '3', + 'name': 'Medium Instance', + 'ram': 4096, + 'disk': 50, + 'cores': 2 + }, + '4': { + 'id': '4', + 'name': 'Large Instance', + 'ram': 7168, + 'disk': 50, + 'cores': 4 + }, + '5': { + 'id': '5', + 'name': 'ExtraLarge Instance', + 'ram': 14336, + 'disk': 50, + 'cores': 8 + }, + '6': { + 'id': '6', + 'name': 'Memory Intensive Instance Medium', + 'ram': 28672, + 'disk': 50, + 'cores': 4 + }, + '7': { + 'id': '7', + 'name': 'Memory Intensive Instance Large', + 'ram': 57344, + 'disk': 50, + 'cores': 8 + } + } + + """ + Core Functions + """ + + def list_sizes(self): + """ + Lists all sizes + + :rtype: ``list`` of :class:`NodeSize` + """ + sizes = [] + + for key, values in self.PROFIT_BRICKS_GENERIC_SIZES.items(): + node_size = self._to_node_size(values) + sizes.append(node_size) + + return sizes + + def list_images(self): + """ + List all images. + + :rtype: ``list`` of :class:`NodeImage` + """ + + action = 'getAllImages' + body = {'action': action} + + return self._to_images(self.connection.request(action=action, + data=body, method='POST').object) + + def list_locations(self): + """ + List all locations. + """ + locations = [] + + for key, values in self.REGIONS.items(): + location = self._to_location(values) + locations.append(location) + + return locations + + def list_nodes(self): + """ + List all nodes. + + :rtype: ``list`` of :class:`Node` + """ + action = 'getAllServers' + body = {'action': action} + + return self._to_nodes(self.connection.request(action=action, + data=body, method='POST').object) + + def reboot_node(self, node): + """ + Reboots the node. 
+ + :rtype: ``bool`` + """ + action = 'resetServer' + body = {'action': action, + 'serverId': node.id + } + + self.connection.request(action=action, + data=body, method='POST').object + + return True + + def create_node(self, name, image, size=None, volume=None, + ex_datacenter=None, ex_internet_access=True, + ex_availability_zone=None, ex_ram=None, + ex_cores=None, ex_disk=None, **kwargs): + """ + Creates a node. + + image is optional as long as you pass ram, cores, and disk + to the method. ProfitBricks allows you to adjust compute + resources at a much more granular level. + + :param volume: If the volume already exists then pass this in. + :type volume: :class:`StorageVolume` + + :param ex_datacenter: If you've already created the DC then pass + it in. + :type ex_datacenter: :class:`Datacenter` + + :param ex_internet_access: Configure public Internet access. + :type ex_internet_access: : ``bool`` + + :param ex_availability_zone: The availability zone. + :type ex_availability_zone: class: `ProfitBricksAvailabilityZone` + + :param ex_ram: The amount of ram required. + :type ex_ram: : ``int`` + + :param ex_cores: The number of cores required. + :type ex_cores: : ``int`` + + :param ex_disk: The amount of disk required. + :type ex_disk: : ``int`` + + :return: Instance of class ``Node`` + :rtype: :class:`Node` + """ + if not ex_datacenter: + ''' + We generate a name from the server name passed into the function. + ''' + + 'Creating a Datacenter for the node since one was not provided.' + new_datacenter = self._create_new_datacenter_for_node(name=name) + datacenter_id = new_datacenter.id + + 'Waiting for the Datacenter create operation to finish.' 
+ self._wait_for_datacenter_state(datacenter=new_datacenter) + else: + datacenter_id = ex_datacenter.id + new_datacenter = None + + if not size: + if not ex_ram: + raise ValueError('You need to either pass a ' + 'NodeSize or specify ex_ram as ' + 'an extra parameter.') + if not ex_cores: + raise ValueError('You need to either pass a ' + 'NodeSize or specify ex_cores as ' + 'an extra parameter.') + + if not volume: + if not size: + if not ex_disk: + raise ValueError('You need to either pass a ' + 'StorageVolume, a NodeSize, or specify ' + 'ex_disk as an extra parameter.') + + ''' + You can override the suggested sizes by passing in unique + values for ram, cores, and disk allowing you to size it + for your specific use. + ''' + + if not ex_disk: + ex_disk = size.disk + + if not ex_ram: + ex_ram = size.ram + + if not ex_cores: + ex_cores = size.extra['cores'] + + ''' + A pasword is automatically generated if it is + not provided. This is then sent via email to + the admin contact on record. + ''' + + if 'auth' in kwargs: + auth = self._get_and_check_auth(kwargs["auth"]) + password = auth.password + else: + password = None + + ''' + Create a StorageVolume that can be attached to the + server when it is created. + ''' + if not volume: + volume = self._create_node_volume(ex_disk=ex_disk, + image=image, + password=password, + name=name, + ex_datacenter=ex_datacenter, + new_datacenter=new_datacenter) + + storage_id = volume.id + + 'Waiting on the storage volume to be created before provisioning ' + 'the instance.' 
+ self._wait_for_storage_volume_state(volume) + else: + if ex_datacenter: + datacenter_id = ex_datacenter.id + else: + datacenter_id = volume.extra['datacenter_id'] + + storage_id = volume.id + + action = 'createServer' + body = {'action': action, + 'request': 'true', + 'serverName': name, + 'cores': str(ex_cores), + 'ram': str(ex_ram), + 'bootFromStorageId': storage_id, + 'internetAccess': str(ex_internet_access).lower(), + 'dataCenterId': datacenter_id + } + + if ex_availability_zone: + body['availabilityZone'] = ex_availability_zone.name + + data = self.connection.request(action=action, + data=body, + method='POST').object + nodes = self._to_nodes(data) + return nodes[0] + + def destroy_node(self, node, ex_remove_attached_disks=False): + """ + Destroys a node. + + :param node: The node you wish to destroy. + :type volume: :class:`Node` + + :param ex_remove_attached_disks: True to destory all attached volumes. + :type ex_remove_attached_disks: : ``bool`` + + :rtype: : ``bool`` + """ + action = 'deleteServer' + body = {'action': action, + 'serverId': node.id + } + + self.connection.request(action=action, + data=body, method='POST').object + + return True + + """ + Volume Functions + """ + + def list_volumes(self): + """ + Lists all voumes. + """ + action = 'getAllStorages' + body = {'action': action} + + return self._to_volumes(self.connection.request(action=action, + data=body, + method='POST').object) + + def attach_volume(self, node, volume, device=None, ex_bus_type=None): + """ + Attaches a volume. + + :param volume: The volume you're attaching. + :type volume: :class:`StorageVolume` + + :param node: The node to which you're attaching the volume. + :type node: :class:`Node` + + :param device: The device number order. + :type device: : ``int`` + + :param ex_bus_type: Bus type. Either IDE or VIRTIO (default). 
+ :type ex_bus_type: ``str`` + + :return: Instance of class ``StorageVolume`` + :rtype: :class:`StorageVolume` + """ + action = 'connectStorageToServer' + body = {'action': action, + 'request': 'true', + 'storageId': volume.id, + 'serverId': node.id, + 'busType': ex_bus_type, + 'deviceNumber': str(device) + } + + self.connection.request(action=action, + data=body, method='POST').object + return volume + + def create_volume(self, size, name=None, + ex_datacenter=None, ex_image=None, ex_password=None): + """ + Creates a volume. + + :param ex_datacenter: The datacenter you're placing + the storage in. (req) + :type ex_datacenter: :class:`Datacenter` + + :param ex_image: The OS image for the volume. + :type ex_image: :class:`NodeImage` + + :param ex_password: Optional password for root. + :type ex_password: : ``str`` + + :return: Instance of class ``StorageVolume`` + :rtype: :class:`StorageVolume` + """ + action = 'createStorage' + body = {'action': action, + 'request': 'true', + 'size': str(size), + 'storageName': name, + 'mountImageId': ex_image.id + } + + if ex_datacenter: + body['dataCenterId'] = ex_datacenter.id + + if ex_password: + body['profitBricksImagePassword'] = ex_password + + data = self.connection.request(action=action, + data=body, + method='POST').object + volumes = self._to_volumes(data) + return volumes[0] + + def detach_volume(self, volume): + """ + Detaches a volume. + + :param volume: The volume you're detaching. + :type volume: :class:`StorageVolume` + + :rtype: :``bool`` + """ + node_id = volume.extra['server_id'] + + action = 'disconnectStorageFromServer' + body = {'action': action, + 'storageId': volume.id, + 'serverId': node_id + } + + self.connection.request(action=action, + data=body, method='POST').object + + return True + + def destroy_volume(self, volume): + """ + Destroys a volume. + + :param volume: The volume you're attaching. 
+ :type volume: :class:`StorageVolume` + + :rtype: : ``bool`` + """ + action = 'deleteStorage' + body = {'action': action, + 'storageId': volume.id} + + self.connection.request(action=action, + data=body, method='POST').object + + return True + + def ex_update_volume(self, volume, storage_name=None, size=None): + """ + Updates a volume. + + :param volume: The volume you're attaching.. + :type volume: :class:`StorageVolume` + + :param storage_name: The name of the volume. + :type storage_name: : ``str`` + + :param size: The desired size. + :type size: ``int`` + + :rtype: : ``bool`` + """ + action = 'updateStorage' + body = {'action': action, + 'request': 'true', + 'storageId': volume.id + } + + if storage_name: + body['storageName'] = storage_name + if size: + body['size'] = str(size) + + self.connection.request(action=action, + data=body, method='POST').object + + return True + + def ex_describe_volume(self, volume_id): + """ + Describes a volume. + + :param volume_id: The ID of the volume you're describing. + :type volume_id: :class:`StorageVolume` + + :return: Instance of class ``StorageVolume`` + :rtype: :class:`StorageVolume` + """ + action = 'getStorage' + body = {'action': action, + 'storageId': volume_id + } + + data = self.connection.request(action=action, + data=body, + method='POST').object + volumes = self._to_volumes(data) + return volumes[0] + + """ + Extension Functions + """ + + ''' Server Extension Functions + ''' + def ex_stop_node(self, node): + """ + Stops a node. + + This also dealloctes the public IP space. + + :param node: The node you wish to halt. + :type node: :class:`Node` + + :rtype: : ``bool`` + """ + action = 'stopServer' + body = {'action': action, + 'serverId': node.id + } + + self.connection.request(action=action, + data=body, method='POST').object + + return True + + def ex_start_node(self, node): + """ + Starts a volume. + + :param node: The node you wish to start. 
+ :type node: :class:`Node` + + :rtype: : ``bool`` + """ + action = 'startServer' + body = {'action': action, + 'serverId': node.id + } + + self.connection.request(action=action, + data=body, method='POST').object + + return True + + def ex_list_availability_zones(self): + """ + Returns a list of availability zones. + """ + + availability_zones = [] + + for key, values in self.AVAILABILITY_ZONE.items(): + name = copy.deepcopy(values)["name"] + + availability_zone = ProfitBricksAvailabilityZone( + name=name + ) + availability_zones.append(availability_zone) + + return availability_zones + + def ex_describe_node(self, node): + """ + Describes a node. + + :param node: The node you wish to describe. + :type node: :class:`Node` + + :return: Instance of class ``Node`` + :rtype: :class:`Node` + """ + action = 'getServer' + body = {'action': action, + 'serverId': node.id + } + + data = self.connection.request(action=action, + data=body, + method='POST').object + nodes = self._to_nodes(data) + return nodes[0] + + def ex_update_node(self, node, name=None, cores=None, + ram=None, availability_zone=None): + """ + Updates a node. + + :param cores: The number of CPUs the node should have. + :type device: : ``int`` + + :param ram: The amount of ram the machine should have. + :type ram: : ``int`` + + :param ex_availability_zone: Update the availability zone. + :type ex_availability_zone: :class:`ProfitBricksAvailabilityZone` + + :rtype: : ``bool`` + """ + action = 'updateServer' + + body = {'action': action, + 'request': 'true', + 'serverId': node.id + } + + if name: + body['serverName'] = name + + if cores: + body['cores'] = str(cores) + + if ram: + body['ram'] = str(ram) + + if availability_zone: + body['availabilityZone'] = availability_zone.name + + self.connection.request(action=action, + data=body, method='POST').object + + return True + + ''' + Datacenter Extension Functions + ''' + + def ex_create_datacenter(self, name, location): + """ + Creates a datacenter. 
+ + ProfitBricks has a concept of datacenters. + These represent buckets into which you + can place various compute resources. + + :param name: The DC name. + :type name: : ``str`` + + :param location: The DC region. + :type location: : ``str`` + + :return: Instance of class ``Datacenter`` + :rtype: :class:`Datacenter` + """ + action = 'createDataCenter' + + body = {'action': action, + 'request': 'true', + 'dataCenterName': name, + 'location': location.lower() + } + data = self.connection.request(action=action, + data=body, + method='POST').object + datacenters = self._to_datacenters(data) + return datacenters[0] + + def ex_destroy_datacenter(self, datacenter): + """ + Destroys a datacenter. + + :param datacenter: The DC you're destroying. + :type datacenter: :class:`Datacenter` + + :rtype: : ``bool`` + """ + action = 'deleteDataCenter' + body = {'action': action, + 'dataCenterId': datacenter.id + } + + self.connection.request(action=action, + data=body, method='POST').object + + return True + + def ex_describe_datacenter(self, datacenter_id): + """ + Describes a datacenter. + + :param datacenter_id: The DC you are describing. + :type datacenter_id: ``str`` + + :return: Instance of class ``Datacenter`` + :rtype: :class:`Datacenter` + """ + + action = 'getDataCenter' + body = {'action': action, + 'dataCenterId': datacenter_id + } + + data = self.connection.request(action=action, + data=body, + method='POST').object + datacenters = self._to_datacenters(data) + return datacenters[0] + + def ex_list_datacenters(self): + """ + Lists all datacenters. + + :return: ``list`` of class ``Datacenter`` + :rtype: :class:`Datacenter` + """ + action = 'getAllDataCenters' + body = {'action': action} + + return self._to_datacenters(self.connection.request( + action=action, + data=body, + method='POST').object) + + def ex_rename_datacenter(self, datacenter, name): + """ + Update a datacenter. + + :param datacenter: The DC you are renaming. 
+ :type datacenter: :class:`Datacenter` + + :param name: The DC name. + :type name: : ``str`` + + :rtype: : ``bool`` + """ + action = 'updateDataCenter' + body = {'action': action, + 'request': 'true', + 'dataCenterId': datacenter.id, + 'dataCenterName': name + } + + self.connection.request(action=action, + data=body, + method='POST').object + + return True + + def ex_clear_datacenter(self, datacenter): + """ + Clear a datacenter. + + This removes all objects in a DC. + + :param datacenter: The DC you're clearing. + :type datacenter: :class:`Datacenter` + + :rtype: : ``bool`` + """ + action = 'clearDataCenter' + body = {'action': action, + 'dataCenterId': datacenter.id + } + + self.connection.request(action=action, + data=body, method='POST').object + + return True + + ''' + Network Interface Extension Functions + ''' + + def ex_list_network_interfaces(self): + """ + Lists all network interfaces. + + :return: ``list`` of class ``ProfitBricksNetworkInterface`` + :rtype: :class:`ProfitBricksNetworkInterface` + """ + action = 'getAllNic' + body = {'action': action} + + return self._to_interfaces( + self.connection.request(action=action, + data=body, + method='POST').object) + + def ex_describe_network_interface(self, network_interface): + """ + Describes a network interface. + + :param network_interface: The NIC you wish to describe. + :type network_interface: :class:`ProfitBricksNetworkInterface` + + :return: Instance of class ``ProfitBricksNetworkInterface`` + :rtype: :class:`ProfitBricksNetworkInterface` + """ + action = 'getNic' + body = {'action': action, + 'nicId': network_interface.id + } + + return self._to_interface( + self.connection.request( + action=action, + data=body, + method='POST').object.findall('.//return')[0]) + + def ex_create_network_interface(self, node, + lan_id=None, ip=None, nic_name=None, + dhcp_active=True): + """ + Creates a network interface. + + :param lan_id: The ID for the LAN. 
+ :type lan_id: : ``int`` + + :param ip: The IP address for the NIC. + :type ip: ``str`` + + :param nic_name: The name of the NIC, e.g. PUBLIC. + :type nic_name: ``str`` + + :param dhcp_active: Set to false to disable. + :type dhcp_active: ``bool`` + + :return: Instance of class ``ProfitBricksNetworkInterface`` + :rtype: :class:`ProfitBricksNetworkInterface` + """ + action = 'createNic' + body = {'action': action, + 'request': 'true', + 'serverId': node.id, + 'dhcpActive': str(dhcp_active) + } + + if lan_id: + body['lanId'] = str(lan_id) + else: + body['lanId'] = str(1) + + if ip: + body['ip'] = ip + + if nic_name: + body['nicName'] = nic_name + + data = self.connection.request(action=action, + data=body, + method='POST').object + interfaces = self._to_interfaces(data) + return interfaces[0] + + def ex_update_network_interface(self, network_interface, name=None, + lan_id=None, ip=None, + dhcp_active=None): + """ + Updates a network interface. + + :param lan_id: The ID for the LAN. + :type lan_id: : ``int`` + + :param ip: The IP address for the NIC. + :type ip: ``str`` + + :param name: The name of the NIC, e.g. PUBLIC. + :type name: ``str`` + + :param dhcp_active: Set to false to disable. + :type dhcp_active: ``bool`` + + :rtype: : ``bool`` + """ + action = 'updateNic' + body = {'action': action, + 'request': 'true', + 'nicId': network_interface.id + } + + if name: + body['nicName'] = name + + if lan_id: + body['lanId'] = str(lan_id) + + if ip: + body['ip'] = ip + + if dhcp_active is not None: + body['dhcpActive'] = str(dhcp_active).lower() + + self.connection.request(action=action, + data=body, method='POST').object + + return True + + def ex_destroy_network_interface(self, network_interface): + """ + Destroy a network interface. + + :param network_interface: The NIC you wish to describe. 
+ :type network_interface: :class:`ProfitBricksNetworkInterface` + + :rtype: : ``bool`` + """ + + action = 'deleteNic' + body = {'action': action, + 'nicId': network_interface.id} + + self.connection.request(action=action, + data=body, method='POST').object + + return True + + def ex_set_inet_access(self, datacenter, + network_interface, internet_access=True): + + action = 'setInternetAccess' + + body = {'action': action, + 'dataCenterId': datacenter.id, + 'lanId': network_interface.extra['lan_id'], + 'internetAccess': str(internet_access).lower() + } + + self.connection.request(action=action, + data=body, method='POST').object + + return True + + """ + Private Functions + """ + + def _to_datacenters(self, object): + return [self._to_datacenter( + datacenter) for datacenter in object.findall('.//return')] + + def _to_datacenter(self, datacenter): + datacenter_id = datacenter.find('dataCenterId').text + if ET.iselement(datacenter.find('dataCenterName')): + datacenter_name = datacenter.find('dataCenterName').text + else: + datacenter_name = None + version = datacenter.find('dataCenterVersion').text + if ET.iselement(datacenter.find('provisioningState')): + provisioning_state = datacenter.find('provisioningState').text + else: + provisioning_state = None + if ET.iselement(datacenter.find('location')): + location = datacenter.find('location').text + else: + location = None + + provisioning_state = self.PROVISIONING_STATE.get(provisioning_state, + NodeState.UNKNOWN) + + return Datacenter(id=datacenter_id, + name=datacenter_name, + version=version, + driver=self.connection.driver, + extra={'provisioning_state': provisioning_state, + 'location': location}) + + def _to_images(self, object): + return [self._to_image(image) for image in object.findall('.//return')] + + def _to_image(self, image): + image_id = image.find('imageId').text + image_name = image.find('imageName').text + image_size = image.find('imageSize').text + image_type = image.find('imageType').text + os_type 
= image.find('osType').text + public = image.find('public').text + writeable = image.find('writeable').text + + if ET.iselement(image.find('cpuHotpluggable')): + cpu_hotpluggable = image.find('cpuHotpluggable').text + else: + cpu_hotpluggable = None + + if ET.iselement(image.find('memoryHotpluggable')): + memory_hotpluggable = image.find('memoryHotpluggable').text + else: + memory_hotpluggable = None + + if ET.iselement(image.find('location')): + if image.find('region'): + image_region = image.find('region').text + else: + image_region = None + else: + image_region = None + + return NodeImage(id=image_id, + name=image_name, + driver=self.connection.driver, + extra={'image_size': image_size, + 'image_type': image_type, + 'cpu_hotpluggable': cpu_hotpluggable, + 'memory_hotpluggable': memory_hotpluggable, + 'os_type': os_type, + 'public': public, + 'location': image_region, + 'writeable': writeable}) + + def _to_nodes(self, object): + return [self._to_node(n) for n in object.findall('.//return')] + + def _to_node(self, node): + """ + Convert the request into a node Node + """ + ATTRIBUTE_NAME_MAP = { + 'dataCenterId': 'datacenter_id', + 'dataCenterVersion': 'datacenter_version', + 'serverId': 'node_id', + 'serverName': 'node_name', + 'cores': 'cores', + 'ram': 'ram', + 'internetAccess': 'internet_access', + 'provisioningState': 'provisioning_state', + 'virtualMachineState': 'virtual_machine_state', + 'creationTime': 'creation_time', + 'lastModificationTime': 'last_modification_time', + 'osType': 'os_type', + 'availabilityZone': 'availability_zone', + 'cpuHotPlug': 'cpu_hotpluggable', + 'ramHotPlug': 'memory_hotpluggable', + 'nicHotPlug': 'nic_hotpluggable', + 'discVirtioHotPlug': 'disc_virtio_hotplug', + 'discVirtioHotUnPlug': 'disc_virtio_hotunplug' + } + + extra = {} + for attribute_name, extra_name in ATTRIBUTE_NAME_MAP.items(): + elem = node.find(attribute_name) + + if ET.iselement(elem): + value = elem.text + else: + value = None + + extra[extra_name] = value + + 
public_ips = [] + private_ips = [] + + if ET.iselement(node.find('nics')): + for nic in node.findall('.//nics'): + n_elements = list(nic.findall('.//ips')) + if len(n_elements) > 0: + ip = n_elements[0].text + if is_private_subnet(ip): + private_ips.append(ip) + else: + public_ips.append(ip) + + extra['provisioning_state'] = self.PROVISIONING_STATE.get( + extra['provisioning_state'], NodeState.UNKNOWN) + + node_id = extra['node_id'] + node_name = extra['node_name'] + state = self.NODE_STATE_MAP.get(extra['virtual_machine_state'], + NodeState.UNKNOWN) + + return Node( + id=node_id, + name=node_name, + state=state, + public_ips=public_ips, + private_ips=private_ips, + driver=self.connection.driver, + extra=extra) + + def _to_volumes(self, object): + return [self._to_volume( + volume) for volume in object.findall('.//return')] + + def _to_volume(self, volume, node=None): + ATTRIBUTE_NAME_MAP = { + 'dataCenterId': 'datacenter_id', + 'storageId': 'storage_id', + 'storageName': 'storage_name', + 'serverIds': 'server_id', + 'creationTime': 'creation_time', + 'lastModificationTime': 'last_modification_time', + 'provisioningState': 'provisioning_state', + 'size': 'size', + } + + extra = {} + for attribute_name, extra_name in ATTRIBUTE_NAME_MAP.items(): + elem = volume.find(attribute_name) + + if ET.iselement(elem): + value = elem.text + else: + value = None + + extra[extra_name] = value + + if ET.iselement(volume.find('mountImage')): + image_id = volume.find('mountImage')[0].text + image_name = volume.find('mountImage')[1].text + else: + image_id = None + image_name = None + + extra['image_id'] = image_id + extra['image_name'] = image_name + extra['size'] = int(extra['size']) if extra['size'] else 0 + extra['provisioning_state'] = \ + self.PROVISIONING_STATE.get(extra['provisioning_state'], + NodeState.UNKNOWN) + + storage_id = extra['storage_id'] + storage_name = extra['storage_name'] + size = extra['size'] + + return StorageVolume( + id=storage_id, + name=storage_name, + 
size=size, + driver=self.connection.driver, + extra=extra) + + def _to_interfaces(self, object): + return [self._to_interface( + interface) for interface in object.findall('.//return')] + + def _to_interface(self, interface): + ATTRIBUTE_NAME_MAP = { + 'nicId': 'nic_id', + 'nicName': 'nic_name', + 'serverId': 'server_id', + 'lanId': 'lan_id', + 'internetAccess': 'internet_access', + 'macAddress': 'mac_address', + 'dhcpActive': 'dhcp_active', + 'gatewayIp': 'gateway_ip', + 'provisioningState': 'provisioning_state', + 'dataCenterId': 'datacenter_id', + 'dataCenterVersion': 'datacenter_version' + } + + extra = {} + for attribute_name, extra_name in ATTRIBUTE_NAME_MAP.items(): + elem = interface.find(attribute_name) + + if ET.iselement(elem): + value = elem.text + else: + value = None + + extra[extra_name] = value + + ips = [] + + if ET.iselement(interface.find('ips')): + for ip in interface.findall('.//ips'): + ips.append(ip.text) + + extra['ips'] = ips + + nic_id = extra['nic_id'] + nic_name = extra['nic_name'] + state = self.PROVISIONING_STATE.get(extra['provisioning_state'], + NodeState.UNKNOWN) + + return ProfitBricksNetworkInterface( + id=nic_id, + name=nic_name, + state=state, + extra=extra) + + def _to_location(self, data): + + return NodeLocation(id=data["region"], + name=data["region"], + country=data["country"], + driver=self.connection.driver) + + def _to_node_size(self, data): + """ + Convert the PROFIT_BRICKS_GENERIC_SIZES into NodeSize + """ + return NodeSize(id=data["id"], + name=data["name"], + ram=data["ram"], + disk=data["disk"], + bandwidth=None, + price=None, + driver=self.connection.driver, + extra={ + 'cores': data["cores"]}) + + def _wait_for_datacenter_state(self, datacenter, state=NodeState.RUNNING, + timeout=300, interval=5): + """ + Private function that waits the datacenter to transition into the + specified state. + + :return: Datacenter object on success. 
+ :rtype: :class:`.Datacenter` + """ + wait_time = 0 + datacenter = self.ex_describe_datacenter(datacenter_id=datacenter.id) + + while (datacenter.extra['provisioning_state'] != state): + datacenter = \ + self.ex_describe_datacenter(datacenter_id=datacenter.id) + if datacenter.extra['provisioning_state'] == state: + break + + if wait_time >= timeout: + raise Exception('Datacenter didn\'t transition to %s state ' + 'in %s seconds' % (state, timeout)) + + wait_time += interval + time.sleep(interval) + + return datacenter + + def _create_new_datacenter_for_node(self, name): + """ + Creates a Datacenter for a node. + """ + dc_name = name + '-DC' + + return self.ex_create_datacenter(name=dc_name, location='us/las') + + def _wait_for_storage_volume_state(self, volume, state=NodeState.RUNNING, + timeout=300, interval=5): + """ + Wait for volume to transition into the specified state. + + :return: Volume object on success. + :rtype: :class:`Volume` + """ + wait_time = 0 + volume = self.ex_describe_volume(volume_id=volume.id) + + while (volume.extra['provisioning_state'] != state): + volume = self.ex_describe_volume(volume_id=volume.id) + if volume.extra['provisioning_state'] == state: + break + + if wait_time >= timeout: + raise Exception('Volume didn\'t transition to %s state ' + 'in %s seconds' % (state, timeout)) + + wait_time += interval + time.sleep(interval) + + return volume + + def _create_node_volume(self, ex_disk, image, password, + name, ex_datacenter=None, new_datacenter=None): + + volume_name = name + '-volume' + + if ex_datacenter: + volume = self.create_volume(size=ex_disk, + ex_datacenter=ex_datacenter, + ex_image=image, + ex_password=password, + name=volume_name) + else: + volume = self.create_volume(size=ex_disk, + ex_datacenter=new_datacenter, + ex_image=image, + ex_password=password, + name=volume_name) + + return volume diff --git a/libcloud/compute/providers.py b/libcloud/compute/providers.py index 5c19f61e50..b449e78353 100644 --- 
a/libcloud/compute/providers.py +++ b/libcloud/compute/providers.py @@ -149,6 +149,8 @@ ('libcloud.compute.drivers.ec2', 'OutscaleINCNodeDriver'), Provider.VSPHERE: ('libcloud.compute.drivers.vsphere', 'VSphereNodeDriver'), + Provider.PROFIT_BRICKS: + ('libcloud.compute.drivers.profitbricks', 'ProfitBricksNodeDriver'), # Deprecated Provider.CLOUDSIGMA_US: diff --git a/libcloud/compute/types.py b/libcloud/compute/types.py index c7c7616c2a..f07bc4873a 100644 --- a/libcloud/compute/types.py +++ b/libcloud/compute/types.py @@ -76,6 +76,7 @@ class Provider(object): :cvar IKOULA: Ikoula driver. :cvar OUTSCALE_SAS: Outscale SAS driver. :cvar OUTSCALE_INC: Outscale INC driver. + :cvar PROFIT_BRICKS: ProfitBricks driver. """ DUMMY = 'dummy' EC2 = 'ec2_us_east' @@ -122,6 +123,7 @@ class Provider(object): OUTSCALE_SAS = 'outscale_sas' OUTSCALE_INC = 'outscale_inc' VSPHERE = 'vsphere' + PROFIT_BRICKS = 'profitbricks' # OpenStack based providers HPCLOUD = 'hpcloud' diff --git a/libcloud/test/compute/fixtures/profitbricks/attach_volume.xml b/libcloud/test/compute/fixtures/profitbricks/attach_volume.xml new file mode 100644 index 0000000000..8b2b1f5fd0 --- /dev/null +++ b/libcloud/test/compute/fixtures/profitbricks/attach_volume.xml @@ -0,0 +1,12 @@ + + + + + + 3613039 + c2df1871-6aac-458e-ad1a-ef3f530cb7aa + 4 + + + + \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/profitbricks/create_node.xml b/libcloud/test/compute/fixtures/profitbricks/create_node.xml new file mode 100644 index 0000000000..ad515ba525 --- /dev/null +++ b/libcloud/test/compute/fixtures/profitbricks/create_node.xml @@ -0,0 +1,13 @@ + + + + + + 3768523 + 3aefc31b-57e9-4af6-8348-af961ac00f74 + 3 + 7b18b85f-cc93-4c2d-abcc-5ce732d35750 + + + + \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/profitbricks/create_volume.xml b/libcloud/test/compute/fixtures/profitbricks/create_volume.xml new file mode 100644 index 0000000000..326879ad26 --- /dev/null +++ 
b/libcloud/test/compute/fixtures/profitbricks/create_volume.xml @@ -0,0 +1,13 @@ + + + + + + 3532463 + 06eac419-c2b3-4761-aeb9-10efdd2cf292 + 3 + f54aeea3-667a-4460-8cf0-80909509df0c + + + + \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/profitbricks/destroy_node.xml b/libcloud/test/compute/fixtures/profitbricks/destroy_node.xml new file mode 100644 index 0000000000..1dacfaf484 --- /dev/null +++ b/libcloud/test/compute/fixtures/profitbricks/destroy_node.xml @@ -0,0 +1,12 @@ + + + + + + 3498434 + 782247bf-f12d-4f08-8050-302c02c4b2e0 + 2 + + + + \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/profitbricks/destroy_volume.xml b/libcloud/test/compute/fixtures/profitbricks/destroy_volume.xml new file mode 100644 index 0000000000..0591e1cc2e --- /dev/null +++ b/libcloud/test/compute/fixtures/profitbricks/destroy_volume.xml @@ -0,0 +1,12 @@ + + + + + + 3616447 + c2df1871-6aac-458e-ad1a-ef3f530cb7aa + 13 + + + + \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/profitbricks/detach_volume.xml b/libcloud/test/compute/fixtures/profitbricks/detach_volume.xml new file mode 100644 index 0000000000..fafc327a2a --- /dev/null +++ b/libcloud/test/compute/fixtures/profitbricks/detach_volume.xml @@ -0,0 +1,12 @@ + + + + + + 3614242 + c2df1871-6aac-458e-ad1a-ef3f530cb7aa + 6 + + + + \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/profitbricks/ex_clear_datacenter.xml b/libcloud/test/compute/fixtures/profitbricks/ex_clear_datacenter.xml new file mode 100644 index 0000000000..5259d45b13 --- /dev/null +++ b/libcloud/test/compute/fixtures/profitbricks/ex_clear_datacenter.xml @@ -0,0 +1,12 @@ + + + + + + 3339052 + 8669a69f-2274-4520-b51e-dbdf3986a476 + 2 + + + + \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/profitbricks/ex_create_datacenter.xml b/libcloud/test/compute/fixtures/profitbricks/ex_create_datacenter.xml new file mode 100644 index 0000000000..f4238d839d --- 
/dev/null +++ b/libcloud/test/compute/fixtures/profitbricks/ex_create_datacenter.xml @@ -0,0 +1,13 @@ + + + + + + 3711001 + 0c793dd1-d4cd-4141-86f3-8b1a24b2d604 + 1 + us/las + + + + \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/profitbricks/ex_create_network_interface.xml b/libcloud/test/compute/fixtures/profitbricks/ex_create_network_interface.xml new file mode 100644 index 0000000000..830d41d948 --- /dev/null +++ b/libcloud/test/compute/fixtures/profitbricks/ex_create_network_interface.xml @@ -0,0 +1,13 @@ + + + + + + 3633314 + c2df1871-6aac-458e-ad1a-ef3f530cb7aa + 27 + 951e1b49-5f1b-4b2b-b7d9-263dba6e2ddd + + + + \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/profitbricks/ex_describe_datacenter.xml b/libcloud/test/compute/fixtures/profitbricks/ex_describe_datacenter.xml new file mode 100644 index 0000000000..cb4a1a086f --- /dev/null +++ b/libcloud/test/compute/fixtures/profitbricks/ex_describe_datacenter.xml @@ -0,0 +1,15 @@ + + + + + + 3719240 + a3e6f83a-8982-4d6a-aebc-60baf5755ede + 1 + StackPointCloud + AVAILABLE + us/las + + + + \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/profitbricks/ex_describe_network_interface.xml b/libcloud/test/compute/fixtures/profitbricks/ex_describe_network_interface.xml new file mode 100644 index 0000000000..e2235b98c9 --- /dev/null +++ b/libcloud/test/compute/fixtures/profitbricks/ex_describe_network_interface.xml @@ -0,0 +1,26 @@ + + + + + + 3707226 + a3a2e730-0dc3-47e6-bac6-4c056d5e2aee + 6 + f1c7a244-2fa6-44ee-8fb6-871f337683a3 + 1 + false + c09f4f31-336c-4ad2-9ec7-591778513408 + 10.10.38.12 + 02:01:96:d7:60:e0 + + false + 01490a19-2b20-43cc-86a4-ff0b0460f076 + f1c7a244-2fa6-44ee-8fb6-871f337683a3 + AVAILABLE + + true + AVAILABLE + + + + \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/profitbricks/ex_describe_node.xml b/libcloud/test/compute/fixtures/profitbricks/ex_describe_node.xml new file mode 100644 index 
0000000000..5567e8f19b --- /dev/null +++ b/libcloud/test/compute/fixtures/profitbricks/ex_describe_node.xml @@ -0,0 +1,77 @@ + + + + + + 3706813 + a3a2e730-0dc3-47e6-bac6-4c056d5e2aee + 6 + c09f4f31-336c-4ad2-9ec7-591778513408 + server001 + 1 + 1024 + true + 10.10.38.12 + 162.254.26.14 + + true + VIRTIO + 1 + 50 + addb19d8-e664-43c1-bd2d-ad9210edc610 + storage001 + + + a3a2e730-0dc3-47e6-bac6-4c056d5e2aee + 6 + f1c7a244-2fa6-44ee-8fb6-871f337683a3 + 1 + false + c09f4f31-336c-4ad2-9ec7-591778513408 + 10.10.38.12 + 02:01:96:d7:60:e0 + + false + 01490a19-2b20-43cc-86a4-ff0b0460f076 + f1c7a244-2fa6-44ee-8fb6-871f337683a3 + AVAILABLE + + true + AVAILABLE + + + a3a2e730-0dc3-47e6-bac6-4c056d5e2aee + 6 + e6263870-cd70-42e4-956a-00f3bbec70e3 + PUBLIC + 3 + true + c09f4f31-336c-4ad2-9ec7-591778513408 + 162.254.26.14 + 02:01:9c:53:c3:50 + + false + c0fa291e-38c2-48a6-bd15-b66ba54ac18a + e6263870-cd70-42e4-956a-00f3bbec70e3 + AVAILABLE + + false + 162.254.26.1 + AVAILABLE + + AVAILABLE + RUNNING + 2014-07-16T18:53:05.109Z + 2014-07-16T19:57:51.577Z + LINUX + AUTO + true + true + true + true + true + true + + + + \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/profitbricks/ex_describe_volume.xml b/libcloud/test/compute/fixtures/profitbricks/ex_describe_volume.xml new file mode 100644 index 0000000000..050b4b5b54 --- /dev/null +++ b/libcloud/test/compute/fixtures/profitbricks/ex_describe_volume.xml @@ -0,0 +1,22 @@ + + + + + + 3767716 + 905f1346-d199-425d-a035-7dc28f6819cd + 2 + 00d0b9e7-e016-456f-85a0-517aa9a34bf5 + 50 + StackPointCloud-Volume + + cd59b162-0289-11e4-9f63-52540066fee9 + Debian-7-server-2014-07-01 + + AVAILABLE + 2014-07-21T17:37:45.958Z + 2014-07-21T17:37:45.958Z + + + + \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/profitbricks/ex_destroy_datacenter.xml b/libcloud/test/compute/fixtures/profitbricks/ex_destroy_datacenter.xml new file mode 100644 index 0000000000..4d36bdb2e9 --- /dev/null +++ 
b/libcloud/test/compute/fixtures/profitbricks/ex_destroy_datacenter.xml @@ -0,0 +1,10 @@ + + + + + + 3339313 + + + + \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/profitbricks/ex_destroy_network_interface.xml b/libcloud/test/compute/fixtures/profitbricks/ex_destroy_network_interface.xml new file mode 100644 index 0000000000..6584f068f6 --- /dev/null +++ b/libcloud/test/compute/fixtures/profitbricks/ex_destroy_network_interface.xml @@ -0,0 +1,12 @@ + + + + + + 3634902 + c2df1871-6aac-458e-ad1a-ef3f530cb7aa + 31 + + + + \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/profitbricks/ex_list_datacenters.xml b/libcloud/test/compute/fixtures/profitbricks/ex_list_datacenters.xml new file mode 100644 index 0000000000..dfb924526f --- /dev/null +++ b/libcloud/test/compute/fixtures/profitbricks/ex_list_datacenters.xml @@ -0,0 +1,19 @@ + + + + + + a3e6f83a-8982-4d6a-aebc-60baf5755ede + StackPointCloud + 1 + AVAILABLE + + + c68f77b8-7ecb-40e9-8b41-79415dffc0f1 + XYZ + 2 + AVAILABLE + + + + \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/profitbricks/ex_list_network_interfaces.xml b/libcloud/test/compute/fixtures/profitbricks/ex_list_network_interfaces.xml new file mode 100644 index 0000000000..7164f22151 --- /dev/null +++ b/libcloud/test/compute/fixtures/profitbricks/ex_list_network_interfaces.xml @@ -0,0 +1,75 @@ + + + + + + c2df1871-6aac-458e-ad1a-ef3f530cb7aa + 26 + 6b38a4f3-b851-4614-9e3a-5ddff4727727 + StackPointCloud + 3 + false + 234f0cf9-1efc-4ade-b829-036456584116 + 10.14.96.11 + 162.254.26.14 + 162.254.26.15 + 02:01:40:47:90:04 + + false + e93f74b2-d969-4b7d-8fad-3931b85dbc4d + + d6f7e726-c13d-464c-b454-cae726dac75d + ANY + 1.2.3.4 + 1.2.3.4 + + + 87773a01-b7e2-481e-8a44-d5ffac830292 + ICMP + + 6b38a4f3-b851-4614-9e3a-5ddff4727727 + AVAILABLE + + true + AVAILABLE + + + c2df1871-6aac-458e-ad1a-ef3f530cb7aa + 26 + 47e3b2ce-7846-41cc-8404-1475190e89a3 + 3 + false + 
7fb5d34c-77c2-4452-b7b2-274fa0f46327 + 10.14.96.12 + 02:01:fe:1c:81:73 + + false + d3d1d8b9-0dd5-4866-8429-a1817be7b6e9 + 47e3b2ce-7846-41cc-8404-1475190e89a3 + AVAILABLE + + true + AVAILABLE + + + c2df1871-6aac-458e-ad1a-ef3f530cb7aa + 26 + cbd00ace-d210-43e8-8cb7-097ad6b33e82 + 1 + true + 234f0cf9-1efc-4ade-b829-036456584116 + 208.94.38.110 + 02:01:e9:20:cd:81 + + false + a1a2e1da-7672-4a5c-af62-6c37edaffd26 + cbd00ace-d210-43e8-8cb7-097ad6b33e82 + INPROCESS + + true + 208.94.38.1 + AVAILABLE + + + + \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/profitbricks/ex_start_node.xml b/libcloud/test/compute/fixtures/profitbricks/ex_start_node.xml new file mode 100644 index 0000000000..f83d6a4550 --- /dev/null +++ b/libcloud/test/compute/fixtures/profitbricks/ex_start_node.xml @@ -0,0 +1,10 @@ + + + + + + 3494585 + + + + \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/profitbricks/ex_stop_node.xml b/libcloud/test/compute/fixtures/profitbricks/ex_stop_node.xml new file mode 100644 index 0000000000..f83d6a4550 --- /dev/null +++ b/libcloud/test/compute/fixtures/profitbricks/ex_stop_node.xml @@ -0,0 +1,10 @@ + + + + + + 3494585 + + + + \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/profitbricks/ex_update_datacenter.xml b/libcloud/test/compute/fixtures/profitbricks/ex_update_datacenter.xml new file mode 100644 index 0000000000..270aa41cd6 --- /dev/null +++ b/libcloud/test/compute/fixtures/profitbricks/ex_update_datacenter.xml @@ -0,0 +1,12 @@ + + + + + + 3325148 + d96dfafc-9a8c-4c0e-8a0c-857a15db572d + 3 + + + + \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/profitbricks/ex_update_network_interface.xml b/libcloud/test/compute/fixtures/profitbricks/ex_update_network_interface.xml new file mode 100644 index 0000000000..f9f8e0d43b --- /dev/null +++ b/libcloud/test/compute/fixtures/profitbricks/ex_update_network_interface.xml @@ -0,0 +1,12 @@ + + + + + + 3665310 + 
aab9454d-c442-4d06-9dd7-7c6121ae5ca2 + 3 + + + + \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/profitbricks/ex_update_node.xml b/libcloud/test/compute/fixtures/profitbricks/ex_update_node.xml new file mode 100644 index 0000000000..03693a7765 --- /dev/null +++ b/libcloud/test/compute/fixtures/profitbricks/ex_update_node.xml @@ -0,0 +1,12 @@ + + + + + + 3623299 + c2df1871-6aac-458e-ad1a-ef3f530cb7aa + 18 + + + + \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/profitbricks/ex_update_volume.xml b/libcloud/test/compute/fixtures/profitbricks/ex_update_volume.xml new file mode 100644 index 0000000000..03693a7765 --- /dev/null +++ b/libcloud/test/compute/fixtures/profitbricks/ex_update_volume.xml @@ -0,0 +1,12 @@ + + + + + + 3623299 + c2df1871-6aac-458e-ad1a-ef3f530cb7aa + 18 + + + + \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/profitbricks/list_images.xml b/libcloud/test/compute/fixtures/profitbricks/list_images.xml new file mode 100644 index 0000000000..3c8ac7b1f2 --- /dev/null +++ b/libcloud/test/compute/fixtures/profitbricks/list_images.xml @@ -0,0 +1,43 @@ + + + + + + false + 03b6c3e7-f2ad-11e3-a036-52540066fee9 + windows-2012-r2-server-2014-06 + 11264 + HDD + false + WINDOWS + true + NORTH_AMERICA + true + + + true + cd59b162-0289-11e4-9f63-52540066fee9 + Debian-7-server-2014-07-01 + 2048 + HDD + true + LINUX + true + NORTH_AMERICA + true + + + true + d2f627c4-0289-11e4-9f63-52540066fee9 + CentOS-6-server-2014-07-01 + 2048 + HDD + true + LINUX + true + NORTH_AMERICA + true + + + + \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/profitbricks/list_nodes.xml b/libcloud/test/compute/fixtures/profitbricks/list_nodes.xml new file mode 100644 index 0000000000..c392d25967 --- /dev/null +++ b/libcloud/test/compute/fixtures/profitbricks/list_nodes.xml @@ -0,0 +1,172 @@ + + + + + + e1e8ec0d-b47f-4d39-a91b-6e885483c899 + 5 + c8e57d7b-e731-46ad-a913-1828c0562246 + server001 + 1 + 
1024 + true + 10.13.198.11 + 162.254.25.197 + 10.10.108.12 + + true + VIRTIO + 1 + 50 + b07d7c20-8cd4-4502-aab1-c0195b7f18a1 + server001-storage001 + + + e1e8ec0d-b47f-4d39-a91b-6e885483c899 + 5 + 7fb08916-eb64-40b8-a081-fafe0e374145 + 2 + false + c8e57d7b-e731-46ad-a913-1828c0562246 + 10.10.108.12 + 02:01:b9:a5:81:d9 + + false + 27062b18-aab9-4046-9071-c375121fdcd4 + 7fb08916-eb64-40b8-a081-fafe0e374145 + AVAILABLE + + true + AVAILABLE + + + e1e8ec0d-b47f-4d39-a91b-6e885483c899 + 5 + 3f980c78-89b7-4f65-8e5d-28abc2f158aa + 3 + false + c8e57d7b-e731-46ad-a913-1828c0562246 + 10.13.198.11 + 02:01:7f:31:2f:71 + + false + b644fbfd-ff6f-4ba5-882c-d0478f327819 + 3f980c78-89b7-4f65-8e5d-28abc2f158aa + AVAILABLE + + true + AVAILABLE + + + e1e8ec0d-b47f-4d39-a91b-6e885483c899 + 5 + 8df3ec08-b6f7-4038-85eb-da6620d31aa5 + 1 + true + c8e57d7b-e731-46ad-a913-1828c0562246 + 162.254.25.197 + 02:01:1f:9c:f3:24 + + false + 0bd04380-b4f3-4013-96a2-d71d134fd895 + 8df3ec08-b6f7-4038-85eb-da6620d31aa5 + AVAILABLE + + true + 162.254.25.1 + AVAILABLE + + AVAILABLE + RUNNING + 2014-07-14T20:52:20.839Z + 2014-07-14T22:11:09.324Z + LINUX + ZONE_1 + + + e1e8ec0d-b47f-4d39-a91b-6e885483c899 + 5 + 7080c05b-1a91-4661-a217-60d864acee44 + server002 + 1 + 1024 + false + 10.13.198.12 + 10.10.108.11 + + true + VIRTIO + 1 + 50 + b96f49f6-c1d3-4250-8135-35c17c827657 + server002-storage001 + + + e1e8ec0d-b47f-4d39-a91b-6e885483c899 + 5 + f1e0a1c6-329b-4629-b95b-ba81a30a4d73 + 2 + false + 7080c05b-1a91-4661-a217-60d864acee44 + 10.10.108.11 + 02:01:58:5e:9a:3c + + false + 263d5a2c-c95b-4903-b290-a33cb47616c4 + f1e0a1c6-329b-4629-b95b-ba81a30a4d73 + AVAILABLE + + true + AVAILABLE + + + e1e8ec0d-b47f-4d39-a91b-6e885483c899 + 5 + 847c50c7-d7dc-4fe6-8216-e05dc7ea7b18 + 3 + false + 7080c05b-1a91-4661-a217-60d864acee44 + 10.13.198.12 + 02:01:1e:32:2f:40 + + false + 35c42660-94d5-483d-aca7-8e6e97507508 + 847c50c7-d7dc-4fe6-8216-e05dc7ea7b18 + AVAILABLE + + true + AVAILABLE + + AVAILABLE + RUNNING + 
2014-07-14T21:40:12.265Z + 2014-07-14T22:11:09.324Z + LINUX + AUTO + + + 6571ecd4-8602-4692-ae14-2f85eedbc403 + 2 + c9b9b603-65a3-4f11-bd24-ff1b494a85e2 + server001 + 1 + 1024 + false + + true + VIRTIO + 1 + 50 + 06dad54e-85b2-4146-ab15-ef55cb121921 + Storage + + AVAILABLE + RUNNING + 2014-07-14T21:06:21.421Z + 2014-07-14T21:06:21.421Z + LINUX + ZONE_2 + + + + \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/profitbricks/list_volumes.xml b/libcloud/test/compute/fixtures/profitbricks/list_volumes.xml new file mode 100644 index 0000000000..5b7304279d --- /dev/null +++ b/libcloud/test/compute/fixtures/profitbricks/list_volumes.xml @@ -0,0 +1,66 @@ + + + + + + 06eac419-c2b3-4761-aeb9-10efdd2cf292 + 2 + 453582cf-8d54-4ec8-bc0b-f9962f7fd232 + 50 + storage001 + + d2f627c4-0289-11e4-9f63-52540066fee9 + CentOS-6-server-2014-07-01 + + ebee7d83-912b-42f1-9b62-b953351a7e29 + AVAILABLE + 2014-07-15T03:19:38.252Z + 2014-07-15T03:28:58.724Z + + + 06eac419-c2b3-4761-aeb9-10efdd2cf292 + 2 + 4e547123-897b-4520-a74e-c4ae2ff62f92 + 50 + storage002 + + d2f627c4-0289-11e4-9f63-52540066fee9 + CentOS-6-server-2014-07-01 + + 86413124-8dd3-4708-8475-9b4e7d059401 + AVAILABLE + 2014-07-15T03:19:38.252Z + 2014-07-15T03:28:58.724Z + + + 06eac419-c2b3-4761-aeb9-10efdd2cf292 + 2 + 0c893ea8-6bd7-483f-9d72-5a2fc4edff83 + 50 + storage003 + + d2f627c4-0289-11e4-9f63-52540066fee9 + CentOS-6-server-2014-07-01 + + ebee7d83-912b-42f1-9b62-b953351a7e29 + AVAILABLE + 2014-07-15T03:28:58.724Z + 2014-07-15T03:28:58.724Z + + + 06eac419-c2b3-4761-aeb9-10efdd2cf292 + 2 + 6584f97e-90f3-4664-8d5b-8ff4539bf800 + 50 + storage004 + + 10033683-01e2-11e4-9f63-52540066fee9 + Ubuntu-13.10-server-2014-07-01 + + AVAILABLE + 2014-07-15T03:28:58.724Z + 2014-07-15T03:28:58.724Z + + + + \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/profitbricks/reboot_node.xml b/libcloud/test/compute/fixtures/profitbricks/reboot_node.xml new file mode 100644 index 0000000000..afb9e37f02 --- 
/dev/null +++ b/libcloud/test/compute/fixtures/profitbricks/reboot_node.xml @@ -0,0 +1,10 @@ + + + + + + 3492524 + + + + \ No newline at end of file diff --git a/libcloud/test/compute/test_profitbricks.py b/libcloud/test/compute/test_profitbricks.py new file mode 100644 index 0000000000..ab7ab06c1f --- /dev/null +++ b/libcloud/test/compute/test_profitbricks.py @@ -0,0 +1,509 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import sys + +from libcloud.utils.py3 import httplib +from libcloud.test import MockHttp +from libcloud.test.file_fixtures import ComputeFileFixtures +from libcloud.compute.types import Provider +from libcloud.compute.types import NodeState +from libcloud.compute.providers import get_driver +from libcloud.test import unittest +from libcloud.test.secrets import PROFIT_BRICKS_PARAMS + + +class ProfitBricksTests(unittest.TestCase): + + def setUp(self): + ProfitBricks = get_driver(Provider.PROFIT_BRICKS) + ProfitBricks.connectionCls.conn_classes = (None, ProfitBricksMockHttp) + self.driver = ProfitBricks(*PROFIT_BRICKS_PARAMS) + + ''' Server Function Tests + ''' + def test_list_nodes(self): + nodes = self.driver.list_nodes() + + self.assertEqual(len(nodes), 3) + + node = nodes[0] + self.assertEquals(node.id, "c8e57d7b-e731-46ad-a913-1828c0562246") + self.assertEquals(node.name, "server001") + self.assertEquals(node.state, 0) + self.assertEquals(node.public_ips, ['162.254.25.197']) + self.assertEquals(node.private_ips, ['10.10.108.12', '10.13.198.11']) + self.assertEquals(node.extra['datacenter_id'], "e1e8ec0d-b47f-4d39-a91b-6e885483c899") + self.assertEquals(node.extra['datacenter_version'], "5") + self.assertEquals(node.extra['provisioning_state'], 0) + self.assertEquals(node.extra['creation_time'], "2014-07-14T20:52:20.839Z") + self.assertEquals(node.extra['last_modification_time'], "2014-07-14T22:11:09.324Z") + self.assertEquals(node.extra['os_type'], "LINUX") + self.assertEquals(node.extra['availability_zone'], "ZONE_1") + + def test_ex_describe_node(self): + image = type('NodeImage', (object,), + dict(id="cd59b162-0289-11e4-9f63-52540066fee9", + name="Debian-7-server-2014-07-01")) + size = type('NodeSize', (object,), + dict(id="2", + name="Small Instance", + ram=2048, + disk=50, + extra={'cores': 1})) + + node = self.driver.create_node(name="SPC-Server", + image=image, + size=size) + + self.assertEquals(node.id, "7b18b85f-cc93-4c2d-abcc-5ce732d35750") + + def 
test_reboot_node(self): + node = type('Node', (object,), + dict(id="c8e57d7b-e731-46ad-a913-1828c0562246")) + reboot = self.driver.reboot_node(node=node) + + self.assertTrue(reboot) + + def test_ex_stop_node(self): + node = type('Node', (object,), + dict(id="c8e57d7b-e731-46ad-a913-1828c0562246")) + stop = self.driver.ex_stop_node(node=node) + + self.assertTrue(stop) + + def test_ex_start_node(self): + node = type('Node', (object,), + dict(id="c8e57d7b-e731-46ad-a913-1828c0562246")) + start = self.driver.ex_start_node(node=node) + + self.assertTrue(start) + + def test_destroy_node(self): + node = type('Node', (object,), + dict(id="c8e57d7b-e731-46ad-a913-1828c0562246")) + destroy = self.driver.destroy_node(node=node) + + self.assertTrue(destroy) + + def test_ex_update_node(self): + node = type('Node', (object,), + dict(id="c8e57d7b-e731-46ad-a913-1828c0562246")) + + zone = type('ExProfitBricksAvailabilityZone', (object,), + dict(name="ZONE_2")) + + update = self.driver.ex_update_node(node=node, ram=2048, cores=2, name="server002", availability_zone=zone) + + self.assertTrue(update) + + ''' Volume Function Tests + ''' + def test_list_volumes(self): + volumes = self.driver.list_volumes() + + self.assertEqual(len(volumes), 4) + + volume = volumes[0] + self.assertEquals(volume.id, "453582cf-8d54-4ec8-bc0b-f9962f7fd232") + self.assertEquals(volume.name, "storage001") + self.assertEquals(volume.size, 50) + self.assertEquals(volume.extra['server_id'], "ebee7d83-912b-42f1-9b62-b953351a7e29") + self.assertEquals(volume.extra['provisioning_state'], 0) + self.assertEquals(volume.extra['creation_time'], "2014-07-15T03:19:38.252Z") + self.assertEquals(volume.extra['last_modification_time'], "2014-07-15T03:28:58.724Z") + self.assertEquals(volume.extra['image_id'], "d2f627c4-0289-11e4-9f63-52540066fee9") + self.assertEquals(volume.extra['image_name'], "CentOS-6-server-2014-07-01") + self.assertEquals(volume.extra['datacenter_id'], "06eac419-c2b3-4761-aeb9-10efdd2cf292") + + def 
test_create_volume(self): + datacenter = type('Datacenter', (object,), + dict(id="8669a69f-2274-4520-b51e-dbdf3986a476")) + + image = type('NodeImage', (object,), + dict(id="cd59b162-0289-11e4-9f63-52540066fee9")) + + create = self.driver.create_volume(name="StackPointCloudStorage001", + size=50, + ex_datacenter=datacenter, + ex_image=image) + + self.assertTrue(create) + + def test_attach_volume_general(self): + volume = type('StorageVolume', (object,), + dict(id="8669a69f-2274-4520-b51e-dbdf3986a476")) + + node = type('Node', (object,), + dict(id="cd59b162-0289-11e4-9f63-52540066fee9")) + + attach = self.driver.attach_volume(node=node, + volume=volume, + device=None, ex_bus_type=None) + + self.assertTrue(attach) + + def test_attach_volume_device_defined(self): + volume = type('StorageVolume', (object,), + dict(id="8669a69f-2274-4520-b51e-dbdf3986a476")) + + node = type('Node', (object,), + dict(id="cd59b162-0289-11e4-9f63-52540066fee9")) + + attach = self.driver.attach_volume(node=node, volume=volume, device=1, ex_bus_type=None) + + self.assertTrue(attach) + + def test_attach_volume_bus_type_defined(self): + volume = type('StorageVolume', (object,), + dict(id="8669a69f-2274-4520-b51e-dbdf3986a476")) + + node = type('Node', (object,), + dict(id="cd59b162-0289-11e4-9f63-52540066fee9")) + + attach = self.driver.attach_volume(node=node, + volume=volume, + device=None, + ex_bus_type="IDE") + + self.assertTrue(attach) + + def test_attach_volume_options_defined(self): + volume = type('StorageVolume', (object,), + dict(id="8669a69f-2274-4520-b51e-dbdf3986a476")) + + node = type('Node', (object,), + dict(id="cd59b162-0289-11e4-9f63-52540066fee9")) + + attach = self.driver.attach_volume(node=node, volume=volume, + device=1, ex_bus_type="IDE") + + self.assertTrue(attach) + + def test_detach_volume(self): + volume = type('StorageVolume', (object,), + dict(id="8669a69f-2274-4520-b51e-dbdf3986a476", + extra={'server_id': "cd59b162-0289-11e4-9f63-52540066fee9"} + )) + + attach = 
self.driver.detach_volume(volume=volume) + + self.assertTrue(attach) + + def test_destroy_volume(self): + volume = type('StorageVolume', (object,), + dict(id="8669a69f-2274-4520-b51e-dbdf3986a476")) + + destroy = self.driver.destroy_volume(volume=volume) + + self.assertTrue(destroy) + + def test_update_volume(self): + volume = type('StorageVolume', (object,), + dict(id="8669a69f-2274-4520-b51e-dbdf3986a476")) + + destroy = self.driver.ex_update_volume(volume=volume) + + self.assertTrue(destroy) + + def test_ex_describe_volume(self): + describe = self.driver.ex_describe_volume(volume_id="8669a69f-2274-4520-b51e-dbdf3986a476") + + self.assertEqual(describe.id, "00d0b9e7-e016-456f-85a0-517aa9a34bf5") + self.assertEqual(describe.size, 50) + self.assertEqual(describe.name, "StackPointCloud-Volume") + self.assertEqual(describe.extra['provisioning_state'], NodeState.RUNNING) + + ''' Image Function Tests + ''' + def test_list_images(self): + images = self.driver.list_images() + + self.assertEqual(len(images), 3) + + image = images[0] + self.assertEqual(image.extra['cpu_hotpluggable'], "false") + self.assertEqual(image.id, "03b6c3e7-f2ad-11e3-a036-52540066fee9") + self.assertEqual(image.name, "windows-2012-r2-server-2014-06") + self.assertEqual(image.extra['image_size'], "11264") + self.assertEqual(image.extra['image_type'], "HDD") + self.assertEqual(image.extra['memory_hotpluggable'], "false") + self.assertEqual(image.extra['os_type'], "WINDOWS") + self.assertEqual(image.extra['public'], "true") + self.assertEqual(image.extra['location'], None) + self.assertEqual(image.extra['writeable'], "true") + + ''' Datacenter Function Tests + ''' + def test_ex_create_datacenter(self): + datacenter = self.driver.ex_create_datacenter(name="StackPointCloud", + location="us/la") + + self.assertEqual(datacenter.id, '0c793dd1-d4cd-4141-86f3-8b1a24b2d604') + self.assertEqual(datacenter.extra['location'], 'us/las') + self.assertEqual(datacenter.version, '1') + + def 
test_ex_destroy_datacenter(self): + datacenter = type('Datacenter', (object,), + dict(id="8669a69f-2274-4520-b51e-dbdf3986a476")) + destroy = self.driver.ex_destroy_datacenter(datacenter=datacenter) + + self.assertTrue(destroy) + + def test_ex_describe_datacenter(self): + datacenter = type('Datacenter', (object,), + dict(id="d96dfafc-9a8c-4c0e-8a0c-857a15db572d")) + describe = self.driver.ex_describe_datacenter(datacenter_id=datacenter.id) + + self.assertEqual(describe.id, 'a3e6f83a-8982-4d6a-aebc-60baf5755ede') + self.assertEqual(describe.name, 'StackPointCloud') + self.assertEqual(describe.version, '1') + self.assertEqual(describe.extra['location'], 'us/las') + self.assertEqual(describe.extra['provisioning_state'], NodeState.RUNNING) + + def test_ex_clear_datacenter(self): + datacenter = type('Datacenter', (object,), + dict(id="8669a69f-2274-4520-b51e-dbdf3986a476")) + clear = self.driver.ex_clear_datacenter(datacenter=datacenter) + + self.assertTrue(clear) + + def test_ex_list_datacenters(self): + datacenters = self.driver.ex_list_datacenters() + + self.assertEqual(len(datacenters), 2) + + dc1 = datacenters[0] + self.assertEquals(dc1.id, "a3e6f83a-8982-4d6a-aebc-60baf5755ede") + self.assertEquals(dc1.name, "StackPointCloud") + self.assertEquals(dc1.version, "1") + + def test_ex_rename_datacenter(self): + datacenter = type('Datacenter', (object,), + dict(id="d96dfafc-9a8c-4c0e-8a0c-857a15db572d")) + + update = self.driver.ex_rename_datacenter(datacenter=datacenter, + name="StackPointCloud") + + self.assertTrue(update) + + def test_list_locations(self): + locations = self.driver.list_locations() + self.assertEqual(len(locations), 3) + + locationNamesResult = sorted(list(a.name for a in locations)) + locationNamesExpected = ['de/fkb', 'de/fra', 'us/las'] + + self.assertEquals(locationNamesResult, locationNamesExpected) + + ''' Availability Zone Tests + ''' + + def test_ex_list_availability_zones(self): + zones = self.driver.ex_list_availability_zones() + 
self.assertEqual(len(zones), 3) + + zoneNamesResult = sorted(list(a.name for a in zones)) + zoneNamesExpected = ['AUTO', 'ZONE_1', 'ZONE_2'] + + self.assertEquals(zoneNamesResult, zoneNamesExpected) + + ''' Interface Tests + ''' + + def test_ex_list_interfaces(self): + interfaces = self.driver.ex_list_network_interfaces() + + self.assertEqual(len(interfaces), 3) + + interface = interfaces[0] + self.assertEquals(interface.id, "6b38a4f3-b851-4614-9e3a-5ddff4727727") + self.assertEquals(interface.name, "StackPointCloud") + self.assertEquals(interface.state, 0) + self.assertEquals(interface.extra['server_id'], "234f0cf9-1efc-4ade-b829-036456584116") + self.assertEquals(interface.extra['lan_id'], '3') + self.assertEquals(interface.extra['internet_access'], 'false') + self.assertEquals(interface.extra['mac_address'], "02:01:40:47:90:04") + self.assertEquals(interface.extra['dhcp_active'], "true") + self.assertEquals(interface.extra['gateway_ip'], None) + self.assertEquals(interface.extra['ips'], ['10.14.96.11', '162.254.26.14', '162.254.26.15']) + + def test_ex_create_network_interface(self): + node = type('Node', (object,), + dict(id="cd59b162-0289-11e4-9f63-52540066fee9")) + + interface = self.driver.ex_create_network_interface(node=node) + self.assertEqual(interface.id, '6b38a4f3-b851-4614-9e3a-5ddff4727727') + + def test_ex_destroy_network_interface(self): + network_interface = type('ProfitBricksNetworkInterface', (object,), + dict( + id="cd59b162-0289-11e4-9f63-52540066fee9")) + + destroy = self.driver.ex_destroy_network_interface( + network_interface=network_interface) + + self.assertTrue(destroy) + + def test_ex_update_network_interface(self): + network_interface = type('ProfitBricksNetworkInterface', (object,), + dict( + id="cd59b162-0289-11e4-9f63-52540066fee9")) + + create = self.driver.ex_update_network_interface( + network_interface=network_interface) + + self.assertTrue(create) + + def test_ex_describe_network_interface(self): + network_interface = 
type('ProfitBricksNetworkInterface', (object,), + dict( + id="cd59b162-0289-11e4-9f63-52540066fee9")) + + describe = self.driver.ex_describe_network_interface(network_interface=network_interface) + + self.assertEquals(describe.id, "f1c7a244-2fa6-44ee-8fb6-871f337683a3") + self.assertEquals(describe.name, None) + self.assertEquals(describe.state, 0) + self.assertEquals(describe.extra['datacenter_id'], "a3a2e730-0dc3-47e6-bac6-4c056d5e2aee") + self.assertEquals(describe.extra['datacenter_version'], "6") + self.assertEquals(describe.extra['server_id'], "c09f4f31-336c-4ad2-9ec7-591778513408") + self.assertEquals(describe.extra['lan_id'], "1") + self.assertEquals(describe.extra['internet_access'], "false") + self.assertEquals(describe.extra['mac_address'], "02:01:96:d7:60:e0") + self.assertEquals(describe.extra['dhcp_active'], "true") + self.assertEquals(describe.extra['gateway_ip'], None) + self.assertEquals(describe.extra['ips'], ['10.10.38.12']) + + def test_list_sizes(self): + sizes = self.driver.list_sizes() + + self.assertEqual(len(sizes), 7) + + +class ProfitBricksMockHttp(MockHttp): + + fixtures = ComputeFileFixtures('profitbricks') + + def _1_3_clearDataCenter(self, method, url, body, headers): + body = self.fixtures.load('ex_clear_datacenter.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _1_3_createDataCenter(self, method, url, body, headers): + body = self.fixtures.load('ex_create_datacenter.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _1_3_deleteDataCenter(self, method, url, body, headers): + body = self.fixtures.load('ex_destroy_datacenter.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _1_3_getDataCenter(self, method, url, body, headers): + body = self.fixtures.load('ex_describe_datacenter.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _1_3_getAllDataCenters(self, method, url, body, headers): + body = 
self.fixtures.load('ex_list_datacenters.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _1_3_updateDataCenter(self, method, url, body, headers): + body = self.fixtures.load('ex_update_datacenter.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _1_3_getAllImages(self, method, url, body, headers): + body = self.fixtures.load('list_images.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _1_3_getAllServers(self, method, url, body, headers): + body = self.fixtures.load('list_nodes.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _1_3_resetServer(self, method, url, body, headers): + body = self.fixtures.load('reboot_node.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _1_3_stopServer(self, method, url, body, headers): + body = self.fixtures.load('ex_stop_node.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _1_3_startServer(self, method, url, body, headers): + body = self.fixtures.load('ex_start_node.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _1_3_deleteServer(self, method, url, body, headers): + body = self.fixtures.load('destroy_node.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _1_3_getAllStorages(self, method, url, body, headers): + body = self.fixtures.load('list_volumes.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _1_3_createStorage(self, method, url, body, headers): + body = self.fixtures.load('create_volume.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _1_3_connectStorageToServer(self, method, url, body, headers): + body = self.fixtures.load('attach_volume.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _1_3_disconnectStorageFromServer(self, method, url, body, headers): + body = self.fixtures.load('detach_volume.xml') + return (httplib.OK, 
body, {}, httplib.responses[httplib.OK]) + + def _1_3_deleteStorage(self, method, url, body, headers): + body = self.fixtures.load('destroy_volume.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _1_3_updateStorage(self, method, url, body, headers): + body = self.fixtures.load('ex_update_volume.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _1_3_updateServer(self, method, url, body, headers): + body = self.fixtures.load('ex_update_node.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _1_3_getNic(self, method, url, body, headers): + body = self.fixtures.load('ex_describe_network_interface.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _1_3_getAllNic(self, method, url, body, headers): + body = self.fixtures.load('ex_list_network_interfaces.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _1_3_createNic(self, method, url, body, headers): + body = self.fixtures.load('ex_list_network_interfaces.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _1_3_deleteNic(self, method, url, body, headers): + body = self.fixtures.load('ex_destroy_network_interface.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _1_3_updateNic(self, method, url, body, headers): + body = self.fixtures.load('ex_update_network_interface.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _1_3_getServer(self, method, url, body, headers): + body = self.fixtures.load('ex_describe_node.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _1_3_getStorage(self, method, url, body, headers): + body = self.fixtures.load('ex_describe_volume.xml') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _1_3_createServer(self, method, url, body, headers): + body = self.fixtures.load('create_node.xml') + return (httplib.OK, body, {}, 
httplib.responses[httplib.OK]) + +if __name__ == '__main__': + sys.exit(unittest.main()) diff --git a/libcloud/test/secrets.py-dist b/libcloud/test/secrets.py-dist index 030176e4c0..b3525e88cc 100644 --- a/libcloud/test/secrets.py-dist +++ b/libcloud/test/secrets.py-dist @@ -44,6 +44,7 @@ GRIDSPOT_PARAMS = ('key',) HOSTVIRTUAL_PARAMS = ('key',) DIGITAL_OCEAN_PARAMS = ('user', 'key') CLOUDFRAMES_PARAMS = ('key', 'secret', False, 'host', 8888) +PROFIT_BRICKS_PARAMS = ('user', 'key') # Storage STORAGE_S3_PARAMS = ('key', 'secret') From c8184e98a55c7692c11c92924bf279c3ffa5d67d Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Sun, 14 Sep 2014 13:20:25 +0200 Subject: [PATCH 208/315] Re-generate supported providers and methods table. --- docs/compute/_supported_methods_block_storage.rst | 2 ++ docs/compute/_supported_methods_image_management.rst | 2 ++ docs/compute/_supported_methods_key_pair_management.rst | 2 ++ docs/compute/_supported_methods_main.rst | 2 ++ docs/compute/_supported_providers.rst | 2 ++ 5 files changed, 10 insertions(+) diff --git a/docs/compute/_supported_methods_block_storage.rst b/docs/compute/_supported_methods_block_storage.rst index bc7f9e9725..f14d375f88 100644 --- a/docs/compute/_supported_methods_block_storage.rst +++ b/docs/compute/_supported_methods_block_storage.rst @@ -51,6 +51,7 @@ Provider list volumes create volume destroy volume `Opsource`_ no no no no no no no `Outscale INC`_ yes yes yes yes yes yes yes `Outscale SAS`_ yes yes yes yes yes yes yes +`ProfitBricks`_ yes yes yes yes yes no no `Rackspace Cloud (Next Gen)`_ yes yes yes yes yes no no `Rackspace Cloud (First Gen)`_ yes yes yes yes yes no no `RimuHosting`_ no no no no no no no @@ -115,6 +116,7 @@ Provider list volumes create volume destroy volume .. _`Opsource`: http://www.opsource.net/ .. _`Outscale INC`: http://www.outscale.com .. _`Outscale SAS`: http://www.outscale.com +.. _`ProfitBricks`: http://www.profitbricks.com .. 
_`Rackspace Cloud (Next Gen)`: http://www.rackspace.com .. _`Rackspace Cloud (First Gen)`: http://www.rackspace.com .. _`RimuHosting`: http://rimuhosting.com/ diff --git a/docs/compute/_supported_methods_image_management.rst b/docs/compute/_supported_methods_image_management.rst index dcbc1d0863..6ef61d1acf 100644 --- a/docs/compute/_supported_methods_image_management.rst +++ b/docs/compute/_supported_methods_image_management.rst @@ -51,6 +51,7 @@ Provider list images get image create image delete `Opsource`_ yes no no no no `Outscale INC`_ yes yes yes yes yes `Outscale SAS`_ yes yes yes yes yes +`ProfitBricks`_ yes no no no no `Rackspace Cloud (Next Gen)`_ yes yes yes yes no `Rackspace Cloud (First Gen)`_ yes yes yes yes no `RimuHosting`_ yes no no no no @@ -115,6 +116,7 @@ Provider list images get image create image delete .. _`Opsource`: http://www.opsource.net/ .. _`Outscale INC`: http://www.outscale.com .. _`Outscale SAS`: http://www.outscale.com +.. _`ProfitBricks`: http://www.profitbricks.com .. _`Rackspace Cloud (Next Gen)`: http://www.rackspace.com .. _`Rackspace Cloud (First Gen)`: http://www.rackspace.com .. _`RimuHosting`: http://rimuhosting.com/ diff --git a/docs/compute/_supported_methods_key_pair_management.rst b/docs/compute/_supported_methods_key_pair_management.rst index 5f98929cb1..d177e3f270 100644 --- a/docs/compute/_supported_methods_key_pair_management.rst +++ b/docs/compute/_supported_methods_key_pair_management.rst @@ -51,6 +51,7 @@ Provider list key pairs get key pair create key pai `Opsource`_ no no no no no no `Outscale INC`_ yes yes yes yes no yes `Outscale SAS`_ yes yes yes yes no yes +`ProfitBricks`_ no no no no no no `Rackspace Cloud (Next Gen)`_ yes yes yes yes no yes `Rackspace Cloud (First Gen)`_ no no no no no no `RimuHosting`_ no no no no no no @@ -115,6 +116,7 @@ Provider list key pairs get key pair create key pai .. _`Opsource`: http://www.opsource.net/ .. _`Outscale INC`: http://www.outscale.com .. 
_`Outscale SAS`: http://www.outscale.com +.. _`ProfitBricks`: http://www.profitbricks.com .. _`Rackspace Cloud (Next Gen)`: http://www.rackspace.com .. _`Rackspace Cloud (First Gen)`: http://www.rackspace.com .. _`RimuHosting`: http://rimuhosting.com/ diff --git a/docs/compute/_supported_methods_main.rst b/docs/compute/_supported_methods_main.rst index fbf8feb223..1e6a0a43a6 100644 --- a/docs/compute/_supported_methods_main.rst +++ b/docs/compute/_supported_methods_main.rst @@ -51,6 +51,7 @@ Provider list nodes create node reboot node destroy `Opsource`_ yes yes yes yes yes yes yes `Outscale INC`_ yes yes yes yes yes yes yes `Outscale SAS`_ yes yes yes yes yes yes yes +`ProfitBricks`_ yes yes yes yes yes yes no `Rackspace Cloud (Next Gen)`_ yes yes yes yes yes yes yes `Rackspace Cloud (First Gen)`_ yes yes yes yes yes yes yes `RimuHosting`_ yes yes yes yes yes yes yes @@ -115,6 +116,7 @@ Provider list nodes create node reboot node destroy .. _`Opsource`: http://www.opsource.net/ .. _`Outscale INC`: http://www.outscale.com .. _`Outscale SAS`: http://www.outscale.com +.. _`ProfitBricks`: http://www.profitbricks.com .. _`Rackspace Cloud (Next Gen)`: http://www.rackspace.com .. _`Rackspace Cloud (First Gen)`: http://www.rackspace.com .. 
_`RimuHosting`: http://rimuhosting.com/ diff --git a/docs/compute/_supported_providers.rst b/docs/compute/_supported_providers.rst index fa24076584..da9bff6402 100644 --- a/docs/compute/_supported_providers.rst +++ b/docs/compute/_supported_providers.rst @@ -51,6 +51,7 @@ Provider Documentation `Opsource`_ OPSOURCE :mod:`libcloud.compute.drivers.opsource` :class:`OpsourceNodeDriver` `Outscale INC`_ :doc:`Click ` OUTSCALE_INC :mod:`libcloud.compute.drivers.ec2` :class:`OutscaleINCNodeDriver` `Outscale SAS`_ :doc:`Click ` OUTSCALE_SAS :mod:`libcloud.compute.drivers.ec2` :class:`OutscaleSASNodeDriver` +`ProfitBricks`_ PROFIT_BRICKS :mod:`libcloud.compute.drivers.profitbricks` :class:`ProfitBricksNodeDriver` `Rackspace Cloud (Next Gen)`_ :doc:`Click ` RACKSPACE :mod:`libcloud.compute.drivers.rackspace` :class:`RackspaceNodeDriver` `Rackspace Cloud (First Gen)`_ RACKSPACE_FIRST_GEN :mod:`libcloud.compute.drivers.rackspace` :class:`RackspaceFirstGenNodeDriver` `RimuHosting`_ RIMUHOSTING :mod:`libcloud.compute.drivers.rimuhosting` :class:`RimuHostingNodeDriver` @@ -115,6 +116,7 @@ Provider Documentation .. _`Opsource`: http://www.opsource.net/ .. _`Outscale INC`: http://www.outscale.com .. _`Outscale SAS`: http://www.outscale.com +.. _`ProfitBricks`: http://www.profitbricks.com .. _`Rackspace Cloud (Next Gen)`: http://www.rackspace.com .. _`Rackspace Cloud (First Gen)`: http://www.rackspace.com .. 
_`RimuHosting`: http://rimuhosting.com/ From f61cbd5a4888c703b0992d3023124d8a29799904 Mon Sep 17 00:00:00 2001 From: Evgeny Egorochkin Date: Mon, 30 Jun 2014 17:12:35 +0300 Subject: [PATCH 209/315] GCE: healthcheck: add description param to ex_create_healthcheck(); targetpool: multiple bugfixes Fix #1: targetpool = driver.ex_get_targetpool('tpname') targetpool.add_node(node) node.destroy() # targetpool still contains the node targetpool = driver.ex_get_targetpool('tpname') # targetpool.nodes contains node uri string (in addition to possible other node objects) # as produced by _to_targetpool because the node is in the pool but is destroyed targetpool.remove_node(node) # raises an exception, removes the node nevertheless. Expected behavior: remove the node, return true Fix #2: targetpool.add(node) targetpool.add(node) # targetpool.nodes contains 2 copies of node # actual targetpool resource on the GCE side doesn't contain 2 copies Expected behavior: no duplicates in targetpool.nodes, the node list matches GCE side Fix/Improvement #3: Allow specifying nodes by fully-qualified node uri in add_node and remove_node. if tp.nodes: tp.remove_node(tp.nodes[0]) # fails if the node in the list doesn't exist exoected behavior: should be able to remove any node from the list GCE allows adding non-existent nodes to the targetpool and doesn't automatically remove nodes from the pool if you delete them. libcloud should support doing the same. 
Signed-off-by: Tomaz Muraus --- libcloud/compute/drivers/gce.py | 41 +++++++++++++++---- ...global_httpHealthChecks_lchealthcheck.json | 1 + libcloud/test/compute/test_gce.py | 18 +++++++- 3 files changed, 50 insertions(+), 10 deletions(-) diff --git a/libcloud/compute/drivers/gce.py b/libcloud/compute/drivers/gce.py index 991432478a..c5050be7a5 100644 --- a/libcloud/compute/drivers/gce.py +++ b/libcloud/compute/drivers/gce.py @@ -938,7 +938,8 @@ def ex_create_address(self, name, region=None): def ex_create_healthcheck(self, name, host=None, path=None, port=None, interval=None, timeout=None, unhealthy_threshold=None, - healthy_threshold=None): + healthy_threshold=None, + description=None): """ Create an Http Health Check. @@ -969,6 +970,9 @@ def ex_create_healthcheck(self, name, host=None, path=None, port=None, healthy. Defaults to 2. :type healthy_threshold: ``int`` + :keyword description: The description of the check. Defaults to None. + :type description: ``str`` + :return: Health Check object :rtype: :class:`GCEHealthCheck` """ @@ -976,6 +980,8 @@ def ex_create_healthcheck(self, name, host=None, path=None, port=None, hc_data['name'] = name if host: hc_data['host'] = host + if description: + hc_data['description'] = description # As of right now, the 'default' values aren't getting set when called # through the API, so set them explicitly hc_data['requestPath'] = path or '/' @@ -1613,16 +1619,25 @@ def ex_targetpool_add_node(self, targetpool, node): """ if not hasattr(targetpool, 'name'): targetpool = self.ex_get_targetpool(targetpool) - if not hasattr(node, 'name'): - node = self.ex_get_node(node, 'all') + if hasattr(node, 'name'): + node_uri = node.extra['selfLink'] + else: + if node.startswith('https://'): + node_uri = node + else: + node = self.ex_get_node(node, 'all') + node_uri = node.extra['selfLink'] - targetpool_data = {'instances': [{'instance': node.extra['selfLink']}]} + targetpool_data = {'instances': [{'instance': node_uri}]} request = 
'/regions/%s/targetPools/%s/addInstance' % ( targetpool.region.name, targetpool.name) self.connection.async_request(request, method='POST', data=targetpool_data) - targetpool.nodes.append(node) + if all((node_uri != n) and + (not hasattr(n, 'extra') or n.extra['selfLink'] != node_uri) + for n in targetpool.nodes): + targetpool.nodes.append(node) return True def ex_targetpool_add_healthcheck(self, targetpool, healthcheck): @@ -1667,10 +1682,17 @@ def ex_targetpool_remove_node(self, targetpool, node): """ if not hasattr(targetpool, 'name'): targetpool = self.ex_get_targetpool(targetpool) - if not hasattr(node, 'name'): - node = self.ex_get_node(node, 'all') - targetpool_data = {'instances': [{'instance': node.extra['selfLink']}]} + if hasattr(node, 'name'): + node_uri = node.extra['selfLink'] + else: + if node.startswith('https://'): + node_uri = node + else: + node = self.ex_get_node(node, 'all') + node_uri = node.extra['selfLink'] + + targetpool_data = {'instances': [{'instance': node_uri}]} request = '/regions/%s/targetPools/%s/removeInstance' % ( targetpool.region.name, targetpool.name) @@ -1679,7 +1701,8 @@ def ex_targetpool_remove_node(self, targetpool, node): # Remove node object from node list index = None for i, nd in enumerate(targetpool.nodes): - if nd.name == node.name: + if nd == node_uri or (hasattr(nd, 'extra') and + nd.extra['selfLink'] == node_uri): index = i break if index is not None: diff --git a/libcloud/test/compute/fixtures/gce/global_httpHealthChecks_lchealthcheck.json b/libcloud/test/compute/fixtures/gce/global_httpHealthChecks_lchealthcheck.json index 80c9aebfe7..6c4a8ce7c0 100644 --- a/libcloud/test/compute/fixtures/gce/global_httpHealthChecks_lchealthcheck.json +++ b/libcloud/test/compute/fixtures/gce/global_httpHealthChecks_lchealthcheck.json @@ -1,6 +1,7 @@ { "checkIntervalSec": 10, "creationTimestamp": "2013-09-02T22:18:01.180-07:00", + "description": "test healthcheck", "healthyThreshold": 3, "host": "lchost", "id": 
"06860603312991823381", diff --git a/libcloud/test/compute/test_gce.py b/libcloud/test/compute/test_gce.py index 74cc8b64e5..d4bdd3e4c9 100644 --- a/libcloud/test/compute/test_gce.py +++ b/libcloud/test/compute/test_gce.py @@ -231,13 +231,16 @@ def test_ex_create_healthcheck(self): 'interval': 10, 'timeout': 10, 'unhealthy_threshold': 4, - 'healthy_threshold': 3} + 'healthy_threshold': 3, + 'description': 'test healthcheck'} hc = self.driver.ex_create_healthcheck(healthcheck_name, **kwargs) self.assertTrue(isinstance(hc, GCEHealthCheck)) self.assertEqual(hc.name, healthcheck_name) self.assertEqual(hc.path, '/lc') self.assertEqual(hc.port, 8000) self.assertEqual(hc.interval, 10) + self.assertEqual(hc.extra['host'], 'lchost') + self.assertEqual(hc.extra['description'], 'test healthcheck') def test_ex_create_firewall(self): firewall_name = 'lcfirewall' @@ -413,10 +416,23 @@ def test_ex_targetpool_remove_add_node(self): self.assertTrue(remove_node) self.assertEqual(len(targetpool.nodes), 1) + add_node = self.driver.ex_targetpool_add_node(targetpool, node.extra['selfLink']) + self.assertTrue(add_node) + self.assertEqual(len(targetpool.nodes), 2) + + remove_node = self.driver.ex_targetpool_remove_node(targetpool, node.extra['selfLink']) + self.assertTrue(remove_node) + self.assertEqual(len(targetpool.nodes), 1) + add_node = self.driver.ex_targetpool_add_node(targetpool, node) self.assertTrue(add_node) self.assertEqual(len(targetpool.nodes), 2) + # check that duplicates are filtered + add_node = self.driver.ex_targetpool_add_node(targetpool, node.extra['selfLink']) + self.assertTrue(add_node) + self.assertEqual(len(targetpool.nodes), 2) + def test_ex_targetpool_remove_add_healthcheck(self): targetpool = self.driver.ex_get_targetpool('lctargetpool') healthcheck = self.driver.ex_get_healthcheck( From 6ab8fb5683f691bf4c5698cd4570ca2b1368da40 Mon Sep 17 00:00:00 2001 From: Evgeny Egorochkin Date: Tue, 1 Jul 2014 00:50:02 +0300 Subject: [PATCH 210/315] GCE: fix 
ex_targetpool_add_healthcheck and ex_targetpool_remove_healthcheck. The requests were malformed and thus did nothing. Signed-off-by: Tomaz Muraus --- libcloud/compute/drivers/gce.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/libcloud/compute/drivers/gce.py b/libcloud/compute/drivers/gce.py index c5050be7a5..9e65b92899 100644 --- a/libcloud/compute/drivers/gce.py +++ b/libcloud/compute/drivers/gce.py @@ -1658,7 +1658,7 @@ def ex_targetpool_add_healthcheck(self, targetpool, healthcheck): if not hasattr(healthcheck, 'name'): healthcheck = self.ex_get_healthcheck(healthcheck) - targetpool_data = {'healthCheck': healthcheck.extra['selfLink']} + targetpool_data = {'healthChecks': [{'healthCheck': healthcheck.extra['selfLink']}] } request = '/regions/%s/targetPools/%s/addHealthCheck' % ( targetpool.region.name, targetpool.name) @@ -1727,7 +1727,7 @@ def ex_targetpool_remove_healthcheck(self, targetpool, healthcheck): if not hasattr(healthcheck, 'name'): healthcheck = self.ex_get_healthcheck(healthcheck) - targetpool_data = {'healthCheck': healthcheck.extra['selfLink']} + targetpool_data = {'healthChecks': [{'healthCheck': healthcheck.extra['selfLink']}] } request = '/regions/%s/targetPools/%s/removeHealthCheck' % ( targetpool.region.name, targetpool.name) From 951fb161a4b5c67ca43e64cee9011bfbf2aca992 Mon Sep 17 00:00:00 2001 From: Evgeny Egorochkin Date: Tue, 1 Jul 2014 06:41:42 +0300 Subject: [PATCH 211/315] GCE: add a parameter to ex_create_address to promote specific ephemeral addresses to static ones. 
Signed-off-by: Tomaz Muraus --- libcloud/compute/drivers/gce.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/libcloud/compute/drivers/gce.py b/libcloud/compute/drivers/gce.py index 9e65b92899..f0966377bd 100644 --- a/libcloud/compute/drivers/gce.py +++ b/libcloud/compute/drivers/gce.py @@ -910,7 +910,7 @@ def ex_list_zones(self): list_zones = [self._to_zone(z) for z in response['items']] return list_zones - def ex_create_address(self, name, region=None): + def ex_create_address(self, name, region=None, address=None): """ Create a static address in a region. @@ -920,6 +920,10 @@ def ex_create_address(self, name, region=None): :keyword region: Name of region for the address (e.g. 'us-central1') :type region: ``str`` or :class:`GCERegion` + :keyword address: Ephemeral IP address to promote to a static one + (e.g. 'xxx.xxx.xxx.xxx') + :type address: ``str`` or ``None`` + :return: Static Address object :rtype: :class:`GCEAddress` """ @@ -930,6 +934,8 @@ def ex_create_address(self, name, region=None): raise ValueError('REGION_NOT_SPECIFIED', 'Region must be provided for an address') address_data = {'name': name} + if address: + address_data['address'] = address request = '/regions/%s/addresses' % (region.name) self.connection.async_request(request, method='POST', data=address_data) From 7fd9b3b3f7bfade226e831c8a29a2cbc4ccee97a Mon Sep 17 00:00:00 2001 From: Evgeny Egorochkin Date: Wed, 2 Jul 2014 06:50:26 +0300 Subject: [PATCH 212/315] GCE: add description parameter to ex_create_forwarding_rule, improve tests; fix documentation and test fixtures for health checks Signed-off-by: Tomaz Muraus --- libcloud/compute/drivers/gce.py | 10 ++++++++-- .../fixtures/gce/aggregated_forwardingRules.json | 1 + .../compute/fixtures/gce/global_httpHealthChecks.json | 1 + ...s_us-central1_forwardingRules_lcforwardingrule.json | 1 + libcloud/test/compute/test_gce.py | 9 ++++++++- 5 files changed, 19 insertions(+), 3 deletions(-) diff --git 
a/libcloud/compute/drivers/gce.py b/libcloud/compute/drivers/gce.py index f0966377bd..6bcd8b24fe 100644 --- a/libcloud/compute/drivers/gce.py +++ b/libcloud/compute/drivers/gce.py @@ -977,7 +977,7 @@ def ex_create_healthcheck(self, name, host=None, path=None, port=None, :type healthy_threshold: ``int`` :keyword description: The description of the check. Defaults to None. - :type description: ``str`` + :type description: ``str`` or ``None`` :return: Health Check object :rtype: :class:`GCEHealthCheck` @@ -1072,7 +1072,7 @@ def ex_create_firewall(self, name, allowed, network='default', def ex_create_forwarding_rule(self, name, targetpool, region=None, protocol='tcp', port_range=None, - address=None): + address=None, description=None): """ Create a forwarding rule. @@ -1097,6 +1097,10 @@ def ex_create_forwarding_rule(self, name, targetpool, region=None, in same region. :type address: ``str`` or :class:`GCEAddress` + :keyword description: The description of the forwarding rule. + Defaults to None. 
+ :type description: ``str`` or ``None`` + :return: Forwarding Rule object :rtype: :class:`GCEForwardingRule` """ @@ -1117,6 +1121,8 @@ def ex_create_forwarding_rule(self, name, targetpool, region=None, forwarding_rule_data['IPAddress'] = address.extra['selfLink'] if port_range: forwarding_rule_data['portRange'] = port_range + if description: + forwarding_rule_data['description'] = description request = '/regions/%s/forwardingRules' % (region.name) diff --git a/libcloud/test/compute/fixtures/gce/aggregated_forwardingRules.json b/libcloud/test/compute/fixtures/gce/aggregated_forwardingRules.json index 06a1a0e880..6447e5dd73 100644 --- a/libcloud/test/compute/fixtures/gce/aggregated_forwardingRules.json +++ b/libcloud/test/compute/fixtures/gce/aggregated_forwardingRules.json @@ -34,6 +34,7 @@ "id": "06342111469679701315", "kind": "compute#forwardingRule", "name": "lcforwardingrule", + "description": "test forwarding rule", "portRange": "8000-8500", "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/forwardingRules/lcforwardingrule", diff --git a/libcloud/test/compute/fixtures/gce/global_httpHealthChecks.json b/libcloud/test/compute/fixtures/gce/global_httpHealthChecks.json index e1f25a6864..97d215983a 100644 --- a/libcloud/test/compute/fixtures/gce/global_httpHealthChecks.json +++ b/libcloud/test/compute/fixtures/gce/global_httpHealthChecks.json @@ -24,6 +24,7 @@ "id": "022194976205566532", "kind": "compute#httpHealthCheck", "name": "lchealthcheck", + "description": "test healthcheck", "port": 9000, "requestPath": "/lc", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/global/httpHealthChecks/lchealthcheck", diff --git a/libcloud/test/compute/fixtures/gce/regions_us-central1_forwardingRules_lcforwardingrule.json b/libcloud/test/compute/fixtures/gce/regions_us-central1_forwardingRules_lcforwardingrule.json index 
6301a559da..ead67c4ff1 100644 --- a/libcloud/test/compute/fixtures/gce/regions_us-central1_forwardingRules_lcforwardingrule.json +++ b/libcloud/test/compute/fixtures/gce/regions_us-central1_forwardingRules_lcforwardingrule.json @@ -5,6 +5,7 @@ "id": "10901665092293158938", "kind": "compute#forwardingRule", "name": "lcforwardingrule", + "description": "test forwarding rule", "portRange": "8000-8500", "region": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1", "selfLink": "https://www.googleapis.com/compute/v1/projects/project_name/regions/us-central1/forwardingRules/lcforwardingrule", diff --git a/libcloud/test/compute/test_gce.py b/libcloud/test/compute/test_gce.py index d4bdd3e4c9..1912496261 100644 --- a/libcloud/test/compute/test_gce.py +++ b/libcloud/test/compute/test_gce.py @@ -255,11 +255,18 @@ def test_ex_create_forwarding_rule(self): fwr_name = 'lcforwardingrule' targetpool = 'lctargetpool' region = 'us-central1' + port_range='8000-8500' + description = 'test forwarding rule' fwr = self.driver.ex_create_forwarding_rule(fwr_name, targetpool, region=region, - port_range='8000-8500') + port_range=port_range, + description=description) self.assertTrue(isinstance(fwr, GCEForwardingRule)) self.assertEqual(fwr.name, fwr_name) + self.assertEqual(fwr.region.name, region) + self.assertEqual(fwr.protocol, 'TCP') + self.assertEqual(fwr.extra['portRange'], port_range) + self.assertEqual(fwr.extra['description'], description) def test_ex_create_network(self): network_name = 'lcnetwork' From d8e8987cfdf6eed01ca53ff432322e959d88fd32 Mon Sep 17 00:00:00 2001 From: Evgeny Egorochkin Date: Wed, 2 Jul 2014 11:00:21 +0300 Subject: [PATCH 213/315] GCE: fix creating a forwarding rule with a static IP Signed-off-by: Tomaz Muraus --- libcloud/compute/drivers/gce.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libcloud/compute/drivers/gce.py b/libcloud/compute/drivers/gce.py index 6bcd8b24fe..e3de515eac 100644 --- 
a/libcloud/compute/drivers/gce.py +++ b/libcloud/compute/drivers/gce.py @@ -1118,7 +1118,7 @@ def ex_create_forwarding_rule(self, name, targetpool, region=None, if address: if not hasattr(address, 'name'): address = self.ex_get_address(address, region) - forwarding_rule_data['IPAddress'] = address.extra['selfLink'] + forwarding_rule_data['IPAddress'] = address.address if port_range: forwarding_rule_data['portRange'] = port_range if description: From a48481e9cbb7bf6ff464475d295c10b1152d93da Mon Sep 17 00:00:00 2001 From: Evgeny Egorochkin Date: Wed, 2 Jul 2014 23:52:18 +0300 Subject: [PATCH 214/315] GCE: ex_create_firewall: allow creation og firewalls with sourceRanges = [], preserve behavior for sourceRanges = None, preserve default value. GCE documentation states that firewall allows traffic if it matches either sourceRanges or sourceTags values. Thus, sourceRanges = [] is a valid parameter value. Although it would be better to simply set ["0.0.0.0/0"] as the default, for minimal API breakage, None value is still supported. 
Signed-off-by: Tomaz Muraus --- libcloud/compute/drivers/gce.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/libcloud/compute/drivers/gce.py b/libcloud/compute/drivers/gce.py index e3de515eac..fc2bc82176 100644 --- a/libcloud/compute/drivers/gce.py +++ b/libcloud/compute/drivers/gce.py @@ -1058,7 +1058,9 @@ def ex_create_firewall(self, name, allowed, network='default', firewall_data['name'] = name firewall_data['allowed'] = allowed firewall_data['network'] = nw.extra['selfLink'] - firewall_data['sourceRanges'] = source_ranges or ['0.0.0.0/0'] + if source_ranges is None: + source_ranges = ['0.0.0.0/0'] + firewall_data['sourceRanges'] = source_ranges if source_tags is not None: firewall_data['sourceTags'] = source_tags if target_tags is not None: From e3de3b725e3da70f2b7e26d50f2802c17438aee4 Mon Sep 17 00:00:00 2001 From: Evgeny Egorochkin Date: Wed, 9 Jul 2014 02:57:44 +0300 Subject: [PATCH 215/315] GCE: fix parameter name for protocol in ex_create_forwarding_rule Signed-off-by: Tomaz Muraus --- libcloud/compute/drivers/gce.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libcloud/compute/drivers/gce.py b/libcloud/compute/drivers/gce.py index fc2bc82176..30b513974b 100644 --- a/libcloud/compute/drivers/gce.py +++ b/libcloud/compute/drivers/gce.py @@ -1116,7 +1116,7 @@ def ex_create_forwarding_rule(self, name, targetpool, region=None, forwarding_rule_data['name'] = name forwarding_rule_data['region'] = region.extra['selfLink'] forwarding_rule_data['target'] = targetpool.extra['selfLink'] - forwarding_rule_data['protocol'] = protocol.upper() + forwarding_rule_data['IPProtocol'] = protocol.upper() if address: if not hasattr(address, 'name'): address = self.ex_get_address(address, region) From 125d8cd047bd1d385f0359bb3e61d52acc00f103 Mon Sep 17 00:00:00 2001 From: Evgeny Egorochkin Date: Sat, 20 Sep 2014 02:24:06 +0300 Subject: [PATCH 216/315] GCE: lint Signed-off-by: Tomaz Muraus --- libcloud/compute/drivers/gce.py | 8 
++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/libcloud/compute/drivers/gce.py b/libcloud/compute/drivers/gce.py index 30b513974b..5c0035b1fb 100644 --- a/libcloud/compute/drivers/gce.py +++ b/libcloud/compute/drivers/gce.py @@ -1672,7 +1672,9 @@ def ex_targetpool_add_healthcheck(self, targetpool, healthcheck): if not hasattr(healthcheck, 'name'): healthcheck = self.ex_get_healthcheck(healthcheck) - targetpool_data = {'healthChecks': [{'healthCheck': healthcheck.extra['selfLink']}] } + targetpool_data = { + 'healthChecks': [{'healthCheck': healthcheck.extra['selfLink']}] + } request = '/regions/%s/targetPools/%s/addHealthCheck' % ( targetpool.region.name, targetpool.name) @@ -1741,7 +1743,9 @@ def ex_targetpool_remove_healthcheck(self, targetpool, healthcheck): if not hasattr(healthcheck, 'name'): healthcheck = self.ex_get_healthcheck(healthcheck) - targetpool_data = {'healthChecks': [{'healthCheck': healthcheck.extra['selfLink']}] } + targetpool_data = { + 'healthChecks': [{'healthCheck': healthcheck.extra['selfLink']}] + } request = '/regions/%s/targetPools/%s/removeHealthCheck' % ( targetpool.region.name, targetpool.name) From df18ca88e195ad318f6979bf73d76460ccb6adfa Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Sat, 20 Sep 2014 22:07:19 +0200 Subject: [PATCH 217/315] Fix lint. 
--- libcloud/test/compute/test_gce.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libcloud/test/compute/test_gce.py b/libcloud/test/compute/test_gce.py index 1912496261..61dab263c3 100644 --- a/libcloud/test/compute/test_gce.py +++ b/libcloud/test/compute/test_gce.py @@ -255,7 +255,7 @@ def test_ex_create_forwarding_rule(self): fwr_name = 'lcforwardingrule' targetpool = 'lctargetpool' region = 'us-central1' - port_range='8000-8500' + port_range = '8000-8500' description = 'test forwarding rule' fwr = self.driver.ex_create_forwarding_rule(fwr_name, targetpool, region=region, From 51189eff19c5240aa47b659298b478acfed7b3e2 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Sat, 20 Sep 2014 22:10:10 +0200 Subject: [PATCH 218/315] Update CHANGES. Closes #360 --- CHANGES.rst | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/CHANGES.rst b/CHANGES.rst index e9ac0fd9f6..6898eb77d1 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -102,6 +102,11 @@ Compute (LIBCLOUD-589, GITHUB-352) [Matt Baldwin] +- Various improvements and bugs fixes in the GCE driver. For a list, see + https://github.com/apache/libcloud/pull/360/commits + (GITHUB-360) + [Evgeny Egorochkin] + Storage ~~~~~~~ From ad73a0c91920fc04cc27b52e17633334295ae6f9 Mon Sep 17 00:00:00 2001 From: Andy Grimm Date: Mon, 22 Sep 2014 15:22:34 -0400 Subject: [PATCH 219/315] Add virtualization type option for EC2 image registration Closes #361 Signed-off-by: Tomaz Muraus --- CHANGES.rst | 5 +++++ libcloud/compute/drivers/ec2.py | 10 +++++++++- 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index 6898eb77d1..54bf069279 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -107,6 +107,11 @@ Compute (GITHUB-360) [Evgeny Egorochkin] +- Allow user to specify virtualization type when registering an EC2 image by + passing ``virtualization_type`` argument to the ``ex_register_image`` method. 
+ (GITHUB-361) + [Andy Grimm] + Storage ~~~~~~~ diff --git a/libcloud/compute/drivers/ec2.py b/libcloud/compute/drivers/ec2.py index 693e5c3e7c..6d98c9a6d5 100644 --- a/libcloud/compute/drivers/ec2.py +++ b/libcloud/compute/drivers/ec2.py @@ -2573,7 +2573,7 @@ def delete_image(self, image): def ex_register_image(self, name, description=None, architecture=None, image_location=None, root_device_name=None, block_device_mapping=None, kernel_id=None, - ramdisk_id=None): + ramdisk_id=None, virtualization_type=None): """ Registers an Amazon Machine Image based off of an EBS-backed instance. Can also be used to create images from snapshots. More information @@ -2608,6 +2608,11 @@ def ex_register_image(self, name, description=None, architecture=None, :param ramdisk_id: RAM disk for AMI (optional) :type ramdisk_id: ``str`` + :param virtualization_type: The type of virtualization for the + AMI you are registering, paravirt + or hvm (optional) + :type virtualization_type: ``str`` + :rtype: :class:`NodeImage` """ @@ -2636,6 +2641,9 @@ def ex_register_image(self, name, description=None, architecture=None, if ramdisk_id is not None: params['RamDiskId'] = ramdisk_id + if virtualization_type is not None: + params['VirtualizationType'] = virtualization_type + image = self._to_image( self.connection.request(self.path, params=params).object ) From a6961cef1864c7dc81fd04f76dde13f14991a3fc Mon Sep 17 00:00:00 2001 From: Katriel Traum Date: Thu, 11 Sep 2014 10:15:31 +0300 Subject: [PATCH 220/315] Added ex_create_image for the GCE compute provider [LIBCLOUD-661] Added ex_create_image for the GCE compute provider - Fixes to docstring after pull request review Closes #358 Signed-off-by: Tomaz Muraus --- CHANGES.rst | 4 ++ libcloud/compute/drivers/gce.py | 61 +++++++++++++++++++++++++++++++ libcloud/test/compute/test_gce.py | 10 ++++- 3 files changed, 74 insertions(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index 54bf069279..fb424e2ff4 100644 --- a/CHANGES.rst +++ b/CHANGES.rst 
@@ -112,6 +112,10 @@ Compute (GITHUB-361) [Andy Grimm] +- Add ``ex_create_image`` method to the GCE driver. + (GITHUB-358, LIBCLOUD-611) + [Katriel Traum] + Storage ~~~~~~~ diff --git a/libcloud/compute/drivers/gce.py b/libcloud/compute/drivers/gce.py index 5c0035b1fb..bbcfbb8f96 100644 --- a/libcloud/compute/drivers/gce.py +++ b/libcloud/compute/drivers/gce.py @@ -1133,6 +1133,67 @@ def ex_create_forwarding_rule(self, name, targetpool, region=None, return self.ex_get_forwarding_rule(name) + def ex_create_image(self, name, volume, description=None, + use_existing=True, wait_for_completion=True): + """ + Create an image from the provided volume. + + :param name: The name of the image to create. + :type name: ``str`` + + :param volume: The volume to use to create the image, or the + Google Cloud Storage URI + :type volume: ``str`` or :class:`StorageVolume` + + :keyword description: Description of the new Image + :type description: ``str`` + + :keyword use_existing: If True and an image with the given name + already exists, return an object for that + image instead of attempting to create + a new image. 
+ :type use_existing: ``bool`` + + :keyword wait_for_completion: If True, wait until the new image is + created before returning a new NodeImage + Otherwise, return a new NodeImage + instance, and let the user track the + creation progress + :type wait_for_completion: ``bool`` + + :return: A GCENodeImage object for the new image + :rtype: :class:`GCENodeImage` + + """ + image_data = {} + image_data['name'] = name + image_data['description'] = description + if isinstance(volume, StorageVolume): + image_data['sourceDisk'] = volume.extra['selfLink'] + image_data['zone'] = volume.extra['zone'].name + elif isinstance(volume, str) and \ + volume.startswith('https://') and volume.endswith('tar.gz'): + image_data['rawDisk'] = {'source': volume, 'containerType': 'TAR'} + else: + raise ValueError('Source must be instance of StorageVolume or URI') + + request = '/global/images' + + try: + if wait_for_completion: + self.connection.async_request(request, method='POST', + data=image_data) + else: + self.connection.request(request, method='POST', + data=image_data) + + except ResourceExistsError: + e = sys.exc_info()[1] + if not use_existing: + raise e + + return self.ex_get_image(name) + def ex_create_network(self, name, cidr): """ Create a network. 
diff --git a/libcloud/test/compute/test_gce.py b/libcloud/test/compute/test_gce.py index 61dab263c3..730ef27006 100644 --- a/libcloud/test/compute/test_gce.py +++ b/libcloud/test/compute/test_gce.py @@ -27,7 +27,8 @@ GCEAddress, GCEHealthCheck, GCEFirewall, GCEForwardingRule, GCENetwork, - GCEZone) + GCEZone, + GCENodeImage) from libcloud.common.google import (GoogleBaseAuthConnection, GoogleInstalledAppAuthConnection, GoogleBaseConnection, @@ -242,6 +243,13 @@ def test_ex_create_healthcheck(self): self.assertEqual(hc.extra['host'], 'lchost') self.assertEqual(hc.extra['description'], 'test healthcheck') + def test_ex_create_image(self): + volume = self.driver.ex_get_volume('lcdisk') + image = self.driver.ex_create_image('coreos', volume) + self.assertTrue(isinstance(image, GCENodeImage)) + self.assertEquals(image.name, 'coreos') + self.assertEquals(image.extra['description'], 'CoreOS test image') + def test_ex_create_firewall(self): firewall_name = 'lcfirewall' allowed = [{'IPProtocol': 'tcp', 'ports': ['4567']}] From 69f84e61abad3ab2667ed2203471e141cda2aeac Mon Sep 17 00:00:00 2001 From: gigimon Date: Wed, 17 Sep 2014 15:57:30 +0400 Subject: [PATCH 221/315] LIBCLOUD-616: Add some methods to cloudstack driver (create_volume_snapshot, list_snapshots, destroy_volume_snapshot, create_snapshot_template, ex_list_os_types) Signed-off-by: Sebastien Goasguen This closes #363 --- CHANGES.rst | 6 + libcloud/compute/drivers/cloudstack.py | 112 +++++++++++++++++- .../cloudstack/createSnapshot_default.json | 1 + .../cloudstack/createTemplate_default.json | 1 + .../cloudstack/deleteSnapshot_default.json | 1 + .../cloudstack/listOsTypes_default.json | 1 + .../cloudstack/listSnapshots_default.json | 1 + .../queryAsyncJobResult_1300001.json | 1 + .../queryAsyncJobResult_1300002.json | 1 + .../queryAsyncJobResult_1300003.json | 1 + libcloud/test/compute/test_cloudstack.py | 42 +++++++ 11 files changed, 167 insertions(+), 1 deletion(-) create mode 100644 
libcloud/test/compute/fixtures/cloudstack/createSnapshot_default.json create mode 100644 libcloud/test/compute/fixtures/cloudstack/createTemplate_default.json create mode 100644 libcloud/test/compute/fixtures/cloudstack/deleteSnapshot_default.json create mode 100644 libcloud/test/compute/fixtures/cloudstack/listOsTypes_default.json create mode 100644 libcloud/test/compute/fixtures/cloudstack/listSnapshots_default.json create mode 100644 libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_1300001.json create mode 100644 libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_1300002.json create mode 100644 libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_1300003.json diff --git a/CHANGES.rst b/CHANGES.rst index fb424e2ff4..372d271a7a 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -116,6 +116,12 @@ Compute (GITHUB-358, LIBCLOUD-611) [Katriel Traum] +- Add some methods to CloudStack driver: + create_volume_snapshot, list_snapshots, destroy_volume_snapshot + create_snapshot_template, ex_list_os_types) + (GITHUB-363, LIBCLOUD-616) + [Oleg Suharev] + Storage ~~~~~~~ diff --git a/libcloud/compute/drivers/cloudstack.py b/libcloud/compute/drivers/cloudstack.py index 21d2fefe7b..858553db24 100644 --- a/libcloud/compute/drivers/cloudstack.py +++ b/libcloud/compute/drivers/cloudstack.py @@ -24,7 +24,7 @@ from libcloud.compute.providers import Provider from libcloud.common.cloudstack import CloudStackDriverMixIn from libcloud.compute.base import Node, NodeDriver, NodeImage, NodeLocation -from libcloud.compute.base import NodeSize, StorageVolume +from libcloud.compute.base import NodeSize, StorageVolume, VolumeSnapshot from libcloud.compute.base import KeyPair from libcloud.compute.types import NodeState, LibcloudError from libcloud.compute.types import KeyPairDoesNotExistError @@ -193,6 +193,10 @@ 'transform_func': int }, 'instance_id': { + 'key_name': 'virtualmachineid', + 'transform_func': str + }, + 'serviceoffering_id': { 'key_name': 
'serviceofferingid', 'transform_func': str }, @@ -2253,6 +2257,112 @@ def ex_delete_tags(self, resource_ids, resource_type, tag_keys): return True + def list_snapshots(self): + """ + Describe all snapshots. + + :rtype: ``list`` of :class:`VolumeSnapshot` + """ + snapshots = self._sync_request('listSnapshots', + method='GET') + list_snapshots = [] + + for snap in snapshots['snapshot']: + list_snapshots.append(self._to_snapshot(snap)) + return list_snapshots + + def create_volume_snapshot(self, volume): + """ + Create snapshot from volume + + :param volume: Instance of ``StorageVolume`` + :type volume: ``StorageVolume`` + + :rtype: :class:`VolumeSnapshot` + """ + snapshot = self._async_request(command='createSnapshot', + params={'volumeid': volume.id}, + method='GET') + return self._to_snapshot(snapshot['snapshot']) + + def destroy_volume_snapshot(self, snapshot): + """ + Destroy snapshot + + :param snapshot: Instance of ``VolumeSnapshot`` + :type volume: ``VolumeSnapshot`` + + :rtype: ``bool`` + """ + self._async_request(command='deleteSnapshot', + params={'id': snapshot.id}, + method='GET') + return True + + def ex_create_snapshot_template(self, snapshot, name, ostypeid, + displaytext=None): + """ + Create a template from a snapshot + + :param snapshot: Instance of ``VolumeSnapshot`` + :type volume: ``VolumeSnapshot`` + + :param name: the name of the template + :type name: ``str`` + + :param name: the os type id + :type name: ``str`` + + :param name: the display name of the template + :type name: ``str`` + + :rtype: :class:`NodeImage` + """ + if not displaytext: + displaytext = name + resp = self._async_request(command='createTemplate', + params={ + 'displaytext': displaytext, + 'name': name, + 'ostypeid': ostypeid, + 'snapshotid': snapshot.id}) + img = resp.get('template') + extra = { + 'hypervisor': img['hypervisor'], + 'format': img['format'], + 'os': img['ostypename'], + 'displaytext': img['displaytext'] + } + return NodeImage(id=img['id'], + name=img['name'], 
+ driver=self.connection.driver, + extra=extra) + + def ex_list_os_types(self): + """ + List all registered os types (needed for snapshot creation) + + :rtype: ``list`` + """ + ostypes = self._sync_request('listOsTypes') + return ostypes['ostype'] + + def _to_snapshot(self, data): + """ + Create snapshot object from data + + :param data: Node data object. + :type data: ``dict`` + + :rtype: :class:`VolumeSnapshot` + """ + extra = { + 'tags': data.get('tags', None), + 'name': data.get('name', None), + 'volume_id': data.get('volumeid', None), + } + return VolumeSnapshot(data['id'], driver=self, extra=extra) + def _to_node(self, data, public_ips=None): """ :param data: Node data object. diff --git a/libcloud/test/compute/fixtures/cloudstack/createSnapshot_default.json b/libcloud/test/compute/fixtures/cloudstack/createSnapshot_default.json new file mode 100644 index 0000000000..4dc70bd7e4 --- /dev/null +++ b/libcloud/test/compute/fixtures/cloudstack/createSnapshot_default.json @@ -0,0 +1 @@ +{ "createsnapshotresponse" : {"jobid":1300001,"id":190547} } \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/cloudstack/createTemplate_default.json b/libcloud/test/compute/fixtures/cloudstack/createTemplate_default.json new file mode 100644 index 0000000000..3314257f7a --- /dev/null +++ b/libcloud/test/compute/fixtures/cloudstack/createTemplate_default.json @@ -0,0 +1 @@ +{ "createtemplateresponse" : {"jobid":1300003,"id":10260} } \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/cloudstack/deleteSnapshot_default.json b/libcloud/test/compute/fixtures/cloudstack/deleteSnapshot_default.json new file mode 100644 index 0000000000..285e9873f2 --- /dev/null +++ b/libcloud/test/compute/fixtures/cloudstack/deleteSnapshot_default.json @@ -0,0 +1 @@ +{ "deletesnapshotresponse" : {"jobid":1300002} } \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/cloudstack/listOsTypes_default.json 
b/libcloud/test/compute/fixtures/cloudstack/listOsTypes_default.json new file mode 100644 index 0000000000..e237a334dc --- /dev/null +++ b/libcloud/test/compute/fixtures/cloudstack/listOsTypes_default.json @@ -0,0 +1 @@ +{ "listostypesresponse" : { "count":146 ,"ostype" : [ {"id":69,"oscategoryid":7,"description":"Asianux 3(32-bit)"}, {"id":70,"oscategoryid":7,"description":"Asianux 3(64-bit)"}, {"id":1,"oscategoryid":1,"description":"CentOS 4.5 (32-bit)"}, {"id":2,"oscategoryid":1,"description":"CentOS 4.6 (32-bit)"}, {"id":3,"oscategoryid":1,"description":"CentOS 4.7 (32-bit)"}, {"id":4,"oscategoryid":1,"description":"CentOS 4.8 (32-bit)"}, {"id":5,"oscategoryid":1,"description":"CentOS 5.0 (32-bit)"}, {"id":6,"oscategoryid":1,"description":"CentOS 5.0 (64-bit)"}, {"id":7,"oscategoryid":1,"description":"CentOS 5.1 (32-bit)"}, {"id":8,"oscategoryid":1,"description":"CentOS 5.1 (64-bit)"}, {"id":9,"oscategoryid":1,"description":"CentOS 5.2 (32-bit)"}, {"id":10,"oscategoryid":1,"description":"CentOS 5.2 (64-bit)"}, {"id":11,"oscategoryid":1,"description":"CentOS 5.3 (32-bit)"}, {"id":12,"oscategoryid":1,"description":"CentOS 5.3 (64-bit)"}, {"id":13,"oscategoryid":1,"description":"CentOS 5.4 (32-bit)"}, {"id":14,"oscategoryid":1,"description":"CentOS 5.4 (64-bit)"}, {"id":111,"oscategoryid":1,"description":"CentOS 5.5 (32-bit)"}, {"id":112,"oscategoryid":1,"description":"CentOS 5.5 (64-bit)"}, {"id":73,"oscategoryid":2,"description":"Debian GNU/Linux 4(32-bit)"}, {"id":74,"oscategoryid":2,"description":"Debian GNU/Linux 4(64-bit)"}, {"id":72,"oscategoryid":2,"description":"Debian GNU/Linux 5(64-bit)"}, {"id":15,"oscategoryid":2,"description":"Debian GNU/Linux 5.0 (32-bit)"}, {"id":132,"oscategoryid":2,"description":"Debian GNU/Linux 6(32-bit)"}, {"id":133,"oscategoryid":2,"description":"Debian GNU/Linux 6(64-bit)"}, {"id":102,"oscategoryid":6,"description":"DOS"}, {"id":118,"oscategoryid":4,"description":"Fedora 10"}, {"id":117,"oscategoryid":4,"description":"Fedora 
11"}, {"id":116,"oscategoryid":4,"description":"Fedora 12"}, {"id":115,"oscategoryid":4,"description":"Fedora 13"}, {"id":120,"oscategoryid":4,"description":"Fedora 8"}, {"id":119,"oscategoryid":4,"description":"Fedora 9"}, {"id":83,"oscategoryid":9,"description":"FreeBSD (32-bit)"}, {"id":84,"oscategoryid":9,"description":"FreeBSD (64-bit)"}, {"id":92,"oscategoryid":6,"description":"Microsoft Small Bussiness Server 2003"}, {"id":138,"oscategoryid":7,"description":"None"}, {"id":78,"oscategoryid":8,"description":"Novell Netware 5.1"}, {"id":77,"oscategoryid":8,"description":"Novell Netware 6.x"}, {"id":68,"oscategoryid":7,"description":"Open Enterprise Server"}, {"id":16,"oscategoryid":3,"description":"Oracle Enterprise Linux 5.0 (32-bit)"}, {"id":17,"oscategoryid":3,"description":"Oracle Enterprise Linux 5.0 (64-bit)"}, {"id":18,"oscategoryid":3,"description":"Oracle Enterprise Linux 5.1 (32-bit)"}, {"id":19,"oscategoryid":3,"description":"Oracle Enterprise Linux 5.1 (64-bit)"}, {"id":20,"oscategoryid":3,"description":"Oracle Enterprise Linux 5.2 (32-bit)"}, {"id":21,"oscategoryid":3,"description":"Oracle Enterprise Linux 5.2 (64-bit)"}, {"id":22,"oscategoryid":3,"description":"Oracle Enterprise Linux 5.3 (32-bit)"}, {"id":23,"oscategoryid":3,"description":"Oracle Enterprise Linux 5.3 (64-bit)"}, {"id":24,"oscategoryid":3,"description":"Oracle Enterprise Linux 5.4 (32-bit)"}, {"id":25,"oscategoryid":3,"description":"Oracle Enterprise Linux 5.4 (64-bit)"}, {"id":134,"oscategoryid":3,"description":"Oracle Enterprise Linux 5.5 (32-bit)"}, {"id":135,"oscategoryid":3,"description":"Oracle Enterprise Linux 5.5 (64-bit)"}, {"id":104,"oscategoryid":7,"description":"OS/2"}, {"id":60,"oscategoryid":7,"description":"Other (32-bit)"}, {"id":103,"oscategoryid":7,"description":"Other (64-bit)"}, {"id":75,"oscategoryid":7,"description":"Other 2.6x Linux (32-bit)"}, {"id":76,"oscategoryid":7,"description":"Other 2.6x Linux (64-bit)"}, 
{"id":200,"oscategoryid":1,"description":"Other CentOS (32-bit)"}, {"id":201,"oscategoryid":1,"description":"Other CentOS (64-bit)"}, {"id":98,"oscategoryid":7,"description":"Other Linux (32-bit)"}, {"id":99,"oscategoryid":7,"description":"Other Linux (64-bit)"}, {"id":139,"oscategoryid":7,"description":"Other PV (32-bit)"}, {"id":140,"oscategoryid":7,"description":"Other PV (64-bit)"}, {"id":202,"oscategoryid":5,"description":"Other SUSE Linux(32-bit)"}, {"id":203,"oscategoryid":5,"description":"Other SUSE Linux(64-bit)"}, {"id":59,"oscategoryid":10,"description":"Other Ubuntu (32-bit)"}, {"id":100,"oscategoryid":10,"description":"Other Ubuntu (64-bit)"}, {"id":131,"oscategoryid":10,"description":"Red Hat Enterprise Linux 2"}, {"id":66,"oscategoryid":4,"description":"Red Hat Enterprise Linux 3(32-bit)"}, {"id":67,"oscategoryid":4,"description":"Red Hat Enterprise Linux 3(64-bit)"}, {"id":106,"oscategoryid":4,"description":"Red Hat Enterprise Linux 4(64-bit)"}, {"id":26,"oscategoryid":4,"description":"Red Hat Enterprise Linux 4.5 (32-bit)"}, {"id":27,"oscategoryid":4,"description":"Red Hat Enterprise Linux 4.6 (32-bit)"}, {"id":28,"oscategoryid":4,"description":"Red Hat Enterprise Linux 4.7 (32-bit)"}, {"id":29,"oscategoryid":4,"description":"Red Hat Enterprise Linux 4.8 (32-bit)"}, {"id":30,"oscategoryid":4,"description":"Red Hat Enterprise Linux 5.0 (32-bit)"}, {"id":31,"oscategoryid":4,"description":"Red Hat Enterprise Linux 5.0 (64-bit)"}, {"id":32,"oscategoryid":4,"description":"Red Hat Enterprise Linux 5.1 (32-bit)"}, {"id":33,"oscategoryid":4,"description":"Red Hat Enterprise Linux 5.1 (64-bit)"}, {"id":34,"oscategoryid":4,"description":"Red Hat Enterprise Linux 5.2 (32-bit)"}, {"id":35,"oscategoryid":4,"description":"Red Hat Enterprise Linux 5.2 (64-bit)"}, {"id":36,"oscategoryid":4,"description":"Red Hat Enterprise Linux 5.3 (32-bit)"}, {"id":37,"oscategoryid":4,"description":"Red Hat Enterprise Linux 5.3 (64-bit)"}, 
{"id":38,"oscategoryid":4,"description":"Red Hat Enterprise Linux 5.4 (32-bit)"}, {"id":39,"oscategoryid":4,"description":"Red Hat Enterprise Linux 5.4 (64-bit)"}, {"id":113,"oscategoryid":4,"description":"Red Hat Enterprise Linux 5.5 (32-bit)"}, {"id":114,"oscategoryid":4,"description":"Red Hat Enterprise Linux 5.5 (64-bit)"}, {"id":136,"oscategoryid":4,"description":"Red Hat Enterprise Linux 6.0 (32-bit)"}, {"id":137,"oscategoryid":4,"description":"Red Hat Enterprise Linux 6.0 (64-bit)"}, {"id":85,"oscategoryid":9,"description":"SCO OpenServer 5"}, {"id":86,"oscategoryid":9,"description":"SCO UnixWare 7"}, {"id":79,"oscategoryid":9,"description":"Sun Solaris 10(32-bit)"}, {"id":80,"oscategoryid":9,"description":"Sun Solaris 10(64-bit)"}, {"id":142,"oscategoryid":9,"description":"Sun Solaris 11 (32-bit)"}, {"id":141,"oscategoryid":9,"description":"Sun Solaris 11 (64-bit)"}, {"id":82,"oscategoryid":9,"description":"Sun Solaris 8(Experimental)"}, {"id":81,"oscategoryid":9,"description":"Sun Solaris 9(Experimental)"}, {"id":109,"oscategoryid":5,"description":"SUSE Linux Enterprise 10(32-bit)"}, {"id":110,"oscategoryid":5,"description":"SUSE Linux Enterprise 10(64-bit)"}, {"id":96,"oscategoryid":5,"description":"SUSE Linux Enterprise 8(32-bit)"}, {"id":97,"oscategoryid":5,"description":"SUSE Linux Enterprise 8(64-bit)"}, {"id":107,"oscategoryid":5,"description":"SUSE Linux Enterprise 9(32-bit)"}, {"id":108,"oscategoryid":5,"description":"SUSE Linux Enterprise 9(64-bit)"}, {"id":41,"oscategoryid":5,"description":"SUSE Linux Enterprise Server 10 SP1 (32-bit)"}, {"id":42,"oscategoryid":5,"description":"SUSE Linux Enterprise Server 10 SP1 (64-bit)"}, {"id":43,"oscategoryid":5,"description":"SUSE Linux Enterprise Server 10 SP2 (32-bit)"}, {"id":44,"oscategoryid":5,"description":"SUSE Linux Enterprise Server 10 SP2 (64-bit)"}, {"id":45,"oscategoryid":5,"description":"SUSE Linux Enterprise Server 10 SP3 (64-bit)"}, {"id":46,"oscategoryid":5,"description":"SUSE Linux 
Enterprise Server 11 (32-bit)"}, {"id":47,"oscategoryid":5,"description":"SUSE Linux Enterprise Server 11 (64-bit)"}, {"id":40,"oscategoryid":5,"description":"SUSE Linux Enterprise Server 9 SP4 (32-bit)"}, {"id":121,"oscategoryid":10,"description":"Ubuntu 10.04 (32-bit)"}, {"id":126,"oscategoryid":10,"description":"Ubuntu 10.04 (64-bit)"}, {"id":125,"oscategoryid":10,"description":"Ubuntu 8.04 (32-bit)"}, {"id":130,"oscategoryid":10,"description":"Ubuntu 8.04 (64-bit)"}, {"id":124,"oscategoryid":10,"description":"Ubuntu 8.10 (32-bit)"}, {"id":129,"oscategoryid":10,"description":"Ubuntu 8.10 (64-bit)"}, {"id":123,"oscategoryid":10,"description":"Ubuntu 9.04 (32-bit)"}, {"id":128,"oscategoryid":10,"description":"Ubuntu 9.04 (64-bit)"}, {"id":122,"oscategoryid":10,"description":"Ubuntu 9.10 (32-bit)"}, {"id":127,"oscategoryid":10,"description":"Ubuntu 9.10 (64-bit)"}, {"id":95,"oscategoryid":6,"description":"Windows 2000 Advanced Server"}, {"id":105,"oscategoryid":6,"description":"Windows 2000 Professional"}, {"id":61,"oscategoryid":6,"description":"Windows 2000 Server"}, {"id":55,"oscategoryid":6,"description":"Windows 2000 Server SP4 (32-bit)"}, {"id":65,"oscategoryid":6,"description":"Windows 3.1"}, {"id":48,"oscategoryid":6,"description":"Windows 7 (32-bit)"}, {"id":49,"oscategoryid":6,"description":"Windows 7 (64-bit)"}, {"id":63,"oscategoryid":6,"description":"Windows 95"}, {"id":62,"oscategoryid":6,"description":"Windows 98"}, {"id":64,"oscategoryid":6,"description":"Windows NT 4"}, {"id":204,"oscategoryid":6,"description":"Windows PV"}, {"id":87,"oscategoryid":6,"description":"Windows Server 2003 DataCenter Edition(32-bit)"}, {"id":88,"oscategoryid":6,"description":"Windows Server 2003 DataCenter Edition(64-bit)"}, {"id":50,"oscategoryid":6,"description":"Windows Server 2003 Enterprise Edition(32-bit)"}, {"id":51,"oscategoryid":6,"description":"Windows Server 2003 Enterprise Edition(64-bit)"}, {"id":89,"oscategoryid":6,"description":"Windows Server 2003 
Standard Edition(32-bit)"}, {"id":90,"oscategoryid":6,"description":"Windows Server 2003 Standard Edition(64-bit)"}, {"id":91,"oscategoryid":6,"description":"Windows Server 2003 Web Edition"}, {"id":52,"oscategoryid":6,"description":"Windows Server 2008 (32-bit)"}, {"id":53,"oscategoryid":6,"description":"Windows Server 2008 (64-bit)"}, {"id":54,"oscategoryid":6,"description":"Windows Server 2008 R2 (64-bit)"}, {"id":56,"oscategoryid":6,"description":"Windows Vista (32-bit)"}, {"id":101,"oscategoryid":6,"description":"Windows Vista (64-bit)"}, {"id":93,"oscategoryid":6,"description":"Windows XP (32-bit)"}, {"id":94,"oscategoryid":6,"description":"Windows XP (64-bit)"}, {"id":57,"oscategoryid":6,"description":"Windows XP SP2 (32-bit)"}, {"id":58,"oscategoryid":6,"description":"Windows XP SP3 (32-bit)"} ] } } \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/cloudstack/listSnapshots_default.json b/libcloud/test/compute/fixtures/cloudstack/listSnapshots_default.json new file mode 100644 index 0000000000..d86f22b97a --- /dev/null +++ b/libcloud/test/compute/fixtures/cloudstack/listSnapshots_default.json @@ -0,0 +1 @@ +{"listsnapshotsresponse" : { "count":3 ,"snapshot" : [ {"id":188402,"account":"User1","domainid":2345,"domain":"70000001000","snapshottype":"MANUAL","volumeid":89341,"volumename":"ROOT-12344","volumetype":"ROOT","created":"2014-09-17T19:55:48+0900","name":"i-123-87654-VM_ROOT-12344_20140917105548","intervaltype":"MANUAL","state":"BackedUp"}, {"id":188401,"account":"User1","domainid":2345,"domain":"70000001100","snapshottype":"MANUAL","volumeid":89342,"volumename":"ROOT-12345","volumetype":"ROOT","created":"2014-09-17T19:44:41+0900","name":"i-123-87654-VM_ROOT-12345_20140917104441","intervaltype":"MANUAL","state":"BackedUp"}, 
{"id":188400,"account":"User1","domainid":2345,"domain":"70000001100","snapshottype":"MANUAL","volumeid":89343,"volumename":"ROOT-71128","volumetype":"ROOT","created":"2014-09-17T19:34:58+0900","name":"i-882-71128-VM_ROOT-71128_20140917103458","intervaltype":"MANUAL","state":"BackedUp"}]}} \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_1300001.json b/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_1300001.json new file mode 100644 index 0000000000..076fd61b04 --- /dev/null +++ b/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_1300001.json @@ -0,0 +1 @@ +{ "queryasyncjobresultresponse" : {"jobid":1300001,"jobstatus":1,"jobprocstatus":0,"jobresultcode":0,"jobresulttype":"object","jobresult":{"snapshot":{"id":190547,"account":"User1","domainid":2345,"domain":"70000001000","snapshottype":"MANUAL","volumeid":"fe1ada16-57a0-40ae-b577-01a153690fb4","volumename":"ROOT-12345","volumetype":"ROOT","created":"2014-09-22T19:09:47+0900","name":"i-123-87654-VM_ROOT-23456_20140917105548","intervaltype":"MANUAL","state":"CreatedOnPrimary"}}} } \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_1300002.json b/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_1300002.json new file mode 100644 index 0000000000..c812b7b235 --- /dev/null +++ b/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_1300002.json @@ -0,0 +1 @@ +{ "queryasyncjobresultresponse" : {"jobid":1300002,"jobstatus":1,"jobprocstatus":0,"jobresultcode":0,"jobresulttype":"object","jobresult":{"success":true}} } \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_1300003.json b/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_1300003.json new file mode 100644 index 0000000000..2ae11994a1 --- /dev/null +++ b/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_1300003.json @@ -0,0 +1 @@ +{ 
"queryasyncjobresultresponse" : {"jobid":1300003,"jobstatus":1,"jobprocstatus":0,"jobresultcode":0,"jobresulttype":"object","jobresult":{"template":{"id":10260,"name":"test-libcloud-template","displaytext":"test-libcloud-template","ispublic":false,"created":"2014-09-22T19:14:28+0900","isready":true,"passwordenabled":false,"format":"OVA","isfeatured":false,"crossZones":false,"ostypeid":99,"ostypename":"Other Linux (64-bit)","account":"User1","zoneid":2,"zonename":"zone1","status":"Download Complete","size":16106127360,"templatetype":"USER","hypervisor":"VMware","domain":"70000001100","domainid":1105,"isextractable":true,"checksum":"d8fc299515d138b13680014d05031d8b","sourcetemplateid":10259}}} } \ No newline at end of file diff --git a/libcloud/test/compute/test_cloudstack.py b/libcloud/test/compute/test_cloudstack.py index 3393c0cef4..00ba306cf6 100644 --- a/libcloud/test/compute/test_cloudstack.py +++ b/libcloud/test/compute/test_cloudstack.py @@ -706,6 +706,48 @@ def test_ex_delete_tags(self): resp = self.driver.ex_delete_tags([node.id], 'UserVm', tag_keys) self.assertTrue(resp) + def test_list_snapshots(self): + snapshots = self.driver.list_snapshots() + self.assertEqual(len(snapshots), 3) + + snap = snapshots[0] + self.assertEqual(snap.id, 188402) + self.assertEqual(snap.extra['name'], "i-123-87654-VM_ROOT-12344_20140917105548") + self.assertEqual(snap.extra['volume_id'], 89341) + + def test_create_volume_snapshot(self): + volume = self.driver.list_volumes()[0] + snapshot = self.driver.create_volume_snapshot(volume) + + self.assertEqual(snapshot.id, 190547) + self.assertEqual(snapshot.extra['name'], "i-123-87654-VM_ROOT-23456_20140917105548") + self.assertEqual(snapshot.extra['volume_id'], "fe1ada16-57a0-40ae-b577-01a153690fb4") + + def test_destroy_volume_snapshot(self): + snapshot = self.driver.list_snapshots()[0] + resp = self.driver.destroy_volume_snapshot(snapshot) + self.assertTrue(resp) + + def test_ex_create_snapshot_template(self): + snapshot = 
self.driver.list_snapshots()[0] + + template = self.driver.ex_create_snapshot_template(snapshot, "test-libcloud-template", 99) + + self.assertEqual(template.id, '10260') + self.assertEqual(template.name, "test-libcloud-template") + self.assertEqual(template.extra['displaytext'], "test-libcloud-template") + self.assertEqual(template.extra['hypervisor'], "VMware") + self.assertEqual(template.extra['os'], "Other Linux (64-bit)") + + def test_ex_list_os_types(self): + os_types = self.driver.ex_list_os_types() + + self.assertEqual(len(os_types), 146) + + self.assertEqual(os_types[0]['id'], 69) + self.assertEqual(os_types[0]['oscategoryid'], 7) + self.assertEqual(os_types[0]['description'], "Asianux 3(32-bit)") + class CloudStackTestCase(CloudStackCommonTestCase, unittest.TestCase): def test_driver_instantiation(self): From d608085e3da79240b2b56709ef6adb58bd9c9dab Mon Sep 17 00:00:00 2001 From: Jeroen de Korte Date: Thu, 25 Sep 2014 15:27:20 +0200 Subject: [PATCH 222/315] Cloudstack - Added VPC support and Egress Firewall rule support Signed-off-by: Sebastien Goasguen This closes: #364 --- CHANGES.rst | 4 + libcloud/compute/drivers/cloudstack.py | 350 +++++++++++++++++- .../createEgressFirewallRule_default.json | 1 + .../cloudstack/createVPC_default.json | 1 + .../deleteEgressFirewallRule_default.json | 1 + .../cloudstack/deleteVPC_default.json | 1 + .../listEgressFirewallRules_default.json | 1 + .../cloudstack/listVPCOfferings_default.json | 1 + .../fixtures/cloudstack/listVPCs_default.json | 1 + .../queryAsyncJobResult_deleteVPC.json | 1 + libcloud/test/compute/test_cloudstack.py | 90 +++++ 11 files changed, 451 insertions(+), 1 deletion(-) create mode 100644 libcloud/test/compute/fixtures/cloudstack/createEgressFirewallRule_default.json create mode 100644 libcloud/test/compute/fixtures/cloudstack/createVPC_default.json create mode 100644 libcloud/test/compute/fixtures/cloudstack/deleteEgressFirewallRule_default.json create mode 100644 
libcloud/test/compute/fixtures/cloudstack/deleteVPC_default.json create mode 100644 libcloud/test/compute/fixtures/cloudstack/listEgressFirewallRules_default.json create mode 100644 libcloud/test/compute/fixtures/cloudstack/listVPCOfferings_default.json create mode 100644 libcloud/test/compute/fixtures/cloudstack/listVPCs_default.json create mode 100644 libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_deleteVPC.json diff --git a/CHANGES.rst b/CHANGES.rst index 372d271a7a..fba5f48ffd 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -122,6 +122,10 @@ Compute (GITHUB-363, LIBCLOUD-616) [Oleg Suharev] +- Added VPC support and Egress Firewall rule support fo CloudStack + (GITHUB-363) + [Jeroen de Korte] + Storage ~~~~~~~ diff --git a/libcloud/compute/drivers/cloudstack.py b/libcloud/compute/drivers/cloudstack.py index 858553db24..0129e0772d 100644 --- a/libcloud/compute/drivers/cloudstack.py +++ b/libcloud/compute/drivers/cloudstack.py @@ -217,6 +217,40 @@ 'transform_func': str } }, + 'vpc': { + 'created': { + 'key_name': 'created', + 'transform_func': str + }, + 'domain': { + 'key_name': 'domain', + 'transform_func': str + }, + 'domain_id': { + 'key_name': 'domainid', + 'transform_func': int + }, + 'network_domain': { + 'key_name': 'networkdomain', + 'transform_func': str + }, + 'state': { + 'key_name': 'state', + 'transform_func': str + }, + 'vpc_offering_id': { + 'key_name': 'vpcofferingid', + 'transform_func': str + }, + 'zone_name': { + 'key_name': 'zonename', + 'transform_func': str + }, + 'zone_id': { + 'key_name': 'zoneid', + 'transform_func': str + } + }, 'project': { 'account': {'key_name': 'account', 'transform_func': str}, 'cpuavailable': {'key_name': 'cpuavailable', 'transform_func': int}, @@ -426,6 +460,62 @@ def __eq__(self, other): return self.__class__ is other.__class__ and self.id == other.id +class CloudStackEgressFirewallRule(object): + """ + A egress firewall rule. 
+ """ + + def __init__(self, id, network_id, cidr_list, protocol, + icmp_code=None, icmp_type=None, + start_port=None, end_port=None): + + """ + A egress firewall rule. + + @note: This is a non-standard extension API, and only works for + CloudStack. + + :param id: Firewall Rule ID + :type id: ``int`` + + :param network_id: the id network network for the egress firwall + services + :type network_id: ``str`` + + :param protocol: TCP/IP Protocol (TCP, UDP) + :type protocol: ``str`` + + :param cidr_list: cidr list + :type cidr_list: ``str`` + + :param icmp_code: Error code for this icmp message + :type icmp_code: ``int`` + + :param icmp_type: Type of the icmp message being sent + :type icmp_type: ``int`` + + :param start_port: start of port range + :type start_port: ``int`` + + :param end_port: end of port range + :type end_port: ``int`` + + :rtype: :class:`CloudStackEgressFirewallRule` + """ + + self.id = id + self.network_id = network_id + self.cidr_list = cidr_list + self.protocol = protocol + self.icmp_code = icmp_code + self.icmp_type = icmp_type + self.start_port = start_port + self.end_port = end_port + + def __eq__(self, other): + return self.__class__ is other.__class__ and self.id == other.id + + class CloudStackIPForwardingRule(object): """ A NAT/firewall forwarding rule. @@ -564,6 +654,51 @@ def __repr__(self): self.driver.name)) +class CloudStackVPC(object): + """ + Class representing a CloudStack VPC. + """ + + def __init__(self, display_text, name, vpc_offering_id, id, zone_id, cidr, + driver, extra=None): + self.display_text = display_text + self.name = name + self.vpc_offering_id = vpc_offering_id + self.id = id + self.zone_id = zone_id + self.cidr = cidr + self.driver = driver + self.extra = extra or {} + + def __repr__(self): + return (('') + % (self.id, self.displaytext, self.name, + self.vpc_offering_id, self.zoneid, self.cidr, + self.driver.name)) + + +class CloudStackVPCOffering(object): + """ + Class representing a CloudStack VPC Offering. 
+ """ + + def __init__(self, name, display_text, id, + driver, extra=None): + self.name = name + self.display_text = display_text + self.id = id + self.driver = driver + self.extra = extra or {} + + def __repr__(self): + return (('') + % (self.id, self.name, self.display_text, + self.driver.name)) + + class CloudStackProject(object): """ Class representing a CloudStack Project. @@ -1132,6 +1267,125 @@ def ex_delete_network(self, network, force=None): method='GET') return True + def ex_list_vpc_offerings(self): + """ + List the available vpc offerings + + :rtype ``list`` of :class:`CloudStackVPCOffering` + """ + res = self._sync_request(command='listVPCOfferings', + method='GET') + vpcoffers = res.get('vpcoffering', []) + + vpcofferings = [] + + for vpcoffer in vpcoffers: + vpcofferings.append(CloudStackVPCOffering( + vpcoffer['name'], + vpcoffer['displaytext'], + vpcoffer['id'], + self)) + + return vpcofferings + + def ex_list_vpcs(self): + """ + List the available VPCs + + :rtype ``list`` of :class:`CloudStackVPC` + """ + + res = self._sync_request(command='listVPCs', + method='GET') + vpcs = res.get('vpc', []) + + networks = [] + for vpc in vpcs: + + networks.append(CloudStackVPC( + vpc['displaytext'], + vpc['name'], + vpc['vpcofferingid'], + vpc['id'], + vpc['zoneid'], + vpc['cidr'], + self)) + + return networks + + def ex_create_vpc(self, cidr, display_text, name, vpc_offering, + zoneid): + """ + + Creates a VPC, only available in advanced zones. + + :param cidr: the cidr of the VPC. 
All VPC guest networks' cidrs + should be within this CIDR + + :type display_text: ``str`` + + :param display_text: the display text of the VPC + :type display_text: ``str`` + + :param name: the name of the VPC + :type name: ``str`` + + :param vpc_offering: the ID of the VPC offering + :type vpc_offering: :class:'CloudStackVPCOffering` + + :param zoneid: the ID of the availability zone + :type zoneid: ``str`` + + :rtype: :class:`CloudStackVPC` + + """ + + extra_map = RESOURCE_EXTRA_ATTRIBUTES_MAP['vpc'] + + args = { + 'cidr': cidr, + 'displaytext': display_text, + 'name': name, + 'vpcofferingid': vpc_offering.id, + 'zoneid': zoneid, + } + + result = self._sync_request(command='createVPC', + params=args, + method='GET') + + extra = self._get_extra_dict(result, extra_map) + + vpc = CloudStackVPC(display_text, + name, + vpc_offering.id, + result['id'], + zoneid, + cidr, + self, + extra=extra) + + return vpc + + def ex_delete_vpc(self, vpc): + """ + + Deletes a VPC, only available in advanced zones. 
+ + :param vpc: The VPC + :type vpc: :class: 'CloudStackVPC' + + :rtype: ``bool`` + + """ + + args = {'id': vpc.id} + + self._async_request(command='deleteVPC', + params=args, + method='GET') + return True + def ex_list_projects(self): """ List the available projects @@ -1431,7 +1685,8 @@ def ex_list_public_ips(self): ips.append(CloudStackAddress(ip['id'], ip['ipaddress'], self, - ip['associatednetworkid'])) + ip.get('associatednetworkid', []))) + return ips def ex_allocate_public_ip(self, location=None): @@ -1562,6 +1817,99 @@ def ex_delete_firewall_rule(self, firewall_rule): method='GET') return res['success'] + def ex_list_egress_firewall_rules(self): + """ + Lists all agress Firewall Rules + + :rtype: ``list`` of :class:`CloudStackEgressFirewallRule` + """ + rules = [] + result = self._sync_request(command='listEgressFirewallRules', + method='GET') + for rule in result['firewallrule']: + rules.append(CloudStackEgressFirewallRule(rule['id'], + rule['networkid'], + rule['cidrlist'], + rule['protocol'], + rule.get('icmpcode'), + rule.get('icmptype'), + rule.get('startport'), + rule.get('endport'))) + + return rules + + def ex_create_egress_firewall_rule(self, network_id, cidr_list, protocol, + icmp_code=None, icmp_type=None, + start_port=None, end_port=None): + """ + Creates a Firewalle Rule + + :param network_id: the id network network for the egress firwall + services + :type network_id: ``str`` + + :param cidr_list: cidr list + :type cidr_list: ``str`` + + :param protocol: TCP/IP Protocol (TCP, UDP) + :type protocol: ``str`` + + :param icmp_code: Error code for this icmp message + :type icmp_code: ``int`` + + :param icmp_type: Type of the icmp message being sent + :type icmp_type: ``int`` + + :param start_port: start of port range + :type start_port: ``int`` + + :param end_port: end of port range + :type end_port: ``int`` + + :rtype: :class:`CloudStackEgressFirewallRule` + """ + args = { + 'networkid': network_id, + 'cidrlist': cidr_list, + 'protocol': protocol 
+ } + if icmp_code is not None: + args['icmpcode'] = int(icmp_code) + if icmp_type is not None: + args['icmptype'] = int(icmp_type) + if start_port is not None: + args['startport'] = int(start_port) + if end_port is not None: + args['endport'] = int(end_port) + + result = self._async_request(command='createEgressFirewallRule', + params=args, + method='GET') + + rule = CloudStackEgressFirewallRule(result['firewallrule']['id'], + network_id, + cidr_list, + protocol, + icmp_code, + icmp_type, + start_port, + end_port) + return rule + + def ex_delete_egress_firewall_rule(self, firewall_rule): + """ + Remove a Firewall rule. + + :param egress_firewall_rule: Firewall rule which should be used + :type egress_firewall_rule: :class:`CloudStackEgressFirewallRule` + + :rtype: ``bool`` + """ + res = self._async_request(command='deleteEgressFirewallRule', + params={'id': firewall_rule.id}, + method='GET') + return res['success'] + def ex_list_port_forwarding_rules(self): """ Lists all Port Forwarding Rules diff --git a/libcloud/test/compute/fixtures/cloudstack/createEgressFirewallRule_default.json b/libcloud/test/compute/fixtures/cloudstack/createEgressFirewallRule_default.json new file mode 100644 index 0000000000..104b80a09a --- /dev/null +++ b/libcloud/test/compute/fixtures/cloudstack/createEgressFirewallRule_default.json @@ -0,0 +1 @@ +{ "createegressfirewallruleresponse" : {"jobid":1149341,"id":172465} } diff --git a/libcloud/test/compute/fixtures/cloudstack/createVPC_default.json b/libcloud/test/compute/fixtures/cloudstack/createVPC_default.json new file mode 100644 index 0000000000..e1c2bedb42 --- /dev/null +++ b/libcloud/test/compute/fixtures/cloudstack/createVPC_default.json @@ -0,0 +1 @@ +{ "createvpcresponse" : {"id":"c78499e1-b3a2-4a2a-9759-c2bcb1b79cd4","jobid":"f618f672-c714-4031-8c79-bb1300a2163c"} } \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/cloudstack/deleteEgressFirewallRule_default.json 
b/libcloud/test/compute/fixtures/cloudstack/deleteEgressFirewallRule_default.json new file mode 100644 index 0000000000..3283e5509e --- /dev/null +++ b/libcloud/test/compute/fixtures/cloudstack/deleteEgressFirewallRule_default.json @@ -0,0 +1 @@ +{ "deleteegressfirewallruleresponse" : {"jobid":1149342} } diff --git a/libcloud/test/compute/fixtures/cloudstack/deleteVPC_default.json b/libcloud/test/compute/fixtures/cloudstack/deleteVPC_default.json new file mode 100644 index 0000000000..300c113c0e --- /dev/null +++ b/libcloud/test/compute/fixtures/cloudstack/deleteVPC_default.json @@ -0,0 +1 @@ +{ "deletevpcresponse" : {"jobid":"deleteVPC"} } \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/cloudstack/listEgressFirewallRules_default.json b/libcloud/test/compute/fixtures/cloudstack/listEgressFirewallRules_default.json new file mode 100644 index 0000000000..d51bba7990 --- /dev/null +++ b/libcloud/test/compute/fixtures/cloudstack/listEgressFirewallRules_default.json @@ -0,0 +1 @@ +{ "listegressfirewallrulesresponse" : { "count":1 ,"firewallrule" : [ {"id":"7d4e2924-49b6-4a5a-9a98-69f2f0f73c69","protocol":"tcp","startport":"80","endport":"80","networkid":"874be2ca-20a7-4360-80e9-7356c0018c0b","state":"Active","cidrlist":"192.168.0.0/16","tags":[]} ] } } \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/cloudstack/listVPCOfferings_default.json b/libcloud/test/compute/fixtures/cloudstack/listVPCOfferings_default.json new file mode 100644 index 0000000000..368a143a43 --- /dev/null +++ b/libcloud/test/compute/fixtures/cloudstack/listVPCOfferings_default.json @@ -0,0 +1 @@ +{ "listvpcofferingsresponse" : { "count":1 ,"vpcoffering" : [ {"id":"cd7dbd68-4333-4507-b80d-9840cab32841","name":"Default VPC offering with Netscaler","displaytext":"Default VPC offering with 
Netscaler","isdefault":false,"state":"Enabled","service":[{"name":"Vpn","provider":[{"name":"VpcVirtualRouter"}]},{"name":"NetworkACL","provider":[{"name":"VpcVirtualRouter"}]},{"name":"PortForwarding","provider":[{"name":"VpcVirtualRouter"}]},{"name":"Dns","provider":[{"name":"VpcVirtualRouter"}]},{"name":"UserData","provider":[{"name":"VpcVirtualRouter"}]},{"name":"Lb","provider":[{"name":"Netscaler"},{"name":"InternalLbVm"}]},{"name":"SourceNat","provider":[{"name":"VpcVirtualRouter"}]},{"name":"StaticNat","provider":[{"name":"VpcVirtualRouter"}]},{"name":"Dhcp","provider":[{"name":"VpcVirtualRouter"}]}]} ] } } \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/cloudstack/listVPCs_default.json b/libcloud/test/compute/fixtures/cloudstack/listVPCs_default.json new file mode 100644 index 0000000000..d349e1885f --- /dev/null +++ b/libcloud/test/compute/fixtures/cloudstack/listVPCs_default.json @@ -0,0 +1 @@ +{ "listvpcsresponse" : { "count":1 ,"vpc" : [ {"id":"6adc8ad1-a8a1-4a4e-af75-2c3643297041","name":"test","displaytext":"test","state":"Enabled","zoneid":"2","zonename":"TEST-DC-1","service":[{"name":"Vpn","provider":[{"name":"VpcVirtualRouter"}]},{"name":"Connectivity","provider":[{"name":"NiciraNvp"}]},{"name":"NetworkACL","provider":[{"name":"VpcVirtualRouter"}]},{"name":"PortForwarding","provider":[{"name":"VpcVirtualRouter"}]},{"name":"Dns","provider":[{"name":"VpcVirtualRouter"}]},{"name":"UserData","provider":[{"name":"VpcVirtualRouter"}]},{"name":"Lb","provider":[{"name":"VpcVirtualRouter"},{"name":"InternalLbVm"}]},{"name":"SourceNat","provider":[{"name":"VpcVirtualRouter"}]},{"name":"StaticNat","provider":[{"name":"VpcVirtualRouter"}]},{"name":"Dhcp","provider":[{"name":"VpcVirtualRouter"}]}],"cidr":"10.1.1.0/16","vpcofferingid":"ef58092b-8547-4d41-8dc3-cdaa471e12b1","account":"test_admin","domainid":"ee90435a-bba8-427f-90f9-02f9bf9e03aa","domain":"test","network":[],"restartrequired":false,"networkdomain":"test.local","tags":[]} ] 
} } \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_deleteVPC.json b/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_deleteVPC.json new file mode 100644 index 0000000000..b4a3041eae --- /dev/null +++ b/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_deleteVPC.json @@ -0,0 +1 @@ +{ "queryasyncjobresultresponse" : {"accountid":"5d7d4c5e-7e0b-4ac2-8550-713180d8a342","userid":"5fb4b286-ac58-44a7-acae-d47dbbac78d1","cmd":"org.apache.cloudstack.api.command.user.vpc.DeleteVPCCmd","jobstatus":1,"jobprocstatus":0,"jobresultcode":0,"jobresulttype":"object","jobresult":{"success":true},"created":"2014-09-25T00:11:31+0200","jobid":"cfa5c4f5-d312-4b18-9197-385ef169726e"} } \ No newline at end of file diff --git a/libcloud/test/compute/test_cloudstack.py b/libcloud/test/compute/test_cloudstack.py index 00ba306cf6..cd72b2b2c3 100644 --- a/libcloud/test/compute/test_cloudstack.py +++ b/libcloud/test/compute/test_cloudstack.py @@ -295,6 +295,58 @@ def test_ex_delete_network(self): result = self.driver.ex_delete_network(network=network) self.assertTrue(result) + def test_ex_list_vpc_offerings(self): + _, fixture = CloudStackMockHttp()._load_fixture( + 'listVPCOfferings_default.json') + fixture_vpcoffers = \ + fixture['listvpcofferingsresponse']['vpcoffering'] + + vpcoffers = self.driver.ex_list_vpc_offerings() + + for i, vpcoffer in enumerate(vpcoffers): + self.assertEqual(vpcoffer.id, fixture_vpcoffers[i]['id']) + self.assertEqual(vpcoffer.name, + fixture_vpcoffers[i]['name']) + self.assertEqual(vpcoffer.display_text, + fixture_vpcoffers[i]['displaytext']) + + def test_ex_list_vpcs(self): + _, fixture = CloudStackMockHttp()._load_fixture( + 'listVPCs_default.json') + fixture_vpcs = fixture['listvpcsresponse']['vpc'] + + vpcs = self.driver.ex_list_vpcs() + + for i, vpc in enumerate(vpcs): + self.assertEqual(vpc.id, fixture_vpcs[i]['id']) + self.assertEqual(vpc.display_text, 
fixture_vpcs[i]['displaytext']) + self.assertEqual(vpc.name, fixture_vpcs[i]['name']) + self.assertEqual(vpc.vpc_offering_id, + fixture_vpcs[i]['vpcofferingid']) + self.assertEqual(vpc.zone_id, fixture_vpcs[i]['zoneid']) + + def test_ex_create_vpc(self): + _, fixture = CloudStackMockHttp()._load_fixture( + 'createVPC_default.json') + + fixture_vpc = fixture['createvpcresponse'] + + vpcoffer = self.driver.ex_list_vpc_offerings()[0] + vpc = self.driver.ex_create_vpc(cidr='10.1.1.0/16', + display_text='cloud.local', + name='cloud.local', + vpc_offering=vpcoffer, + zoneid="2") + + self.assertEqual(vpc.id, fixture_vpc['id']) + + def test_ex_delete_vpc(self): + + vpc = self.driver.ex_list_vpcs()[0] + + result = self.driver.ex_delete_vpc(vpc=vpc) + self.assertTrue(result) + def test_ex_list_projects(self): _, fixture = CloudStackMockHttp()._load_fixture( 'listProjects_default.json') @@ -634,6 +686,44 @@ def test_ex_create_firewall_rule_icmp(self): self.assertIsNone(rule.start_port) self.assertIsNone(rule.end_port) + def test_ex_list_egress_firewall_rules(self): + rules = self.driver.ex_list_egress_firewall_rules() + self.assertEqual(len(rules), 1) + rule = rules[0] + self.assertEqual(rule.network_id, '874be2ca-20a7-4360-80e9-7356c0018c0b') + self.assertEqual(rule.cidr_list, '192.168.0.0/16') + self.assertEqual(rule.protocol, 'tcp') + self.assertIsNone(rule.icmp_code) + self.assertIsNone(rule.icmp_type) + self.assertEqual(rule.start_port, '80') + self.assertEqual(rule.end_port, '80') + + def test_ex_delete_egress_firewall_rule(self): + rules = self.driver.ex_list_egress_firewall_rules() + res = self.driver.ex_delete_egress_firewall_rule(rules[0]) + self.assertTrue(res) + + def test_ex_create_egress_firewall_rule(self): + network_id = '874be2ca-20a7-4360-80e9-7356c0018c0b' + cidr_list = '192.168.0.0/16' + protocol = 'TCP' + start_port = 33 + end_port = 34 + rule = self.driver.ex_create_egress_firewall_rule( + network_id, + cidr_list, + protocol, + start_port=start_port, + 
end_port=end_port) + + self.assertEqual(rule.network_id, network_id) + self.assertEqual(rule.cidr_list, cidr_list) + self.assertEqual(rule.protocol, protocol) + self.assertIsNone(rule.icmp_code) + self.assertIsNone(rule.icmp_type) + self.assertEqual(rule.start_port, start_port) + self.assertEqual(rule.end_port, end_port) + def test_ex_list_port_forwarding_rules(self): rules = self.driver.ex_list_port_forwarding_rules() self.assertEqual(len(rules), 1) From 4edca4b16ad4e216431ace2a65da222b5b9679f3 Mon Sep 17 00:00:00 2001 From: Gertjan Oude Lohuis Date: Thu, 25 Sep 2014 11:23:50 +0200 Subject: [PATCH 223/315] Add extra data from openstack volume to object Closes #366 Signed-off-by: Tomaz Muraus --- CHANGES.rst | 5 +++++ libcloud/compute/drivers/openstack.py | 5 +++++ libcloud/test/compute/test_openstack.py | 30 +++++++++++++++++++------ 3 files changed, 33 insertions(+), 7 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index fba5f48ffd..15454fd7b4 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -126,6 +126,11 @@ Compute (GITHUB-363) [Jeroen de Korte] +- Add additional attributes to the ``extra`` dictionary of OpenStack + StorageVolume object. 
+ (GITHUB-366) + [Gertjan Oude Lohuis] + Storage ~~~~~~~ diff --git a/libcloud/compute/drivers/openstack.py b/libcloud/compute/drivers/openstack.py index 01fadee2c7..e7a89f8845 100644 --- a/libcloud/compute/drivers/openstack.py +++ b/libcloud/compute/drivers/openstack.py @@ -2018,6 +2018,11 @@ def _to_volume(self, api_node): extra={ 'description': api_node['displayDescription'], 'attachments': [att for att in api_node['attachments'] if att], + 'state': api_node.get('status', None), + 'location': api_node.get('availabilityZone', None), + 'volume_type': api_node.get('volumeType', None), + 'metadata': api_node.get('metadata', None), + 'created_at': api_node.get('createdAt', None) } ) diff --git a/libcloud/test/compute/test_openstack.py b/libcloud/test/compute/test_openstack.py index e52c6d9451..1b0860d40d 100644 --- a/libcloud/test/compute/test_openstack.py +++ b/libcloud/test/compute/test_openstack.py @@ -794,18 +794,34 @@ def test_list_volumes(self): self.assertEqual('cd76a3a1-c4ce-40f6-9b9f-07a61508938d', volume.id) self.assertEqual('test_volume_2', volume.name) self.assertEqual(2, volume.size) - - self.assertEqual(volume.extra['description'], '') - self.assertEqual(volume.extra['attachments'][0][ - 'id'], 'cd76a3a1-c4ce-40f6-9b9f-07a61508938d') + self.assertEqual(volume.extra, { + 'description': '', + 'attachments': [{ + 'id': 'cd76a3a1-c4ce-40f6-9b9f-07a61508938d', + "device": "/dev/vdb", + "serverId": "12065", + "volumeId": "cd76a3a1-c4ce-40f6-9b9f-07a61508938d", + }], + 'state': 'available', + 'location': 'nova', + 'volume_type': 'None', + 'metadata': {}, + 'created_at': '2013-06-24T11:20:13.000000', + }) volume = volumes[1] self.assertEqual('cfcec3bc-b736-4db5-9535-4c24112691b5', volume.id) self.assertEqual('test_volume', volume.name) self.assertEqual(50, volume.size) - - self.assertEqual(volume.extra['description'], 'some description') - self.assertEqual(volume.extra['attachments'], []) + self.assertEqual(volume.extra, { + 'description': 'some description', + 
'attachments': [], + 'state': 'available', + 'location': 'nova', + 'volume_type': 'None', + 'metadata': {}, + 'created_at': '2013-06-21T12:39:02.000000', + }) def test_list_sizes(self): sizes = self.driver.list_sizes() From 00225d9044abd95c4cbdb0b473d8f97ff2c446bb Mon Sep 17 00:00:00 2001 From: Gertjan Oude Lohuis Date: Thu, 25 Sep 2014 09:37:27 +0200 Subject: [PATCH 224/315] Make openstack create_volume return a StorageVolume Closes #365 Signed-off-by: Tomaz Muraus --- CHANGES.rst | 7 +++++++ libcloud/compute/drivers/openstack.py | 5 +++-- libcloud/test/compute/test_openstack.py | 4 +++- 3 files changed, 13 insertions(+), 3 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index 15454fd7b4..1873f1a0be 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -131,6 +131,13 @@ Compute (GITHUB-366) [Gertjan Oude Lohuis] +- Fix ``create_volume`` method in the OpenStack driver to return a created + volume object (instance of StorageVolume) on success, instead of a boolean + indicating operation success. 
+ (GITHUB-365) + [Gertjan Oude Lohuis] + + Storage ~~~~~~~ diff --git a/libcloud/compute/drivers/openstack.py b/libcloud/compute/drivers/openstack.py index e7a89f8845..1d3b5ff9c3 100644 --- a/libcloud/compute/drivers/openstack.py +++ b/libcloud/compute/drivers/openstack.py @@ -152,7 +152,7 @@ def create_volume(self, size, name, location=None, snapshot=None): if snapshot: raise NotImplementedError( "create_volume does not yet support create from snapshot") - return self.connection.request('/os-volumes', + resp = self.connection.request('/os-volumes', method='POST', data={ 'volume': { @@ -165,7 +165,8 @@ def create_volume(self, size, name, location=None, snapshot=None): }, 'availability_zone': location, } - }).success() + }) + return self._to_volume(resp.object) def destroy_volume(self, volume): return self.connection.request('/os-volumes/%s' % volume.id, diff --git a/libcloud/test/compute/test_openstack.py b/libcloud/test/compute/test_openstack.py index 1b0860d40d..930fff9bf0 100644 --- a/libcloud/test/compute/test_openstack.py +++ b/libcloud/test/compute/test_openstack.py @@ -923,7 +923,9 @@ def test_reboot_node(self): self.assertTrue(self.node.reboot()) def test_create_volume(self): - self.assertEqual(self.driver.create_volume(1, 'test'), True) + volume = self.driver.create_volume(1, 'test') + self.assertEqual(volume.name, 'test') + self.assertEqual(volume.size, 1) def test_destroy_volume(self): volume = self.driver.ex_get_volume( From ca6441739b8a56c61d8224747bc16633d2ee4079 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Sat, 27 Sep 2014 19:42:35 +0200 Subject: [PATCH 225/315] Fix docstrings. 
--- libcloud/common/abiquo.py | 3 +- libcloud/common/base.py | 2 +- libcloud/common/google.py | 62 ++++++++++++++++++++------------------- libcloud/common/linode.py | 14 +++------ libcloud/pricing.py | 2 +- 5 files changed, 40 insertions(+), 43 deletions(-) diff --git a/libcloud/common/abiquo.py b/libcloud/common/abiquo.py index f621b2aa5c..041a9ffea3 100644 --- a/libcloud/common/abiquo.py +++ b/libcloud/common/abiquo.py @@ -192,7 +192,8 @@ def add_default_headers(self, headers): :type headers: ``dict`` :param headers: Default input headers - :rtype ``dict`` + + :rtype: ``dict`` :return: Default input headers with the 'Authorization' header """ diff --git a/libcloud/common/base.py b/libcloud/common/base.py index 1d05b4b89f..75200b411e 100644 --- a/libcloud/common/base.py +++ b/libcloud/common/base.py @@ -852,7 +852,7 @@ def async_request(self, action, params=None, data=None, headers=None, :type context: ``dict`` :param context: Context dictionary which is passed to the functions - which construct initial and poll URL. + which construct initial and poll URL. :return: An :class:`Response` instance. :rtype: :class:`Response` instance diff --git a/libcloud/common/google.py b/libcloud/common/google.py index 725c3832aa..52692e639e 100644 --- a/libcloud/common/google.py +++ b/libcloud/common/google.py @@ -26,38 +26,40 @@ Setting up Service Account authentication (note that you need the PyCrypto package installed to use this): - - Go to the Console - - Go to your project and then to "APIs & auth" on the left - - Click on "Credentials" - - Click on "Create New Client ID..." - - Select "Service account" and click on "Create Client ID" - - Download the Private Key (should happen automatically). - - The key that you download is a PKCS12 key. It needs to be converted to - the PEM format. 
- - Convert the key using OpenSSL (the default password is 'notasecret'): - ``openssl pkcs12 -in YOURPRIVKEY.p12 -nodes -nocerts - -passin pass:notasecret | openssl rsa -out PRIV.pem`` - - Move the .pem file to a safe location. - - To Authenticate, you will need to pass the Service Account's "Email - address" in as the user_id and the path to the .pem file as the key. + +- Go to the Console +- Go to your project and then to "APIs & auth" on the left +- Click on "Credentials" +- Click on "Create New Client ID..." +- Select "Service account" and click on "Create Client ID" +- Download the Private Key (should happen automatically). +- The key that you download is a PKCS12 key. It needs to be converted to + the PEM format. +- Convert the key using OpenSSL (the default password is 'notasecret'): + ``openssl pkcs12 -in YOURPRIVKEY.p12 -nodes -nocerts + -passin pass:notasecret | openssl rsa -out PRIV.pem`` +- Move the .pem file to a safe location. +- To Authenticate, you will need to pass the Service Account's "Email + address" in as the user_id and the path to the .pem file as the key. Setting up Installed Application authentication: - - Go to the Console - - Go to your project and then to "APIs & auth" on the left - - Click on "Credentials" - - Select "Installed application" and "Other" then click on - "Create Client ID" - - To Authenticate, pass in the "Client ID" as the user_id and the "Client - secret" as the key - - The first time that you do this, the libcloud will give you a URL to - visit. Copy and paste the URL into a browser. - - When you go to the URL it will ask you to log in (if you aren't already) - and ask you if you want to allow the project access to your account. - - Click on Accept and you will be given a code. - - Paste that code at the prompt given to you by the Google libcloud - connection. - - At that point, a token & refresh token will be stored in your home - directory and will be used for authentication. 
+ +- Go to the Console +- Go to your project and then to "APIs & auth" on the left +- Click on "Credentials" +- Select "Installed application" and "Other" then click on + "Create Client ID" +- To Authenticate, pass in the "Client ID" as the user_id and the "Client + secret" as the key +- The first time that you do this, the libcloud will give you a URL to + visit. Copy and paste the URL into a browser. +- When you go to the URL it will ask you to log in (if you aren't already) + and ask you if you want to allow the project access to your account. +- Click on Accept and you will be given a code. +- Paste that code at the prompt given to you by the Google libcloud + connection. +- At that point, a token & refresh token will be stored in your home + directory and will be used for authentication. Please remember to secure your keys and access tokens. """ diff --git a/libcloud/common/linode.py b/libcloud/common/linode.py index 2d60500fcd..fb6ea2bd3a 100644 --- a/libcloud/common/linode.py +++ b/libcloud/common/linode.py @@ -63,19 +63,13 @@ def __repr__(self): class LinodeResponse(JsonResponse): - """Linode API response - - Wraps the HTTP response returned by the Linode API, which should be JSON in - this structure: + """ + Linode API response - { - "ERRORARRAY": [ ... ], - "DATA": [ ... ], - "ACTION": " ... " - } + Wraps the HTTP response returned by the Linode API. libcloud does not take advantage of batching, so a response will always - reflect the above format. A few weird quirks are caught here as well. + reflect the above format. A few weird quirks are caught here as well. 
""" objects = None diff --git a/libcloud/pricing.py b/libcloud/pricing.py index 1dbdf204c4..cc2506f133 100644 --- a/libcloud/pricing.py +++ b/libcloud/pricing.py @@ -70,7 +70,7 @@ def get_pricing(driver_type, driver_name, pricing_file_path=None): :type driver_type: ``str`` :param driver_type: Driver type ('compute' or 'storage') - :type driver_name: ``str` + :type driver_name: ``str`` :param driver_name: Driver name :type pricing_file_path: ``str`` From d2c5ffc7419f8f84c848162fe3181041f9ae3812 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Sat, 27 Sep 2014 20:06:24 +0200 Subject: [PATCH 226/315] Update tox "docs" target to also generate API documentation for the whole libcloud package. --- .gitignore | 1 + docs/api_docs.rst | 7 +++++++ docs/index.rst | 1 + tox.ini | 3 ++- 4 files changed, 11 insertions(+), 1 deletion(-) create mode 100644 docs/api_docs.rst diff --git a/.gitignore b/.gitignore index dc61a25d46..45fbe88029 100644 --- a/.gitignore +++ b/.gitignore @@ -13,6 +13,7 @@ coverage_html_report/ .idea dist/*apache-libcloud* dist/*apache_libcloud* +docs/apidocs/* _build/ apache_libcloud.egg-info/ .project diff --git a/docs/api_docs.rst b/docs/api_docs.rst new file mode 100644 index 0000000000..2cb0a0aabd --- /dev/null +++ b/docs/api_docs.rst @@ -0,0 +1,7 @@ +API Documentation +================= + +For automatically generated API documentation of all the modules, +please visit `this page`_. + +.. _`this page`: apidocs/modules.html diff --git a/docs/index.rst b/docs/index.rst index d0263293ae..02e18589c7 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -37,6 +37,7 @@ Main loadbalancer/index dns/index troubleshooting + api_docs faq other/* diff --git a/tox.ini b/tox.ini index 30516b0b47..2d06eba7d1 100644 --- a/tox.ini +++ b/tox.ini @@ -50,7 +50,8 @@ deps = sphinx basepython = python2.7 changedir = docs commands = python ../contrib/generate_provider_feature_matrix_table.py - sphinx-build -W -b html -d {envtmpdir}/doctrees . 
_build/html + sphinx-apidoc -d 2 ../libcloud/ -o apidocs/ + sphinx-build -b html -d {envtmpdir}/doctrees . _build/html [testenv:scrape-ec2-prices] deps = requests From 7aae76025cd90d568c2e46a12de9874830f5452f Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Sat, 27 Sep 2014 20:08:03 +0200 Subject: [PATCH 227/315] Re-generate supported methods table. --- docs/compute/_supported_methods_block_storage.rst | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/compute/_supported_methods_block_storage.rst b/docs/compute/_supported_methods_block_storage.rst index f14d375f88..31978266a5 100644 --- a/docs/compute/_supported_methods_block_storage.rst +++ b/docs/compute/_supported_methods_block_storage.rst @@ -6,7 +6,7 @@ Provider list volumes create volume destroy volume `Brightbox`_ no no no no no no no `CloudFrames`_ no no no no no no no `CloudSigma (API v2.0)`_ no no no no no no no -`CloudStack`_ yes yes yes yes yes no no +`CloudStack`_ yes yes yes yes yes no yes `Digital Ocean`_ no no no no no no no `Dreamhost`_ no no no no no no no `Amazon EC2`_ yes yes yes yes yes yes yes @@ -30,22 +30,22 @@ Provider list volumes create volume destroy volume `ElasticHosts (lax-p)`_ no no no no no no no `ElasticHosts (sjc-c)`_ no no no no no no no `Eucalyptus`_ yes yes yes yes yes yes yes -`Exoscale`_ yes yes yes yes yes no no +`Exoscale`_ yes yes yes yes yes no yes `Gandi`_ yes yes yes yes yes no no `Google Compute Engine`_ yes yes yes yes yes yes yes `GoGrid`_ no no no no no no no `HostVirtual`_ no no no no no no no `HP Public Cloud (Helion)`_ yes yes yes yes yes no no `IBM SmartCloud Enterprise`_ yes yes yes yes yes no no -`Ikoula`_ yes yes yes yes yes no no +`Ikoula`_ yes yes yes yes yes no yes `Joyent`_ no no no no no no no `Kili Public Cloud`_ yes yes yes yes yes no no -`KTUCloud`_ yes yes yes yes yes no no +`KTUCloud`_ yes yes yes yes yes no yes `Libvirt`_ no no no no no no no `Linode`_ no no no no no no no `NephoScale`_ no no no no no no no `Nimbus`_ 
yes yes yes yes yes yes yes -`Ninefold`_ yes yes yes yes yes no no +`Ninefold`_ yes yes yes yes yes no yes `OpenNebula (v3.8)`_ yes yes yes yes yes no no `OpenStack`_ yes yes yes yes yes no no `Opsource`_ no no no no no no no From 0a1f67b329f5627bad14f82a4b923e8d29a5e890 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Sat, 27 Sep 2014 20:16:06 +0200 Subject: [PATCH 228/315] Also generate api docs files on readthedocs. --- docs/conf.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/docs/conf.py b/docs/conf.py index ae5ea97c7a..093854ad31 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -12,6 +12,14 @@ # serve to show the default. import sys, os +import subprocess + +# Detect if we are running on read the docs +on_rtd = os.environ.get('READTHEDOCS', None) == 'True' + +if on_rtd: + cmd = 'sphinx-apidoc -d 2 -o apidocs/ ../libcloud/' + subprocess.call(cmd, shell=True) # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the @@ -95,7 +103,6 @@ # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. -on_rtd = os.environ.get('READTHEDOCS', None) == 'True' if on_rtd: html_theme = 'default' From 08b4638139c94ae5a1d9ad0342aaf022153e23c3 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Sat, 27 Sep 2014 20:40:03 +0200 Subject: [PATCH 229/315] Increase toc depth. 
--- docs/conf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/conf.py b/docs/conf.py index 093854ad31..6bd17cb7d6 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -18,7 +18,7 @@ on_rtd = os.environ.get('READTHEDOCS', None) == 'True' if on_rtd: - cmd = 'sphinx-apidoc -d 2 -o apidocs/ ../libcloud/' + cmd = 'sphinx-apidoc -d 3 -o apidocs/ ../libcloud/' subprocess.call(cmd, shell=True) # If extensions (or modules to document with autodoc) are in another directory, From 76c6fd91edb3487783fb65a226e27f7ea1e8551d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ren=C3=A9=20Moser?= Date: Wed, 1 Oct 2014 09:25:26 +0200 Subject: [PATCH 230/315] LIBCLOUD-615: Add optional param project for ex_list_networks() to CloudStack provider. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: René Moser Signed-off-by: Sebastien Goasguen This closes #367 --- CHANGES.rst | 3 +++ libcloud/compute/drivers/cloudstack.py | 11 ++++++++++- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index 1873f1a0be..2d0aa6d990 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -137,6 +137,9 @@ Compute (GITHUB-365) [Gertjan Oude Lohuis] +- Add optional project parameters for ex_list_networks() to CloudStack driver + (GITHUB-367, LIBCLOUD-615) + [Rene Moser] Storage ~~~~~~~ diff --git a/libcloud/compute/drivers/cloudstack.py b/libcloud/compute/drivers/cloudstack.py index 0129e0772d..6ff91f2866 100644 --- a/libcloud/compute/drivers/cloudstack.py +++ b/libcloud/compute/drivers/cloudstack.py @@ -1103,14 +1103,23 @@ def ex_list_disk_offerings(self): return diskOfferings - def ex_list_networks(self): + def ex_list_networks(self, project=None): """ List the available networks + :param project: Optional project the networks belongs to. 
+ :type project: :class:`.CloudStackProject` + :rtype ``list`` of :class:`CloudStackNetwork` """ + args = {} + + if project is not None: + args['projectid'] = project.id + res = self._sync_request(command='listNetworks', + params=args, method='GET') nets = res.get('network', []) From 2d58fa9ed4defec7f44ce0b83aede701dbc806e1 Mon Sep 17 00:00:00 2001 From: Roeland Kuipers Date: Fri, 3 Oct 2014 13:29:10 +0200 Subject: [PATCH 231/315] CLOUDSTACK: option to start VM in a STOPPED state Signed-off-by: Sebastien Goasguen This closes #368 --- CHANGES.rst | 4 ++++ libcloud/compute/drivers/cloudstack.py | 8 +++++++ .../deployVirtualMachine_stoppedvm.json | 1 + .../cloudstack/listNetworks_stoppedvm.json | 1 + .../listServiceOfferings_stoppedvm.json | 1 + .../cloudstack/listTemplates_stoppedvm.json | 1 + .../cloudstack/listZones_stoppedvm.json | 1 + .../queryAsyncJobResult_deployvmstopped.json | 1 + libcloud/test/compute/test_cloudstack.py | 23 +++++++++++++++++++ 9 files changed, 41 insertions(+) create mode 100644 libcloud/test/compute/fixtures/cloudstack/deployVirtualMachine_stoppedvm.json create mode 100644 libcloud/test/compute/fixtures/cloudstack/listNetworks_stoppedvm.json create mode 100644 libcloud/test/compute/fixtures/cloudstack/listServiceOfferings_stoppedvm.json create mode 100644 libcloud/test/compute/fixtures/cloudstack/listTemplates_stoppedvm.json create mode 100644 libcloud/test/compute/fixtures/cloudstack/listZones_stoppedvm.json create mode 100644 libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_deployvmstopped.json diff --git a/CHANGES.rst b/CHANGES.rst index 2d0aa6d990..70dff2faa3 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -141,6 +141,10 @@ Compute (GITHUB-367, LIBCLOUD-615) [Rene Moser] +- CLOUDSTACK: option to start VM in a STOPPED state + (GITHUB-368) + [Roeland Kuipers] + Storage ~~~~~~~ diff --git a/libcloud/compute/drivers/cloudstack.py b/libcloud/compute/drivers/cloudstack.py index 6ff91f2866..a9d716938c 100644 --- 
a/libcloud/compute/drivers/cloudstack.py +++ b/libcloud/compute/drivers/cloudstack.py @@ -952,6 +952,10 @@ def create_node(self, **kwargs): :keyword ex_ip_address: String with ipaddress for the default nic :type ex_ip_address: ``str`` + :keyword ex_start_vm: Boolean to specify to start VM after creation + Defaults to True + :type ex_start_vm: ``bool`` + :rtype: :class:`.CloudStackNode` """ @@ -979,6 +983,7 @@ def _create_args_to_params(self, node, **kwargs): ex_security_groups = kwargs.get('ex_security_groups', None) ex_displayname = kwargs.get('ex_displayname', None) ex_ip_address = kwargs.get('ex_ip_address', None) + ex_start_vm = kwargs.get('ex_start_vm', None) if name: server_params['name'] = name @@ -1022,6 +1027,9 @@ def _create_args_to_params(self, node, **kwargs): if ex_ip_address: server_params['ipaddress'] = ex_ip_address + if ex_start_vm is False: + server_params['startvm'] = ex_start_vm + return server_params def destroy_node(self, node): diff --git a/libcloud/test/compute/fixtures/cloudstack/deployVirtualMachine_stoppedvm.json b/libcloud/test/compute/fixtures/cloudstack/deployVirtualMachine_stoppedvm.json new file mode 100644 index 0000000000..5906c4df98 --- /dev/null +++ b/libcloud/test/compute/fixtures/cloudstack/deployVirtualMachine_stoppedvm.json @@ -0,0 +1 @@ +{ "deployvirtualmachineresponse" : {"jobid":"deployvmstopped","id":65385} } diff --git a/libcloud/test/compute/fixtures/cloudstack/listNetworks_stoppedvm.json b/libcloud/test/compute/fixtures/cloudstack/listNetworks_stoppedvm.json new file mode 100644 index 0000000000..ad0bb8095a --- /dev/null +++ b/libcloud/test/compute/fixtures/cloudstack/listNetworks_stoppedvm.json @@ -0,0 +1 @@ +{"listnetworksresponse": { "count": 3, "network": [ { "broadcastdomaintype": "Vlan", "broadcasturi": "vlan://1002", "displaytext": "network:192.168.2.0/24", "dns1": "8.8.8.8", "dns2": "8.8.8.8", "domain": "ROOT", "domainid": 1623, "endip": "192.168.2.233", "gateway": "192.168.2.254", "id": 1823, "isdefault": 
false, "isshared": true, "issystem": false, "name": "ROOT", "netmask": "255.255.255.0", "networkdomain": "cs1cloud.internal", "networkofferingavailability": "Optional", "networkofferingdisplaytext": "Direct", "networkofferingid": 7, "networkofferingname": "DefaultDirectNetworkOffering", "related": 1823, "securitygroupenabled": false, "service": [ { "name": "UserData" }, { "capability": [ { "name": "AllowDnsSuffixModification", "value": "true" } ], "name": "Dns" }, { "capability": [ { "name": "SupportedProtocols", "value": "tcp, udp" }, { "name": "SupportedLbAlgorithms", "value": "roundrobin,leastconn,source" } ], "name": "Lb" }, { "name": "Dhcp" } ], "startip": "192.168.2.1", "state": "Setup", "traffictype": "Guest", "type": "Direct", "vlan": "1002", "zoneid": 1 }, { "account": "testuser", "broadcastdomaintype": "Vlan", "broadcasturi": "vlan://2909", "displaytext": "testuser-network", "dns1": "8.8.8.8", "dns2": "8.8.8.8", "domain": "ROOT", "domainid": 1623, "id": 1547, "isdefault": true, "isshared": false, "issystem": false, "name": "testuser-network", "networkdomain": "cs586cloud.internal", "networkofferingavailability": "Optional", "networkofferingdisplaytext": "Virtual Vlan", "networkofferingid": 6, "networkofferingname": "DefaultVirtualizedNetworkOffering", "related": 1547, "securitygroupenabled": false, "service": [ { "name": "UserData" }, { "capability": [ { "name": "AllowDnsSuffixModification", "value": "true" } ], "name": "Dns" }, { "capability": [ { "name": "SupportedProtocols", "value": "tcp, udp" }, { "name": "SupportedLbAlgorithms", "value": "roundrobin,leastconn,source" } ], "name": "Lb" }, { "name": "Gateway" }, { "name": "Dhcp" }, { "capability": [ { "name": "SupportedVpnTypes", "value": "pptp,l2tp,ipsec" } ], "name": "Vpn" }, { "capability": [ { "name": "MultipleIps", "value": "true" }, { "name": "SupportedSourceNatTypes", "value": "per account" }, { "name": "SupportedProtocols", "value": "tcp,udp,icmp" }, { "name": "TrafficStatistics", "value": 
"per public ip" }, { "name": "PortForwarding", "value": "true" }, { "name": "StaticNat", "value": "true" } ], "name": "Firewall" } ], "state": "Implemented", "traffictype": "Guest", "type": "Virtual", "zoneid": 2 }, { "account": "testuser", "broadcastdomaintype": "Vlan", "broadcasturi": "vlan://3564", "displaytext": "testuser-network", "dns1": "8.8.8.8", "dns2": "8.8.8.8", "domain": "ROOT", "domainid": 1623, "id": 1374, "isdefault": true, "isshared": false, "issystem": false, "name": "testuser-network", "networkdomain": "cs586cloud.internal", "networkofferingavailability": "Optional", "networkofferingdisplaytext": "Virtual Vlan", "networkofferingid": 6, "networkofferingname": "DefaultVirtualizedNetworkOffering", "related": 1374, "securitygroupenabled": false, "service": [ { "name": "UserData" }, { "capability": [ { "name": "AllowDnsSuffixModification", "value": "true" } ], "name": "Dns" }, { "capability": [ { "name": "SupportedProtocols", "value": "tcp, udp" }, { "name": "SupportedLbAlgorithms", "value": "roundrobin,leastconn,source" } ], "name": "Lb" }, { "name": "Gateway" }, { "name": "Dhcp" }, { "capability": [ { "name": "SupportedVpnTypes", "value": "pptp,l2tp,ipsec" } ], "name": "Vpn" }, { "capability": [ { "name": "MultipleIps", "value": "true" }, { "name": "SupportedSourceNatTypes", "value": "per account" }, { "name": "SupportedProtocols", "value": "tcp,udp,icmp" }, { "name": "TrafficStatistics", "value": "per public ip" }, { "name": "PortForwarding", "value": "true" }, { "name": "StaticNat", "value": "true" } ], "name": "Firewall" } ], "state": "Implemented", "traffictype": "Guest", "type": "Virtual", "zoneid": 1 } ] } } diff --git a/libcloud/test/compute/fixtures/cloudstack/listServiceOfferings_stoppedvm.json b/libcloud/test/compute/fixtures/cloudstack/listServiceOfferings_stoppedvm.json new file mode 100644 index 0000000000..944a6f2919 --- /dev/null +++ b/libcloud/test/compute/fixtures/cloudstack/listServiceOfferings_stoppedvm.json @@ -0,0 +1 @@ +{ 
"listserviceofferingsresponse" : {"count": 3, "serviceoffering": [ {"cpunumber": 2, "cpuspeed": 1600, "created": "2011-09-09T13:14:19+0900", "defaultuse": false, "displaytext": "M4", "id": 21, "issystem": false, "limitcpuuse": true, "memory": 4096, "name": "M4", "networkrate": 500, "offerha": false, "storagetype": "shared", "tags": "Shared.auto"}, {"cpunumber": 1, "cpuspeed": 800, "created": "2011-09-09T13:17:52+0900", "defaultuse": false, "displaytext": "XS", "id": 24, "issystem": false, "limitcpuuse": true, "memory": 512, "name": "XS", "networkrate": 100, "offerha": false, "storagetype": "shared", "tags": "Shared.auto"}, {"cpunumber": 1, "cpuspeed": 1600, "created": "2011-09-14T22:51:23+0900", "defaultuse": false, "displaytext": "S2", "id": 30, "issystem": false, "limitcpuuse": true, "memory": 2048, "name": "S2", "networkrate": 500, "offerha": false, "storagetype": "shared", "tags": "Shared.auto"}]}} diff --git a/libcloud/test/compute/fixtures/cloudstack/listTemplates_stoppedvm.json b/libcloud/test/compute/fixtures/cloudstack/listTemplates_stoppedvm.json new file mode 100644 index 0000000000..a9c778c738 --- /dev/null +++ b/libcloud/test/compute/fixtures/cloudstack/listTemplates_stoppedvm.json @@ -0,0 +1 @@ +{ "listtemplatesresponse" : {"count": 2, "template": [ {"account": "admin", "created": "2014-06-06T20:08:49+0900", "crossZones": false, "displaytext": "CentOS 6.5", "domain": "ROOT", "domainid": 1, "format": "OVA", "hypervisor": "VMware", "id": 8028, "isextractable": true, "isfeatured": true, "ispublic": true, "isready": true, "name": "CentOS 6.5 64-bit", "ostypeid": 112, "ostypename": "CentOS 5.5 (64-bit)", "passwordenabled": true, "size": 16106127360, "status": "Download Complete", "templatetype": "USER", "zoneid": 2, "zonename": "zone2"}, {"account": "admin", "created": "2014-06-06T20:08:48+0900", "crossZones": false, "displaytext": "CentOS 6.5", "domain": "ROOT", "domainid": 1, "format": "OVA", "hypervisor": "VMware", "id": 8028, "isextractable": true, 
"isfeatured": true, "ispublic": true, "isready": true, "name": "CentOS 6.5 64-bit", "ostypeid": 112, "ostypename": "CentOS 5.5 (64-bit)", "passwordenabled": true, "size": 16106127360, "status": "Download Complete", "templatetype": "USER", "zoneid": 1, "zonename": "zone1"} ]} } diff --git a/libcloud/test/compute/fixtures/cloudstack/listZones_stoppedvm.json b/libcloud/test/compute/fixtures/cloudstack/listZones_stoppedvm.json new file mode 100644 index 0000000000..b072749410 --- /dev/null +++ b/libcloud/test/compute/fixtures/cloudstack/listZones_stoppedvm.json @@ -0,0 +1 @@ +{ "listzonesresponse" : { "count":2 ,"zone" : [ {"id":1,"name":"zone1","networktype":"Advanced","securitygroupsenabled":false,"allocationstate":"Enabled","zonetoken":"6a3bfa26-67cd-3ff2-867e-20e86b211bb1","dhcpprovider":"VirtualRouter"}, {"id":2,"name":"zone2","networktype":"Advanced","securitygroupsenabled":false,"allocationstate":"Enabled","zonetoken":"8366e550-542d-373d-88e3-ca7c90bc8e6c","dhcpprovider":"VirtualRouter"} ] } } diff --git a/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_deployvmstopped.json b/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_deployvmstopped.json new file mode 100644 index 0000000000..9974248df8 --- /dev/null +++ b/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_deployvmstopped.json @@ -0,0 +1 @@ +{ "queryasyncjobresultresponse" : {"jobid":"deployvmstopped", "jobprocstatus": 0, "jobresult": {"virtualmachine": {"account": "testuser", "cpunumber": 2, "cpuspeed": 1600, "created": "2014-07-06T16:40:39+0900", "displayname": "deployip", "domain": "ROOT", "domainid": 1623, "guestosid": 112, "haenable": false, "hypervisor": "VMware", "id": 65385, "memory": 4096, "name": "stopped_vm", "nic": [{"gateway": "10.1.0.1", "id": 87320, "ipaddress": "10.1.0.128", "isdefault": true, "macaddress": "02:00:78:4a:01:9e", "netmask": "255.255.252.0", "networkid": 1374, "traffictype": "Guest", "type": "Virtual"}, {"gateway": "192.168.2.254", "id": 
87319, "ipaddress": "192.168.2.55", "isdefault": false, "macaddress": "06:e6:50:00:70:0e", "netmask": "255.255.255.0", "networkid": 1823, "traffictype": "Guest", "type": "Direct"}], "password": "password", "passwordenabled": true, "rootdeviceid": 0, "rootdevicetype": "VMFS", "securitygroup": [], "serviceofferingid": 21, "serviceofferingname": "M4", "state": "Stopped" , "templatedisplaytext": "CentOS 6.5", "templateid": 8028, "templatename": "CentOS 6.5 64-bit", "zoneid": 1, "zonename": "zone1"}}, "jobresultcode": 0, "jobresulttype": "object", "jobstatus": 1} } diff --git a/libcloud/test/compute/test_cloudstack.py b/libcloud/test/compute/test_cloudstack.py index cd72b2b2c3..387695f05d 100644 --- a/libcloud/test/compute/test_cloudstack.py +++ b/libcloud/test/compute/test_cloudstack.py @@ -29,6 +29,7 @@ from libcloud.compute.drivers.cloudstack import CloudStackNodeDriver from libcloud.compute.types import LibcloudError, Provider, InvalidCredsError from libcloud.compute.types import KeyPairDoesNotExistError +from libcloud.compute.types import NodeState from libcloud.compute.providers import get_driver from libcloud.test import unittest @@ -149,6 +150,28 @@ def test_create_node_ex_ipaddress(self): self.assertEqual(node.extra['image_id'], image.id) self.assertEqual(node.private_ips[0], ipaddress) + def test_create_node_ex_start_vm_false(self): + CloudStackMockHttp.fixture_tag = 'stoppedvm' + size = self.driver.list_sizes()[0] + image = self.driver.list_images()[0] + location = self.driver.list_locations()[0] + + networks = [nw for nw in self.driver.ex_list_networks() + if str(nw.zoneid) == str(location.id)] + + node = self.driver.create_node(name='stopped_vm', + location=location, + image=image, + size=size, + networks=networks, + ex_start_vm=False) + self.assertEqual(node.name, 'stopped_vm') + self.assertEqual(node.extra['size_id'], size.id) + self.assertEqual(node.extra['zone_id'], location.id) + self.assertEqual(node.extra['image_id'], image.id) + + 
self.assertEqual(node.state, NodeState.STOPPED) + def test_create_node_ex_security_groups(self): size = self.driver.list_sizes()[0] image = self.driver.list_images()[0] From 4158507b0ee48dddc1cd2de92867311b0a3f59d1 Mon Sep 17 00:00:00 2001 From: Nirmal Ranganathan Date: Wed, 8 Oct 2014 19:25:36 -0500 Subject: [PATCH 232/315] Support for config_drive in the Openstack driver Adds a 'ex_config_drive' option to create_node and ex_rebuild_node for the Openstack driver. Closes #370 Signed-off-by: Tomaz Muraus --- CHANGES.rst | 6 ++++ docs/compute/drivers/openstack.rst | 14 ++++++++++ docs/examples/compute/openstack/cloud_init.py | 28 +++++++++++++++++++ libcloud/compute/drivers/openstack.py | 17 +++++++++++ ..._26f7fbee_8ce1_4c28_887a_bfe8e4bb10fe.json | 2 +- libcloud/test/compute/test_openstack.py | 22 +++++++++++++++ 6 files changed, 88 insertions(+), 1 deletion(-) create mode 100644 docs/examples/compute/openstack/cloud_init.py diff --git a/CHANGES.rst b/CHANGES.rst index 70dff2faa3..a23f4ebe98 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -145,6 +145,12 @@ Compute (GITHUB-368) [Roeland Kuipers] +- Support "config_drive" in the OpenStack driver. Allow users to pass + ``ex_config_drive`` argument to the ``create_node`` and ``ex_rebuild_node`` + method. + (GITHUB-370) + [Nirmal Ranganathan] + Storage ~~~~~~~ diff --git a/docs/compute/drivers/openstack.rst b/docs/compute/drivers/openstack.rst index ecf57d8c63..6975c3753c 100644 --- a/docs/compute/drivers/openstack.rst +++ b/docs/compute/drivers/openstack.rst @@ -122,6 +122,20 @@ Connecting to HP Cloud US West and US East (OpenStack Havana). .. literalinclude:: /examples/compute/openstack/hpcloud.py :language: python +7. Using Cloud-Init with Openstack +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +This example shows how to use cloud-init using the ``ex_config_drive`` and +``ex_userdata`` arguments to ``create_node``. This example just installs +nginx and starts it. More `Cloud-Init examples`_. 
+ +Note: You will need to use a cloud-init enabled image. Most Openstack based +public cloud providers support it. + +.. literalinclude:: /examples/compute/openstack/cloud_init.py + :language: python +.. _`Cloud-Init examples`: http://cloudinit.readthedocs.org/en/latest/topics/examples.html + Non-standard functionality and extension methods ------------------------------------------------ diff --git a/docs/examples/compute/openstack/cloud_init.py b/docs/examples/compute/openstack/cloud_init.py new file mode 100644 index 0000000000..5aafb92768 --- /dev/null +++ b/docs/examples/compute/openstack/cloud_init.py @@ -0,0 +1,28 @@ +from libcloud.compute.types import Provider +from libcloud.compute.providers import get_driver + + +cloud_init_config = """ +#cloud-config + +packages: + + - nginx + +runcmd: + + - service nginx start + +""" + + +OpenStack = get_driver(Provider.OPENSTACK) +driver = OpenStack('your_auth_username', 'your_auth_password', + ex_force_auth_url='http://192.168.1.101:5000', + ex_force_auth_version='2.0_password') + +image = driver.get_image('image_id') +size = driver.list_sizes()[0] + +node = driver.create_node(name='cloud_init', image=image, size=size, + ex_userdata=cloud_init_config, ex_config_drive=True) diff --git a/libcloud/compute/drivers/openstack.py b/libcloud/compute/drivers/openstack.py index 1d3b5ff9c3..750891d375 100644 --- a/libcloud/compute/drivers/openstack.py +++ b/libcloud/compute/drivers/openstack.py @@ -1175,6 +1175,10 @@ def create_node(self, **kwargs): Can be either ``AUTO`` or ``MANUAL``. :type ex_disk_config: ``str`` + :keyword ex_config_drive: If True enables metadata injection in a + server through a configuration drive. 
+ :type ex_config_drive: ``bool`` + :keyword ex_admin_pass: The root password for the node :type ex_admin_pass: ``str`` @@ -1264,6 +1268,9 @@ def _create_args_to_params(self, node, **kwargs): if 'ex_disk_config' in kwargs: server_params['OS-DCF:diskConfig'] = kwargs['ex_disk_config'] + if 'ex_config_drive' in kwargs: + server_params['config_drive'] = str(kwargs['ex_config_drive']) + if 'ex_admin_pass' in kwargs: server_params['adminPass'] = kwargs['ex_admin_pass'] @@ -1358,6 +1365,10 @@ def ex_rebuild(self, node, image, **kwargs): Can be either ``AUTO`` or ``MANUAL``. :type ex_disk_config: ``str`` + :keyword ex_config_drive: If True enables metadata injection in a + server through a configuration drive. + :type ex_config_drive: ``bool`` + :rtype: ``bool`` """ server_params = self._create_args_to_params(node, image=image, @@ -1979,6 +1990,11 @@ def _to_node(self, api_node): image = api_node.get('image', None) image_id = image.get('id', None) if image else None + if api_node.get("config_drive", False).lower() == "true": + config_drive = True + else: + config_drive = False + return Node( id=api_node['id'], name=api_node['name'], @@ -2003,6 +2019,7 @@ def _to_node(self, api_node): updated=api_node['updated'], key_name=api_node.get('key_name', None), disk_config=api_node.get('OS-DCF:diskConfig', None), + config_drive=config_drive, availability_zone=api_node.get('OS-EXT-AZ:availability_zone', None), ), diff --git a/libcloud/test/compute/fixtures/openstack_v1.1/_servers_26f7fbee_8ce1_4c28_887a_bfe8e4bb10fe.json b/libcloud/test/compute/fixtures/openstack_v1.1/_servers_26f7fbee_8ce1_4c28_887a_bfe8e4bb10fe.json index d972cfd66c..7180b9167b 100644 --- a/libcloud/test/compute/fixtures/openstack_v1.1/_servers_26f7fbee_8ce1_4c28_887a_bfe8e4bb10fe.json +++ b/libcloud/test/compute/fixtures/openstack_v1.1/_servers_26f7fbee_8ce1_4c28_887a_bfe8e4bb10fe.json @@ -40,7 +40,7 @@ } ] }, - "config_drive": "", + "config_drive": "True", "id": "26f7fbee-8ce1-4c28-887a-bfe8e4bb10fe", 
"metadata": { "My Server Name" : "Apache1" diff --git a/libcloud/test/compute/test_openstack.py b/libcloud/test/compute/test_openstack.py index 930fff9bf0..19f0eb811e 100644 --- a/libcloud/test/compute/test_openstack.py +++ b/libcloud/test/compute/test_openstack.py @@ -916,6 +916,18 @@ def test_create_node_with_ex_disk_config(self): self.assertEqual(node.name, 'racktest') self.assertEqual(node.extra['disk_config'], 'AUTO') + def test_create_node_with_ex_config_drive(self): + OpenStack_1_1_MockHttp.type = 'EX_CONFIG_DRIVE' + image = NodeImage( + id=11, name='Ubuntu 8.10 (intrepid)', driver=self.driver) + size = NodeSize( + 1, '256 slice', None, None, None, None, driver=self.driver) + node = self.driver.create_node(name='racktest', image=image, size=size, + ex_config_drive=True) + self.assertEqual(node.id, '26f7fbee-8ce1-4c28-887a-bfe8e4bb10fe') + self.assertEqual(node.name, 'racktest') + self.assertEqual(node.extra['config_drive'], True) + def test_destroy_node(self): self.assertTrue(self.node.destroy()) @@ -965,6 +977,16 @@ def test_ex_rebuild_with_ex_disk_config(self): ex_disk_config='MANUAL') self.assertTrue(success) + def test_ex_rebuild_with_ex_config_drive(self): + image = NodeImage(id=58, name='Ubuntu 10.10 (intrepid)', + driver=self.driver) + node = Node(id=12066, name=None, state=None, public_ips=None, + private_ips=None, driver=self.driver) + success = self.driver.ex_rebuild(node, image=image, + ex_disk_config='MANUAL', + ex_config_drive=True) + self.assertTrue(success) + def test_ex_resize(self): size = NodeSize(1, '256 slice', None, None, None, None, driver=self.driver) From a38ade58404f155c5de82a2a9512809856f63f17 Mon Sep 17 00:00:00 2001 From: Eric Johnson Date: Fri, 10 Oct 2014 15:52:02 +0000 Subject: [PATCH 233/315] LIBCLOUD-578: GCE adding Service Accounts to create_node Closes #372 Signed-off-by: Tomaz Muraus --- CHANGES.rst | 5 + docs/compute/drivers/gce.rst | 14 ++ .../compute/gce/gce_service_account_scopes.py | 31 +++++ 
libcloud/compute/drivers/gce.py | 124 +++++++++++++++++- libcloud/test/compute/test_gce.py | 26 ++++ 5 files changed, 193 insertions(+), 7 deletions(-) create mode 100644 docs/examples/compute/gce/gce_service_account_scopes.py diff --git a/CHANGES.rst b/CHANGES.rst index a23f4ebe98..e7b954237b 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -151,6 +151,11 @@ Compute (GITHUB-370) [Nirmal Ranganathan] +- Add support for service scopes to the ``create_node`` method in the GCE + driver. + (LIBCLOUD-578, GITHUB-373) + [Eric Johnson] + Storage ~~~~~~~ diff --git a/docs/compute/drivers/gce.rst b/docs/compute/drivers/gce.rst index a6b6b71b7e..6e91160019 100644 --- a/docs/compute/drivers/gce.rst +++ b/docs/compute/drivers/gce.rst @@ -15,6 +15,7 @@ Google Compute Engine features: * High-performance virtual machines * Minute-level billing (10-minute minimum) * Fast VM provisioning +* Persistent block storage (SSD and standard) * Native Load Balancing Connecting to Google Compute Engine @@ -68,6 +69,14 @@ To set up Installed Account authentication: 7. You will also need your "Project ID" which can be found by clicking on the "Overview" link on the left sidebar. +Accessing Google Cloud services from your Libcloud nodes +-------------------------------------------------------- +In order for nodes created with libcloud to be able to access or manage other +Google Cloud Platform services, you will need to specify a list of Service +Account Scopes. By default libcloud will create nodes that only allow +read-only access to Google Cloud Storage. A few of the examples below +illustrate how to use Service Account Scopes. + Examples -------- @@ -89,6 +98,11 @@ https://github.com/apache/libcloud/blob/trunk/demos/gce_demo.py .. literalinclude:: /examples/compute/gce/gce_datacenter.py +4. Specifying Service Account Scopes +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. 
literalinclude:: /examples/compute/gce/gce_service_account_scopes.py + API Docs -------- diff --git a/docs/examples/compute/gce/gce_service_account_scopes.py b/docs/examples/compute/gce/gce_service_account_scopes.py new file mode 100644 index 0000000000..f5f9be9fd9 --- /dev/null +++ b/docs/examples/compute/gce/gce_service_account_scopes.py @@ -0,0 +1,31 @@ +# See previous examples for connecting and creating the driver +# ... +driver = None + +# Define common example attributes +s = 'n1-standard-1' +i = 'debian-7' +z = 'us-central1-a' + +# Service Account Scopes require a list of dictionaries. Each dictionary +# can have an optional 'email' address specifying the Service Account +# address, and list of 'scopes'. The default Service Account Scopes for +# new nodes will effectively use: + +sa_scopes = [ + { + 'email': 'default', + 'scopes': ['storage-ro'] + } +] + +# The expected scenario will likely use the default Service Account email +# address, but allow users to override the default list of scopes. 
+# For example, create a new node with full access to Google Cloud Storage +# and Google Compute Engine: +sa_scopes = [{'scopes': ['compute', 'storage-full']}] +node_1 = driver.create_node("n1", s, i, z, ex_service_accounts=sa_scopes) + +# See Google's documentation for Accessing other Google Cloud services from +# your Google Compute Engine instances at, +# https://cloud.google.com/compute/docs/authentication diff --git a/libcloud/compute/drivers/gce.py b/libcloud/compute/drivers/gce.py index bbcfbb8f96..856c334bc9 100644 --- a/libcloud/compute/drivers/gce.py +++ b/libcloud/compute/drivers/gce.py @@ -534,6 +534,22 @@ class GCENodeDriver(NodeDriver): "TERMINATED": NodeState.TERMINATED } + AUTH_URL = "https://www.googleapis.com/auth/" + SA_SCOPES_MAP = { + # list derived from 'gcloud compute instances create --help' + "bigquery": "bigquery", + "compute-ro": "compute.readonly", + "compute-rw": "compute", + "datastore": "datastore", + "sql": "sqlservice", + "sql-admin": "sqlservice.admin", + "storage-full": "devstorage.full_control", + "storage-ro": "devstorage.read_only", + "storage-rw": "devstorage.read_write", + "taskqueue": "taskqueue", + "userinfo-email": "userinfo.email" + } + def __init__(self, user_id, key, datacenter=None, project=None, auth_type=None, scopes=None, **kwargs): """ @@ -1222,7 +1238,7 @@ def create_node(self, name, size, image, location=None, ex_network='default', ex_tags=None, ex_metadata=None, ex_boot_disk=None, use_existing_disk=True, external_ip='ephemeral', ex_disk_type='pd-standard', - ex_disk_auto_delete=True): + ex_disk_auto_delete=True, ex_service_accounts=None): """ Create a new node and return a node object for the node. @@ -1273,6 +1289,20 @@ def create_node(self, name, size, image, location=None, True by default. :type ex_disk_auto_delete: ``bool`` + :keyword ex_service_accounts: Specify a list of serviceAccounts when + creating the instance. The format is a + list of dictionaries containing email + and list of scopes, e.g. 
+ [{'email':'default', + 'scopes':['compute', ...]}, ...] + Scopes can either be full URLs or short + names. If not provided, use the + 'default' service account email and a + scope of 'devstorage.read_only'. Also + accepts the aliases defined in + 'gcloud cmopute'. + :type ex_service_accounts: ``list`` + :return: A Node object for the new node. :rtype: :class:`Node` """ @@ -1315,7 +1345,8 @@ def create_node(self, name, size, image, location=None, ex_tags, ex_metadata, ex_boot_disk, external_ip, ex_disk_type, - ex_disk_auto_delete) + ex_disk_auto_delete, + ex_service_accounts) self.connection.async_request(request, method='POST', data=node_data) return self.ex_get_node(name, location.name) @@ -1327,6 +1358,7 @@ def ex_create_multiple_nodes(self, base_name, size, image, number, poll_interval=2, external_ip='ephemeral', ex_disk_type='pd-standard', ex_auto_disk_delete=True, + ex_service_accounts=None, timeout=DEFAULT_TASK_COMPLETION_TIMEOUT): """ Create multiple nodes and return a list of Node objects. @@ -1392,6 +1424,20 @@ def ex_create_multiple_nodes(self, base_name, size, image, number, True by default. :type ex_disk_auto_delete: ``bool`` + :keyword ex_service_accounts: Specify a list of serviceAccounts when + creating the instance. The format is a + list of dictionaries containing email + and list of scopes, e.g. + [{'email':'default', + 'scopes':['compute', ...]}, ...] + Scopes can either be full URLs or short + names. If not provided, use the + 'default' service account email and a + scope of 'devstorage.read_only'. Also + accepts the aliases defined in + 'gcloud cmopute'. + :type ex_service_accounts: ``list`` + :keyword timeout: The number of seconds to wait for all nodes to be created before timing out. 
:type timeout: ``int`` @@ -1418,7 +1464,8 @@ def ex_create_multiple_nodes(self, base_name, size, image, number, 'ignore_errors': ignore_errors, 'use_existing_disk': use_existing_disk, 'external_ip': external_ip, - 'ex_disk_type': ex_disk_type} + 'ex_disk_type': ex_disk_type, + 'ex_service_accounts': ex_service_accounts} # List for holding the status information for disk/node creation. status_list = [] @@ -1930,7 +1977,8 @@ def ex_set_node_scheduling(self, node, on_host_maintenance=None, return success def deploy_node(self, name, size, image, script, location=None, - ex_network='default', ex_tags=None): + ex_network='default', ex_tags=None, + ex_service_accounts=None): """ Create a new node and run a script on start-up. @@ -1956,6 +2004,20 @@ def deploy_node(self, name, size, image, script, location=None, :keyword ex_tags: A list of tags to associate with the node. :type ex_tags: ``list`` of ``str`` or ``None`` + :keyword ex_service_accounts: Specify a list of serviceAccounts when + creating the instance. The format is a + list of dictionaries containing email + and list of scopes, e.g. + [{'email':'default', + 'scopes':['compute', ...]}, ...] + Scopes can either be full URLs or short + names. If not provided, use the + 'default' service account email and a + scope of 'devstorage.read_only'. Also + accepts the aliases defined in + 'gcloud cmopute'. + :type ex_service_accounts: ``list`` + :return: A Node object for the new node. 
:rtype: :class:`Node` """ @@ -1966,7 +2028,8 @@ def deploy_node(self, name, size, image, script, location=None, return self.create_node(name, size, image, location=location, ex_network=ex_network, ex_tags=ex_tags, - ex_metadata=metadata) + ex_metadata=metadata, + ex_service_accounts=ex_service_accounts) def attach_volume(self, node, volume, device=None, ex_mode=None, ex_boot=False): @@ -2863,7 +2926,7 @@ def _set_zone(self, zone): def _create_node_req(self, name, size, image, location, network, tags=None, metadata=None, boot_disk=None, external_ip='ephemeral', ex_disk_type='pd-standard', - ex_disk_auto_delete=True): + ex_disk_auto_delete=True, ex_service_accounts=None): """ Returns a request and body to create a new node. This is a helper method to support both :class:`create_node` and @@ -2910,6 +2973,20 @@ def _create_node_req(self, name, size, image, location, network, True by default. :type ex_disk_auto_delete: ``bool`` + :keyword ex_service_accounts: Specify a list of serviceAccounts when + creating the instance. The format is a + list of dictionaries containing email + and list of scopes, e.g. + [{'email':'default', + 'scopes':['compute', ...]}, ...] + Scopes can either be full URLs or short + names. If not provided, use the + 'default' service account email and a + scope of 'devstorage.read_only'. Also + accepts the aliases defined in + 'gcloud compute'. + :type ex_service_accounts: ``list`` + :return: A tuple containing a request string and a node_data dict.
:rtype: ``tuple`` of ``str`` and ``dict`` """ @@ -2921,6 +2998,38 @@ def _create_node_req(self, name, size, image, location, network, if metadata: node_data['metadata'] = metadata + # by default, new instances will match the same serviceAccount and + # scope set in the Developers Console and Cloud SDK + if not ex_service_accounts: + set_scopes = [{ + 'email': 'default', + 'scopes': [self.AUTH_URL + 'devstorage.read_only'] + }] + elif not isinstance(ex_service_accounts, list): + raise ValueError("ex_service_accounts field is not a list.") + else: + set_scopes = [] + for sa in ex_service_accounts: + if not isinstance(sa, dict): + raise ValueError("ex_service_accounts needs to be a list " + "of dicts, got: '%s - %s'" % ( + str(type(sa)), str(sa))) + if 'email' not in sa: + sa['email'] = 'default' + if 'scopes' not in sa: + sa['scopes'] = [self.AUTH_URL + 'devstorage.read_only'] + ps = [] + for scope in sa['scopes']: + if scope.startswith(self.AUTH_URL): + ps.append(scope) + elif scope in self.SA_SCOPES_MAP: + ps.append(self.AUTH_URL + self.SA_SCOPES_MAP[scope]) + else: + ps.append(self.AUTH_URL + scope) + sa['scopes'] = ps + set_scopes.append(sa) + node_data['serviceAccounts'] = set_scopes + if boot_disk: disks = [{'kind': 'compute#attachedDisk', 'boot': True, @@ -3040,7 +3149,8 @@ def _multi_create_node(self, status, node_attrs): status['name'], node_attrs['size'], node_attrs['image'], node_attrs['location'], node_attrs['network'], node_attrs['tags'], node_attrs['metadata'], boot_disk=status['disk'], - external_ip=node_attrs['external_ip']) + external_ip=node_attrs['external_ip'], + ex_service_accounts=node_attrs['ex_service_accounts']) try: node_res = self.connection.request( request, method='POST', data=node_data).object diff --git a/libcloud/test/compute/test_gce.py b/libcloud/test/compute/test_gce.py index 730ef27006..97bb61f5ed 100644 --- a/libcloud/test/compute/test_gce.py +++ b/libcloud/test/compute/test_gce.py @@ -302,6 +302,11 @@ def 
test_create_node_req(self): self.assertEqual(node_data['tags']['items'][0], 'libcloud') self.assertEqual(node_data['name'], 'lcnode') self.assertTrue(node_data['disks'][0]['boot']) + self.assertIsInstance(node_data['serviceAccounts'], list) + self.assertIsInstance(node_data['serviceAccounts'][0], dict) + self.assertTrue(node_data['serviceAccounts'][0]['email'], 'default') + self.assertIsInstance(node_data['serviceAccounts'][0]['scopes'], list) + self.assertTrue(len(node_data['serviceAccounts'][0]['scopes']), 1) def test_create_node(self): node_name = 'node-name' @@ -311,6 +316,27 @@ def test_create_node(self): self.assertTrue(isinstance(node, Node)) self.assertEqual(node.name, node_name) + def test_create_node_req_with_serviceaccounts(self): + image = self.driver.ex_get_image('debian-7') + size = self.driver.ex_get_size('n1-standard-1') + location = self.driver.zone + network = self.driver.ex_get_network('default') + # ex_service_accounts with specific scopes, default 'email' + ex_sa = [{'scopes': ['compute-ro', 'pubsub', 'storage-ro']}] + node_request, node_data = self.driver._create_node_req('lcnode', size, + image, location, + network, + ex_service_accounts=ex_sa) + self.assertIsInstance(node_data['serviceAccounts'], list) + self.assertIsInstance(node_data['serviceAccounts'][0], dict) + self.assertTrue(node_data['serviceAccounts'][0]['email'], 'default') + self.assertIsInstance(node_data['serviceAccounts'][0]['scopes'], list) + self.assertTrue(len(node_data['serviceAccounts'][0]['scopes']), 3) + self.assertTrue('https://www.googleapis.com/auth/devstorage.read_only' + in node_data['serviceAccounts'][0]['scopes']) + self.assertTrue('https://www.googleapis.com/auth/compute.readonly' + in node_data['serviceAccounts'][0]['scopes']) + def test_create_node_with_metadata(self): node_name = 'node-name' image = self.driver.ex_get_image('debian-7') From 3cd156c5e1875c4d8f991651892ecf60f8939eb9 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Sun, 19 Oct 2014 10:33:38 
+0800 Subject: [PATCH 234/315] Allow users to pass "headers" and "method" argument to the get_response_object utility method. --- libcloud/utils/connection.py | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/libcloud/utils/connection.py b/libcloud/utils/connection.py index db381a31fd..1591c6442f 100644 --- a/libcloud/utils/connection.py +++ b/libcloud/utils/connection.py @@ -21,7 +21,7 @@ ] -def get_response_object(url): +def get_response_object(url, method='GET', headers=None): """ Utility function which uses libcloud's connection class to issue an HTTP request. @@ -29,6 +29,12 @@ def get_response_object(url): :param url: URL to send the request to. :type url: ``str`` + :param method: HTTP method. + :type method: ``str`` + + :param headers: Optional request headers. + :type headers: ``dict`` + :return: Response object. :rtype: :class:`Response`. """ @@ -36,7 +42,10 @@ def get_response_object(url): parsed_qs = parse_qs(parsed_url.query) secure = parsed_url.scheme == 'https' + headers = headers or {} + method = method.upper() + con = Connection(secure=secure, host=parsed_url.netloc) - response = con.request(method='GET', action=parsed_url.path, - params=parsed_qs) + response = con.request(action=parsed_url.path, params=parsed_qs, + headers=headers, method=method) return response From 75bc68c66fd8f2247e05958cee6db71c45ca1dbc Mon Sep 17 00:00:00 2001 From: Alex Gaynor Date: Thu, 16 Oct 2014 10:21:35 -0700 Subject: [PATCH 235/315] Use assertEqual instead of the deprecated assertEquals Closes #375 Signed-off-by: Tomaz Muraus --- libcloud/test/compute/test_gce.py | 4 +- libcloud/test/compute/test_nephoscale.py | 2 +- libcloud/test/compute/test_profitbricks.py | 100 ++++++++++----------- libcloud/test/dns/test_rackspace.py | 2 +- libcloud/test/loadbalancer/test_gogrid.py | 2 +- libcloud/test/storage/test_azure_blobs.py | 6 +- 6 files changed, 58 insertions(+), 58 deletions(-) diff --git a/libcloud/test/compute/test_gce.py 
b/libcloud/test/compute/test_gce.py index 97bb61f5ed..2b90ec69e7 100644 --- a/libcloud/test/compute/test_gce.py +++ b/libcloud/test/compute/test_gce.py @@ -247,8 +247,8 @@ def test_ex_create_image(self): volume = self.driver.ex_get_volume('lcdisk') image = self.driver.ex_create_image('coreos', volume) self.assertTrue(isinstance(image, GCENodeImage)) - self.assertEquals(image.name, 'coreos') - self.assertEquals(image.extra['description'], 'CoreOS test image') + self.assertEqual(image.name, 'coreos') + self.assertEqual(image.extra['description'], 'CoreOS test image') def test_ex_create_firewall(self): firewall_name = 'lcfirewall' diff --git a/libcloud/test/compute/test_nephoscale.py b/libcloud/test/compute/test_nephoscale.py index bb30f68a08..0a0af0add8 100644 --- a/libcloud/test/compute/test_nephoscale.py +++ b/libcloud/test/compute/test_nephoscale.py @@ -74,7 +74,7 @@ def test_list_ssh_keys(self): def test_list_password_keys(self): password_keys = self.driver.ex_list_keypairs(password=True) self.assertEqual(len(password_keys), 1) - self.assertEquals(password_keys[0].password, '23d493j5') + self.assertEqual(password_keys[0].password, '23d493j5') def test_reboot_node(self): node = self.driver.list_nodes()[0] diff --git a/libcloud/test/compute/test_profitbricks.py b/libcloud/test/compute/test_profitbricks.py index ab7ab06c1f..0b7567d912 100644 --- a/libcloud/test/compute/test_profitbricks.py +++ b/libcloud/test/compute/test_profitbricks.py @@ -40,18 +40,18 @@ def test_list_nodes(self): self.assertEqual(len(nodes), 3) node = nodes[0] - self.assertEquals(node.id, "c8e57d7b-e731-46ad-a913-1828c0562246") - self.assertEquals(node.name, "server001") - self.assertEquals(node.state, 0) - self.assertEquals(node.public_ips, ['162.254.25.197']) - self.assertEquals(node.private_ips, ['10.10.108.12', '10.13.198.11']) - self.assertEquals(node.extra['datacenter_id'], "e1e8ec0d-b47f-4d39-a91b-6e885483c899") - self.assertEquals(node.extra['datacenter_version'], "5") - 
self.assertEquals(node.extra['provisioning_state'], 0) - self.assertEquals(node.extra['creation_time'], "2014-07-14T20:52:20.839Z") - self.assertEquals(node.extra['last_modification_time'], "2014-07-14T22:11:09.324Z") - self.assertEquals(node.extra['os_type'], "LINUX") - self.assertEquals(node.extra['availability_zone'], "ZONE_1") + self.assertEqual(node.id, "c8e57d7b-e731-46ad-a913-1828c0562246") + self.assertEqual(node.name, "server001") + self.assertEqual(node.state, 0) + self.assertEqual(node.public_ips, ['162.254.25.197']) + self.assertEqual(node.private_ips, ['10.10.108.12', '10.13.198.11']) + self.assertEqual(node.extra['datacenter_id'], "e1e8ec0d-b47f-4d39-a91b-6e885483c899") + self.assertEqual(node.extra['datacenter_version'], "5") + self.assertEqual(node.extra['provisioning_state'], 0) + self.assertEqual(node.extra['creation_time'], "2014-07-14T20:52:20.839Z") + self.assertEqual(node.extra['last_modification_time'], "2014-07-14T22:11:09.324Z") + self.assertEqual(node.extra['os_type'], "LINUX") + self.assertEqual(node.extra['availability_zone'], "ZONE_1") def test_ex_describe_node(self): image = type('NodeImage', (object,), @@ -68,7 +68,7 @@ def test_ex_describe_node(self): image=image, size=size) - self.assertEquals(node.id, "7b18b85f-cc93-4c2d-abcc-5ce732d35750") + self.assertEqual(node.id, "7b18b85f-cc93-4c2d-abcc-5ce732d35750") def test_reboot_node(self): node = type('Node', (object,), @@ -117,16 +117,16 @@ def test_list_volumes(self): self.assertEqual(len(volumes), 4) volume = volumes[0] - self.assertEquals(volume.id, "453582cf-8d54-4ec8-bc0b-f9962f7fd232") - self.assertEquals(volume.name, "storage001") - self.assertEquals(volume.size, 50) - self.assertEquals(volume.extra['server_id'], "ebee7d83-912b-42f1-9b62-b953351a7e29") - self.assertEquals(volume.extra['provisioning_state'], 0) - self.assertEquals(volume.extra['creation_time'], "2014-07-15T03:19:38.252Z") - self.assertEquals(volume.extra['last_modification_time'], "2014-07-15T03:28:58.724Z") - 
self.assertEquals(volume.extra['image_id'], "d2f627c4-0289-11e4-9f63-52540066fee9") - self.assertEquals(volume.extra['image_name'], "CentOS-6-server-2014-07-01") - self.assertEquals(volume.extra['datacenter_id'], "06eac419-c2b3-4761-aeb9-10efdd2cf292") + self.assertEqual(volume.id, "453582cf-8d54-4ec8-bc0b-f9962f7fd232") + self.assertEqual(volume.name, "storage001") + self.assertEqual(volume.size, 50) + self.assertEqual(volume.extra['server_id'], "ebee7d83-912b-42f1-9b62-b953351a7e29") + self.assertEqual(volume.extra['provisioning_state'], 0) + self.assertEqual(volume.extra['creation_time'], "2014-07-15T03:19:38.252Z") + self.assertEqual(volume.extra['last_modification_time'], "2014-07-15T03:28:58.724Z") + self.assertEqual(volume.extra['image_id'], "d2f627c4-0289-11e4-9f63-52540066fee9") + self.assertEqual(volume.extra['image_name'], "CentOS-6-server-2014-07-01") + self.assertEqual(volume.extra['datacenter_id'], "06eac419-c2b3-4761-aeb9-10efdd2cf292") def test_create_volume(self): datacenter = type('Datacenter', (object,), @@ -286,9 +286,9 @@ def test_ex_list_datacenters(self): self.assertEqual(len(datacenters), 2) dc1 = datacenters[0] - self.assertEquals(dc1.id, "a3e6f83a-8982-4d6a-aebc-60baf5755ede") - self.assertEquals(dc1.name, "StackPointCloud") - self.assertEquals(dc1.version, "1") + self.assertEqual(dc1.id, "a3e6f83a-8982-4d6a-aebc-60baf5755ede") + self.assertEqual(dc1.name, "StackPointCloud") + self.assertEqual(dc1.version, "1") def test_ex_rename_datacenter(self): datacenter = type('Datacenter', (object,), @@ -306,7 +306,7 @@ def test_list_locations(self): locationNamesResult = sorted(list(a.name for a in locations)) locationNamesExpected = ['de/fkb', 'de/fra', 'us/las'] - self.assertEquals(locationNamesResult, locationNamesExpected) + self.assertEqual(locationNamesResult, locationNamesExpected) ''' Availability Zone Tests ''' @@ -318,7 +318,7 @@ def test_ex_list_availability_zones(self): zoneNamesResult = sorted(list(a.name for a in zones)) 
zoneNamesExpected = ['AUTO', 'ZONE_1', 'ZONE_2'] - self.assertEquals(zoneNamesResult, zoneNamesExpected) + self.assertEqual(zoneNamesResult, zoneNamesExpected) ''' Interface Tests ''' @@ -329,16 +329,16 @@ def test_ex_list_interfaces(self): self.assertEqual(len(interfaces), 3) interface = interfaces[0] - self.assertEquals(interface.id, "6b38a4f3-b851-4614-9e3a-5ddff4727727") - self.assertEquals(interface.name, "StackPointCloud") - self.assertEquals(interface.state, 0) - self.assertEquals(interface.extra['server_id'], "234f0cf9-1efc-4ade-b829-036456584116") - self.assertEquals(interface.extra['lan_id'], '3') - self.assertEquals(interface.extra['internet_access'], 'false') - self.assertEquals(interface.extra['mac_address'], "02:01:40:47:90:04") - self.assertEquals(interface.extra['dhcp_active'], "true") - self.assertEquals(interface.extra['gateway_ip'], None) - self.assertEquals(interface.extra['ips'], ['10.14.96.11', '162.254.26.14', '162.254.26.15']) + self.assertEqual(interface.id, "6b38a4f3-b851-4614-9e3a-5ddff4727727") + self.assertEqual(interface.name, "StackPointCloud") + self.assertEqual(interface.state, 0) + self.assertEqual(interface.extra['server_id'], "234f0cf9-1efc-4ade-b829-036456584116") + self.assertEqual(interface.extra['lan_id'], '3') + self.assertEqual(interface.extra['internet_access'], 'false') + self.assertEqual(interface.extra['mac_address'], "02:01:40:47:90:04") + self.assertEqual(interface.extra['dhcp_active'], "true") + self.assertEqual(interface.extra['gateway_ip'], None) + self.assertEqual(interface.extra['ips'], ['10.14.96.11', '162.254.26.14', '162.254.26.15']) def test_ex_create_network_interface(self): node = type('Node', (object,), @@ -374,18 +374,18 @@ def test_ex_describe_network_interface(self): describe = self.driver.ex_describe_network_interface(network_interface=network_interface) - self.assertEquals(describe.id, "f1c7a244-2fa6-44ee-8fb6-871f337683a3") - self.assertEquals(describe.name, None) - self.assertEquals(describe.state, 
0) - self.assertEquals(describe.extra['datacenter_id'], "a3a2e730-0dc3-47e6-bac6-4c056d5e2aee") - self.assertEquals(describe.extra['datacenter_version'], "6") - self.assertEquals(describe.extra['server_id'], "c09f4f31-336c-4ad2-9ec7-591778513408") - self.assertEquals(describe.extra['lan_id'], "1") - self.assertEquals(describe.extra['internet_access'], "false") - self.assertEquals(describe.extra['mac_address'], "02:01:96:d7:60:e0") - self.assertEquals(describe.extra['dhcp_active'], "true") - self.assertEquals(describe.extra['gateway_ip'], None) - self.assertEquals(describe.extra['ips'], ['10.10.38.12']) + self.assertEqual(describe.id, "f1c7a244-2fa6-44ee-8fb6-871f337683a3") + self.assertEqual(describe.name, None) + self.assertEqual(describe.state, 0) + self.assertEqual(describe.extra['datacenter_id'], "a3a2e730-0dc3-47e6-bac6-4c056d5e2aee") + self.assertEqual(describe.extra['datacenter_version'], "6") + self.assertEqual(describe.extra['server_id'], "c09f4f31-336c-4ad2-9ec7-591778513408") + self.assertEqual(describe.extra['lan_id'], "1") + self.assertEqual(describe.extra['internet_access'], "false") + self.assertEqual(describe.extra['mac_address'], "02:01:96:d7:60:e0") + self.assertEqual(describe.extra['dhcp_active'], "true") + self.assertEqual(describe.extra['gateway_ip'], None) + self.assertEqual(describe.extra['ips'], ['10.10.38.12']) def test_list_sizes(self): sizes = self.driver.list_sizes() diff --git a/libcloud/test/dns/test_rackspace.py b/libcloud/test/dns/test_rackspace.py index 4a211a3f85..907fa8d3d6 100644 --- a/libcloud/test/dns/test_rackspace.py +++ b/libcloud/test/dns/test_rackspace.py @@ -71,7 +71,7 @@ def test_gets_auth_2_0_endpoint(self): driver = self.klass(*DNS_PARAMS_RACKSPACE, **kwargs) driver.connection._populate_hosts_and_request_paths() - self.assertEquals(self.endpoint_url, driver.connection.get_endpoint()) + self.assertEqual(self.endpoint_url, driver.connection.get_endpoint()) def test_list_record_types(self): record_types = 
self.driver.list_record_types() diff --git a/libcloud/test/loadbalancer/test_gogrid.py b/libcloud/test/loadbalancer/test_gogrid.py index 3e32a37f19..5ad3e8015a 100644 --- a/libcloud/test/loadbalancer/test_gogrid.py +++ b/libcloud/test/loadbalancer/test_gogrid.py @@ -119,7 +119,7 @@ def test_balancer_list_members(self): self.assertEqual(len(members2), 3) self.assertEqual(expected_members, set(["%s:%s" % (member.ip, member.port) for member in members1])) - self.assertEquals(members1[0].balancer, balancer) + self.assertEqual(members1[0].balancer, balancer) def test_balancer_attach_compute_node(self): balancer = LoadBalancer(23530, None, None, None, None, self.driver) diff --git a/libcloud/test/storage/test_azure_blobs.py b/libcloud/test/storage/test_azure_blobs.py index 87f1238ee4..f9ee585fc6 100644 --- a/libcloud/test/storage/test_azure_blobs.py +++ b/libcloud/test/storage/test_azure_blobs.py @@ -945,9 +945,9 @@ def test_storage_driver_host(self): host2 = driver2.connection.host host3 = driver3.connection.host - self.assertEquals(host1, 'fakeaccount1.blob.core.windows.net') - self.assertEquals(host2, 'fakeaccount2.blob.core.windows.net') - self.assertEquals(host3, 'test.foo.bar.com') + self.assertEqual(host1, 'fakeaccount1.blob.core.windows.net') + self.assertEqual(host2, 'fakeaccount2.blob.core.windows.net') + self.assertEqual(host3, 'test.foo.bar.com') if __name__ == '__main__': From 39d9f6081642d6a5321f34c6c5363a621fd6f711 Mon Sep 17 00:00:00 2001 From: Pedro Algarvio Date: Sat, 13 Sep 2014 17:43:22 +0100 Subject: [PATCH 236/315] Allow passing `credential_file` to be used in the GCEConnection Closes #359 Signed-off-by: Tomaz Muraus --- libcloud/compute/drivers/gce.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/libcloud/compute/drivers/gce.py b/libcloud/compute/drivers/gce.py index 856c334bc9..7255755696 100644 --- a/libcloud/compute/drivers/gce.py +++ b/libcloud/compute/drivers/gce.py @@ -551,7 +551,7 @@ class 
GCENodeDriver(NodeDriver): } def __init__(self, user_id, key, datacenter=None, project=None, - auth_type=None, scopes=None, **kwargs): + auth_type=None, scopes=None, credential_file=None, **kwargs): """ :param user_id: The email address (for service accounts) or Client ID (for installed apps) to be used for authentication. @@ -578,10 +578,16 @@ def __init__(self, user_id, key, datacenter=None, project=None, :keyword scopes: List of authorization URLs. Default is empty and grants read/write to Compute, Storage, DNS. :type scopes: ``list`` + + :keyword credential_file: Path to file for caching authentication + information used by GCEConnection + :type credential_file: ``str`` + """ self.auth_type = auth_type self.project = project self.scopes = scopes + self.credential_file = credential_file if not self.project: raise ValueError('Project name must be specified using ' '"project" keyword.') @@ -2749,7 +2755,8 @@ def ex_copy_image(self, name, url, description=None): def _ex_connection_class_kwargs(self): return {'auth_type': self.auth_type, 'project': self.project, - 'scopes': self.scopes} + 'scopes': self.scopes, + 'credential_file': self.credential_file} def _catch_error(self, ignore_errors=False): """ From f01509da53fb630cc44b979fcc46f148348365be Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Mon, 20 Oct 2014 15:19:16 +0800 Subject: [PATCH 237/315] Bump versions for 0.16.0 release. 
--- CHANGES.rst | 4 ++-- docs/upgrade_notes.rst | 4 ++-- libcloud/__init__.py | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index e7b954237b..821fd58fbe 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,8 +1,8 @@ Changelog ========= -Changes with Apache Libcloud in development -------------------------------------------- +Changes with Apache Libcloud 0.16.0 +----------------------------------- General ~~~~~~~ diff --git a/docs/upgrade_notes.rst b/docs/upgrade_notes.rst index 656e3e1af2..dd6c52bb9e 100644 --- a/docs/upgrade_notes.rst +++ b/docs/upgrade_notes.rst @@ -5,8 +5,8 @@ This page describes how to upgrade from a previous version to a new version which contains backward incompatible or semi-incompatible changes and how to preserve the old behavior when this is possible. -Libcloud in development ------------------------ +Libcloud 0.16.0 +--------------- Changes in the OpenStack authentication and service catalog classes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/libcloud/__init__.py b/libcloud/__init__.py index 3ce79f0b20..9a1c180d2a 100644 --- a/libcloud/__init__.py +++ b/libcloud/__init__.py @@ -20,7 +20,7 @@ """ __all__ = ['__version__', 'enable_debug'] -__version__ = '0.15.1' +__version__ = '0.16.0' import os From 20d977075117f05a0d8cd8ceb91c4dfcd93a7766 Mon Sep 17 00:00:00 2001 From: Eric Johnson Date: Sat, 18 Oct 2014 02:17:00 +0000 Subject: [PATCH 238/315] LIBCLOUD-625: Allow for internal GCE authorization with metadata service --- demos/secrets.py-dist | 2 +- docs/compute/drivers/gce.rst | 21 +++- .../examples/compute/gce/gce_internal_auth.py | 9 ++ libcloud/common/google.py | 108 ++++++++++++++---- libcloud/compute/drivers/gce.py | 24 +++- libcloud/test/common/test_google.py | 8 ++ libcloud/test/secrets.py-dist | 2 +- libcloud/utils/connection.py | 7 +- 8 files changed, 148 insertions(+), 33 deletions(-) create mode 100644 
docs/examples/compute/gce/gce_internal_auth.py diff --git a/demos/secrets.py-dist b/demos/secrets.py-dist index 82c3de168c..981286da0e 100644 --- a/demos/secrets.py-dist +++ b/demos/secrets.py-dist @@ -22,7 +22,7 @@ DREAMHOST_PARAMS = ('key',) EC2_PARAMS = ('access_id', 'secret') ECP_PARAMS = ('user_name', 'password') GANDI_PARAMS = ('user',) -GCE_PARAMS = ('email_address', 'key') # Service Account Authentication +GCE_PARAMS = ('email@developer.gserviceaccount.com', 'key') # Service Account Authentication #GCE_PARAMS = ('client_id', 'client_secret') # Installed App Authentication GCE_KEYWORD_PARAMS = {'project': 'project_name'} HOSTINGCOM_PARAMS = ('user', 'secret') diff --git a/docs/compute/drivers/gce.rst b/docs/compute/drivers/gce.rst index 6e91160019..9230ca9165 100644 --- a/docs/compute/drivers/gce.rst +++ b/docs/compute/drivers/gce.rst @@ -20,8 +20,8 @@ Google Compute Engine features: Connecting to Google Compute Engine ----------------------------------- -Libcloud supports two different methods for authenticating to Compute Engine: -`Service Account`_ and `Installed Application`_ +Libcloud supports three different methods for authenticating: +`Service Account`_ and `Installed Application`_ and `Internal Authentication_` Which one should I use? @@ -34,6 +34,11 @@ Which one should I use? example, a desktop application for managing VMs that would be used by many different people with different Google accounts. +* If you are running your code on an instance inside Google Compute Engine, + the GCE driver will consult the internal metadata service to obtain an + authorization token. The only parameter required for this type of + authorization is your Project ID. + Once you have set up the authentication as described below, you pass the authentication information to the driver as described in `Examples`_ @@ -69,6 +74,13 @@ To set up Installed Account authentication: 7. 
You will also need your "Project ID" which can be found by clicking on the "Overview" link on the left sidebar. +Internal Authentication +~~~~~~~~~~~~~~~~~~~~~~~ + +To use GCE's internal metadata service to authenticate, simply specify +your Project ID and let the driver handle the rest. See the `Examples`_ +below for a sample. + Accessing Google Cloud services from your Libcloud nodes -------------------------------------------------------- In order for nodes created with libcloud to be able to access or manage other @@ -103,6 +115,11 @@ https://github.com/apache/libcloud/blob/trunk/demos/gce_demo.py .. literalinclude:: /examples/compute/gce/gce_service_account_scopes.py +5. Using GCE Internal Authorization +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. literalinclude:: /examples/compute/gce/gce_internal_auth.py + API Docs -------- diff --git a/docs/examples/compute/gce/gce_internal_auth.py b/docs/examples/compute/gce/gce_internal_auth.py new file mode 100644 index 0000000000..d3aa6acff7 --- /dev/null +++ b/docs/examples/compute/gce/gce_internal_auth.py @@ -0,0 +1,9 @@ +from libcloud.compute.types import Provider +from libcloud.compute.providers import get_driver + +# This example assumes you are running on an instance within Google +# Compute Engine. As such, the only parameter you need to specify is +# the Project ID. The GCE driver will the consult GCE's internal +# metadata service for an authorization token. 
+ComputeEngine = get_driver(Provider.GCE) +driver = ComputeEngine(project='your_project_id') diff --git a/libcloud/common/google.py b/libcloud/common/google.py index 52692e639e..b805ee1591 100644 --- a/libcloud/common/google.py +++ b/libcloud/common/google.py @@ -78,6 +78,7 @@ import socket import sys +from libcloud.utils.connection import get_response_object from libcloud.utils.py3 import httplib, urlencode, urlparse, PY3 from libcloud.common.base import (ConnectionUserAndKey, JsonResponse, PollingConnection) @@ -99,6 +100,23 @@ TIMESTAMP_FORMAT = '%Y-%m-%dT%H:%M:%SZ' +def _is_gce(): + http_code, http_reason, body = _get_gce_metadata() + if http_code == httplib.OK and body: + return True + return False + + +def _get_gce_metadata(path=''): + try: + url = "http://metadata/computeMetadata/v1/" + path.lstrip('/') + headers = {'Metadata-Flavor': 'Google'} + response = get_response_object(url, headers=headers) + return response.status, "", response.body + except Exception as e: + return -1, str(e), None + + class GoogleAuthError(LibcloudError): """Generic Error class for various authentication errors.""" def __init__(self, value): @@ -123,7 +141,16 @@ class JsonParseError(GoogleBaseError): class ResourceNotFoundError(GoogleBaseError): - pass + def __init__(self, value, http_code, code, driver=None): + self.code = code + if isinstance(value, dict) and 'message' in value and \ + value['message'].count('/') == 1 and \ + value['message'].count('projects/') == 1: + value['message'] = value['message'] + ". A missing project " \ + "error may be an authentication issue. " \ + "Please ensure your auth credentials match " \ + "your project. 
" + super(GoogleBaseError, self).__init__(value, http_code, driver) class QuotaExceededError(GoogleBaseError): @@ -255,7 +282,7 @@ class GoogleBaseAuthConnection(ConnectionUserAndKey): host = 'accounts.google.com' auth_path = '/o/oauth2/auth' - def __init__(self, user_id, key, scopes=None, + def __init__(self, user_id, key=None, scopes=None, redirect_uri='urn:ietf:wg:oauth:2.0:oob', login_hint=None, **kwargs): """ @@ -318,6 +345,21 @@ def _token_request(self, request_body): token_info['expire_time'] = expire_time.strftime(TIMESTAMP_FORMAT) return token_info + def refresh_token(self, token_info): + """ + Refresh the current token. + + Fetch an updated refresh token from internal metadata service. + + :param token_info: Dictionary containing token information. + (Not used, but here for compatibility) + :type token_info: ``dict`` + + :return: A dictionary containing updated token information. + :rtype: ``dict`` + """ + return self.get_new_token() + class GoogleInstalledAppAuthConnection(GoogleBaseAuthConnection): """Authentication connection for "Installed Application" authentication.""" @@ -450,21 +492,32 @@ def get_new_token(self): return self._token_request(request) - def refresh_token(self, token_info): - """ - Refresh the current token. - - Service Account authentication doesn't supply a "refresh token" so - this simply gets a new token using the email address/key. - :param token_info: Dictionary containing token information. - (Not used, but here for compatibility) - :type token_info: ``dict`` +class GoogleGCEServiceAcctAuthConnection(GoogleBaseAuthConnection): + """Authentication class for self-authentication when used with a GCE + istance that supports serviceAccounts. + """ + def get_new_token(self): + """ + Get a new token from the internal metadata service. - :return: A dictionary containing updated token information. 
+ :return: Dictionary containing token information :rtype: ``dict`` """ - return self.get_new_token() + path = '/instance/service-accounts/default/token' + http_code, http_reason, token_info = _get_gce_metadata(path) + if http_code == httplib.NOT_FOUND: + raise ValueError("Service Accounts are not enabled for this " + "GCE instance.") + if http_code != httplib.OK: + raise ValueError("Internal GCE Authorization failed: " + "'%s'" % str(http_reason)) + token_info = json.loads(token_info) + if 'expires_in' in token_info: + expire_time = self._now() + datetime.timedelta( + seconds=token_info['expires_in']) + token_info['expire_time'] = expire_time.strftime(TIMESTAMP_FORMAT) + return token_info class GoogleBaseConnection(ConnectionUserAndKey, PollingConnection): @@ -475,7 +528,7 @@ class GoogleBaseConnection(ConnectionUserAndKey, PollingConnection): poll_interval = 2.0 timeout = 180 - def __init__(self, user_id, key, auth_type=None, + def __init__(self, user_id, key=None, auth_type=None, credential_file=None, scopes=None, **kwargs): """ Determine authentication type, set up appropriate authentication @@ -490,10 +543,16 @@ def __init__(self, user_id, key, auth_type=None, authentication. :type key: ``str`` - :keyword auth_type: Accepted values are "SA" or "IA" - ("Service Account" or "Installed Application"). + :keyword auth_type: Accepted values are "SA" or "IA" or "GCE" + ("Service Account" or "Installed Application" or + "GCE" if libcloud is being used on a GCE instance + with service account enabled). + If not supplied, auth_type will be guessed based + on value of user_id or if the code is being + executed in a GCE instance.). If not supplied, auth_type will be guessed based - on value of user_id. + on value of user_id or if the code is running + on a GCE instance. 
:type auth_type: ``str`` :keyword credential_file: Path to file for caching authentication @@ -507,10 +566,11 @@ def __init__(self, user_id, key, auth_type=None, self.credential_file = credential_file or '~/.gce_libcloud_auth' if auth_type is None: - # Try to guess. Service accounts use an email address - # as the user id. + # Try to guess. if '@' in user_id: auth_type = 'SA' + elif _is_gce(): + auth_type = 'GCE' else: auth_type = 'IA' @@ -525,14 +585,20 @@ def __init__(self, user_id, key, auth_type=None, ] self.token_info = self._get_token_info_from_file() - if auth_type == 'SA': + if auth_type == 'GCE': + self.auth_conn = GoogleGCEServiceAcctAuthConnection( + user_id, self.scopes, **kwargs) + elif auth_type == 'SA': + if '@' not in user_id: + raise GoogleAuthError('Service Account auth requires a ' + 'valid email address') self.auth_conn = GoogleServiceAcctAuthConnection( user_id, key, self.scopes, **kwargs) elif auth_type == 'IA': self.auth_conn = GoogleInstalledAppAuthConnection( user_id, key, self.scopes, **kwargs) else: - raise GoogleAuthError('auth_type should be \'SA\' or \'IA\'') + raise GoogleAuthError('Invalid auth_type: %s' % str(auth_type)) if self.token_info is None: self.token_info = self.auth_conn.get_new_token() diff --git a/libcloud/compute/drivers/gce.py b/libcloud/compute/drivers/gce.py index 856c334bc9..648ae3e151 100644 --- a/libcloud/compute/drivers/gce.py +++ b/libcloud/compute/drivers/gce.py @@ -550,8 +550,8 @@ class GCENodeDriver(NodeDriver): "userinfo-email": "userinfo.email" } - def __init__(self, user_id, key, datacenter=None, project=None, - auth_type=None, scopes=None, **kwargs): + def __init__(self, user_id, key=None, datacenter=None, project=None, + auth_type=None, scopes=None, credential_file=None, **kwargs): """ :param user_id: The email address (for service accounts) or Client ID (for installed apps) to be used for authentication. 
@@ -569,19 +569,30 @@ def __init__(self, user_id, key, datacenter=None, project=None, :keyword project: Your GCE project name. (required) :type project: ``str`` - :keyword auth_type: Accepted values are "SA" or "IA" - ("Service Account" or "Installed Application"). + :keyword auth_type: Accepted values are "SA" or "IA" or "GCE" + ("Service Account" or "Installed Application" or + "GCE" if libcloud is being used on a GCE instance + with service account enabled). If not supplied, auth_type will be guessed based - on value of user_id. + on value of user_id or if the code is being + executed in a GCE instance. :type auth_type: ``str`` :keyword scopes: List of authorization URLs. Default is empty and grants read/write to Compute, Storage, DNS. :type scopes: ``list`` + + :keyword credential_file: Path to file for caching authentication + information used by GCEConnection. + :type credential_file: ``str`` """ + self.auth_type = auth_type self.project = project self.scopes = scopes + self.credential_file = credential_file or \ + '~/.gce_libcloud_auth' + '.' 
+ self.project + if not self.project: raise ValueError('Project name must be specified using ' '"project" keyword.') @@ -2749,7 +2760,8 @@ def ex_copy_image(self, name, url, description=None): def _ex_connection_class_kwargs(self): return {'auth_type': self.auth_type, 'project': self.project, - 'scopes': self.scopes} + 'scopes': self.scopes, + 'credential_file': self.credential_file} def _catch_error(self, ignore_errors=False): """ diff --git a/libcloud/test/common/test_google.py b/libcloud/test/common/test_google.py index 2e9c701dd8..d851b06a1f 100644 --- a/libcloud/test/common/test_google.py +++ b/libcloud/test/common/test_google.py @@ -31,6 +31,7 @@ GoogleBaseAuthConnection, GoogleInstalledAppAuthConnection, GoogleServiceAcctAuthConnection, + GoogleGCEServiceAcctAuthConnection, GoogleBaseConnection) from libcloud.test.secrets import GCE_PARAMS @@ -125,6 +126,8 @@ class GoogleBaseConnectionTest(LibcloudTestCase): GoogleInstalledAppAuthConnection.get_code = lambda x: '1234' GoogleServiceAcctAuthConnection.get_new_token = \ lambda x: x._token_request({}) + GoogleGCEServiceAcctAuthConnection.get_new_token = \ + lambda x: x._token_request({}) GoogleBaseConnection._now = lambda x: datetime.datetime(2013, 6, 26, 19, 0, 0) @@ -152,6 +155,11 @@ def test_auth_type(self): self.assertTrue(isinstance(conn2.auth_conn, GoogleInstalledAppAuthConnection)) + kwargs['auth_type'] = 'GCE' + conn3 = GoogleBaseConnection(*GCE_PARAMS, **kwargs) + self.assertTrue(isinstance(conn3.auth_conn, + GoogleGCEServiceAcctAuthConnection)) + def test_add_default_headers(self): old_headers = {} new_expected_headers = {'Content-Type': 'application/json', diff --git a/libcloud/test/secrets.py-dist b/libcloud/test/secrets.py-dist index b3525e88cc..940f455c7e 100644 --- a/libcloud/test/secrets.py-dist +++ b/libcloud/test/secrets.py-dist @@ -22,7 +22,7 @@ DREAMHOST_PARAMS = ('key',) EC2_PARAMS = ('access_id', 'secret') ECP_PARAMS = ('user_name', 'password') GANDI_PARAMS = ('user',) -GCE_PARAMS = 
('email_address', 'key') # Service Account Authentication +GCE_PARAMS = ('email@developer.gserviceaccount.com', 'key') # Service Account Authentication # GCE_PARAMS = ('client_id', 'client_secret') # Installed App Authentication GCE_KEYWORD_PARAMS = {'project': 'project_name'} HOSTINGCOM_PARAMS = ('user', 'secret') diff --git a/libcloud/utils/connection.py b/libcloud/utils/connection.py index db381a31fd..f507ad3af9 100644 --- a/libcloud/utils/connection.py +++ b/libcloud/utils/connection.py @@ -21,7 +21,7 @@ ] -def get_response_object(url): +def get_response_object(url, headers=None): """ Utility function which uses libcloud's connection class to issue an HTTP request. @@ -29,6 +29,9 @@ def get_response_object(url): :param url: URL to send the request to. :type url: ``str`` + :param headers: Custom request headers. + :type headers: ``dict`` + :return: Response object. :rtype: :class:`Response`. """ @@ -38,5 +41,5 @@ def get_response_object(url): con = Connection(secure=secure, host=parsed_url.netloc) response = con.request(method='GET', action=parsed_url.path, - params=parsed_qs) + params=parsed_qs, headers=headers) return response From f7ef3c430aa41ba0e53dde014bf3ac410c9b0e8c Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Sun, 19 Oct 2014 10:33:38 +0800 Subject: [PATCH 239/315] Allow users to pass "headers" and "method" argument to the get_response_object utility method. --- libcloud/utils/connection.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/libcloud/utils/connection.py b/libcloud/utils/connection.py index f507ad3af9..1591c6442f 100644 --- a/libcloud/utils/connection.py +++ b/libcloud/utils/connection.py @@ -21,7 +21,7 @@ ] -def get_response_object(url, headers=None): +def get_response_object(url, method='GET', headers=None): """ Utility function which uses libcloud's connection class to issue an HTTP request. @@ -29,7 +29,10 @@ def get_response_object(url, headers=None): :param url: URL to send the request to. 
:type url: ``str`` - :param headers: Custom request headers. + :param method: HTTP method. + :type method: ``str`` + + :param headers: Optional request headers. :type headers: ``dict`` :return: Response object. @@ -39,7 +42,10 @@ def get_response_object(url, headers=None): parsed_qs = parse_qs(parsed_url.query) secure = parsed_url.scheme == 'https' + headers = headers or {} + method = method.upper() + con = Connection(secure=secure, host=parsed_url.netloc) - response = con.request(method='GET', action=parsed_url.path, - params=parsed_qs, headers=headers) + response = con.request(action=parsed_url.path, params=parsed_qs, + headers=headers, method=method) return response From 52a58433bb3fca789b16275d050778ed60af6289 Mon Sep 17 00:00:00 2001 From: Alex Gaynor Date: Thu, 16 Oct 2014 10:21:35 -0700 Subject: [PATCH 240/315] Use assertEqual instead of the deprecated assertEquals Closes #375 Signed-off-by: Tomaz Muraus --- libcloud/test/compute/test_gce.py | 4 +- libcloud/test/compute/test_nephoscale.py | 2 +- libcloud/test/compute/test_profitbricks.py | 100 ++++++++++----------- libcloud/test/dns/test_rackspace.py | 2 +- libcloud/test/loadbalancer/test_gogrid.py | 2 +- libcloud/test/storage/test_azure_blobs.py | 6 +- 6 files changed, 58 insertions(+), 58 deletions(-) diff --git a/libcloud/test/compute/test_gce.py b/libcloud/test/compute/test_gce.py index 97bb61f5ed..2b90ec69e7 100644 --- a/libcloud/test/compute/test_gce.py +++ b/libcloud/test/compute/test_gce.py @@ -247,8 +247,8 @@ def test_ex_create_image(self): volume = self.driver.ex_get_volume('lcdisk') image = self.driver.ex_create_image('coreos', volume) self.assertTrue(isinstance(image, GCENodeImage)) - self.assertEquals(image.name, 'coreos') - self.assertEquals(image.extra['description'], 'CoreOS test image') + self.assertEqual(image.name, 'coreos') + self.assertEqual(image.extra['description'], 'CoreOS test image') def test_ex_create_firewall(self): firewall_name = 'lcfirewall' diff --git 
a/libcloud/test/compute/test_nephoscale.py b/libcloud/test/compute/test_nephoscale.py index bb30f68a08..0a0af0add8 100644 --- a/libcloud/test/compute/test_nephoscale.py +++ b/libcloud/test/compute/test_nephoscale.py @@ -74,7 +74,7 @@ def test_list_ssh_keys(self): def test_list_password_keys(self): password_keys = self.driver.ex_list_keypairs(password=True) self.assertEqual(len(password_keys), 1) - self.assertEquals(password_keys[0].password, '23d493j5') + self.assertEqual(password_keys[0].password, '23d493j5') def test_reboot_node(self): node = self.driver.list_nodes()[0] diff --git a/libcloud/test/compute/test_profitbricks.py b/libcloud/test/compute/test_profitbricks.py index ab7ab06c1f..0b7567d912 100644 --- a/libcloud/test/compute/test_profitbricks.py +++ b/libcloud/test/compute/test_profitbricks.py @@ -40,18 +40,18 @@ def test_list_nodes(self): self.assertEqual(len(nodes), 3) node = nodes[0] - self.assertEquals(node.id, "c8e57d7b-e731-46ad-a913-1828c0562246") - self.assertEquals(node.name, "server001") - self.assertEquals(node.state, 0) - self.assertEquals(node.public_ips, ['162.254.25.197']) - self.assertEquals(node.private_ips, ['10.10.108.12', '10.13.198.11']) - self.assertEquals(node.extra['datacenter_id'], "e1e8ec0d-b47f-4d39-a91b-6e885483c899") - self.assertEquals(node.extra['datacenter_version'], "5") - self.assertEquals(node.extra['provisioning_state'], 0) - self.assertEquals(node.extra['creation_time'], "2014-07-14T20:52:20.839Z") - self.assertEquals(node.extra['last_modification_time'], "2014-07-14T22:11:09.324Z") - self.assertEquals(node.extra['os_type'], "LINUX") - self.assertEquals(node.extra['availability_zone'], "ZONE_1") + self.assertEqual(node.id, "c8e57d7b-e731-46ad-a913-1828c0562246") + self.assertEqual(node.name, "server001") + self.assertEqual(node.state, 0) + self.assertEqual(node.public_ips, ['162.254.25.197']) + self.assertEqual(node.private_ips, ['10.10.108.12', '10.13.198.11']) + self.assertEqual(node.extra['datacenter_id'], 
"e1e8ec0d-b47f-4d39-a91b-6e885483c899") + self.assertEqual(node.extra['datacenter_version'], "5") + self.assertEqual(node.extra['provisioning_state'], 0) + self.assertEqual(node.extra['creation_time'], "2014-07-14T20:52:20.839Z") + self.assertEqual(node.extra['last_modification_time'], "2014-07-14T22:11:09.324Z") + self.assertEqual(node.extra['os_type'], "LINUX") + self.assertEqual(node.extra['availability_zone'], "ZONE_1") def test_ex_describe_node(self): image = type('NodeImage', (object,), @@ -68,7 +68,7 @@ def test_ex_describe_node(self): image=image, size=size) - self.assertEquals(node.id, "7b18b85f-cc93-4c2d-abcc-5ce732d35750") + self.assertEqual(node.id, "7b18b85f-cc93-4c2d-abcc-5ce732d35750") def test_reboot_node(self): node = type('Node', (object,), @@ -117,16 +117,16 @@ def test_list_volumes(self): self.assertEqual(len(volumes), 4) volume = volumes[0] - self.assertEquals(volume.id, "453582cf-8d54-4ec8-bc0b-f9962f7fd232") - self.assertEquals(volume.name, "storage001") - self.assertEquals(volume.size, 50) - self.assertEquals(volume.extra['server_id'], "ebee7d83-912b-42f1-9b62-b953351a7e29") - self.assertEquals(volume.extra['provisioning_state'], 0) - self.assertEquals(volume.extra['creation_time'], "2014-07-15T03:19:38.252Z") - self.assertEquals(volume.extra['last_modification_time'], "2014-07-15T03:28:58.724Z") - self.assertEquals(volume.extra['image_id'], "d2f627c4-0289-11e4-9f63-52540066fee9") - self.assertEquals(volume.extra['image_name'], "CentOS-6-server-2014-07-01") - self.assertEquals(volume.extra['datacenter_id'], "06eac419-c2b3-4761-aeb9-10efdd2cf292") + self.assertEqual(volume.id, "453582cf-8d54-4ec8-bc0b-f9962f7fd232") + self.assertEqual(volume.name, "storage001") + self.assertEqual(volume.size, 50) + self.assertEqual(volume.extra['server_id'], "ebee7d83-912b-42f1-9b62-b953351a7e29") + self.assertEqual(volume.extra['provisioning_state'], 0) + self.assertEqual(volume.extra['creation_time'], "2014-07-15T03:19:38.252Z") + 
self.assertEqual(volume.extra['last_modification_time'], "2014-07-15T03:28:58.724Z") + self.assertEqual(volume.extra['image_id'], "d2f627c4-0289-11e4-9f63-52540066fee9") + self.assertEqual(volume.extra['image_name'], "CentOS-6-server-2014-07-01") + self.assertEqual(volume.extra['datacenter_id'], "06eac419-c2b3-4761-aeb9-10efdd2cf292") def test_create_volume(self): datacenter = type('Datacenter', (object,), @@ -286,9 +286,9 @@ def test_ex_list_datacenters(self): self.assertEqual(len(datacenters), 2) dc1 = datacenters[0] - self.assertEquals(dc1.id, "a3e6f83a-8982-4d6a-aebc-60baf5755ede") - self.assertEquals(dc1.name, "StackPointCloud") - self.assertEquals(dc1.version, "1") + self.assertEqual(dc1.id, "a3e6f83a-8982-4d6a-aebc-60baf5755ede") + self.assertEqual(dc1.name, "StackPointCloud") + self.assertEqual(dc1.version, "1") def test_ex_rename_datacenter(self): datacenter = type('Datacenter', (object,), @@ -306,7 +306,7 @@ def test_list_locations(self): locationNamesResult = sorted(list(a.name for a in locations)) locationNamesExpected = ['de/fkb', 'de/fra', 'us/las'] - self.assertEquals(locationNamesResult, locationNamesExpected) + self.assertEqual(locationNamesResult, locationNamesExpected) ''' Availability Zone Tests ''' @@ -318,7 +318,7 @@ def test_ex_list_availability_zones(self): zoneNamesResult = sorted(list(a.name for a in zones)) zoneNamesExpected = ['AUTO', 'ZONE_1', 'ZONE_2'] - self.assertEquals(zoneNamesResult, zoneNamesExpected) + self.assertEqual(zoneNamesResult, zoneNamesExpected) ''' Interface Tests ''' @@ -329,16 +329,16 @@ def test_ex_list_interfaces(self): self.assertEqual(len(interfaces), 3) interface = interfaces[0] - self.assertEquals(interface.id, "6b38a4f3-b851-4614-9e3a-5ddff4727727") - self.assertEquals(interface.name, "StackPointCloud") - self.assertEquals(interface.state, 0) - self.assertEquals(interface.extra['server_id'], "234f0cf9-1efc-4ade-b829-036456584116") - self.assertEquals(interface.extra['lan_id'], '3') - 
self.assertEquals(interface.extra['internet_access'], 'false') - self.assertEquals(interface.extra['mac_address'], "02:01:40:47:90:04") - self.assertEquals(interface.extra['dhcp_active'], "true") - self.assertEquals(interface.extra['gateway_ip'], None) - self.assertEquals(interface.extra['ips'], ['10.14.96.11', '162.254.26.14', '162.254.26.15']) + self.assertEqual(interface.id, "6b38a4f3-b851-4614-9e3a-5ddff4727727") + self.assertEqual(interface.name, "StackPointCloud") + self.assertEqual(interface.state, 0) + self.assertEqual(interface.extra['server_id'], "234f0cf9-1efc-4ade-b829-036456584116") + self.assertEqual(interface.extra['lan_id'], '3') + self.assertEqual(interface.extra['internet_access'], 'false') + self.assertEqual(interface.extra['mac_address'], "02:01:40:47:90:04") + self.assertEqual(interface.extra['dhcp_active'], "true") + self.assertEqual(interface.extra['gateway_ip'], None) + self.assertEqual(interface.extra['ips'], ['10.14.96.11', '162.254.26.14', '162.254.26.15']) def test_ex_create_network_interface(self): node = type('Node', (object,), @@ -374,18 +374,18 @@ def test_ex_describe_network_interface(self): describe = self.driver.ex_describe_network_interface(network_interface=network_interface) - self.assertEquals(describe.id, "f1c7a244-2fa6-44ee-8fb6-871f337683a3") - self.assertEquals(describe.name, None) - self.assertEquals(describe.state, 0) - self.assertEquals(describe.extra['datacenter_id'], "a3a2e730-0dc3-47e6-bac6-4c056d5e2aee") - self.assertEquals(describe.extra['datacenter_version'], "6") - self.assertEquals(describe.extra['server_id'], "c09f4f31-336c-4ad2-9ec7-591778513408") - self.assertEquals(describe.extra['lan_id'], "1") - self.assertEquals(describe.extra['internet_access'], "false") - self.assertEquals(describe.extra['mac_address'], "02:01:96:d7:60:e0") - self.assertEquals(describe.extra['dhcp_active'], "true") - self.assertEquals(describe.extra['gateway_ip'], None) - self.assertEquals(describe.extra['ips'], ['10.10.38.12']) + 
self.assertEqual(describe.id, "f1c7a244-2fa6-44ee-8fb6-871f337683a3") + self.assertEqual(describe.name, None) + self.assertEqual(describe.state, 0) + self.assertEqual(describe.extra['datacenter_id'], "a3a2e730-0dc3-47e6-bac6-4c056d5e2aee") + self.assertEqual(describe.extra['datacenter_version'], "6") + self.assertEqual(describe.extra['server_id'], "c09f4f31-336c-4ad2-9ec7-591778513408") + self.assertEqual(describe.extra['lan_id'], "1") + self.assertEqual(describe.extra['internet_access'], "false") + self.assertEqual(describe.extra['mac_address'], "02:01:96:d7:60:e0") + self.assertEqual(describe.extra['dhcp_active'], "true") + self.assertEqual(describe.extra['gateway_ip'], None) + self.assertEqual(describe.extra['ips'], ['10.10.38.12']) def test_list_sizes(self): sizes = self.driver.list_sizes() diff --git a/libcloud/test/dns/test_rackspace.py b/libcloud/test/dns/test_rackspace.py index 4a211a3f85..907fa8d3d6 100644 --- a/libcloud/test/dns/test_rackspace.py +++ b/libcloud/test/dns/test_rackspace.py @@ -71,7 +71,7 @@ def test_gets_auth_2_0_endpoint(self): driver = self.klass(*DNS_PARAMS_RACKSPACE, **kwargs) driver.connection._populate_hosts_and_request_paths() - self.assertEquals(self.endpoint_url, driver.connection.get_endpoint()) + self.assertEqual(self.endpoint_url, driver.connection.get_endpoint()) def test_list_record_types(self): record_types = self.driver.list_record_types() diff --git a/libcloud/test/loadbalancer/test_gogrid.py b/libcloud/test/loadbalancer/test_gogrid.py index 3e32a37f19..5ad3e8015a 100644 --- a/libcloud/test/loadbalancer/test_gogrid.py +++ b/libcloud/test/loadbalancer/test_gogrid.py @@ -119,7 +119,7 @@ def test_balancer_list_members(self): self.assertEqual(len(members2), 3) self.assertEqual(expected_members, set(["%s:%s" % (member.ip, member.port) for member in members1])) - self.assertEquals(members1[0].balancer, balancer) + self.assertEqual(members1[0].balancer, balancer) def test_balancer_attach_compute_node(self): balancer = 
LoadBalancer(23530, None, None, None, None, self.driver) diff --git a/libcloud/test/storage/test_azure_blobs.py b/libcloud/test/storage/test_azure_blobs.py index 87f1238ee4..f9ee585fc6 100644 --- a/libcloud/test/storage/test_azure_blobs.py +++ b/libcloud/test/storage/test_azure_blobs.py @@ -945,9 +945,9 @@ def test_storage_driver_host(self): host2 = driver2.connection.host host3 = driver3.connection.host - self.assertEquals(host1, 'fakeaccount1.blob.core.windows.net') - self.assertEquals(host2, 'fakeaccount2.blob.core.windows.net') - self.assertEquals(host3, 'test.foo.bar.com') + self.assertEqual(host1, 'fakeaccount1.blob.core.windows.net') + self.assertEqual(host2, 'fakeaccount2.blob.core.windows.net') + self.assertEqual(host3, 'test.foo.bar.com') if __name__ == '__main__': From 3a9dcb80247ea0d3262f8584ab1137a3ee814236 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Mon, 20 Oct 2014 15:19:16 +0800 Subject: [PATCH 241/315] Bump versions for 0.16.0 release. --- CHANGES.rst | 4 ++-- docs/upgrade_notes.rst | 4 ++-- libcloud/__init__.py | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index e7b954237b..821fd58fbe 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,8 +1,8 @@ Changelog ========= -Changes with Apache Libcloud in development -------------------------------------------- +Changes with Apache Libcloud 0.16.0 +----------------------------------- General ~~~~~~~ diff --git a/docs/upgrade_notes.rst b/docs/upgrade_notes.rst index 656e3e1af2..dd6c52bb9e 100644 --- a/docs/upgrade_notes.rst +++ b/docs/upgrade_notes.rst @@ -5,8 +5,8 @@ This page describes how to upgrade from a previous version to a new version which contains backward incompatible or semi-incompatible changes and how to preserve the old behavior when this is possible. 
-Libcloud in development ------------------------ +Libcloud 0.16.0 +--------------- Changes in the OpenStack authentication and service catalog classes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/libcloud/__init__.py b/libcloud/__init__.py index 3ce79f0b20..9a1c180d2a 100644 --- a/libcloud/__init__.py +++ b/libcloud/__init__.py @@ -20,7 +20,7 @@ """ __all__ = ['__version__', 'enable_debug'] -__version__ = '0.15.1' +__version__ = '0.16.0' import os From 930d05a43136c6f0aed0eb082fafa9e1650d651c Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Wed, 22 Oct 2014 15:06:59 +0800 Subject: [PATCH 242/315] Update CHANGES. --- CHANGES.rst | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGES.rst b/CHANGES.rst index 821fd58fbe..81072d2dae 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -156,6 +156,10 @@ Compute (LIBCLOUD-578, GITHUB-373) [Eric Johnson] +- Update GCE driver to allow for authentication with internal metadata service. + (LIBCLOUD-625, LIBCLOUD-276, GITHUB-276) + [Eric Johnson] + Storage ~~~~~~~ From 341ceb11e777942a3a343bf49a276142a67e4a2d Mon Sep 17 00:00:00 2001 From: Roeland Kuipers Date: Mon, 20 Oct 2014 12:12:45 +0200 Subject: [PATCH 243/315] CLOUDSTACK: Minor fix for start-vm option Closes #377 Signed-off-by: Tomaz Muraus --- libcloud/compute/drivers/cloudstack.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/libcloud/compute/drivers/cloudstack.py b/libcloud/compute/drivers/cloudstack.py index a9d716938c..fa10fb1e02 100644 --- a/libcloud/compute/drivers/cloudstack.py +++ b/libcloud/compute/drivers/cloudstack.py @@ -953,7 +953,9 @@ def create_node(self, **kwargs): :type ex_ip_address: ``str`` :keyword ex_start_vm: Boolean to specify to start VM after creation - Defaults to True + Default Cloudstack behaviour is to start a VM, + if not specified. 
+ :type ex_start_vm: ``bool`` :rtype: :class:`.CloudStackNode` @@ -1027,7 +1029,7 @@ def _create_args_to_params(self, node, **kwargs): if ex_ip_address: server_params['ipaddress'] = ex_ip_address - if ex_start_vm is False: + if ex_start_vm is not None: server_params['startvm'] = ex_start_vm return server_params From 532ce827ce8f4d67080c0e39bfe4950f24e9f8e5 Mon Sep 17 00:00:00 2001 From: Franck Cuny Date: Sun, 19 Oct 2014 22:44:01 -0700 Subject: [PATCH 244/315] Support CONFLICT response for Google Cloud's API If the status code for a call to the Google Cloud's API is 409 (CONFLICT), and the code of the error is 'alreadyExists', consider the resource as already existing and create the correct error. Signed-off-by: Tomaz Muraus --- libcloud/common/google.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/libcloud/common/google.py b/libcloud/common/google.py index b805ee1591..069b8ddcce 100644 --- a/libcloud/common/google.py +++ b/libcloud/common/google.py @@ -201,7 +201,7 @@ def _get_error(self, body): code = err.get('code') message = err.get('message') else: - code = None + code = err.get('reason', None) message = body.get('error_description', err) return (code, message) @@ -226,14 +226,14 @@ def parse_body(self): body = self.body json_error = True - if self.status in [httplib.OK, httplib.CREATED, httplib.ACCEPTED]: + if self.status in [httplib.OK, httplib.CREATED, httplib.ACCEPTED, httplib.CONFLICT]: if json_error: raise JsonParseError(body, self.status, None) elif 'error' in body: (code, message) = self._get_error(body) if code == 'QUOTA_EXCEEDED': raise QuotaExceededError(message, self.status, code) - elif code == 'RESOURCE_ALREADY_EXISTS': + elif (code == 'RESOURCE_ALREADY_EXISTS' or code == 'alreadyExists'): raise ResourceExistsError(message, self.status, code) elif code.startswith('RESOURCE_IN_USE'): raise ResourceInUseError(message, self.status, code) From b21041c9f985d05c0c51100f8710e2891bc54c39 Mon Sep 17 00:00:00 2001 From: Franck 
Cuny Date: Sun, 19 Oct 2014 22:46:10 -0700 Subject: [PATCH 245/315] The default ttl for Google Cloud's API is 0. The default can't be `None' since we're calling `int' on the value of ttl. Signed-off-by: Tomaz Muraus --- libcloud/dns/drivers/google.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libcloud/dns/drivers/google.py b/libcloud/dns/drivers/google.py index ca3e5f06da..15b58591f1 100644 --- a/libcloud/dns/drivers/google.py +++ b/libcloud/dns/drivers/google.py @@ -208,7 +208,7 @@ def create_record(self, name, zone, type, data, extra=None): :rtype: :class:`Record` """ - ttl = data.get('ttl', None) + ttl = data.get('ttl', 0) rrdatas = data.get('rrdatas', []) data = { From f1c42527b2d8994035832b84da8c782ea91c9484 Mon Sep 17 00:00:00 2001 From: Franck Cuny Date: Sun, 19 Oct 2014 22:47:40 -0700 Subject: [PATCH 246/315] The 3rd argument for GoogleDNSConnection is secure, not scope. Signed-off-by: Tomaz Muraus --- libcloud/dns/drivers/google.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libcloud/dns/drivers/google.py b/libcloud/dns/drivers/google.py index 15b58591f1..01721eb389 100644 --- a/libcloud/dns/drivers/google.py +++ b/libcloud/dns/drivers/google.py @@ -70,7 +70,7 @@ def __init__(self, user_id, key, project=None, auth_type=None, scopes=None, if not self.project: raise ValueError('Project name must be specified using ' '"project" keyword.') - super(GoogleDNSDriver, self).__init__(user_id, key, scopes, **kwargs) + super(GoogleDNSDriver, self).__init__(user_id, key, **kwargs) def iterate_zones(self): """ From 697d8d0bb502183e61919d3a5a6119cbe577e072 Mon Sep 17 00:00:00 2001 From: Franck Cuny Date: Mon, 20 Oct 2014 07:53:21 -0700 Subject: [PATCH 247/315] Reformat the code for pep8 Signed-off-by: Tomaz Muraus --- libcloud/common/google.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/libcloud/common/google.py b/libcloud/common/google.py index 069b8ddcce..601d4e7f59 100644 --- 
a/libcloud/common/google.py +++ b/libcloud/common/google.py @@ -226,14 +226,22 @@ def parse_body(self): body = self.body json_error = True - if self.status in [httplib.OK, httplib.CREATED, httplib.ACCEPTED, httplib.CONFLICT]: + valid_http_codes = [ + httplib.OK, + httplib.CREATED, + httplib.ACCEPTED, + httplib.CONFLICT, + ] + if self.status in valid_http_codes: if json_error: raise JsonParseError(body, self.status, None) elif 'error' in body: (code, message) = self._get_error(body) if code == 'QUOTA_EXCEEDED': raise QuotaExceededError(message, self.status, code) - elif (code == 'RESOURCE_ALREADY_EXISTS' or code == 'alreadyExists'): + elif code == 'RESOURCE_ALREADY_EXISTS': + raise ResourceExistsError(message, self.status, code) + elif code == 'alreadyExists': raise ResourceExistsError(message, self.status, code) elif code.startswith('RESOURCE_IN_USE'): raise ResourceInUseError(message, self.status, code) From 206a5b49be49db98b197c2154beb689bca183ec9 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Thu, 23 Oct 2014 16:16:42 +0800 Subject: [PATCH 248/315] Update changes. Closes #378 --- CHANGES.rst | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/CHANGES.rst b/CHANGES.rst index 81072d2dae..b70ce363d8 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -179,6 +179,13 @@ Loadbalancer (LIBCLOUD-595, GITHUB-341) [Lee Verberne, Eric Johnson] +DNS +~~~ + +- Various fixes in the Google DNS driver. 
+ (GITHUB-378) + [Franck Cuny] + Changes with Apache Libcloud 0.15.1 ----------------------------------- From f4dde7bbd37b2b51541bffed805b5c38bd353877 Mon Sep 17 00:00:00 2001 From: Michael Bennett Date: Tue, 24 Jun 2014 15:44:56 -0400 Subject: [PATCH 249/315] Fixed a bug in elasticstack node creation method where it would raise exceptions because of missing data in a response, and also fixed pulling the IP from the proper data item Fixed a class leak variable when creating multiple connections to different EC2 regions Closes #325 Signed-off-by: Tomaz Muraus --- CHANGES.rst | 11 +++++ libcloud/compute/drivers/ec2.py | 2 +- libcloud/compute/drivers/elasticstack.py | 19 +++++--- .../elastichosts/offline_servers_info.json | 16 +++++++ .../fixtures/elastichosts/servers_info.json | 47 ++++++++++--------- libcloud/test/compute/test_ec2.py | 10 +++- libcloud/test/compute/test_elasticstack.py | 18 +++++++ 7 files changed, 91 insertions(+), 32 deletions(-) create mode 100644 libcloud/test/compute/fixtures/elastichosts/offline_servers_info.json diff --git a/CHANGES.rst b/CHANGES.rst index b70ce363d8..da45e6cc1d 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -160,6 +160,17 @@ Compute (LIBCLOUD-625, LIBCLOUD-276, GITHUB-276) [Eric Johnson] +- Fix a bug in Elasticstack node creation method where it would raise + exceptions because of missing data in a response, and also fix pulling the + IP from the proper data item. + (GITHUB-325) + [Michael Bennett] + +- Fix a bug which prevented user to connect and instantiate multiple EC2 driver + instances for different regions at the same time. 
+ (GITHUB-325) + [Michael Bennett] + Storage ~~~~~~~ diff --git a/libcloud/compute/drivers/ec2.py b/libcloud/compute/drivers/ec2.py index 6d98c9a6d5..217ed4cf23 100644 --- a/libcloud/compute/drivers/ec2.py +++ b/libcloud/compute/drivers/ec2.py @@ -5386,7 +5386,7 @@ def __init__(self, key, secret=None, secure=True, host=None, port=None, self.api_name = details['api_name'] self.country = details['country'] - self.connectionCls.host = details['endpoint'] + host = host or details['endpoint'] super(EC2NodeDriver, self).__init__(key=key, secret=secret, secure=secure, host=host, diff --git a/libcloud/compute/drivers/elasticstack.py b/libcloud/compute/drivers/elasticstack.py index da3863a3ef..739fa8faff 100644 --- a/libcloud/compute/drivers/elasticstack.py +++ b/libcloud/compute/drivers/elasticstack.py @@ -453,15 +453,22 @@ def _to_node(self, data, ssh_password=None): except KeyError: state = NodeState.UNKNOWN - if isinstance(data['nic:0:dhcp'], list): - public_ip = data['nic:0:dhcp'] + if 'nic:0:dhcp:ip' in data: + if isinstance(data['nic:0:dhcp:ip'], list): + public_ip = data['nic:0:dhcp:ip'] + else: + public_ip = [data['nic:0:dhcp:ip']] else: - public_ip = [data['nic:0:dhcp']] + public_ip = [] extra = {'cpu': data['cpu'], - 'smp': data['smp'], - 'mem': data['mem'], - 'started': data['started']} + 'mem': data['mem']} + + if 'started' in data: + extra['started'] = data['started'] + + if 'smp' in data: + extra['smp'] = data['smp'] if 'vnc:ip' in data: extra['vnc:ip'] = data['vnc:ip'] diff --git a/libcloud/test/compute/fixtures/elastichosts/offline_servers_info.json b/libcloud/test/compute/fixtures/elastichosts/offline_servers_info.json new file mode 100644 index 0000000000..60b04330dc --- /dev/null +++ b/libcloud/test/compute/fixtures/elastichosts/offline_servers_info.json @@ -0,0 +1,16 @@ +[ + { + "boot": "ide:0:0", + "cpu": 2000, + "ide:0:0": "b6049e7a-aa1b-47f9-b21d-cdf2354e28d3", + "mem": 1024, + "name": "test api node", + "nic:0:block": "tcp/21 tcp/22 tcp/23 tcp/25", 
+ "nic:0:dhcp": "auto", + "nic:0:model": "virtio", + "server": "b605ca90-c3e6-4cee-85f8-a8ebdf8f9903", + "status": "stopped", + "user": "2164ce57-591a-43ee-ade5-e2fe0ee13c3f", + "vnc:password": "testvncpass" + } +] diff --git a/libcloud/test/compute/fixtures/elastichosts/servers_info.json b/libcloud/test/compute/fixtures/elastichosts/servers_info.json index 72b6b48bef..bb3fc35a0d 100644 --- a/libcloud/test/compute/fixtures/elastichosts/servers_info.json +++ b/libcloud/test/compute/fixtures/elastichosts/servers_info.json @@ -1,27 +1,28 @@ [ { - "boot": "ide:0:0", - "cpu": 2000, - "ide:0:0": "b6049e7a-aa1b-47f9-b21d-cdf2354e28d3", - "ide:0:0:read:bytes": "299696128", - "ide:0:0:read:requests": "73168", - "ide:0:0:write:bytes": "321044480", - "ide:0:0:write:requests": "78380", - "mem": 1024, - "name": "test api node", - "nic:0:block": "tcp/21 tcp/22 tcp/23 tcp/25", - "nic:0:dhcp": ["1.2.3.4", "1.2.3.5"], - "nic:0:model": "virtio", - "rx": 679560, - "rx:packets": 644, - "server": "b605ca90-c3e6-4cee-85f8-a8ebdf8f9903", - "smp": 1, - "started": 1280723696, - "status": "active", - "tx": 21271, - "tx:packets": "251", - "user": "2164ce57-591a-43ee-ade5-e2fe0ee13c3f", - "vnc:ip": "216.151.208.174", + "boot": "ide:0:0", + "cpu": 2000, + "ide:0:0": "b6049e7a-aa1b-47f9-b21d-cdf2354e28d3", + "ide:0:0:read:bytes": "299696128", + "ide:0:0:read:requests": "73168", + "ide:0:0:write:bytes": "321044480", + "ide:0:0:write:requests": "78380", + "mem": 1024, + "name": "test api node", + "nic:0:block": "tcp/21 tcp/22 tcp/23 tcp/25", + "nic:0:dhcp": "auto", + "nic:0:dhcp:ip": ["1.2.3.4", "1.2.3.5"], + "nic:0:model": "virtio", + "rx": 679560, + "rx:packets": 644, + "server": "b605ca90-c3e6-4cee-85f8-a8ebdf8f9903", + "smp": 1, + "started": 1280723696, + "status": "active", + "tx": 21271, + "tx:packets": "251", + "user": "2164ce57-591a-43ee-ade5-e2fe0ee13c3f", + "vnc:ip": "216.151.208.174", "vnc:password": "testvncpass" } -] \ No newline at end of file +] diff --git 
a/libcloud/test/compute/test_ec2.py b/libcloud/test/compute/test_ec2.py index 02ab8a8c2e..4a2a2e6081 100644 --- a/libcloud/test/compute/test_ec2.py +++ b/libcloud/test/compute/test_ec2.py @@ -56,8 +56,14 @@ def test_instantiate_driver_valid_regions(self): regions = REGION_DETAILS.keys() regions = [d for d in regions if d != 'nimbus'] - for region in regions: - EC2NodeDriver(*EC2_PARAMS, **{'region': region}) + region_endpoints = [ + EC2NodeDriver(*EC2_PARAMS, **{'region': region}).connection.host for region in regions + ] + + # Verify that each driver doesn't get the same API host endpoint + self.assertEqual(len(region_endpoints), + len(set(region_endpoints)), + "Multiple Region Drivers were given the same API endpoint") def test_instantiate_driver_invalid_regions(self): for region in ['invalid', 'nimbus']: diff --git a/libcloud/test/compute/test_elasticstack.py b/libcloud/test/compute/test_elasticstack.py index 31896c864e..40dea2eaef 100644 --- a/libcloud/test/compute/test_elasticstack.py +++ b/libcloud/test/compute/test_elasticstack.py @@ -106,6 +106,20 @@ def test_list_nodes(self): self.assertEqual( node.extra['ide:0:0'], "b6049e7a-aa1b-47f9-b21d-cdf2354e28d3") + def test_list_offline_node(self): + self.mockHttp.type = 'OFFLINE' + + nodes = self.driver.list_nodes() + self.assertTrue(isinstance(nodes, list)) + self.assertEqual(len(nodes), 1) + + node = nodes[0] + self.assertEqual(len(node.public_ips), 0, "Public IPs was not empty") + self.assertNotIn('smp', node.extra) + self.assertNotIn('started', node.extra) + self.assertEqual( + node.extra['ide:0:0'], "b6049e7a-aa1b-47f9-b21d-cdf2354e28d3") + def test_list_sizes(self): images = self.driver.list_sizes() self.assertEqual(len(images), 6) @@ -256,6 +270,10 @@ def _servers_info(self, method, url, body, headers): body = self.fixtures.load('servers_info.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + def _servers_info_OFFLINE(self, method, url, body, headers): + body = 
self.fixtures.load('offline_servers_info.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + def _servers_72258_set(self, method, url, body, headers): body = '{}' return (httplib.OK, body, {}, httplib.responses[httplib.OK]) From 746e218191a2adf5789e436542a0f20cc664a9b8 Mon Sep 17 00:00:00 2001 From: Eric Johnson Date: Mon, 27 Oct 2014 18:53:29 +0000 Subject: [PATCH 250/315] GCE: fix GCE internal auth Signed-off-by: Tomaz Muraus --- libcloud/common/google.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libcloud/common/google.py b/libcloud/common/google.py index 601d4e7f59..ebb9fdff1b 100644 --- a/libcloud/common/google.py +++ b/libcloud/common/google.py @@ -111,7 +111,7 @@ def _get_gce_metadata(path=''): try: url = "http://metadata/computeMetadata/v1/" + path.lstrip('/') headers = {'Metadata-Flavor': 'Google'} - response = get_response_object(url, headers) + response = get_response_object(url, headers=headers) return response.status, "", response.body except Exception as e: return -1, str(e), None From d3b03979595d741f0284ce7c452f2f7d7567b6ce Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Aitor=20G=C3=B3mez-Goiri?= Date: Fri, 31 Oct 2014 18:11:13 +0100 Subject: [PATCH 251/315] Fix typos in the docs. Closes #380 Signed-off-by: Tomaz Muraus --- docs/compute/drivers/openstack.rst | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/compute/drivers/openstack.rst b/docs/compute/drivers/openstack.rst index 6975c3753c..ce172cae6e 100644 --- a/docs/compute/drivers/openstack.rst +++ b/docs/compute/drivers/openstack.rst @@ -180,21 +180,21 @@ There are many different things which could cause this error: 1. Service catalog is empty 2. 
You have not specified a value for one of the following arguments - ``ex_service_type``, ``ex_service_name``, ``ex_service_region`` and the + ``ex_force_service_type``, ``ex_force_service_name``, ``ex_force_service_region`` and the driver is using the default values which don't match your installation. 3. You have specified invalid value for one or all of the following arguments: - ``ex_service_type``, ``ex_service_name``, ``ex_service_region`` + ``ex_force_service_type``, ``ex_force_service_name``, ``ex_force_service_region`` The best way to troubleshoot this issue is to use ``LIBCLOUD_DEBUG`` functionality which is documented in the debugging section. This functionality allows you to introspect the response from the authentication -service and you can make sure that ``ex_service_type``, ``ex_service_name``, -``ex_service_region`` arguments match values returned in the service catalog. +service and you can make sure that ``ex_force_service_type``, ``ex_force_service_name``, +``ex_force_service_region`` arguments match values returned in the service catalog. If the service catalog is empty, you have two options: -1. Populate the service catalog and makes sure the ``ex_service_type``, - ``ex_service_name`` and ``ex_service_region`` arguments match the values +1. Populate the service catalog and makes sure the ``ex_force_service_type``, + ``ex_force_service_name`` and ``ex_force_service_region`` arguments match the values defined in the service catalog. 2. 
Provide the API endpoint url using ``ex_force_base_url`` argument and skip the "endpoint selection using the service catalog" step all together From 43d8cd680b9f09615e61506d18eecec12bccbee1 Mon Sep 17 00:00:00 2001 From: Roeland Kuipers Date: Fri, 3 Oct 2014 19:15:26 +0200 Subject: [PATCH 252/315] CLOUDSTACK: Add ex_list_nics, ex_attach_nic_to_vm, ex_detach_nic_from_vm + tests Signed-off-by: Sebastien Goasguen This closes #369 --- CHANGES.rst | 4 + libcloud/compute/drivers/cloudstack.py | 125 +++++++++++++++++- .../addNicToVirtualMachine_default.json | 1 + .../fixtures/cloudstack/listNics_default.json | 1 + .../queryAsyncJobResult_addnictovm.json | 37 ++++++ .../queryAsyncJobResult_removenic.json | 37 ++++++ .../removeNicFromVirtualMachine_default.json | 1 + libcloud/test/compute/test_cloudstack.py | 40 ++++++ 8 files changed, 244 insertions(+), 2 deletions(-) create mode 100644 libcloud/test/compute/fixtures/cloudstack/addNicToVirtualMachine_default.json create mode 100644 libcloud/test/compute/fixtures/cloudstack/listNics_default.json create mode 100644 libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_addnictovm.json create mode 100644 libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_removenic.json create mode 100644 libcloud/test/compute/fixtures/cloudstack/removeNicFromVirtualMachine_default.json diff --git a/CHANGES.rst b/CHANGES.rst index da45e6cc1d..9026450d6c 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -171,6 +171,10 @@ Compute (GITHUB-325) [Michael Bennett] +- Add methods in CloudStack driver to manage mutiple nics per vm. 
+ (GITHUB-369) + [Roeland Kuipers] + Storage ~~~~~~~ diff --git a/libcloud/compute/drivers/cloudstack.py b/libcloud/compute/drivers/cloudstack.py index fa10fb1e02..454f1e7214 100644 --- a/libcloud/compute/drivers/cloudstack.py +++ b/libcloud/compute/drivers/cloudstack.py @@ -181,6 +181,10 @@ 'project_id': { 'key_name': 'projectid', 'transform_func': str + }, + 'nics:': { + 'key_name': 'nic', + 'transform_func': list } }, 'volume': { @@ -302,6 +306,12 @@ 'vpcavailable': {'key_name': 'vpcavailable', 'transform_func': int}, 'vpclimit': {'key_name': 'vpclimit', 'transform_func': int}, 'vpctotal': {'key_name': 'vpctotal', 'transform_func': int} + }, + 'nic': { + 'secondary_ip': { + 'key_name': 'secondaryip', + 'transform_func': list + } } } @@ -654,6 +664,35 @@ def __repr__(self): self.driver.name)) +class CloudStackNic(object): + """ + Class representing a CloudStack Network Interface. + """ + + def __init__(self, id, network_id, net_mask, gateway, ip_address, + is_default, mac_address, driver, extra=None): + self.id = id + self.network_id = network_id + self.net_mask = net_mask + self.gateway = gateway + self.ip_address = ip_address + self.is_default = is_default + self.mac_address = mac_address + self.driver = driver + self.extra = extra or {} + + def __repr__(self): + return (('') + % (self.id, self.network_id, self.net_mask, + self.gateway, self.ip_address, self.is_default, + self.mac_address, self.driver.name)) + + def __eq__(self, other): + return self.__class__ is other.__class__ and self.id == other.id + + class CloudStackVPC(object): """ Class representing a CloudStack VPC. 
@@ -1189,10 +1228,10 @@ def ex_create_network(self, display_text, name, network_offering, :param name: the name of the network :type name: ``str`` - :param network_offering: the network offering id + :param network_offering: NetworkOffering object :type network_offering: :class:'CloudStackNetworkOffering` - :param location: Zone + :param location: Zone object :type location: :class:`NodeLocation` :param gateway: Optional, the Gateway of this network @@ -2714,6 +2753,88 @@ def ex_list_os_types(self): ostypes = self._sync_request('listOsTypes') return ostypes['ostype'] + def ex_list_nics(self, node): + """ + List the available networks + + :param vm: Node Object + :type vm: :class:`CloudStackNode + + :rtype ``list`` of :class:`CloudStackNic` + """ + + res = self._sync_request(command='listNics', + params={'virtualmachineid': node.id}, + method='GET') + items = res.get('nic', []) + + nics = [] + extra_map = RESOURCE_EXTRA_ATTRIBUTES_MAP['nic'] + for item in items: + extra = self._get_extra_dict(item, extra_map) + + nics.append(CloudStackNic( + id=item['id'], + network_id=item['networkid'], + net_mask=item['netmask'], + gateway=item['gateway'], + ip_address=item['ipaddress'], + is_default=item['isdefault'], + mac_address=item['macaddress'], + driver=self, + extra=extra)) + + return nics + + def ex_attach_nic_to_node(self, node, network, ip_address=None): + """ + Add an extra Nic to a VM + + :param network: NetworkOffering object + :type network: :class:'CloudStackNetwork` + + :param node: Node Object + :type node: :class:'CloudStackNode` + + :param ip_address: Optional, specific IP for this Nic + :type ip_address: ``str`` + + + :rtype: ``bool`` + """ + + args = { + 'virtualmachineid': node.id, + 'networkid': network.id + } + + if ip_address is not None: + args['ipaddress'] = ip_address + + self._async_request(command='addNicToVirtualMachine', + params=args) + return True + + def ex_detach_nic_from_node(self, nic, node): + + """ + Remove Nic from a VM + + :param nic: 
Nic object + :type nic: :class:'CloudStackNetwork` + + :param node: Node Object + :type node: :class:'CloudStackNode` + + :rtype: ``bool`` + """ + + self._async_request(command='removeNicFromVirtualMachine', + params={'nicid': nic.id, + 'virtualmachineid': node.id}) + + return True + def _to_snapshot(self, data): """ Create snapshot object from data diff --git a/libcloud/test/compute/fixtures/cloudstack/addNicToVirtualMachine_default.json b/libcloud/test/compute/fixtures/cloudstack/addNicToVirtualMachine_default.json new file mode 100644 index 0000000000..3816acb953 --- /dev/null +++ b/libcloud/test/compute/fixtures/cloudstack/addNicToVirtualMachine_default.json @@ -0,0 +1 @@ +{ "addnictovirtualmachineresponse" : {"jobid":"addnictovm"} } \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/cloudstack/listNics_default.json b/libcloud/test/compute/fixtures/cloudstack/listNics_default.json new file mode 100644 index 0000000000..4c8b18e750 --- /dev/null +++ b/libcloud/test/compute/fixtures/cloudstack/listNics_default.json @@ -0,0 +1 @@ +{ "listnicsresponse" : { "count":1 ,"nic" : [ {"id":"15418e74-25e8-42d3-9bd7-eb55e57825fe","networkid":"de45b0ed-b5ae-4374-ac7c-aff3fb2aefa2","netmask":"255.255.255.0","gateway":"10.1.1.1","ipaddress":"10.1.1.136","isdefault":true,"macaddress":"02:00:00:b9:01:1a"} ] } } \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_addnictovm.json b/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_addnictovm.json new file mode 100644 index 0000000000..12b01cd2aa --- /dev/null +++ b/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_addnictovm.json @@ -0,0 +1,37 @@ +{ + "queryasyncjobresultresponse" : { + "accountid" : "02c9bf08-6f36-44b1-a57f-df0708f90de4", "userid" : "6ef2b921-4ecf-4651-8188-f9868db73e73", "cmd" : "org.apache.cloudstack.api.command.user.vm.AddNicToVMCmd", "jobstatus" : 1, "jobprocstatus" : 0, "jobresultcode" : 0, "jobresulttype" : 
"object", "jobresult" : { + "virtualmachine" : { + "id" : "903897b7-9241-4f93-bd08-3453c36a3c99", "name" : "test", "account" : "rkuipers_admin", "domainid" : "4b6e626c-9d50-4480-bf77-daae632c7ffd", "domain" : "rkuipers", "created" : "2014-10-03T17:24:37+0200", "state" : "Stopped", "haenable" : false, "zoneid" : "2", "zonename" : "BETA-SBP-DC-1", "guestosid" : "278699da-edfc-11e2-a249-005056ba4c5e", "securitygroup" : [], "nic" : [{ + "id": "15418e74-25e8-42d3-9bd7-eb55e57825fe", + "networkid": "de45b0ed-b5ae-4374-ac7c-aff3fb2aefa2", + "networkname": "rkuipers-default", + "netmask": "255.255.255.0", + "gateway": "10.1.1.1", + "ipaddress": "10.1.1.136", + "isolationuri": "lswitch:d3eaa05c-4392-4918-93ab-c4a979a7988a", + "broadcasturi": "lswitch:d3eaa05c-4392-4918-93ab-c4a979a7988a", + "traffictype": "Guest", + "type": "Isolated", + "isdefault": true, + "macaddress": "02:00:00:b9:01:1a" + }, { + "id": "f71e48da-fe40-458d-9033-ea27a3a92de1", + "networkid": "f59ee08a-66b5-40c6-baa7-392394c6cea9", + "networkname": "node_cellar_network", + "netmask": "255.255.255.0", + "gateway": "10.1.1.1", + "ipaddress": "10.1.1.253", + "isolationuri": "lswitch:bad1ca9f-b039-4ad1-b5fc-1f3147fad7fa", + "broadcasturi": "lswitch:bad1ca9f-b039-4ad1-b5fc-1f3147fad7fa", + "traffictype": "Guest", + "type": "Isolated", + "isdefault": false, + "macaddress": "02:00:4f:84:00:1d" + } + ], "hypervisor" : "XenServer", "tags" : [], "affinitygroup" : [], "displayvm" : true, "isdynamicallyscalable" : false + } + }, "created" : "2014-10-03T18:30:59+0200", "jobid" : "e521e748-1271-4587-8433-2d094704bfe9" + } +} + diff --git a/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_removenic.json b/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_removenic.json new file mode 100644 index 0000000000..3f0d556b6f --- /dev/null +++ b/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_removenic.json @@ -0,0 +1,37 @@ +{ + "queryasyncjobresultresponse" : { + "accountid" : 
"02c9bf08-6f36-44b1-a57f-df0708f90de4", "userid" : "6ef2b921-4ecf-4651-8188-f9868db73e73", "cmd" : "org.apache.cloudstack.api.command.user.vm.RemoveNicFromVMCmd", "jobstatus" : 1, "jobprocstatus" : 0, "jobresultcode" : 0, "jobresulttype" : "object", "jobresult" : { + "virtualmachine" : { + "id" : "903897b7-9241-4f93-bd08-3453c36a3c99", "name" : "test", "account" : "rkuipers_admin", "domainid" : "4b6e626c-9d50-4480-bf77-daae632c7ffd", "domain" : "rkuipers", "created" : "2014-10-03T17:24:37+0200", "state" : "Stopped", "haenable" : false, "zoneid" : "2", "zonename" : "BETA-SBP-DC-1", "guestosid" : "278699da-edfc-11e2-a249-005056ba4c5e", "securitygroup" : [], "nic" : [{ + "id": "15418e74-25e8-42d3-9bd7-eb55e57825fe", + "networkid": "de45b0ed-b5ae-4374-ac7c-aff3fb2aefa2", + "networkname": "rkuipers-default", + "netmask": "255.255.255.0", + "gateway": "10.1.1.1", + "ipaddress": "10.1.1.136", + "isolationuri": "lswitch:d3eaa05c-4392-4918-93ab-c4a979a7988a", + "broadcasturi": "lswitch:d3eaa05c-4392-4918-93ab-c4a979a7988a", + "traffictype": "Guest", + "type": "Isolated", + "isdefault": true, + "macaddress": "02:00:00:b9:01:1a" + }, { + "id": "f71e48da-fe40-458d-9033-ea27a3a92de1", + "networkid": "f59ee08a-66b5-40c6-baa7-392394c6cea9", + "networkname": "node_cellar_network", + "netmask": "255.255.255.0", + "gateway": "10.1.1.1", + "ipaddress": "10.1.1.253", + "isolationuri": "lswitch:bad1ca9f-b039-4ad1-b5fc-1f3147fad7fa", + "broadcasturi": "lswitch:bad1ca9f-b039-4ad1-b5fc-1f3147fad7fa", + "traffictype": "Guest", + "type": "Isolated", + "isdefault": false, + "macaddress": "02:00:4f:84:00:1d" + } + ], "hypervisor" : "XenServer", "tags" : [], "affinitygroup" : [], "displayvm" : true, "isdynamicallyscalable" : false + } + }, "created" : "2014-10-03T18:40:30+0200", "jobid" : "23fc6bfc-e354-41c8-bf8b-c0db8227feb4" + } +} + diff --git a/libcloud/test/compute/fixtures/cloudstack/removeNicFromVirtualMachine_default.json 
b/libcloud/test/compute/fixtures/cloudstack/removeNicFromVirtualMachine_default.json new file mode 100644 index 0000000000..ccada15482 --- /dev/null +++ b/libcloud/test/compute/fixtures/cloudstack/removeNicFromVirtualMachine_default.json @@ -0,0 +1 @@ +{ "removenicfromvirtualmachineresponse" : {"jobid":"removenic"} } \ No newline at end of file diff --git a/libcloud/test/compute/test_cloudstack.py b/libcloud/test/compute/test_cloudstack.py index 387695f05d..21643239f1 100644 --- a/libcloud/test/compute/test_cloudstack.py +++ b/libcloud/test/compute/test_cloudstack.py @@ -318,6 +318,46 @@ def test_ex_delete_network(self): result = self.driver.ex_delete_network(network=network) self.assertTrue(result) + def test_ex_list_nics(self): + _, fixture = CloudStackMockHttp()._load_fixture( + 'listNics_default.json') + + fixture_nic = fixture['listnicsresponse']['nic'] + vm = self.driver.list_nodes()[0] + nics = self.driver.ex_list_nics(vm) + + for i, nic in enumerate(nics): + self.assertEqual(nic.id, fixture_nic[i]['id']) + self.assertEqual(nic.network_id, + fixture_nic[i]['networkid']) + self.assertEqual(nic.net_mask, + fixture_nic[i]['netmask']) + self.assertEqual(nic.gateway, + fixture_nic[i]['gateway']) + self.assertEqual(nic.ip_address, + fixture_nic[i]['ipaddress']) + self.assertEqual(nic.is_default, + fixture_nic[i]['isdefault']) + self.assertEqual(nic.mac_address, + fixture_nic[i]['macaddress']) + + def test_ex_add_nic_to_node(self): + + vm = self.driver.list_nodes()[0] + network = self.driver.ex_list_networks()[0] + ip = "10.1.4.123" + + result = self.driver.ex_attach_nic_to_node(node=vm, network=network, ip_address=ip) + self.assertTrue(result) + + def test_ex_remove_nic_from_node(self): + + vm = self.driver.list_nodes()[0] + nic = self.driver.ex_list_nics(node=vm)[0] + + result = self.driver.ex_detach_nic_from_node(node=vm, nic=nic) + self.assertTrue(result) + def test_ex_list_vpc_offerings(self): _, fixture = CloudStackMockHttp()._load_fixture( 
'listVPCOfferings_default.json') From 46b94aa47d2ac6ca6d7b510a2ea72516d2755850 Mon Sep 17 00:00:00 2001 From: Jeroen de Korte Date: Sun, 28 Sep 2014 18:32:32 +0200 Subject: [PATCH 253/315] Cloudstack: Implemented VPC network acls and tests Signed-off-by: Sebastien Goasguen This closes #371 --- CHANGES.rst | 4 + libcloud/compute/drivers/cloudstack.py | 435 ++++++++++++++++-- .../createNetworkACLList_default.json | 1 + .../cloudstack/createNetworkACL_default.json | 1 + .../listNetworkACLLists_default.json | 1 + .../cloudstack/listNetworkACLs_default.json | 1 + .../cloudstack/listRouters_default.json | 1 + .../queryAsyncJobResult_111112.json | 1 + libcloud/test/compute/test_cloudstack.py | 63 ++- 9 files changed, 472 insertions(+), 36 deletions(-) create mode 100644 libcloud/test/compute/fixtures/cloudstack/createNetworkACLList_default.json create mode 100644 libcloud/test/compute/fixtures/cloudstack/createNetworkACL_default.json create mode 100644 libcloud/test/compute/fixtures/cloudstack/listNetworkACLLists_default.json create mode 100644 libcloud/test/compute/fixtures/cloudstack/listNetworkACLs_default.json create mode 100644 libcloud/test/compute/fixtures/cloudstack/listRouters_default.json create mode 100644 libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_111112.json diff --git a/CHANGES.rst b/CHANGES.rst index 9026450d6c..71dac63608 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -175,6 +175,10 @@ Compute (GITHUB-369) [Roeland Kuipers] +- Implements VPC network ACLs for CloudStack drive + (GITHUB-371) + [Jeroen de Korte] + Storage ~~~~~~~ diff --git a/libcloud/compute/drivers/cloudstack.py b/libcloud/compute/drivers/cloudstack.py index 454f1e7214..43617bd083 100644 --- a/libcloud/compute/drivers/cloudstack.py +++ b/libcloud/compute/drivers/cloudstack.py @@ -397,13 +397,18 @@ class CloudStackAddress(object): :param associated_network_id: The ID of the network where this address has been associated with :type associated_network_id: ``str`` + + :param 
vpc_id: VPC the ip belongs to + :type vpc_id: ``str`` """ - def __init__(self, id, address, driver, associated_network_id=None): + def __init__(self, id, address, driver, associated_network_id=None, + vpc_id=None): self.id = id self.address = address self.driver = driver self.associated_network_id = associated_network_id + self.vpc_id = vpc_id def release(self): self.driver.ex_release_public_ip(address=self) @@ -552,7 +557,8 @@ class CloudStackPortForwardingRule(object): """ def __init__(self, node, rule_id, address, protocol, public_port, - private_port, public_end_port=None, private_end_port=None): + private_port, public_end_port=None, private_end_port=None, + network_id=None): """ A Port forwarding rule for Source NAT. @@ -584,6 +590,12 @@ def __init__(self, node, rule_id, address, protocol, public_port, :param private_end_port: End of internal port range :type private_end_port: ``int`` + :param network_id: The network of the vm the Port Forwarding rule + will be created for. Required when public Ip + address is not associated with any Guest + network yet (VPC case) + :type network_id: ``str`` + :rtype: :class:`CloudStackPortForwardingRule` """ self.node = node @@ -602,6 +614,102 @@ def __eq__(self, other): return self.__class__ is other.__class__ and self.id == other.id +class CloudStackNetworkACLList(object): + """ + a Network ACL for the given VPC + """ + + def __init__(self, acl_id, name, vpc_id, driver, description=None): + """ + a Network ACL for the given VPC + + @note: This is a non-standard extension API, and only works for + Cloudstack. 
+ + :param acl_id: ACL ID + :type acl_id: ``int`` + + :param name: Name of the network ACL List + :type name: ``str`` + + :param vpc_id: Id of the VPC associated with this network ACL List + :type vpc_id: ``string`` + + :param description: Description of the network ACL List + :type description: ``str`` + + :rtype: :class:`CloudStackNetworkACLList` + """ + + self.id = acl_id + self.name = name + self.vpc_id = vpc_id + self.driver = driver + self.description = description + + def __repr__(self): + return (('') + % (self.id, self.name, self.vpc_id, + self.driver.name, self.description)) + + +class CloudStackNetworkACL(object): + """ + a ACL rule in the given network (the network has to belong to VPC) + """ + + def __init__(self, id, protocol, acl_id, action, cidr_list, + start_port, end_port, traffic_type=None): + """ + a ACL rule in the given network (the network has to belong to + VPC) + + @note: This is a non-standard extension API, and only works for + Cloudstack. + + :param id: the ID of the ACL Item + :type id ``int`` + + :param protocol: the protocol for the ACL rule. 
Valid values are + TCP/UDP/ICMP/ALL or valid protocol number + :type protocol: ``string`` + + :param acl_id: Name of the network ACL List + :type acl_id: ``str`` + + :param action: scl entry action, allow or deny + :type action: ``string`` + + :param cidr_list: the cidr list to allow traffic from/to + :type cidr_list: ``str`` + + :param start_port: the starting port of ACL + :type start_port: ``str`` + + :param end_port: the ending port of ACL + :type end_port: ``str`` + + :param traffic_type: the traffic type for the ACL,can be Ingress + or Egress, defaulted to Ingress if not specified + :type traffic_type: ``str`` + + :rtype: :class:`CloudStackNetworkACL` + """ + + self.id = id + self.protocol = protocol + self.acl_id = acl_id + self.action = action + self.cidr_list = cidr_list + self.start_port = start_port + self.end_port = end_port + self.traffic_type = traffic_type + + def __eq__(self, other): + return self.__class__ is other.__class__ and self.id == other.id + + class CloudStackDiskOffering(object): """ A disk offering within CloudStack. @@ -633,10 +741,11 @@ def __init__(self, displaytext, name, networkofferingid, id, zoneid, self.extra = extra or {} def __repr__(self): - return (('') - % (self.id, self.displaytext, self.name, - self.networkofferingid, self.zoneid, self.driver.name)) + return (('') + % (self.displaytext, self.name, self.networkofferingid, + self.id, self.zoneid, self.driver.name)) class CloudStackNetworkOffering(object): @@ -658,7 +767,7 @@ def __init__(self, name, display_text, guest_ip_type, id, def __repr__(self): return (('') + 'for_vpc=%s, driver=%s>') % (self.id, self.name, self.display_text, self.guest_ip_type, self.service_offering_id, self.for_vpc, self.driver.name)) @@ -698,8 +807,8 @@ class CloudStackVPC(object): Class representing a CloudStack VPC. 
""" - def __init__(self, display_text, name, vpc_offering_id, id, zone_id, cidr, - driver, extra=None): + def __init__(self, name, vpc_offering_id, id, cidr, driver, + zone_id=None, display_text=None, extra=None): self.display_text = display_text self.name = name self.vpc_offering_id = vpc_offering_id @@ -710,11 +819,11 @@ def __init__(self, display_text, name, vpc_offering_id, id, zone_id, cidr, self.extra = extra or {} def __repr__(self): - return (('') - % (self.id, self.displaytext, self.name, - self.vpc_offering_id, self.zoneid, self.cidr, - self.driver.name)) + return (('') + % (self.name, self.vpc_offering_id, self.id, + self.cidr, self.driver.name, self.zone_id, + self.display_text)) class CloudStackVPCOffering(object): @@ -731,13 +840,33 @@ def __init__(self, name, display_text, id, self.extra = extra or {} def __repr__(self): - return (('') - % (self.id, self.name, self.display_text, + return (('') + % (self.name, self.display_text, self.id, self.driver.name)) +class CloudStackRouter(object): + """ + Class representing a CloudStack Router. + """ + + def __init__(self, id, name, state, public_ip, vpc_id, driver): + self.id = id + self.name = name + self.state = state + self.public_ip = public_ip + self.vpc_id = vpc_id + self.driver = driver + + def __repr__(self): + return (('') + % (self.id, self.name, self.state, + self.public_ip, self.vpc_id, self.driver.name)) + + class CloudStackProject(object): """ Class representing a CloudStack Project. 
@@ -751,7 +880,7 @@ def __init__(self, id, name, display_text, driver, extra=None): self.extra = extra or {} def __repr__(self): - return (('') % (self.id, self.display_text, self.name, self.driver.name)) @@ -1361,18 +1490,48 @@ def ex_list_vpcs(self): for vpc in vpcs: networks.append(CloudStackVPC( - vpc['displaytext'], vpc['name'], vpc['vpcofferingid'], vpc['id'], - vpc['zoneid'], vpc['cidr'], - self)) + self, + vpc['zoneid'], + vpc['displaytext'])) return networks + def ex_list_routers(self, vpc_id=None): + """ + List routers + + :rtype ``list`` of :class:`CloudStackRouter` + """ + + args = {} + + if vpc_id is not None: + args['vpcid'] = vpc_id + + res = self._sync_request(command='listRouters', + params=args, + method='GET') + rts = res.get('router', []) + + routers = [] + for router in rts: + + routers.append(CloudStackRouter( + router['id'], + router['name'], + router['state'], + router['publicip'], + router['vpcid'], + self)) + + return routers + def ex_create_vpc(self, cidr, display_text, name, vpc_offering, - zoneid): + zone_id, network_domain=None): """ Creates a VPC, only available in advanced zones. 
@@ -1391,8 +1550,11 @@ def ex_create_vpc(self, cidr, display_text, name, vpc_offering, :param vpc_offering: the ID of the VPC offering :type vpc_offering: :class:'CloudStackVPCOffering` - :param zoneid: the ID of the availability zone - :type zoneid: ``str`` + :param zone_id: the ID of the availability zone + :type zone_id: ``str`` + + :param network_domain: Optional, the DNS domain of the network + :type network_domain: ``str`` :rtype: :class:`CloudStackVPC` @@ -1405,22 +1567,25 @@ def ex_create_vpc(self, cidr, display_text, name, vpc_offering, 'displaytext': display_text, 'name': name, 'vpcofferingid': vpc_offering.id, - 'zoneid': zoneid, + 'zoneid': zone_id, } + if network_domain is not None: + args['networkdomain'] = network_domain + result = self._sync_request(command='createVPC', params=args, method='GET') extra = self._get_extra_dict(result, extra_map) - vpc = CloudStackVPC(display_text, - name, + vpc = CloudStackVPC(name, vpc_offering.id, result['id'], - zoneid, cidr, self, + zone_id, + display_text, extra=extra) return vpc @@ -1747,20 +1912,38 @@ def ex_list_public_ips(self): return ips - def ex_allocate_public_ip(self, location=None): + def ex_allocate_public_ip(self, vpc_id=None, network_id=None, + location=None): """ Allocate a public IP. + :param vpc_id: VPC the ip belongs to + :type vpc_id: ``str`` + + :param network_id: Network where this IP is connected to. 
+ :type network_id: ''str'' + :param location: Zone :type location: :class:`NodeLocation` :rtype: :class:`CloudStackAddress` """ - if location is None: - location = self.list_locations()[0] + + args = {} + + if location is not None: + args['zoneid'] = location.id + else: + args['zoneid'] = self.list_locations()[0].id + + if vpc_id is not None: + args['vpcid'] = vpc_id + + if network_id is not None: + args['networkid'] = network_id addr = self._async_request(command='associateIpAddress', - params={'zoneid': location.id}, + params=args, method='GET') addr = addr['ipaddress'] addr = CloudStackAddress(addr['id'], addr['ipaddress'], self) @@ -2002,7 +2185,8 @@ def ex_create_port_forwarding_rule(self, node, address, protocol, public_end_port=None, private_end_port=None, - openfirewall=True): + openfirewall=True, + network_id=None): """ Creates a Port Forwarding Rule, used for Source NAT @@ -2021,6 +2205,12 @@ def ex_create_port_forwarding_rule(self, node, address, :param node: The virtual machine :type node: :class:`CloudStackNode` + :param network_id: The network of the vm the Port Forwarding rule + will be created for. 
Required when public Ip + address is not associated with any Guest + network yet (VPC case) + :type network_id: ``string`` + :rtype: :class:`CloudStackPortForwardingRule` """ args = { @@ -2035,6 +2225,8 @@ def ex_create_port_forwarding_rule(self, node, address, args['publicendport'] = int(public_end_port) if private_end_port: args['privateendport'] = int(private_end_port) + if network_id: + args['networkid'] = network_id result = self._async_request(command='createPortForwardingRule', params=args, @@ -2047,7 +2239,8 @@ def ex_create_port_forwarding_rule(self, node, address, public_port, private_port, public_end_port, - private_end_port) + private_end_port, + network_id) node.extra['port_forwarding_rules'].append(rule) node.public_ips.append(address.address) return rule @@ -2135,6 +2328,178 @@ def ex_delete_ip_forwarding_rule(self, node, rule): method='GET') return True + def ex_create_network_acllist(self, name, vpc_id, description=None): + """ + Create a ACL List for a network within a VPC. + + :param name: Name of the network ACL List + :type name: ``string`` + + :param vpc_id: Id of the VPC associated with this network ACL List + :type vpc_id: ``string`` + + :param description: Description of the network ACL List + :type description: ``string`` + + :rtype: :class:`CloudStackNetworkACLList` + """ + + args = { + 'name': name, + 'vpcid': vpc_id + } + if description: + args['description'] = description + + result = self._sync_request(command='createNetworkACLList', + params=args, + method='GET') + + acl_list = CloudStackNetworkACLList(result['id'], + name, + vpc_id, + self, + description) + return acl_list + + def ex_create_network_acl(self, protocol, acl_id, cidr_list, + start_port, end_port, action=None, + traffic_type=None): + """ + Creates a ACL rule in the given network (the network has to belong to + VPC) + + :param protocol: the protocol for the ACL rule. 
Valid values are + TCP/UDP/ICMP/ALL or valid protocol number + :type protocol: ``string`` + + :param acl_id: Name of the network ACL List + :type acl_id: ``str`` + + :param cidr_list: the cidr list to allow traffic from/to + :type cidr_list: ``str`` + + :param start_port: the starting port of ACL + :type start_port: ``str`` + + :param end_port: the ending port of ACL + :type end_port: ``str`` + + :param action: scl entry action, allow or deny + :type action: ``str`` + + :param traffic_type: the traffic type for the ACL,can be Ingress + or Egress, defaulted to Ingress if not specified + :type traffic_type: ``str`` + + :rtype: :class:`CloudStackNetworkACL` + """ + + args = { + 'protocol': protocol, + 'aclid': acl_id, + 'cidrlist': cidr_list, + 'startport': start_port, + 'endport': end_port + } + + if action: + args['action'] = action + else: + action = "allow" + + if traffic_type: + args['traffictype'] = traffic_type + + result = self._async_request(command='createNetworkACL', + params=args, + method='GET') + + acl = CloudStackNetworkACL(result['networkacl']['id'], + protocol, + acl_id, + action, + cidr_list, + start_port, + end_port, + traffic_type) + + return acl + + def ex_list_network_acllists(self): + """ + Lists all network ACLs + + :rtype: ``list`` of :class:`CloudStackNetworkACLList` + """ + acllists = [] + + result = self._sync_request(command='listNetworkACLLists', + method='GET') + + if not result: + return acllists + + for acllist in result['networkacllist']: + acllists.append(CloudStackNetworkACLList(acllist['id'], + acllist['name'], + acllist.get('vpcid', []), + self, + acllist['description'])) + + return acllists + + def ex_replace_network_acllist(self, acl_id, network_id): + """ + Create a ACL List for a network within a VPC.Replaces ACL associated + with a Network or private gateway + + :param acl_id: the ID of the network ACL + :type acl_id: ``string`` + + :param network_id: the ID of the network + :type network_id: ``string`` + + :rtype: 
:class:`CloudStackNetworkACLList` + """ + + args = { + 'aclid': acl_id, + 'networkid': network_id + } + + self._async_request(command='replaceNetworkACLList', + params=args, + method='GET') + + return True + + def ex_list_network_acl(self): + """ + Lists all network ACL items + + :rtype: ``list`` of :class:`CloudStackNetworkACL` + """ + acls = [] + + result = self._sync_request(command='listNetworkACLs', + method='GET') + + if not result: + return acls + + for acl in result['networkacl']: + acls.append(CloudStackNetworkACL(acl['id'], + acl['protocol'], + acl['aclid'], + acl['action'], + acl['cidrlist'], + acl.get('startport', []), + acl.get('endport', []), + acl['traffictype'])) + + return acls + def ex_list_keypairs(self, **kwargs): """ List Registered SSH Key Pairs diff --git a/libcloud/test/compute/fixtures/cloudstack/createNetworkACLList_default.json b/libcloud/test/compute/fixtures/cloudstack/createNetworkACLList_default.json new file mode 100644 index 0000000000..e05e8f39c4 --- /dev/null +++ b/libcloud/test/compute/fixtures/cloudstack/createNetworkACLList_default.json @@ -0,0 +1 @@ +{ "createnetworkacllistresponse" : {"id":"5692ffdc-d98e-4c7e-a53e-817835e97694","jobid":"0e6707fa-15d9-4cd4-8e16-fd58232321e1"} } \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/cloudstack/createNetworkACL_default.json b/libcloud/test/compute/fixtures/cloudstack/createNetworkACL_default.json new file mode 100644 index 0000000000..d7bd22de78 --- /dev/null +++ b/libcloud/test/compute/fixtures/cloudstack/createNetworkACL_default.json @@ -0,0 +1 @@ +{ "createnetworkaclresponse" : {"id":"54bd8cc6-5db0-4f64-8495-a0dfaf3fb4d5","jobid":"111112"} } \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/cloudstack/listNetworkACLLists_default.json b/libcloud/test/compute/fixtures/cloudstack/listNetworkACLLists_default.json new file mode 100644 index 0000000000..98f8ad4cab --- /dev/null +++ 
b/libcloud/test/compute/fixtures/cloudstack/listNetworkACLLists_default.json @@ -0,0 +1 @@ +{ "listnetworkacllistsresponse" : { "count":4 ,"networkacllist" : [ {"id":"54bd8cc6-5db0-4f64-8495-a0dfaf3fb4d5","name":"test","description":"test","vpcid":"cf698ec3-edd0-466c-a8f6-f9096700b6ec"} ] } } \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/cloudstack/listNetworkACLs_default.json b/libcloud/test/compute/fixtures/cloudstack/listNetworkACLs_default.json new file mode 100644 index 0000000000..f71c54ce6d --- /dev/null +++ b/libcloud/test/compute/fixtures/cloudstack/listNetworkACLs_default.json @@ -0,0 +1 @@ +{ "listnetworkaclsresponse" : { "count":1 ,"networkacl" : [ {"id":"54bd8cc6-5db0-4f64-8495-a0dfaf3fb4d5","protocol":"tcp","startport":"80","endport":"80","traffictype":"Ingress","state":"Active","cidrlist":"0.0.0.0/0","tags":[],"aclid":"4b1a2ef9-b53a-4e1b-9a45-0408b3ce3494","number":1,"action":"Allow"} ] } } \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/cloudstack/listRouters_default.json b/libcloud/test/compute/fixtures/cloudstack/listRouters_default.json new file mode 100644 index 0000000000..f8bf35478c --- /dev/null +++ b/libcloud/test/compute/fixtures/cloudstack/listRouters_default.json @@ -0,0 +1 @@ +{ "listroutersresponse" : { "count":1 ,"router" : [ 
{"id":"634c57f2-5ea2-487b-a5d3-5bb5550de691","zoneid":"2","zonename":"TEST-ZONE","dns1":"8.8.8.8","dns2":"8.8.8.4","networkdomain":"cloudify-mgmt.local","gateway":"178.237.34.1","name":"r-21786-VM","publicip":"178.237.34.110","publicmacaddress":"06:49:62:00:02:cf","publicnetmask":"255.255.255.0","publicnetworkid":"38204176-846f-4d24-bb3e-725e5756b62a","guestipaddress":"10.10.10.1","guestmacaddress":"02:00:57:2b:00:03","guestnetmask":"255.255.255.192","guestnetworkid":"3cfab464-d6b5-4944-bd77-24afae1190b2","templateid":"77f1ad92-4b83-4481-b7ca-5c6b1a9fa82f","created":"2014-09-30T11:29:46+0200","state":"Running","account":"test","domainid":"ee90435a-bba8-427f-90f9-02f9bf9e03aa","domain":"test","serviceofferingid":"01f93707-3a35-44a6-84e9-ea767287a6b2","serviceofferingname":"System Offering For Software Router","isredundantrouter":false,"redundantstate":"UNKNOWN","version":"4.3.0","vpcid":"3ec197ca-167f-40f0-b306-edf97a0490bf","role":"VIRTUAL_ROUTER","nic":[{"id":"b62719ae-c5dc-423d-928d-892aeee1798a","networkid":"38204176-846f-4d24-bb3e-725e5756b62a","netmask":"255.255.255.0","gateway":"178.237.34.1","ipaddress":"178.237.34.110","isolationuri":"vlan://50","broadcasturi":"vlan://50","traffictype":"Public","isdefault":true,"macaddress":"06:49:62:00:02:cf"},{"id":"cf9f148e-2e7b-4894-8071-50f35778e5f0","networkid":"3cfab464-d6b5-4944-bd77-24afae1190b2","netmask":"255.255.255.192","ipaddress":"10.10.10.1","isolationuri":"lswitch:3a682dc2-2c86-439a-97cf-e594957e9e72","broadcasturi":"lswitch:3a682dc2-2c86-439a-97cf-e594957e9e72","traffictype":"Guest","type":"Isolated","isdefault":false,"macaddress":"02:00:57:2b:00:03"}],"requiresupgrade":false} ] } } \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_111112.json b/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_111112.json new file mode 100644 index 0000000000..ff981d891b --- /dev/null +++ 
b/libcloud/test/compute/fixtures/cloudstack/queryAsyncJobResult_111112.json @@ -0,0 +1 @@ +{ "queryasyncjobresultresponse" : {"accountid":"5d7d4c5e-7e0b-4ac2-8550-713180d8a342","userid":"5fb4b286-ac58-44a7-acae-d47dbbac78d1","cmd":"org.apache.cloudstack.api.command.user.network.CreateNetworkACLCmd","jobstatus":1,"jobprocstatus":0,"jobresultcode":0,"jobresulttype":"object","jobresult":{"networkacl":{"id":"54bd8cc6-5db0-4f64-8495-a0dfaf3fb4d5","protocol":"tcp","startport":"1","endport":"1","traffictype":"Egress","state":"Active","cidrlist":"0.0.0.0/0","tags":[],"aclid":"0420fe87-b781-4579-a083-4890fb718d7e","number":20,"action":"Allow"}},"created":"2014-10-09T11:05:50+0200","jobid":"111112"} } \ No newline at end of file diff --git a/libcloud/test/compute/test_cloudstack.py b/libcloud/test/compute/test_cloudstack.py index 21643239f1..d9fd43448d 100644 --- a/libcloud/test/compute/test_cloudstack.py +++ b/libcloud/test/compute/test_cloudstack.py @@ -388,6 +388,20 @@ def test_ex_list_vpcs(self): fixture_vpcs[i]['vpcofferingid']) self.assertEqual(vpc.zone_id, fixture_vpcs[i]['zoneid']) + def test_ex_list_routers(self): + _, fixture = CloudStackMockHttp()._load_fixture( + 'listRouters_default.json') + fixture_routers = fixture['listroutersresponse']['router'] + + routers = self.driver.ex_list_routers() + + for i, router in enumerate(routers): + self.assertEqual(router.id, fixture_routers[i]['id']) + self.assertEqual(router.name, fixture_routers[i]['name']) + self.assertEqual(router.state, fixture_routers[i]['state']) + self.assertEqual(router.public_ip, fixture_routers[i]['publicip']) + self.assertEqual(router.vpc_id, fixture_routers[i]['vpcid']) + def test_ex_create_vpc(self): _, fixture = CloudStackMockHttp()._load_fixture( 'createVPC_default.json') @@ -399,7 +413,7 @@ def test_ex_create_vpc(self): display_text='cloud.local', name='cloud.local', vpc_offering=vpcoffer, - zoneid="2") + zone_id="2") self.assertEqual(vpc.id, fixture_vpc['id']) @@ -410,6 +424,53 @@ def 
test_ex_delete_vpc(self): result = self.driver.ex_delete_vpc(vpc=vpc) self.assertTrue(result) + def test_ex_create_network_acllist(self): + _, fixture = CloudStackMockHttp()._load_fixture( + 'createNetworkACLList_default.json') + + fixture_network_acllist = fixture['createnetworkacllistresponse'] + + vpc = self.driver.ex_list_vpcs()[0] + network_acllist = self.driver.ex_create_network_acllist( + name='test_acllist', + vpc_id=vpc.id, + description='test description') + + self.assertEqual(network_acllist.id, fixture_network_acllist['id']) + + def test_ex_list_network_acllist(self): + _, fixture = CloudStackMockHttp()._load_fixture( + 'listNetworkACLLists_default.json') + fixture_acllist = \ + fixture['listnetworkacllistsresponse']['networkacllist'] + + acllist = self.driver.ex_list_network_acllists() + + for i, acllist in enumerate(acllist): + self.assertEqual(acllist.id, + fixture_acllist[i]['id']) + self.assertEqual(acllist.name, + fixture_acllist[i]['name']) + self.assertEqual(acllist.description, + fixture_acllist[i]['description']) + + def test_ex_create_network_acl(self): + _, fixture = CloudStackMockHttp()._load_fixture( + 'createNetworkACL_default.json') + + fixture_network_acllist = fixture['createnetworkaclresponse'] + + acllist = self.driver.ex_list_network_acllists()[0] + + network_acl = self.driver.ex_create_network_acl( + protocol='test_acllist', + acl_id=acllist.id, + cidr_list='', + start_port='80', + end_port='80') + + self.assertEqual(network_acl.id, fixture_network_acllist['id']) + def test_ex_list_projects(self): _, fixture = CloudStackMockHttp()._load_fixture( 'listProjects_default.json') From 79436eebfe819661751d188c19e4971e2e5d58aa Mon Sep 17 00:00:00 2001 From: Alex Gaynor Date: Fri, 7 Nov 2014 00:57:47 -0300 Subject: [PATCH 254/315] Fixed some typos in the code and docs Closes #381 Signed-off-by: Tomaz Muraus --- docs/compute/drivers/cloudstack.rst | 2 +- docs/compute/drivers/outscale_inc.rst | 2 +- docs/compute/drivers/outscale_sas.rst | 2 
+- docs/compute/drivers/vsphere.rst | 2 +- docs/loadbalancer/drivers/elb.rst | 4 ++-- docs/third_party_drivers.rst | 2 +- libcloud/common/abiquo.py | 2 +- libcloud/compute/drivers/cloudstack.py | 6 +++--- libcloud/compute/drivers/ec2.py | 22 +++++++++++----------- libcloud/compute/drivers/elasticstack.py | 2 +- libcloud/compute/drivers/gce.py | 14 +++++++------- libcloud/compute/drivers/hpcloud.py | 2 +- libcloud/compute/drivers/kili.py | 2 +- libcloud/compute/drivers/openstack.py | 2 +- libcloud/compute/drivers/profitbricks.py | 2 +- libcloud/compute/drivers/vsphere.py | 2 +- libcloud/compute/ssh.py | 8 ++++---- libcloud/dns/drivers/gandi.py | 2 +- libcloud/loadbalancer/drivers/gce.py | 2 +- libcloud/storage/base.py | 2 +- libcloud/storage/drivers/atmos.py | 2 +- libcloud/storage/drivers/local.py | 2 +- 22 files changed, 44 insertions(+), 44 deletions(-) diff --git a/docs/compute/drivers/cloudstack.rst b/docs/compute/drivers/cloudstack.rst index a9ab631ec0..cff71e20ba 100644 --- a/docs/compute/drivers/cloudstack.rst +++ b/docs/compute/drivers/cloudstack.rst @@ -156,7 +156,7 @@ Advanced Zone examples Advanced zones in CloudStack provide tenant isolation via VLANs or SDN technologies like GRE/STT meshes. In a typical advanced zones, users will deploy nodes on a private network and will use NAT to access their nodes. -Therefore one needs to specifiy the network a node needs to be deployed on, +Therefore one needs to specify the network a node needs to be deployed on, and needs to setup port forwarding or IP forwarding rules. 1. 
Start an interactive shell on Ikoula public cloud diff --git a/docs/compute/drivers/outscale_inc.rst b/docs/compute/drivers/outscale_inc.rst index b765ad264e..a6786a2bd4 100644 --- a/docs/compute/drivers/outscale_inc.rst +++ b/docs/compute/drivers/outscale_inc.rst @@ -19,7 +19,7 @@ Outscale users can start virtual machines in the following regions: * (Soon) US East (New Jersey) Region * (Soon) Asia (Hong Kong) Region -Outscale INC is an american company: prices are in $. +Outscale INC is an American company: prices are in $. API Docs -------- diff --git a/docs/compute/drivers/outscale_sas.rst b/docs/compute/drivers/outscale_sas.rst index 80a1fcd9e0..441febcab0 100644 --- a/docs/compute/drivers/outscale_sas.rst +++ b/docs/compute/drivers/outscale_sas.rst @@ -19,7 +19,7 @@ Outscale users can start virtual machines in the following regions: * (Soon) US East (New Jersey) Region * (Soon) Asia (Hong Kong) Region -Outscale SAS is an european company: prices are in €. +Outscale SAS is a European company: prices are in €. API Docs -------- diff --git a/docs/compute/drivers/vsphere.rst b/docs/compute/drivers/vsphere.rst index 56013c88d4..243b63f28e 100644 --- a/docs/compute/drivers/vsphere.rst +++ b/docs/compute/drivers/vsphere.rst @@ -40,7 +40,7 @@ to the driver constructor Examples -------- -1. Connect by specfiying a host +1. Connect by specifying a host ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. literalinclude:: /examples/compute/vsphere/connect_host.py diff --git a/docs/loadbalancer/drivers/elb.rst b/docs/loadbalancer/drivers/elb.rst index 219454fd12..26b63f9826 100644 --- a/docs/loadbalancer/drivers/elb.rst +++ b/docs/loadbalancer/drivers/elb.rst @@ -65,7 +65,7 @@ Load Balancing interface for AWS. The first step in accessing ELB is to create a connection to the service. 
-So, when you instantiate a ELB driver you need to pass the following arguments +So, when you instantiate an ELB driver you need to pass the following arguments to the driver constructor: * ``key`` - Your AWS API key @@ -138,7 +138,7 @@ attributes depending on the policy type .. literalinclude:: /examples/loadbalancer/elb/create_lb_policy.py :language: python -If you get ``True``, then congratulation you have succesfully created +If you get ``True``, then congratulation you have successfully created the load balancer policy. Now there are some extension methods to look on diff --git a/docs/third_party_drivers.rst b/docs/third_party_drivers.rst index f9191d1f9d..20a41b6d65 100644 --- a/docs/third_party_drivers.rst +++ b/docs/third_party_drivers.rst @@ -1,7 +1,7 @@ Third Party Drivers =================== -Libcloud includes most of the drivers in it's core, but some providers and +Libcloud includes most of the drivers in its core, but some providers and developers for various reasons decide to release their driver as a separate PyPi package. diff --git a/libcloud/common/abiquo.py b/libcloud/common/abiquo.py index 041a9ffea3..69bf3c7b6d 100644 --- a/libcloud/common/abiquo.py +++ b/libcloud/common/abiquo.py @@ -134,7 +134,7 @@ def success(self): """ Determine if the request was successful. - Any of the 2XX HTTP response codes are accepted as successfull requests + Any of the 2XX HTTP response codes are accepted as successful requests :rtype: ``bool`` :return: successful request or not. diff --git a/libcloud/compute/drivers/cloudstack.py b/libcloud/compute/drivers/cloudstack.py index 43617bd083..d8475df4a4 100644 --- a/libcloud/compute/drivers/cloudstack.py +++ b/libcloud/compute/drivers/cloudstack.py @@ -2330,7 +2330,7 @@ def ex_delete_ip_forwarding_rule(self, node, rule): def ex_create_network_acllist(self, name, vpc_id, description=None): """ - Create a ACL List for a network within a VPC. + Create an ACL List for a network within a VPC. 
:param name: Name of the network ACL List :type name: ``string`` @@ -2366,7 +2366,7 @@ def ex_create_network_acl(self, protocol, acl_id, cidr_list, start_port, end_port, action=None, traffic_type=None): """ - Creates a ACL rule in the given network (the network has to belong to + Creates an ACL rule in the given network (the network has to belong to VPC) :param protocol: the protocol for the ACL rule. Valid values are @@ -2451,7 +2451,7 @@ def ex_list_network_acllists(self): def ex_replace_network_acllist(self, acl_id, network_id): """ - Create a ACL List for a network within a VPC.Replaces ACL associated + Create an ACL List for a network within a VPC.Replaces ACL associated with a Network or private gateway :param acl_id: the ID of the network ACL diff --git a/libcloud/compute/drivers/ec2.py b/libcloud/compute/drivers/ec2.py index 217ed4cf23..87e6afaa55 100644 --- a/libcloud/compute/drivers/ec2.py +++ b/libcloud/compute/drivers/ec2.py @@ -732,7 +732,7 @@ """ Sizes must be hardcoded because Outscale doesn't provide an API to fetch them. -Outscale cloud instances share some names with EC2 but have differents +Outscale cloud instances share some names with EC2 but have different specifications so declare them in another constant. """ OUTSCALE_INSTANCE_TYPES = { @@ -974,7 +974,7 @@ """ -The function manipulating Outscale cloud regions will be overriden because +The function manipulating Outscale cloud regions will be overridden because Outscale instances types are in a separate dict so also declare Outscale cloud regions in some other constants. 
""" @@ -2079,7 +2079,7 @@ def list_images(self, location=None, ex_image_ids=None, ex_owner=None, def get_image(self, image_id): """ - Get an image based on a image_id + Get an image based on an image_id :param image_id: Image identifier :type image_id: ``str`` @@ -2595,7 +2595,7 @@ def ex_register_image(self, name, description=None, architecture=None, :type image_location: ``str`` :param root_device_name: The device name for the root device - Required if registering a EBS-backed AMI + Required if registering an EBS-backed AMI :type root_device_name: ``str`` :param block_device_mapping: A dictionary of the disk layout @@ -3652,7 +3652,7 @@ def ex_delete_network_interface(self, network_interface): def ex_attach_network_interface_to_node(self, network_interface, node, device_index): """ - Attatch a network interface to an instance. + Attach a network interface to an instance. :param network_interface: EC2NetworkInterface instance :type network_interface: :class:`EC2NetworkInterface` @@ -3680,7 +3680,7 @@ def ex_attach_network_interface_to_node(self, network_interface, def ex_detach_network_interface(self, attachment_id, force=False): """ - Detatch a network interface from an instance. + Detach a network interface from an instance. 
:param attachment_id: The attachment ID associated with the interface @@ -4159,7 +4159,7 @@ def ex_delete_internet_gateway(self, gateway): def ex_attach_internet_gateway(self, gateway, network): """ - Attach a Internet gateway to a VPC + Attach an Internet gateway to a VPC :param gateway: The gateway to attach :type gateway: :class:`.VPCInternetGateway` @@ -4179,7 +4179,7 @@ def ex_attach_internet_gateway(self, gateway, network): def ex_detach_internet_gateway(self, gateway, network): """ - Detach a Internet gateway from a VPC + Detach an Internet gateway from a VPC :param gateway: The gateway to detach :type gateway: :class:`.VPCInternetGateway` @@ -4200,7 +4200,7 @@ def ex_detach_internet_gateway(self, gateway, network): def ex_list_route_tables(self, route_table_ids=None, filters=None): """ Describes one or more of a VPC's route tables. - These are are used to determine where network traffic is directed. + These are used to determine where network traffic is directed. :param route_table_ids: Return only route tables matching the provided route table IDs. If not specified, @@ -4848,7 +4848,7 @@ def _to_interfaces(self, response): def _to_interface(self, element, name=None): """ - Parse the XML element and return a EC2NetworkInterface object. + Parse the XML element and return an EC2NetworkInterface object. :param name: An optional name for the interface. 
If not provided then either tag with a key "Name" or the interface ID @@ -5760,7 +5760,7 @@ def ex_register_image(self, name, description=None, architecture=None, :type architecture: ``str`` :param root_device_name: The device name for the root device - Required if registering a EBS-backed AMI + Required if registering an EBS-backed AMI :type root_device_name: ``str`` :param block_device_mapping: A dictionary of the disk layout diff --git a/libcloud/compute/drivers/elasticstack.py b/libcloud/compute/drivers/elasticstack.py index 739fa8faff..18b581bf61 100644 --- a/libcloud/compute/drivers/elasticstack.py +++ b/libcloud/compute/drivers/elasticstack.py @@ -224,7 +224,7 @@ def list_nodes(self): return nodes def create_node(self, **kwargs): - """Creates a ElasticStack instance + """Creates an ElasticStack instance @inherits: :class:`NodeDriver.create_node` diff --git a/libcloud/compute/drivers/gce.py b/libcloud/compute/drivers/gce.py index 139ab78f05..a268ad052a 100644 --- a/libcloud/compute/drivers/gce.py +++ b/libcloud/compute/drivers/gce.py @@ -1311,7 +1311,7 @@ def create_node(self, name, size, image, location=None, 'default' service account email and a scope of 'devstorage.read_only'. Also accepts the aliases defined in - 'gcloud cmopute'. + 'gcloud compute'. :type ex_service_accounts: ``list`` :return: A Node object for the new node. @@ -1446,7 +1446,7 @@ def ex_create_multiple_nodes(self, base_name, size, image, number, 'default' service account email and a scope of 'devstorage.read_only'. Also accepts the aliases defined in - 'gcloud cmopute'. + 'gcloud compute'. :type ex_service_accounts: ``list`` :keyword timeout: The number of seconds to wait for all nodes to be @@ -2026,7 +2026,7 @@ def deploy_node(self, name, size, image, script, location=None, 'default' service account email and a scope of 'devstorage.read_only'. Also accepts the aliases defined in - 'gcloud cmopute'. + 'gcloud compute'. 
:type ex_service_accounts: ``list`` :return: A Node object for the new node. @@ -2126,7 +2126,7 @@ def ex_set_volume_auto_delete(self, volume, node, auto_delete=True): :keyword auto_delete: Flag to set for the auto-delete value :type auto_delete: ``bool`` (default True) - :return: True if successfull + :return: True if successful :rtype: ``bool`` """ request = '/zones/%s/instances/%s/setDiskAutoDelete' % ( @@ -2163,7 +2163,7 @@ def ex_delete_image(self, image): :param image: Image object to delete :type image: ``str`` or :class:`GCENodeImage` - :return: True if successfull + :return: True if successful :rtype: ``bool`` """ if not hasattr(image, 'name'): @@ -2186,7 +2186,7 @@ def ex_deprecate_image(self, image, replacement, state=None): :param state: State of the image :type state: ``str`` - :return: True if successfull + :return: True if successful :rtype: ``bool`` """ if not hasattr(image, 'name'): @@ -2996,7 +2996,7 @@ def _create_node_req(self, name, size, image, location, network, 'default' service account email and a scope of 'devstorage.read_only'. Also accepts the aliases defined in - 'gcloud cmopute'. + 'gcloud compute'. :type ex_service_accounts: ``list`` :return: A tuple containing a request string and a node_data dict. diff --git a/libcloud/compute/drivers/hpcloud.py b/libcloud/compute/drivers/hpcloud.py index 3c4360abe0..5a50d1f9e3 100644 --- a/libcloud/compute/drivers/hpcloud.py +++ b/libcloud/compute/drivers/hpcloud.py @@ -13,7 +13,7 @@ # limitations under the License. """ -HP Public cloud driver which is esentially just a small wrapper around +HP Public cloud driver which is essentially just a small wrapper around OpenStack driver. """ diff --git a/libcloud/compute/drivers/kili.py b/libcloud/compute/drivers/kili.py index f98e131755..11c1c7bc81 100644 --- a/libcloud/compute/drivers/kili.py +++ b/libcloud/compute/drivers/kili.py @@ -13,7 +13,7 @@ # limitations under the License. 
""" -HP Public cloud driver which is esentially just a small wrapper around +HP Public cloud driver which is essentially just a small wrapper around OpenStack driver. """ diff --git a/libcloud/compute/drivers/openstack.py b/libcloud/compute/drivers/openstack.py index 750891d375..056d016ea5 100644 --- a/libcloud/compute/drivers/openstack.py +++ b/libcloud/compute/drivers/openstack.py @@ -227,7 +227,7 @@ def list_images(self, location=None, ex_only_active=True): def get_image(self, image_id): """ - Get an image based on a image_id + Get an image based on an image_id @inherits: :class:`NodeDriver.get_image` diff --git a/libcloud/compute/drivers/profitbricks.py b/libcloud/compute/drivers/profitbricks.py index 0d4a5ea101..7b23cab6d6 100644 --- a/libcloud/compute/drivers/profitbricks.py +++ b/libcloud/compute/drivers/profitbricks.py @@ -522,7 +522,7 @@ def destroy_node(self, node, ex_remove_attached_disks=False): :param node: The node you wish to destroy. :type volume: :class:`Node` - :param ex_remove_attached_disks: True to destory all attached volumes. + :param ex_remove_attached_disks: True to destroy all attached volumes. :type ex_remove_attached_disks: : ``bool`` :rtype: : ``bool`` diff --git a/libcloud/compute/drivers/vsphere.py b/libcloud/compute/drivers/vsphere.py index 5a51007f78..4206d43bf4 100644 --- a/libcloud/compute/drivers/vsphere.py +++ b/libcloud/compute/drivers/vsphere.py @@ -62,7 +62,7 @@ class VSphereConnection(ConnectionUserAndKey): def __init__(self, user_id, key, secure=True, host=None, port=None, url=None, timeout=None): if host and url: - raise ValueError('host and url arguments are mutally exclusive') + raise ValueError('host and url arguments are mutually exclusive') if host: host_or_url = host diff --git a/libcloud/compute/ssh.py b/libcloud/compute/ssh.py index 1a88e9f38f..7f4719c2eb 100644 --- a/libcloud/compute/ssh.py +++ b/libcloud/compute/ssh.py @@ -119,8 +119,8 @@ def connect(self): """ Connect to the remote node over SSH. 
- :return: True if the connection has been successfuly established, False - otherwise. + :return: True if the connection has been successfully established, + False otherwise. :rtype: ``bool`` """ raise NotImplementedError( @@ -155,7 +155,7 @@ def delete(self, path): :type path: ``str`` :keyword path: File path on the remote node. - :return: True if the file has been successfuly deleted, False + :return: True if the file has been successfully deleted, False otherwise. :rtype: ``bool`` """ @@ -178,7 +178,7 @@ def close(self): """ Shutdown connection to the remote node. - :return: True if the connection has been successfuly closed, False + :return: True if the connection has been successfully closed, False otherwise. :rtype: ``bool`` """ diff --git a/libcloud/dns/drivers/gandi.py b/libcloud/dns/drivers/gandi.py index 48a6ed5d9e..792110da40 100644 --- a/libcloud/dns/drivers/gandi.py +++ b/libcloud/dns/drivers/gandi.py @@ -40,7 +40,7 @@ class NewZoneVersion(object): In effect, this is a transaction. Any calls made inside this context manager will be applied to a new version - id. If your changes are succesful (and only if they are successful) they + id. If your changes are successful (and only if they are successful) they are activated. """ diff --git a/libcloud/loadbalancer/drivers/gce.py b/libcloud/loadbalancer/drivers/gce.py index e1fa29956d..c754221004 100644 --- a/libcloud/loadbalancer/drivers/gce.py +++ b/libcloud/loadbalancer/drivers/gce.py @@ -21,7 +21,7 @@ from libcloud.loadbalancer.base import LoadBalancer, Member, Driver, Algorithm from libcloud.compute.drivers.gce import GCEConnection, GCENodeDriver -# GCE doesn't actually give you a algorithm choice, but this is here simply as +# GCE doesn't actually give you an algorithm choice, but this is here simply as # the closest match. 
The actual algorithm is described here: # https://developers.google.com/compute/docs/load-balancing/#overview DEFAULT_ALGORITHM = Algorithm.RANDOM diff --git a/libcloud/storage/base.py b/libcloud/storage/base.py index f12b906409..5e4f09afd4 100644 --- a/libcloud/storage/base.py +++ b/libcloud/storage/base.py @@ -284,7 +284,7 @@ def get_object(self, container_name, object_name): def get_object_cdn_url(self, obj): """ - Return a object CDN URL. + Return an object CDN URL. :param obj: Object instance :type obj: :class:`Object` diff --git a/libcloud/storage/drivers/atmos.py b/libcloud/storage/drivers/atmos.py index a1e12cc8e5..c52be0341b 100644 --- a/libcloud/storage/drivers/atmos.py +++ b/libcloud/storage/drivers/atmos.py @@ -387,7 +387,7 @@ def enable_object_cdn(self, obj): def get_object_cdn_url(self, obj, expiry=None, use_object=False): """ - Return a object CDN URL. + Return an object CDN URL. :param obj: Object instance :type obj: :class:`Object` diff --git a/libcloud/storage/drivers/local.py b/libcloud/storage/drivers/local.py index 985243dd32..60de8ce326 100644 --- a/libcloud/storage/drivers/local.py +++ b/libcloud/storage/drivers/local.py @@ -285,7 +285,7 @@ def get_object(self, container_name, object_name): def get_object_cdn_url(self, obj): """ - Return a object CDN URL. + Return an object CDN URL. :param obj: Object instance :type obj: :class:`Object` From cf8cffd6eb3bfd18b6f6c655a729c881abfc0973 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Fri, 7 Nov 2014 16:44:26 +0800 Subject: [PATCH 255/315] Fix typo in the changelog. --- CHANGES.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index 71dac63608..a733d558e7 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -175,7 +175,7 @@ Compute (GITHUB-369) [Roeland Kuipers] -- Implements VPC network ACLs for CloudStack drive +- Implements VPC network ACLs for CloudStack driver. 
(GITHUB-371) [Jeroen de Korte] From cb3094b774b1f771de4cde54d3bda12354800d11 Mon Sep 17 00:00:00 2001 From: Alex Gaynor Date: Fri, 7 Nov 2014 11:09:18 -0300 Subject: [PATCH 256/315] Fixed another two typos Closes #383 Signed-off-by: Tomaz Muraus --- CHANGES.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index a733d558e7..f5501ae8bc 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -455,7 +455,7 @@ Compute (GITHUB-301) [Csaba Hoch] -- Add extension methods for manging floating IPs (ex_get_floating_ip, +- Add extension methods for managing floating IPs (ex_get_floating_ip, ex_create_floating_ip, ex_delete_floating_ip) to the Openstack 1.1 driver. (GITHUB-301) [Csaba Hoch] @@ -518,7 +518,7 @@ Storage [Tomaz MUraus] - Fix a bug in the OpenStack Swift driver which prevented the driver to work - with installations where region names in the service catalog werent upper + with installations where region names in the service catalog weren't upper case. (LIBCLOUD-576, GITHUB-311) [Zak Estrada] From 2c652c0bc2a4572783167b8fa99773e0d9d74b8a Mon Sep 17 00:00:00 2001 From: Borja Martin Date: Mon, 10 Nov 2014 16:16:42 +0100 Subject: [PATCH 257/315] Added images from ubuntu-os-cloud image project Closes #385 Signed-off-by: Tomaz Muraus --- CHANGES.rst | 10 ++++++++++ libcloud/compute/drivers/gce.py | 3 ++- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index f5501ae8bc..e2136c31ff 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,6 +1,16 @@ Changelog ========= +Changes with Apache Libcloud in development +------------------------------------------- + +Compute +~~~~~~~ + +- Add missing ``ubuntu-os-cloud`` images to the GCE driver. 
+ (LIBCLOUD-632, GITHUB-385) + [Borja Martin] + Changes with Apache Libcloud 0.16.0 ----------------------------------- diff --git a/libcloud/compute/drivers/gce.py b/libcloud/compute/drivers/gce.py index a268ad052a..91f4629a63 100644 --- a/libcloud/compute/drivers/gce.py +++ b/libcloud/compute/drivers/gce.py @@ -2548,7 +2548,8 @@ def ex_get_image(self, partial_name): image = self._match_images('coreos-cloud', partial_name) elif partial_name.startswith('opensuse'): image = self._match_images('opensuse-cloud', partial_name) - + elif partial_name.startswith('ubuntu'): + image = self._match_images('ubuntu-os-cloud', partial_name) return image def ex_get_network(self, name): From 10b21cff9735f8ec16dc285d8e90e379d78dc782 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Tue, 11 Nov 2014 15:43:42 +0800 Subject: [PATCH 258/315] Update examples to use "region" argument instead of using old, unsupported approach. --- example_compute.py | 6 +++--- example_loadbalancer.py | 7 +++---- example_storage.py | 4 ++-- 3 files changed, 8 insertions(+), 9 deletions(-) diff --git a/example_compute.py b/example_compute.py index ed57184948..346e7d4684 100644 --- a/example_compute.py +++ b/example_compute.py @@ -16,11 +16,11 @@ from libcloud.compute.types import Provider from libcloud.compute.providers import get_driver -EC2 = get_driver(Provider.EC2_US_EAST) +EC2 = get_driver(Provider.EC2) Rackspace = get_driver(Provider.RACKSPACE) -drivers = [EC2('access key id', 'secret key'), - Rackspace('username', 'api key')] +drivers = [EC2('access key id', 'secret key', region='us-east-1'), + Rackspace('username', 'api key', region='iad')] nodes = [driver.list_nodes() for driver in drivers] diff --git a/example_loadbalancer.py b/example_loadbalancer.py index c480cc4a78..d47a0febcf 100644 --- a/example_loadbalancer.py +++ b/example_loadbalancer.py @@ -24,9 +24,8 @@ def main(): - Rackspace = get_driver(Provider.RACKSPACE_US) - - driver = Rackspace('username', 'api key') + cls = 
get_driver(Provider.RACKSPACE) + driver = cls('username', 'api key', region='ord') balancers = driver.list_balancers() @@ -68,5 +67,5 @@ def main(): # remove the balancer driver.destroy_balancer(new_balancer) -if __name__ == "__main__": +if __name__ == '__main__': main() diff --git a/example_storage.py b/example_storage.py index 87b5402608..be3058c9e1 100644 --- a/example_storage.py +++ b/example_storage.py @@ -18,9 +18,9 @@ from libcloud.storage.types import Provider from libcloud.storage.providers import get_driver -CloudFiles = get_driver(Provider.CLOUDFILES_UK) +CloudFiles = get_driver(Provider.CLOUDFILES) -driver = CloudFiles('access key id', 'secret key') +driver = CloudFiles('access key id', 'secret key', region='ord') containers = driver.list_containers() container_objects = driver.list_container_objects(containers[0]) From 526d06b86423ed1efff1b538fe0e364d29c4261e Mon Sep 17 00:00:00 2001 From: ZuluPro Date: Mon, 10 Nov 2014 20:04:27 +0100 Subject: [PATCH 259/315] Added us-east-2 and us-east-3 regions to the Joyent compute driver. Closes #386 Signed-off-by: Tomaz Muraus --- CHANGES.rst | 4 ++++ libcloud/compute/drivers/joyent.py | 7 ++++++- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index e2136c31ff..09c0212d84 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -11,6 +11,10 @@ Compute (LIBCLOUD-632, GITHUB-385) [Borja Martin] +- Add new `us-east-2` and `us-east-3` region to the Joyent driver. 
+ (GITHUB-386) + [ZuluPro] + Changes with Apache Libcloud 0.16.0 ----------------------------------- diff --git a/libcloud/compute/drivers/joyent.py b/libcloud/compute/drivers/joyent.py index 6d8142e8f8..42bbae22fd 100644 --- a/libcloud/compute/drivers/joyent.py +++ b/libcloud/compute/drivers/joyent.py @@ -46,7 +46,12 @@ 'deleted': NodeState.TERMINATED } -VALID_REGIONS = ['us-east-1', 'us-west-1', 'us-sw-1', 'eu-ams-1'] +VALID_REGIONS = [ + 'us-east-1', 'us-east-2', 'us-east-3', + 'us-west-1', + 'us-sw-1', + 'eu-ams-1' +] DEFAULT_REGION = 'us-east-1' From 9defdb5e2f6e51161eb5c9f78e6b750a512f05c8 Mon Sep 17 00:00:00 2001 From: "clark.kang" Date: Sun, 9 Nov 2014 11:57:14 +0900 Subject: [PATCH 260/315] Add Vultr compute driver Closes #384 Signed-off-by: Tomaz Muraus --- docs/compute/drivers/vultr.rst | 30 +++ .../compute/vultr/vultr_compute_simple.py | 5 + libcloud/compute/drivers/vultr.py | 184 ++++++++++++++++++ libcloud/compute/providers.py | 2 + libcloud/compute/types.py | 2 + .../compute/fixtures/vultr/list_images.json | 1 + .../fixtures/vultr/list_locations.json | 1 + .../compute/fixtures/vultr/list_nodes.json | 1 + .../compute/fixtures/vultr/list_sizes.json | 1 + libcloud/test/compute/test_vultr.py | 114 +++++++++++ libcloud/test/secrets.py-dist | 1 + 11 files changed, 342 insertions(+) create mode 100644 docs/compute/drivers/vultr.rst create mode 100644 docs/examples/compute/vultr/vultr_compute_simple.py create mode 100644 libcloud/compute/drivers/vultr.py create mode 100644 libcloud/test/compute/fixtures/vultr/list_images.json create mode 100644 libcloud/test/compute/fixtures/vultr/list_locations.json create mode 100644 libcloud/test/compute/fixtures/vultr/list_nodes.json create mode 100644 libcloud/test/compute/fixtures/vultr/list_sizes.json create mode 100644 libcloud/test/compute/test_vultr.py diff --git a/docs/compute/drivers/vultr.rst b/docs/compute/drivers/vultr.rst new file mode 100644 index 0000000000..f062afd870 --- /dev/null +++ 
b/docs/compute/drivers/vultr.rst @@ -0,0 +1,30 @@ +Vultr Compute Driver Documentation +======================================== + +`Vultr`_ is a public cloud provider based in mulitiple counties. + +How to get API Key +------------------ + +Visit https://my.vultr.com/settings/#API + +You can see Your API Key in API Information Section in the middle of page. +If you want to change your API Key, press the Regenerate Button. + +Examples +-------- + +1. Create vultr driver - how to create vultr driver with api key +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. literalinclude:: /examples/compute/vultr/vultr_compute_simple.py + :language: python + +API Docs +-------- + +.. autoclass:: libcloud.compute.drivers.vultr.VultrNodeDriver + :members: + :inherited-members: + +.. _`vultr`: http://www.vultr.com diff --git a/docs/examples/compute/vultr/vultr_compute_simple.py b/docs/examples/compute/vultr/vultr_compute_simple.py new file mode 100644 index 0000000000..5db958b0a7 --- /dev/null +++ b/docs/examples/compute/vultr/vultr_compute_simple.py @@ -0,0 +1,5 @@ +from libcloud.compute.types import Provider +from libcloud.compute.providers import get_driver + +cls = get_driver(Provider.VULTR) +driver = cls('your api key') diff --git a/libcloud/compute/drivers/vultr.py b/libcloud/compute/drivers/vultr.py new file mode 100644 index 0000000000..ab47aa056c --- /dev/null +++ b/libcloud/compute/drivers/vultr.py @@ -0,0 +1,184 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Vultr Driver +""" + +import time + +from libcloud.utils.py3 import httplib +from libcloud.utils.py3 import urlencode + +from libcloud.common.base import ConnectionKey, JsonResponse +from libcloud.compute.types import Provider, NodeState +from libcloud.common.types import LibcloudError, InvalidCredsError +from libcloud.compute.base import NodeDriver +from libcloud.compute.base import Node, NodeImage, NodeSize, NodeLocation + + +class VultrResponse(JsonResponse): + def parse_error(self): + if self.status == httplib.OK: + body = self.parse_body() + return body + elif self.status == httplib.FORBIDDEN: + raise InvalidCredsError(self.body) + else: + raise LibcloudError(self.body) + + +class VultrConnection(ConnectionKey): + """ + Connection class for the Vultr driver. + """ + + host = 'api.vultr.com' + responseCls = VultrResponse + + def add_default_params(self, params): + """ + Add parameters that are necessary for every request + + This method add ``api_key`` to + the request. + """ + params['api_key'] = self.key + return params + + def encode_data(self, data): + return urlencode(data) + + def get(self, url): + return self.request(url) + + def post(self, url, data): + headers = {'Content-Type': 'application/x-www-form-urlencoded'} + return self.request(url, data=data, headers=headers, method='POST') + + +class VultrNodeDriver(NodeDriver): + """ + VultrNode node driver. 
+ """ + + connectionCls = VultrConnection + + type = Provider.VULTR + name = 'Vultr' + website = 'https://www.vultr.com' + + NODE_STATE_MAP = {'pending': NodeState.PENDING, + 'active': NodeState.RUNNING} + + def list_nodes(self): + return self._list_resources('/v1/server/list', self._to_node) + + def list_locations(self): + return self._list_resources('/v1/regions/list', self._to_location) + + def list_sizes(self): + return self._list_resources('/v1/plans/list', self._to_size) + + def list_images(self): + return self._list_resources('/v1/os/list', self._to_image) + + def create_node(self, name, size, image, location): + params = {'DCID': location.id, 'VPSPLANID': size.id, + 'OSID': image.id, 'label': name} + + result = self.connection.post('/v1/server/create', params) + if result.status != httplib.OK: + return False + + subid = result.object['SUBID'] + + retry_count = 3 + created_node = None + + for i in range(retry_count): + try: + nodes = self.list_nodes() + created_node = [n for n in nodes if n.id == subid][0] + except IndexError: + time.sleep(1) + pass + else: + break + + return created_node + + def reboot_node(self, node): + params = {'SUBID': node.id} + res = self.connection.post('/v1/server/reboot', params) + + return res.status == httplib.OK + + def destroy_node(self, node): + params = {'SUBID': node.id} + res = self.connection.post('/v1/server/destroy', params) + + return res.status == httplib.OK + + def _list_resources(self, url, tranform_func): + data = self.connection.get(url).object + sorted_key = sorted(data) + return [tranform_func(data[key]) for key in sorted_key] + + def _to_node(self, data): + if 'status' in data: + state = self.NODE_STATE_MAP.get(data['status'], NodeState.UNKNOWN) + if state == NodeState.RUNNING and \ + data['power_status'] != 'running': + state = NodeState.STOPPED + else: + state = NodeState.UNKNOWN + + if 'main_ip' in data and data['main_ip'] is not None: + public_ips = [data['main_ip']] + else: + public_ips = [] + + extra_keys 
= [] + extra = {} + for key in extra_keys: + if key in data: + extra[key] = data[key] + + node = Node(id=data['SUBID'], name=data['label'], state=state, + public_ips=public_ips, private_ips=None, extra=extra, + driver=self) + + return node + + def _to_location(self, data): + return NodeLocation(id=data['DCID'], name=data['name'], + country=data['country'], driver=self) + + def _to_size(self, data): + extra = {'vcpu_count': int(data['vcpu_count'])} + ram = int(data['ram']) + disk = int(data['disk']) + bandwidth = float(data['bandwidth']) + price = float(data['price_per_month']) + + return NodeSize(id=data['VPSPLANID'], name=data['name'], + ram=ram, disk=disk, + bandwidth=bandwidth, price=price, + extra=extra, driver=self) + + def _to_image(self, data): + extra = {'arch': data['arch'], 'family': data['family']} + return NodeImage(id=data['OSID'], name=data['name'], extra=extra, + driver=self) diff --git a/libcloud/compute/providers.py b/libcloud/compute/providers.py index b449e78353..bdceafc896 100644 --- a/libcloud/compute/providers.py +++ b/libcloud/compute/providers.py @@ -151,6 +151,8 @@ ('libcloud.compute.drivers.vsphere', 'VSphereNodeDriver'), Provider.PROFIT_BRICKS: ('libcloud.compute.drivers.profitbricks', 'ProfitBricksNodeDriver'), + Provider.VULTR: + ('libcloud.compute.drivers.vultr', 'VultrNodeDriver'), # Deprecated Provider.CLOUDSIGMA_US: diff --git a/libcloud/compute/types.py b/libcloud/compute/types.py index f07bc4873a..c21a4d17b1 100644 --- a/libcloud/compute/types.py +++ b/libcloud/compute/types.py @@ -77,6 +77,7 @@ class Provider(object): :cvar OUTSCALE_SAS: Outscale SAS driver. :cvar OUTSCALE_INC: Outscale INC driver. :cvar PROFIT_BRICKS: ProfitBricks driver. + :cvar VULTR: vultr driver. 
""" DUMMY = 'dummy' EC2 = 'ec2_us_east' @@ -124,6 +125,7 @@ class Provider(object): OUTSCALE_INC = 'outscale_inc' VSPHERE = 'vsphere' PROFIT_BRICKS = 'profitbricks' + VULTR = 'vultr' # OpenStack based providers HPCLOUD = 'hpcloud' diff --git a/libcloud/test/compute/fixtures/vultr/list_images.json b/libcloud/test/compute/fixtures/vultr/list_images.json new file mode 100644 index 0000000000..139f2afc7b --- /dev/null +++ b/libcloud/test/compute/fixtures/vultr/list_images.json @@ -0,0 +1 @@ +{"127":{"OSID":127,"name":"CentOS 6 x64","arch":"x64","family":"centos","windows":false},"147":{"OSID":147,"name":"CentOS 6 i386","arch":"i386","family":"centos","windows":false},"162":{"OSID":162,"name":"CentOS 5 x64","arch":"x64","family":"centos","windows":false},"163":{"OSID":163,"name":"CentOS 5 i386","arch":"i386","family":"centos","windows":false},"167":{"OSID":167,"name":"CentOS 7 x64","arch":"x64","family":"centos","windows":false},"160":{"OSID":160,"name":"Ubuntu 14.04 x64","arch":"x64","family":"ubuntu","windows":false},"161":{"OSID":161,"name":"Ubuntu 14.04 i386","arch":"i386","family":"ubuntu","windows":false},"128":{"OSID":128,"name":"Ubuntu 12.04 x64","arch":"x64","family":"ubuntu","windows":false},"148":{"OSID":148,"name":"Ubuntu 12.04 i386","arch":"i386","family":"ubuntu","windows":false},"181":{"OSID":181,"name":"Ubuntu 14.10 x64","arch":"x64","family":"ubuntu","windows":false},"182":{"OSID":182,"name":"Ubuntu 14.10 i386","arch":"i386","family":"ubuntu","windows":false},"139":{"OSID":139,"name":"Debian 7 x64 (wheezy)","arch":"x64","family":"debian","windows":false},"152":{"OSID":152,"name":"Debian 7 i386 (wheezy)","arch":"i386","family":"debian","windows":false},"140":{"OSID":140,"name":"FreeBSD 10 x64","arch":"x64","family":"freebsd","windows":false},"124":{"OSID":124,"name":"Windows 2012 R2 
x64","arch":"x64","family":"windows","windows":true},"159":{"OSID":159,"name":"Custom","arch":"x64","family":"iso","windows":false},"164":{"OSID":164,"name":"Snapshot","arch":"x64","family":"snapshot","windows":false},"180":{"OSID":180,"name":"Backup","arch":"x64","family":"backup","windows":false}} \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/vultr/list_locations.json b/libcloud/test/compute/fixtures/vultr/list_locations.json new file mode 100644 index 0000000000..6cf6d0ba46 --- /dev/null +++ b/libcloud/test/compute/fixtures/vultr/list_locations.json @@ -0,0 +1 @@ +{"6":{"DCID":"6","name":"Atlanta","country":"US","continent":"North America","state":"GA"},"2":{"DCID":"2","name":"Chicago","country":"US","continent":"North America","state":"IL"},"3":{"DCID":"3","name":"Dallas","country":"US","continent":"North America","state":"TX"},"5":{"DCID":"5","name":"Los Angeles","country":"US","continent":"North America","state":"CA"},"39":{"DCID":"39","name":"Miami","country":"US","continent":"","state":"FL"},"1":{"DCID":"1","name":"New Jersey","country":"US","continent":"North America","state":"NJ"},"4":{"DCID":"4","name":"Seattle","country":"US","continent":"North America","state":"WA"},"12":{"DCID":"12","name":"Silicon Valley","country":"US","continent":"North America","state":"CA"},"7":{"DCID":"7","name":"Amsterdam","country":"NL","continent":"Europe","state":""},"25":{"DCID":"25","name":"Tokyo","country":"JP","continent":"Asia","state":""},"8":{"DCID":"8","name":"London","country":"GB","continent":"Europe","state":""},"24":{"DCID":"24","name":"France","country":"FR","continent":"Europe","state":""},"9":{"DCID":"9","name":"Frankfurt","country":"DE","continent":"Europe","state":""},"19":{"DCID":"19","name":"Australia","country":"AU","continent":"Australia","state":""}} diff --git a/libcloud/test/compute/fixtures/vultr/list_nodes.json b/libcloud/test/compute/fixtures/vultr/list_nodes.json new file mode 100644 index 0000000000..e12ba4a2a7 --- 
/dev/null +++ b/libcloud/test/compute/fixtures/vultr/list_nodes.json @@ -0,0 +1 @@ +{"1":{"SUBID":"1","os":"Ubuntu 12.04 x64","ram":"1024 MB","disk":"Virtual 20 GB","main_ip":"108.61.206.153","vcpu_count":"1","location":"Los Angeles","DCID":"5","default_password":"twizewnatpom!7","date_created":"2014-03-21 12:46:35","pending_charges":"1.92","status":"active","cost_per_month":"7.00","current_bandwidth_gb":0.929,"allowed_bandwidth_gb":"2000","netmask_v4":"255.255.254.0","gateway_v4":"108.61.206.1","power_status":"running","VPSPLANID":"30","v6_network":"::","v6_main_ip":"","v6_network_size":"0","label":"","internal_ip":"","kvm_url":"https:\/\/my.vultr.com\/subs\/vps\/novnc\/api.php?data=IF3C6VCEN5NFOZ3VMM3FOV3JJVXUQV3OHBWDG6TUKI3VST3JMFDXOOCMIE3HCTBWKVJXOZZYF5BVMZ3IM5XXGWRZIVZW4S2WKAVTSMTQHFCG4QTCNFUEKOCXKY3CW3LGNF4HIVTVJ5GXM4CJORTU6SKYOBDE6WJVMZ3E4ZSVOB2FQ4KYF5DXC5CTJI4FETDIGBITQQZPGFLXKTSRKRJS6ODOMFKDSNLLNVETONSNKA2XQ6CWLJMW6L2EGI2U6SDNN5FGUTJYNB3UC5DXN46Q","auto_backups":"no"},"2":{"SUBID":"2","os":"Ubuntu 14.04 x64","ram":"768 MB","disk":"Virtual 15 GB","main_ip":"104.207.153.143","vcpu_count":"1","location":"Los Angeles","DCID":"5","default_password":"cewxoaezap!0","date_created":"2014-11-08 14:12:13","pending_charges":"0.01","status":"active","cost_per_month":"5.00","current_bandwidth_gb":0,"allowed_bandwidth_gb":"1000","netmask_v4":"255.255.254.0","gateway_v4":"104.207.152.1","power_status":"running","VPSPLANID":"29","v6_network":"::","v6_main_ip":"","v6_network_size":"0","label":"vultr-test1","internal_ip":"","kvm_url":"https:\/\/my.vultr.com\/subs\/vps\/novnc\/api.php?data=NBUUYMTDI4VXGVZXOFBE6UBWKFLWE43MI5EFOR3MJNZW4NRXLFBHA33BHF3C63LSOJRXAU2PO5GHM5LPOFAW2MSDMFZWUMCNNJRG6TRWJREWYNBLG5VTG2DEGIYWITKIGV2FA3JTNJVEETLOGBHFG42XLEZHG22VFNWHE5RUKFIWU3DSOJCS6WDQGJRDIZRPIU2HILZTKB4E4MZSNZIFEQ3SOFSDANCBHBBEWRLVGZEUEVDSJVQVKOKZNQ4GKSRSIJEG62TWMREG6USNIE6Q","auto_backups":"no"}} diff --git a/libcloud/test/compute/fixtures/vultr/list_sizes.json 
b/libcloud/test/compute/fixtures/vultr/list_sizes.json new file mode 100644 index 0000000000..ded841de15 --- /dev/null +++ b/libcloud/test/compute/fixtures/vultr/list_sizes.json @@ -0,0 +1 @@ +{"31":{"VPSPLANID":"31","name":"768 MB RAM,15 GB SSD,0.20 TB BW","vcpu_count":"1","ram":"768","disk":"15","bandwidth":"0.20","bandwidth_gb":"204.8","price_per_month":"5.00","windows":false},"29":{"VPSPLANID":"29","name":"768 MB RAM,15 GB SSD,1.00 TB BW","vcpu_count":"1","ram":"768","disk":"15","bandwidth":"1.00","bandwidth_gb":"1024","price_per_month":"5.00","windows":false},"32":{"VPSPLANID":"32","name":"1024 MB RAM,20 GB SSD,0.40 TB BW","vcpu_count":"1","ram":"1024","disk":"20","bandwidth":"0.40","bandwidth_gb":"409.6","price_per_month":"7.00","windows":false},"30":{"VPSPLANID":"30","name":"1024 MB RAM,20 GB SSD,2.00 TB BW","vcpu_count":"1","ram":"1024","disk":"20","bandwidth":"2.00","bandwidth_gb":"2048","price_per_month":"7.00","windows":false},"3":{"VPSPLANID":"3","name":"2048 MB RAM,40 GB SSD,3.00 TB BW","vcpu_count":"2","ram":"2048","disk":"40","bandwidth":"3.00","bandwidth_gb":"3072","price_per_month":"15.00","windows":false},"8":{"VPSPLANID":"8","name":"2048 MB RAM,40 GB SSD,0.60 TB BW","vcpu_count":"2","ram":"2048","disk":"40","bandwidth":"0.60","bandwidth_gb":"614.4","price_per_month":"15.00","windows":false},"33":{"VPSPLANID":"33","name":"4096 MB RAM,65 GB SSD,0.80 TB BW","vcpu_count":"2","ram":"4096","disk":"65","bandwidth":"0.80","bandwidth_gb":"819.2","price_per_month":"35.00","windows":false},"27":{"VPSPLANID":"27","name":"4096 MB RAM,65 GB SSD,4.00 TB BW","vcpu_count":"2","ram":"4096","disk":"65","bandwidth":"4.00","bandwidth_gb":"4096","price_per_month":"35.00","windows":false},"28":{"VPSPLANID":"28","name":"8192 MB RAM,120 GB SSD,5.00 TB BW","vcpu_count":"4","ram":"8192","disk":"120","bandwidth":"5.00","bandwidth_gb":"5120","price_per_month":"70.00","windows":false},"34":{"VPSPLANID":"34","name":"8192 MB RAM,120 GB SSD,1.00 TB 
BW","vcpu_count":"4","ram":"8192","disk":"120","bandwidth":"1.00","bandwidth_gb":"1024","price_per_month":"70.00","windows":false},"11":{"VPSPLANID":"11","name":"512 MB RAM,160 GB SATA,1.00 TB BW","vcpu_count":"1","ram":"512","disk":"160","bandwidth":"1.00","bandwidth_gb":"1024","price_per_month":"5.00","windows":false},"78":{"VPSPLANID":"78","name":"16384 MB RAM,250 GB SSD,6.00 TB BW","vcpu_count":"8","ram":"16384","disk":"250","bandwidth":"6.00","bandwidth_gb":"6144","price_per_month":"149.95","windows":false},"71":{"VPSPLANID":"71","name":"16384 MB RAM,250 GB SSD,8.00 TB BW","vcpu_count":"4","ram":"16384","disk":"250","bandwidth":"8.00","bandwidth_gb":"8192","price_per_month":"125.00","windows":false},"68":{"VPSPLANID":"68","name":"16384 MB RAM,250 GB SSD,1.60 TB BW","vcpu_count":"4","ram":"16384","disk":"250","bandwidth":"1.60","bandwidth_gb":"1638.4","price_per_month":"125.00","windows":false},"12":{"VPSPLANID":"12","name":"1024 MB RAM,320 GB SATA,2.00 TB BW","vcpu_count":"1","ram":"1024","disk":"320","bandwidth":"2.00","bandwidth_gb":"2048","price_per_month":"8.00","windows":false},"62":{"VPSPLANID":"62","name":"1024 MB RAM,320 GB SATAPERF,1.00 TB BW, 10GigE","vcpu_count":"1","ram":"1024","disk":"320","bandwidth":"1.00","bandwidth_gb":"1024","price_per_month":"15.00","windows":false},"79":{"VPSPLANID":"79","name":"32768 MB RAM,400 GB SSD,7.00 TB BW","vcpu_count":"12","ram":"32768","disk":"400","bandwidth":"7.00","bandwidth_gb":"7168","price_per_month":"299.95","windows":false},"80":{"VPSPLANID":"80","name":"49152 MB RAM,600 GB SSD,8.00 TB BW","vcpu_count":"16","ram":"49152","disk":"600","bandwidth":"8.00","bandwidth_gb":"8192","price_per_month":"429.95","windows":false},"13":{"VPSPLANID":"13","name":"2048 MB RAM,640 GB SATA,3.00 TB BW","vcpu_count":"1","ram":"2048","disk":"640","bandwidth":"3.00","bandwidth_gb":"3072","price_per_month":"15.00","windows":false},"63":{"VPSPLANID":"63","name":"2048 MB RAM,640 GB SATAPERF,2.00 TB BW, 
10GigE","vcpu_count":"1","ram":"2048","disk":"640","bandwidth":"2.00","bandwidth_gb":"2048","price_per_month":"25.00","windows":false},"81":{"VPSPLANID":"81","name":"65536 MB RAM,800 GB SSD,9.00 TB BW","vcpu_count":"24","ram":"65536","disk":"800","bandwidth":"9.00","bandwidth_gb":"9216","price_per_month":"599.95","windows":false},"64":{"VPSPLANID":"64","name":"4096 MB RAM,1280 GB SATAPERF,3.00 TB BW, 10GigE","vcpu_count":"2","ram":"4096","disk":"1280","bandwidth":"3.00","bandwidth_gb":"3072","price_per_month":"50.00","windows":false}} \ No newline at end of file diff --git a/libcloud/test/compute/test_vultr.py b/libcloud/test/compute/test_vultr.py new file mode 100644 index 0000000000..066cc86e26 --- /dev/null +++ b/libcloud/test/compute/test_vultr.py @@ -0,0 +1,114 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import sys +import unittest + +try: + import simplejson as json +except ImportError: + import json # NOQA + +from libcloud.utils.py3 import httplib + +from libcloud.compute.drivers.vultr import VultrNodeDriver + +from libcloud.test import LibcloudTestCase, MockHttpTestCase +from libcloud.test.file_fixtures import ComputeFileFixtures +from libcloud.test.secrets import VULTR_PARAMS + + +# class VultrTests(unittest.TestCase, TestCaseMixin): +class VultrTests(LibcloudTestCase): + + def setUp(self): + VultrNodeDriver.connectionCls.conn_classes = \ + (VultrMockHttp, VultrMockHttp) + VultrMockHttp.type = None + self.driver = VultrNodeDriver(*VULTR_PARAMS) + + def test_list_images_success(self): + images = self.driver.list_images() + self.assertTrue(len(images) >= 1) + + image = images[0] + self.assertTrue(image.id is not None) + self.assertTrue(image.name is not None) + + def test_list_sizes_success(self): + sizes = self.driver.list_sizes() + self.assertTrue(len(sizes) == 22) + + size = sizes[0] + self.assertTrue(size.id is not None) + self.assertEqual(size.name, '512 MB RAM,160 GB SATA,1.00 TB BW') + self.assertEqual(size.ram, 512) + + size = sizes[21] + self.assertTrue(size.id is not None) + self.assertEqual(size.name, '65536 MB RAM,800 GB SSD,9.00 TB BW') + self.assertEqual(size.ram, 65536) + + def test_list_locations_success(self): + locations = self.driver.list_locations() + self.assertTrue(len(locations) >= 1) + + location = locations[0] + self.assertEqual(location.id, '1') + self.assertEqual(location.name, 'New Jersey') + + def test_list_nodes_success(self): + nodes = self.driver.list_nodes() + self.assertEqual(len(nodes), 2) + self.assertEqual(nodes[0].id, '1') + self.assertEqual(nodes[0].public_ips, ['108.61.206.153']) + + def test_reboot_node_success(self): + node = self.driver.list_nodes()[0] + result = self.driver.reboot_node(node) + self.assertTrue(result) + + def test_destroy_node_success(self): + node = self.driver.list_nodes()[0] + result = 
self.driver.destroy_node(node) + self.assertTrue(result) + + +class VultrMockHttp(MockHttpTestCase): + fixtures = ComputeFileFixtures('vultr') + + def _v1_regions_list(self, method, url, body, headers): + body = self.fixtures.load('list_locations.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _v1_os_list(self, method, url, body, headers): + body = self.fixtures.load('list_images.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _v1_plans_list(self, method, url, body, headers): + body = self.fixtures.load('list_sizes.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _v1_server_list(self, method, url, body, headers): + body = self.fixtures.load('list_nodes.json') + return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + + def _v1_server_destroy(self, method, url, body, headers): + return (httplib.OK, "", {}, httplib.responses[httplib.OK]) + + def _v1_server_reboot(self, method, url, body, headers): + return (httplib.OK, "", {}, httplib.responses[httplib.OK]) + +if __name__ == '__main__': + sys.exit(unittest.main()) diff --git a/libcloud/test/secrets.py-dist b/libcloud/test/secrets.py-dist index 940f455c7e..3bac5aec79 100644 --- a/libcloud/test/secrets.py-dist +++ b/libcloud/test/secrets.py-dist @@ -45,6 +45,7 @@ HOSTVIRTUAL_PARAMS = ('key',) DIGITAL_OCEAN_PARAMS = ('user', 'key') CLOUDFRAMES_PARAMS = ('key', 'secret', False, 'host', 8888) PROFIT_BRICKS_PARAMS = ('user', 'key') +VULTR_PARAMS = ('key') # Storage STORAGE_S3_PARAMS = ('key', 'secret') From 51d119422663455e54452b4f11f3522afc3ad734 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Tue, 11 Nov 2014 18:38:22 +0800 Subject: [PATCH 261/315] Re-generate supported providers and methods tables. 
--- docs/compute/_supported_methods_block_storage.rst | 2 ++ docs/compute/_supported_methods_image_management.rst | 2 ++ docs/compute/_supported_methods_key_pair_management.rst | 2 ++ docs/compute/_supported_methods_main.rst | 2 ++ docs/compute/_supported_providers.rst | 2 ++ 5 files changed, 10 insertions(+) diff --git a/docs/compute/_supported_methods_block_storage.rst b/docs/compute/_supported_methods_block_storage.rst index 31978266a5..6698cd988f 100644 --- a/docs/compute/_supported_methods_block_storage.rst +++ b/docs/compute/_supported_methods_block_storage.rst @@ -64,6 +64,7 @@ Provider list volumes create volume destroy volume `Voxel VoxCLOUD`_ no no no no no no no `vps.net`_ no no no no no no no `VMware vSphere`_ no no no no no no no +`Vultr`_ no no no no no no no ===================================== ============ ============= ============== ============= ============= ============== =============== .. _`Abiquo`: http://www.abiquo.com/ @@ -129,3 +130,4 @@ Provider list volumes create volume destroy volume .. _`Voxel VoxCLOUD`: http://www.voxel.net/ .. _`vps.net`: http://vps.net/ .. _`VMware vSphere`: http://www.vmware.com/products/vsphere/ +.. _`Vultr`: https://www.vultr.com diff --git a/docs/compute/_supported_methods_image_management.rst b/docs/compute/_supported_methods_image_management.rst index 6ef61d1acf..0ff6195b0f 100644 --- a/docs/compute/_supported_methods_image_management.rst +++ b/docs/compute/_supported_methods_image_management.rst @@ -64,6 +64,7 @@ Provider list images get image create image delete `Voxel VoxCLOUD`_ yes no no no no `vps.net`_ yes no no no no `VMware vSphere`_ yes no no no no +`Vultr`_ yes no no no no ===================================== =========== ========= ============ ============ ========== .. _`Abiquo`: http://www.abiquo.com/ @@ -129,3 +130,4 @@ Provider list images get image create image delete .. _`Voxel VoxCLOUD`: http://www.voxel.net/ .. _`vps.net`: http://vps.net/ .. 
_`VMware vSphere`: http://www.vmware.com/products/vsphere/ +.. _`Vultr`: https://www.vultr.com diff --git a/docs/compute/_supported_methods_key_pair_management.rst b/docs/compute/_supported_methods_key_pair_management.rst index d177e3f270..d390b682f7 100644 --- a/docs/compute/_supported_methods_key_pair_management.rst +++ b/docs/compute/_supported_methods_key_pair_management.rst @@ -64,6 +64,7 @@ Provider list key pairs get key pair create key pai `Voxel VoxCLOUD`_ no no no no no no `vps.net`_ no no no no no no `VMware vSphere`_ no no no no no no +`Vultr`_ no no no no no no ===================================== ============== ============ =============== ============================= =========================== =============== .. _`Abiquo`: http://www.abiquo.com/ @@ -129,3 +130,4 @@ Provider list key pairs get key pair create key pai .. _`Voxel VoxCLOUD`: http://www.voxel.net/ .. _`vps.net`: http://vps.net/ .. _`VMware vSphere`: http://www.vmware.com/products/vsphere/ +.. _`Vultr`: https://www.vultr.com diff --git a/docs/compute/_supported_methods_main.rst b/docs/compute/_supported_methods_main.rst index 1e6a0a43a6..4ef8e09a98 100644 --- a/docs/compute/_supported_methods_main.rst +++ b/docs/compute/_supported_methods_main.rst @@ -64,6 +64,7 @@ Provider list nodes create node reboot node destroy `Voxel VoxCLOUD`_ yes yes yes yes yes yes no `vps.net`_ yes yes yes yes yes yes no `VMware vSphere`_ yes no yes yes yes no no +`Vultr`_ yes yes yes yes yes yes no ===================================== ========== =========== =========== ============ =========== ========== =========== .. _`Abiquo`: http://www.abiquo.com/ @@ -129,3 +130,4 @@ Provider list nodes create node reboot node destroy .. _`Voxel VoxCLOUD`: http://www.voxel.net/ .. _`vps.net`: http://vps.net/ .. _`VMware vSphere`: http://www.vmware.com/products/vsphere/ +.. 
_`Vultr`: https://www.vultr.com diff --git a/docs/compute/_supported_providers.rst b/docs/compute/_supported_providers.rst index da9bff6402..b9f3f9797e 100644 --- a/docs/compute/_supported_providers.rst +++ b/docs/compute/_supported_providers.rst @@ -64,6 +64,7 @@ Provider Documentation `Voxel VoxCLOUD`_ VOXEL :mod:`libcloud.compute.drivers.voxel` :class:`VoxelNodeDriver` `vps.net`_ VPSNET :mod:`libcloud.compute.drivers.vpsnet` :class:`VPSNetNodeDriver` `VMware vSphere`_ :doc:`Click ` VSPHERE :mod:`libcloud.compute.drivers.vsphere` :class:`VSphereNodeDriver` +`Vultr`_ :doc:`Click ` VULTR :mod:`libcloud.compute.drivers.vultr` :class:`VultrNodeDriver` ===================================== ============================================ =================== ============================================== ==================================== .. _`Abiquo`: http://www.abiquo.com/ @@ -129,3 +130,4 @@ Provider Documentation .. _`Voxel VoxCLOUD`: http://www.voxel.net/ .. _`vps.net`: http://vps.net/ .. _`VMware vSphere`: http://www.vmware.com/products/vsphere/ +.. _`Vultr`: https://www.vultr.com From 9c0ef27f0d790d896fe87a4a997eec1e09ed7d4b Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Tue, 11 Nov 2014 18:58:48 +0800 Subject: [PATCH 262/315] Fix some docstrings. --- libcloud/compute/base.py | 4 ++-- libcloud/compute/drivers/cloudstack.py | 2 +- libcloud/loadbalancer/base.py | 10 +++++----- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/libcloud/compute/base.py b/libcloud/compute/base.py index c043706cfc..8387104551 100644 --- a/libcloud/compute/base.py +++ b/libcloud/compute/base.py @@ -1139,7 +1139,7 @@ def copy_image(self, source_region, node_image, name, description=None): :type source_region: ``str`` :param node_image: NodeImage to copy. - :type node_image: :class`.NodeImage`: + :type node_image: :class:`.NodeImage`: :param name: name for new image. :type name: ``str`` @@ -1228,7 +1228,7 @@ def delete_key_pair(self, key_pair): Delete an existing key pair. 
:param key_pair: Key pair object. - :type key_pair: :class`.KeyPair` + :type key_pair: :class:`.KeyPair` """ raise NotImplementedError( 'delete_key_pair not implemented for this driver') diff --git a/libcloud/compute/drivers/cloudstack.py b/libcloud/compute/drivers/cloudstack.py index d8475df4a4..28fc144e25 100644 --- a/libcloud/compute/drivers/cloudstack.py +++ b/libcloud/compute/drivers/cloudstack.py @@ -1864,7 +1864,7 @@ def delete_key_pair(self, key_pair, **kwargs): Delete an existing key pair. :param key_pair: Key pair object. - :type key_pair: :class`libcloud.compute.base.KeyPair` + :type key_pair: :class:`libcloud.compute.base.KeyPair` :param projectid: The project associated with keypair :type projectid: ``str`` diff --git a/libcloud/loadbalancer/base.py b/libcloud/loadbalancer/base.py index b785af0232..602ea224fd 100644 --- a/libcloud/loadbalancer/base.py +++ b/libcloud/loadbalancer/base.py @@ -185,7 +185,7 @@ def create_balancer(self, name, port, protocol, algorithm, members): :type members: ``list`` of :class:`Member` :param algorithm: Load balancing algorithm, defaults to ROUND_ROBIN. - :type algorithm: :class:`Algorithm` + :type algorithm: :class:`.Algorithm` :rtype: :class:`LoadBalancer` """ @@ -230,7 +230,7 @@ def update_balancer(self, balancer, **kwargs): :type name: ``str`` :param algorithm: New load balancer algorithm - :type algorithm: :class:`Algorithm` + :type algorithm: :class:`.Algorithm` :param protocol: New load balancer protocol :type protocol: ``str`` @@ -317,12 +317,12 @@ def list_supported_algorithms(self): def _value_to_algorithm(self, value): """ - Return :class`Algorithm` based on the value. + Return :class:`.Algorithm` based on the value. :param value: Algorithm name (e.g. http, tcp, ...). :type value: ``str`` - @rype :class:`Algorithm` + :rtype: :class:`.Algorithm` """ try: return self._VALUE_TO_ALGORITHM_MAP[value] @@ -337,7 +337,7 @@ def _algorithm_to_value(self, algorithm): :param value: Algorithm enum. 
:type value: :class:`Algorithm` - @rype ``str`` + :rtype: ``str`` """ try: return self._ALGORITHM_TO_VALUE_MAP[algorithm] From 90c884b4935677884475fb41f9297487a98607d4 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Tue, 11 Nov 2014 21:21:39 +0800 Subject: [PATCH 263/315] Fix some formatting and link issues in the GCE docs. --- docs/compute/drivers/gce.rst | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/docs/compute/drivers/gce.rst b/docs/compute/drivers/gce.rst index 9230ca9165..0d4e283642 100644 --- a/docs/compute/drivers/gce.rst +++ b/docs/compute/drivers/gce.rst @@ -20,8 +20,9 @@ Google Compute Engine features: Connecting to Google Compute Engine ----------------------------------- + Libcloud supports three different methods for authenticating: -`Service Account`_ and `Installed Application`_ and `Internal Authentication_` +`Service Account`_, `Installed Application`_ and `Internal Authentication`_. Which one should I use? @@ -48,11 +49,11 @@ Service Account To set up Service Account authentication: -1. Follow the instructions at +1. Follow the instructions at https://developers.google.com/console/help/new/#serviceaccounts to create and download a PKCS-12 private key. 2. Convert the PKCS-12 private key to a .pem file using the following: - ``openssl pkcs12 -in YOURPRIVKEY.p12 -nodes -nocerts + ``openssl pkcs12 -in YOURPRIVKEY.p12 -nodes -nocerts | openssl rsa -out PRIV.pem`` 3. Move the .pem file to a safe location 4. You will need the Service Account's "Email Address" and the path to the @@ -78,8 +79,8 @@ Internal Authentication ~~~~~~~~~~~~~~~~~~~~~~~ To use GCE's internal metadata service to authenticate, simply specify -your Project ID and let the driver handle the rest. See the `Examples`_ -below for a sample. +your Project ID and let the driver handle the rest. See the +`5. Using GCE Internal Authorization`_ example bellow. 
Accessing Google Cloud services from your Libcloud nodes -------------------------------------------------------- From 4ccf0225fe9072c248765e17b57033f989c43f9e Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Tue, 11 Nov 2014 21:59:11 +0800 Subject: [PATCH 264/315] Update doap file - add info about 0.16.0 release. --- doap_libcloud.rdf | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/doap_libcloud.rdf b/doap_libcloud.rdf index c6c52501de..0a2c4f1711 100644 --- a/doap_libcloud.rdf +++ b/doap_libcloud.rdf @@ -217,6 +217,13 @@ 0.14.0 + + + 0.14.1 + 2014-02-08 + 0.14.1 + + 0.15.0 @@ -233,9 +240,9 @@ - 0.14.1 - 2014-02-08 - 0.14.1 + 0.16.0 + 2014-11-12 + 0.16.0 From 853161c309dbd809fc5bd81e3edd709773bae75e Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Wed, 12 Nov 2014 19:11:01 +0800 Subject: [PATCH 265/315] --changes-path argument is required. --- contrib/generate_contributor_list.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/generate_contributor_list.py b/contrib/generate_contributor_list.py index d02f826ccf..fbf601a798 100755 --- a/contrib/generate_contributor_list.py +++ b/contrib/generate_contributor_list.py @@ -154,7 +154,7 @@ def compare(item1, item2): if __name__ == '__main__': parser = argparse.ArgumentParser(description='Assemble provider logos ' ' in a single image') - parser.add_argument('--changes-path', action='store', + parser.add_argument('--changes-path', action='store', required=True, help='Path to the changes file') parser.add_argument('--versions', action='store', nargs='+', type=str, From b20c2857c8e6eb855a64bcec923cb1b27c2c31a2 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Wed, 12 Nov 2014 19:11:40 +0800 Subject: [PATCH 266/315] Remove trailing whitespace from the lines. 
--- contrib/generate_contributor_list.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/generate_contributor_list.py b/contrib/generate_contributor_list.py index fbf601a798..30057be5d4 100755 --- a/contrib/generate_contributor_list.py +++ b/contrib/generate_contributor_list.py @@ -146,7 +146,7 @@ def compare(item1, item2): else: line = '* %(name)s' % {'name': name} - result.append(line) + result.append(line.strip()) result = '\n'.join(result) return result From f165cd02478a298b99648213d657ec620ebc32bb Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Wed, 12 Nov 2014 23:53:27 +0800 Subject: [PATCH 267/315] Fix title underline. --- docs/compute/drivers/vultr.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/compute/drivers/vultr.rst b/docs/compute/drivers/vultr.rst index f062afd870..1ed09ad580 100644 --- a/docs/compute/drivers/vultr.rst +++ b/docs/compute/drivers/vultr.rst @@ -1,5 +1,5 @@ Vultr Compute Driver Documentation -======================================== +================================== `Vultr`_ is a public cloud provider based in mulitiple counties. From 5e086878595a02814d77389c53ad677e40eb3729 Mon Sep 17 00:00:00 2001 From: Tomaz Muraus Date: Wed, 12 Nov 2014 23:56:48 +0800 Subject: [PATCH 268/315] Add logo to vultr driver docs page. 
--- docs/_static/images/provider_logos/vultr.png | Bin 0 -> 19142 bytes docs/compute/drivers/vultr.rst | 6 ++++++ 2 files changed, 6 insertions(+) create mode 100644 docs/_static/images/provider_logos/vultr.png diff --git a/docs/_static/images/provider_logos/vultr.png b/docs/_static/images/provider_logos/vultr.png new file mode 100644 index 0000000000000000000000000000000000000000..e7d0cd41cd60130249ad93306cca10d775d45b10 GIT binary patch literal 19142 zcmcHhbyQYe^fn6Lq;x9Xh#+0k-Aamdr*wA-(nw3U2#9odH_}LVcY}1DbwBSpzj5C2 zeq(%p9YvAtz4u;wtvTnK^SZ9N!;}=H(NTy{AP@-pdl?C32m~eu`UeRS9Pw-88Uo)C zjbx=IAkWa>KU(tRz!7A78BHe$1Qi$h2L_UwP5=(RaDFc@`C=Ir4U-3J_Iu?&2!sOi zUP4sGeg0_4&HdBmI^roezkHfA3(8BnYST|wIH;BI@Pt2wMG!hlcp3&?m8ySyxk%wi zibVg@pQ`KY;oGLl0XiE(o4sY~_f=nfe`K3hzxVw6X7ApEEHfW@Cq3PJ)ZJ<179+OD zsKPHav|qsp4+a~XO(Zll6h=%!qEvofiQJl9J4WY6*Pmh(9p+LAtqQV~Jj8g_R3$Zw z?LGvg`x>-i?^ou>4ALP=ujb+DDY3)%eS9P?Z?OdPhx{_|Wy>$bkQH{! z8@REsk#luJ@z=F2u~x(au*g$(;QKSfX>PVP$iAPuEDlIWDBzL)?%o^`t#8Te^?@_7 z`oPx8^F5%Ly$k)B77wNbE@Ag&p_r2}LaOB^lKNn_TId>el07B`RilDO9wChgRMjlRIujfZ8#TVQ;LHBtRuwMczDU?8(97PPk~?aI`L6t<>;9fS z)_v(Bk|N@{K_c)T7I{;NupZY{e)0A>gYA9w2I%~8rV%4 zDZ=feA(4s^6Q5D_w;kfT;;dBjnCxMp+KHjRGYN+A9x_z|O{JZboR>U2Y`x(drElt` z{EGQ?{V=c1H1)$J+kX@If+q>AD+moGQYu&u9F5*wO>zpF%8V0Opha`5Lp7g;XlBL) zlsyepwPKupH#%-d>JIX98pOtK2yJ5@WV4e-M5&fK@WSjxFw~lf2}X|Tc~N=#$>{5) z%MR-+J<`dSFnY?iOjK0(pMJbsuQ}S|$y7D;vD4xA9XiC-YlEpgG{PIAGI*3ub)7;nXmk9S3qcg z&!5*+(0?C)yR2)i+m{gfBBZuLJclRb=Rpg31xq2>aB#%^of6 zyr%Vf{E3m|51o^qU+&A@)9n$og9GNZ}VBDl^NpEOszoXilFQLIlK z2TCCxzWQ%ao=X);R%v>C5v4?W|AEWOvRC}p>d6<&Kt4Gcd?_S?k(Z>YoeX9-{RU~e zmWs;J3CzJzSeq;DTQ=$NfW6wKfl6KxJo5UhMPro+;YKb)eoQJV#j%<&ZmAv}uisA& z2^T&Y{~7Dwr(=R~#kU<1$CQi?R8*MkIbKK)@# zRt@Dk$nDil4PwBxD_UTBBtlrIz;_+0P9IXZk}`Ew-sc5UiXbiOkuBX4T*kQ2bgsfz zVCjPmZ61n;IYzeklr-`#B%5G!_r>Fdh9|wRL?t%YI8l)AIMr7;UmT*ukgD=65Rv2= z{=vb12dg@It?Jai4Pi@R;?d1pqv*S)pt2-sPJ+@gK0p`PzcG9j;3N`!+!t%J&)5inxmtml9txD^>w58q$KeI6|oXE@jT^@(NRpB z`D#%Ulh;PZ#^VK_Xsc}&5)s~aL$Z4*XlM`?YHhlXmzpX(E=1 
zX&*i3{QPNue^i&_cCt*sX^!>t=TD7(Fy8Z*NzA&P-(E4q#Kica6LTSv$-I60_U3$$ z1qLD>j3x>$$>zMP6hXrKJxCEeu$%MQx}^7M#|w>06J)tJpIW@P6PdKjO$YH9wW^Uw z`JCG)3qGCA8b^05ETjJkTRX{)P4#T~XaWCVrc7E%2@5qEZ1s_r0T-K&TgIR+8v6m zQDKCD8VS9|QqxJRl@ouokk6gLMsFnJe^cWMHP>RzD(h4(c6J=E>tmM>;f}yHV$ukO zzckBF+}_+6thD-|VM6Ee^W5Cr&E2^h`mo z`K4GYkl6GO1D{#%BlqnF-eA8wMk$&&r%`mUy(h<`Z7z~M5}_PGn3wQ5o|sALgT$$N z=von0mm;$iu`r>33y=9LZ~=Ln*>V^#cYXU)MIY7F-po3Zd!LE|7oDyWSGVf?$|!hc zwil!KSwgA-`E3uPiF)!6jwvCE1Z65Q)X0}i@7N{WWwG#!MV{Y$a7KxD>U)id)KGe~ zP`6Ptqv!M?+#Xn6bjw};RBHKcC0J)Db))``WS7YyLclpN20X znQd9e4V|4tp+`RsT%b;Oscuub`2mI^4kiu%I3sza8KnSMHRPvhh{@smEyD@}5Lt4*WdTSoA2X~z0{z(i-qfBV6q zWx%SfWKl~|G8rh4f!veu`Q`plKOHw;wH}zsh-X3KfHaS_;boXz!ot-kc{D7?OyCg_ zWP(J%nTwM~@4EDYUTj@_BYAJ2Tpj%>kqHkb*77aYlu&(LBxb~C)ag96)roE#aJl)K zVnxLSgN-0ZYckxZE12RMNi-~+eh{rrx6|zSOy@c}I(C;^_(Ax=0wHm1OsTuOn}Ulg z5y9@HgAUJ7%*syA{3vq6BS9lhC-^yoUb1z>Wa2@6Kz_G^5AMPfVKsA^w6S;vWLPHMlXK@`J64d(4n}MF2(^`?m z@kvy2QkLFY=LK7v5(VZEM=Rzhd~$e}w+s)!7!O%3C+VygYD*LbvBD(W&;QV#fyJTb z<4X;i_dB*M=+tFWb-y&A)DzY*pAgW`V(NKfB*l;m74iFyIrH9qu}EO7YE(!&{}c%W zqZfpanAFs7J>mEq0y_OxEe~f?s|N6IZ4NjNyklVkX3RP}e^MhNO?_VP4!haSi6%)^ z&r>$+@JE2|1$fLlpj(opps$qW z*(PN-K^jTt4e)(D&{rx@VU)w)$;aAQ6~s2ri=E^25i2DNm@%zf1>uhSgPdHqakSv) zYwzJS?mk7~$3XBs$Kzs~QHC4mn5m=Uo@$JHu~y4|;ctz*gT4@I6aj^EV; zppH`Q{_T#-$jNOYeYKTi!Z$uhwofDf-gd3d)+$SH+yMf9X{wM9-}(9ZVXE0$XOKqQ z7eQ}tZw)`&5n1}qCuD4-3J2E0_i0Y{tlFdCF=ULBGBP5|+Mec1J-$3%st={U3lG{6 z{rPCZBf!?V&Ac2^x^sbL3=y&VBB##?>7@X}wT&CTH#&`6~<3Tb-(Bmk~0 znnZzc7tAomr15njm2P}8Bp<=Z&%N3A`F@2N%8yrd!#`%)23H zoJDFSV#>>(|lmxVKz+G)yBJ6GFX+Vn%Z${dXKO?lixVVLJPHeNfyj3lEo z;)kn0YKR(;dJ>|RQrR_2)vjCSchrQm?g#9pIauT(Rdd`3UX)NFhDw921MHB3kN+kv z$;cqA#SJKucRDZkf|~kT|EVA^@8KiarR?a1r4N*K{~CPdq9rBOV1E#&V=V%LWL74< zXqgg%-u`w-MF~#?^P^ACAeFuF=PE+qr(3g<377pzqG-7{+iW6UGJZ> zIdwjLIHBkq}qbz2qtr2E22$)b`#Opc*Vkxz?B(R6wz7bc;+v+RCgsetL* zk#swvFG;qj8KleX&oOe^qQpEEaA6rI#` z%lpU=CRJj=ZsN3~!ielQi0_ghVJLY_I$UgEu4*0s!X6R&@``@*Lj$sK`gJh|%nFar 
z>l^kH+btVXj?EGw828$$vWd2kKd=IA+~fh%rc_mn-EfI8Vx^n^_9osv`O3)11gI>O z{3$N&%&K3$AFTSa#q8ACb>XY4eoVfI7kUyLw7Pa>`43%PY^%HCuUoFwxUDMDQ+@H4 z=t0LUM)$BzgJWLrGr5p={a}fccObr&Wp2F^l}^oQ7q4eJ^KXeJNCOx?HLs@%K~RY{ z-tYJyinEa$fr~S}FRp7EJhdxC%%qG&X)sLoVsxy#=Q=+ew*JJmCjIvBGulc_*p?f- zv8DDqOg(5yIp1h=vfR=nTY5dX^l-WxB_Nu_`)rP_hJoyGZ*$jUv6i*BNT}>m;D@ zsC)Nuz4pzF|C@q-KhixdZ?{&oCiXz&IEw|xSRLzj;ubh}h!??frz1x~!nmiej0H|sQYHsoVs)Ugl<&L^?I_!=uD^bxkCy{^nNm>&M^!LlI= zBN0|;?%Uzh!QZTCR(f?;uTgQnoesbRBY})IB1wJtg@b0w!L))Wh1v~$3&TjgTf#C8 zZBQB7sW<)_15<=`cIU70H4Q<5m!xo{zQ>%;GT6&z@ASbGMa&$brbz z(bHhWY_n@aHfx*gUnfm>@Yp10eH0n@Jo9qm(E8v^y_9UCZs)xH zv!9YrGOMA#{KVFFA)rTi{7!u23U=!Ji~I_=OgXl}rZl)+pUf zR+0XK-e~e@P=PIQ+L)XFmi0~g$P|PbqrXHa61BbSAhWvP=NN`G%4zQhQE!sHHC0LW zx$k1p6lf=;*ko*bx+@wMpB*q^cRre*%4<^$O@enrCZXchid)bL;>jU8*fb~arzvHd zJn6@yZ}LRqiOF+o(vIndI1o?dE7p0;gW^o2K&A1)&9fx`H0RaRb<9C@lv~4OsOtMp zb;FEUzN7(kPW!?W3 zMjds1LJqIzYy74wKPfNx!{SqNLGf4S1r9B?M^bevYawg3OA5m?DW^xgwdjU^F+Vch z*OAkHeWFj8EjJ9%_NkjwAlNkT7^g~l8nMb?{b%K)wp26IJ-wVvVywOJ((E6y6k3dw z)!BNFjG|(l#XKnRsF|5ZN?KO}DlFmYY5bWl!YA_R{x*Gvs~K{P`*eIMytrJ`^g*BZ zHGGMWRonbx>MzN+#Q5wYzmJDLb1Z+kw@Le@TW?S4`EPeEam7i7gQX*>;ob1N7i@RC z^{1GlFSsu%qxg0QrX$fLZANdHAk(ie4Ef*!_Yb3mpMJZX(m~7@YMDL{r*74*IC;m{ ztq$tAbzCZlAyy%f^HGW{nnpxk!8kUXbTJJo;>x<8a)|scbv<6Rm~bvqFLOOO;mxk$ z_TK6|4lWu^LKW)N*Y)wKl{>ZN|LJYI`>N1B&E`?YOTJ`+WMI0=kP_Us)2L$gpscOb zr4i&i+2SD>w$DDKM=@o+`uEJ!rZ>+D$j^hG3U0!6^CKWF{#;%3=rR1nD5wYvV)|s3 z?^9TDajNzXA_9UTNVdq%e6n_bV|%BNXE|p;*piuU7}Rg#)=y;;7Hq7vZH;L**+oF% zTn^4_n6}XKK9h{TZGA$iST)u#NlZIkpqVjGdD<&bMq1jl&KKeBRKclRm@e@#-BlYj z4@2fn5I_b7{;}=xO1or2;Iti%>@vg573Yb5^RZ25gMM?Ey0b1OYr}|X^ys5@Pgpz- z+<(KAu(f3#$>3Mt-~PC7yfg6#@Mzvw?Xju=PD7{BUNdoPJdVlNi?Q<~p=(Ubq4 z;q|*K66;#X8&tSJ7%~4B{eCcTt4Bviu`4Z~evtTd1s%P2AALotu*E$jJ~x^%MtA%~ zKo0Nq9-Mn1k4G?Ehk?`}0UspM9R~w$sj7XsWV_Vp@$cG~ZG_PH7x$2Af35SH*D~q2 z*w)oTef{4EqH{Q+1_E|A=76Hl4Hf`f%VwAqCO9zA-J1~UkyY2Oay8BAazXV*TVYr2 zi(^FeY70JhAXAOA>mKcn1t>%?*itM@&kNLw1Y6P8sRPMBI(T_LUXE$!x+xs7s1sZ? 
zmoh>yOlG{TPLjf3)vH);{_z9V%lE-8u+iJ7dJA^6|I+hc>794h4FB#)WVPO6-1BaE z!>!v76y9j@O^>xqlPR$}Do)}ZC^-lLUq=4d`N{>BUu3kD|ME08;Whdee~3dE9Gx7s zDIQH9b67`p_(E2pYicCISojFzVHpOacsR6y{1x052^F>I?AeAidu&N?>JR>huSJf` zGWJjQb+qRUu{p0=w{CY6>7!>$EgJ}I9!zx$rViX?HGF zzH|BR;W$d0B0a8mice=eS&a?te1)#r@wO_k4{JPPV(pFRllhvW@%WxY+s>8R6u0^@ zi-V$fTIDx^r*&nYdu}}^U42T!S*V(Aym}LK4Lj*+1KHU`1eE)tNUa{8LGHhPzBL&6 zlhNqG)SUj*eWj^vS1H$Rr0btu+nMmJFz z<9xxHS{4LgP9DBrSJmA>(k}yb!?Wk`Z{MtmoCk0@FJgbTwUaVFr4)35jh?vB$5Hvl zjC^9jVs%eftYvW+Q#=E&u&e6C5`r!?rU0x zy<^(4!V+EW$?=)Ib?R~7OSL^yKcy0Y@v<7U`w`{yI;_Kk2z?~=k?agxk85VWVrs^0 z(DG1ukqpEDx>xlHP0?}@3S?vFi+Esd-RFXd+&~!3`P=OvMnIubr$-&g` zk2%(L5bB<_Z8U^8!Z+4C?kPKG%i-z9G}Z8zD$I(S4!yD~WJOVb9WAxmqzG{s7?5{=iUk z-p9Q`Z;iA%Xx(O)W&oAcQKr{1uUtCM_%W^+Kr1U8w?0JEhkq7QUYwSgP22-^AX8`> z-+a)r5qPWw%HZ)l7+ZZNy=DSbd}imA-hvjZz1G&D_K;mPtja-5;Rhuv#g>_cHecc6 zw&yR4E;G8bO>Sj<@jYXoJFgY2PLlV?N$`b40!v>Tgg-oZLklD!b~M>D5uzwj$O0LG%iZbhiOuP?`GXN0*X zT2|7pI@mT;k}C6s&S&ikq{Y$M3gZgTOUowm;+3I1*`(96KJtx?XkkPMr^R3Rr>7?* zG_=61EOJ1N{1`W**`gna2toJe;bV84p4Eo=Mu;spkYdCOmbTKdXSN13;Chiw3s_lbQ~n9CH&(Klr!27B~gzd@gX%^@~wqX=nvfx#`pJhoS@7l2dTRWX~|Hqha)#3nv1&eD+-YBGpf*{d&*xFo@$i-Xn#7=e!0M_{k zLJlZsn56$@j%Mbh3sYDEa9}Y>Nwl-<`8BH4N^cKY{sM^D*8OBcVr^k0!ZKToSa>H3 z4IkizZYw_c?Nf!TrHkrXo+%a66{B9k^d99LBK?Fa^skmZ5160{9@vl^8ZP<2Jn(p4 z9fkos|0$_aES*DV~@6K>$45xH;Q!Kdk5jhN<&;iCL%KFFBc@`RVS!pV3ZDT_5T3W%fR6 zjTs91>_Oni7t(wUTepD0V*B#oesBOlI-(Y@Yu7m+E`77k1?h0h^f$~JUw;V|3k*QA zJ)txrqi*9%@O149c5RVrC0Y&Lo`h(K&3F4n$?T>mL)(p)^4P!Q4Rqqm+qT{Gn0tbh zaUrWY@M}#xr6yh%DN*Xm-tK!Iv&gu18RRPFhx<$x{=tDsD*d5O;M`1XU3O z1qFnqLiOeiVmLla>4YOZIfw36_|jWmh~F!V z&aB<%g~64#TLn{=Y%cpM0KV?J(*wkz>&f!b-+Qx$4*wbJU8?O44RW#}=D->M$T5)j zf-M>X5s{Qcz#tO{rIwCb9Tk4=t}yP4$;d!2nSLUYCW=ICZEcm2m94UB-P|;RhSG&P zyS|6pi}6|;?IvL__uO{De|JIaU1fM{NhkC_PA=J%-Ykm$t9@eshlKqk^u^MWzBhs} z;8VVWs+aF@>N^lXocDYj*MC{(CQL0-T-Cn$P<>^J&Io`qw1f{qU*TS85huf_H{B?F z_|OU6b8gQ~;}a7*%{j%2raDd7MVXVtvz0^`@wxytSfW)UW^PV5Uu%O(PEHQMdq|bp zXzepzZ=;Jwoh>P}*nyTTt}goIU~QDExz%WpC#Ph-@I?h(^{~ak^tVtMglV6i*mhoR 
zB*eu9L<`*^0ZVMoR`+LVz3IF&?oe&<$VFA=gO41WCa@o_GBU%;Ns+A+gW!5j~z%KIe|Vaa~2gcYG` zw_~azq$GM(nUBJ;uWx8bDfIa76Ljd3lH5YxPhhiU1(!)$R&@Pr&UU&Zm-cJelMW}F zo{Oqjv1SXB^)=>`!1bcj&2iJoesu(xA*e-wytH?^MB6Gus8=yK3j4|Lyfy{^8 zGXJytDM}zw8YoRw55#hqK~57EMapjkAa?>D`&D_X_zXrEz7iIMa}G$sVo($(HWp4; z6RvPqGTsNkor>m+Qvf@Lfq@w_=frwkQuHu2rJ<*%k4Z@h0~>O>pcfu8GO`R%%$Ru&^+I)xg%;EU4v=w%D)10-rZH@)zLZ!UMi61z_&6U-HE8FJ6d~&cA%Z6MlII z4{2ndp<@k4Kr)Bb5%}s-CKz=PV)y++9v~~Q!h5D6;|G_lDRQp9lYoSa_Bv^q$-B;a zW%hy1Che$&&evHRtE)>Au$Z=%DM9N9oO$=bGeWurG#VEL$ z%NZf$aW-YUV?)y4LKccV#v16L5AN#{Dw#a57HQ_Xv$3PHpon;}J>1pmb3c~*9yh-< zYi$;<9U`(xbv@+s@O22y;CeKaTRgi>*X z0pIw~kto5nHrKgJ?TY0dV75UK6;<#lpU-Yf!4^T`2oAQZY+1-s1`l-=A98p{^!sHk z_X_`UJ)H3K1BcM_gHzIyVr{+^+uF6Y+>Icy2j9KzpOzJ2tjEqd2apg6{1rc%484lW z29UPvF#gMzfncdOPS?83S6W-*Ms)NcmNb5d7+3{pV|!nHR*-A^K}%ea88?f=%%(a*@u%UNv=v;hw1oA04g z0FCNmYmuWu^!m0JV}H5EEr2bnV-NT7Wlp#)!|U$wFiM!p`|gc6?D9rGVfxF|5cX z(vDO62RDr7D=8?mo829nO9h8vb`Tp=B%Qs?sNo=4m?z`$gZFiD#R0qJAPgcB@_Jkx zjk}{d*~2R%l6qR$aTgMc6RKLpAzj|uR)2JZxVp) z9_ueF)o&P&0MNoKDb#Q9Xy4NI$Vl-k8F|o0RD?`^UFwLhZF~LP{l@gF|D1P4sM5u1>(akChu6;h%m%w-pjn1Tu z8LIMC(#tV7tq3gYMUEmOUO*hXAcl)^28m195y)+3)OoXhJJ;K&R}|IK(PX>hIWO=S zHT-}U!TA2_s2&=QbQ=RV`l7}2i`FvlxAXkuFw6(B2!BiUZD*U$b72(L#+DPHm-7^) zmLd4nsZ9P@bKsPYc4#wea&~W?pCEre+8Yh%lV=l=Vcp1SW~1qS!7!Sz7GT^1mWoZW z{cBvV^4nkx(r+Mr(^?rE9_|5QCl+jCV4pUx(E?w|!=Is(3J*^Kv?FY6Y+Tp-2guBc z1miv~!GopxGX@B1Xc$s`BD|O+h)Clr>XZTg70flTridKNJUXFz*(ec?>#VteVD}d^ zzk2Drwa?})o;X;#l0(XQqwkHy9sk1MogPj+kj|C41!?L>KhdD3u~=!cUT!`b_w3$@ zBRW=>x-IMjN+>;LF=DL)LfKdYBOgCqgn{a3NX%YA6G^#l{k*L22VA0Q(KFIA8_LVx zSR1e!J@URTp2VT_NpVu}P0!~e&(NSe6!lUpvQUpsK zrq;@^jIHCjkF85P1E-f4d4TS$FUbA<>CqUL+_HBGHV_$}$0X~AVcMbwcA}?}>{!b4 zB&s9V$VC8VCP}lbC55lq^*5F_@$U2wB#}BS1i)y8s>qHDN_^#~R-Kj#1!}Ous`ukU zAXKgO_{@BvVG&gI*Jn{(UHbC1z7N5-;3=V~@0fwly0y6H%9+XeSCNgRXUJPX-n1d= z?#SjzJy_`rsnI6!bGBuPMW~94Q{aZ%bE-!lqTr6aB2u#D6t}AWv1lNg(t%ci1iVA( zJ@}GDI{!{Fp#F_65-pbKEnu*%-zfm(xD|ih+o5&VPf<~q*yRM&0=V3!8-V(G$j>LI 
zY9_W-A1Y-vln%z@|LZcFR8S>U*RTxaIB#qRdI1#Z-W-5vtW`)+mS6PSI|CUowLQ&U zLDM2mn}utm$k@}Z1Bl!g>m%>MeSDt7G5yD_J1Qsgrk0->E8qu59tT(#m2T&8(?uLtYuxk5lnH7eFc35}RIy2SgSz+hK z8%UtO0s|Af#zHW9GEJB$lQpLO1|v`_CZE57i&LjJTqiY#9b2Xq>X3xEpBNr`@0M$X*4 z_q?+wcXMa`wpGaMok?m@p7{ntK(rj1eh>hEDe%K7aCAwTYX4e)op!J3n{^Bz1R)v! zLj+Lhbm~NR`Q;`Ol>dtdLZO5oc~ak^zB&TfX^c_{N+!-D^sh4Eir-!{u~Z?@aU??{ zGq4Skk#Xvx#n6c$#D|muT~Q3kWSaGXskiq^G34UWfb zoQP?RNWQ-$gjp(>^P=;h1y-kUp3UQp+Wap>T>Rp8;Ii)d`V_fF7~-^nK+II`A;;4d z01d>=1Uuo~@8!oi;%Nn?T~OkUcV^!tPfbM!iCzZRmG{#~-2bsZ%|v zGb}1l=?aHhnMqvRmk_KD z(sd$5x2!)N5h1V);Gc%_j)ZFIZ+j}52N0Q(DDvQ=U6qWTW$l*F!E}DKfx=SU4_Zm9 zd~pbUjsTT@N7*pz2V4PPHiB+4&9DL@6^9Qrg`W+oY0%Ps_cS$CHhoD0fuH*9$%{2bM@Sp{7wVgX%)HrcUMLY=7MsUC(#R(5K3ZxE4UX)nHiv-gd_ zJdQqtfxO<_uMn1v1`K>I9@Bt`ht&-rgx|cq;f(tKNEJLKF+fE@RXGa8oTpRj`l4^*<^-Za9YdSt?GMmVILI%B zjBh^oSEBSSo+rl^16;D#l;yvdd-yT0>zV2&9kC{La#KrPhfj{JMs48Vby3%e7tkly?UvRML7g) zN>o(*d(M0z+Z+UF(`_v+Ep~jvKsV$$aa(IW=Qyz!9v)6yOyZ6pCSj8Au+iHwGlNeR z+kdwV=&;z-R7;#-Bgee+@2ITi}wpuYR>Wu~}+-Wol|V zJp%;0c;N^TB|W_nAhmYHhgNx;f8vT-uKTH@RZwG+^gNMsq#_&l9)PG01cXe4SlY<)O-N}n!@orH=siWfQ}{lOX;%Uz;mz=Bs*cVinQ`=JqmuNA26Zsop4GOPTf4>29LCWvi4N#~ZWqv6!F<4+9 zD3B>j^U2(wKVbnt&?}B9sB-iO1(k%KZ!urvgvk%Gr40iTstcfIhEBXFZ+UqMfo8Aj zw&~d^tj%(Y-g$SdV|$d4-<9!uT%65QlvEBjppJPmJ@*Sd--eu@JE9S?!{_DYp;wZo zcbKqen~i2d_2G6mZI=f#8wu)q6&}JaZ^9&C!B>F_3dqy_{2k*qCj&y z-r!_LmoR{+J9=QH0E!J~pkJBC9H_451jb?v=qGi>Y2~+k&b#*F+E{8EJyWR&E8Z88 zx)-nCyjch8*CI95((D$174dkS=Pq4b?oI3jUqhwV-Q5yRiB9{I1wS#S zke2$cmQL)gZfpdA{{>oLSjg4MO7xE(6v=XLAnJDdON^{fDXoIVrX47as`0*8DK3i1%FDa#wqAph-F*|MAqGqw-hbe=@W!1~@H4y&aFMY>)gt0w zX&l)Ug>Fr1Y~KeaCM38V zy=KHyM-567oP6H7dS!p9X*~4?6D@}RmwejiYXQW7?2e*~FPBs5uA>}4tO`*6Kxu|p z)J9-O8MTN0{;&rt>?y_bHU**qh^lGRD=XxnvMhB)I3MM}^{Xo{w3x5nLq{+Vzblpe zICXWjP@;=Io6|Z^qx8t0HavC5`(N*brOLt?n9CcW@h4%a&=>XUKCWp@C?*p68_)`kf-kh zI}AkrZ!{_~QSAutqwWLfx@a+tZk+MjPBFtGC$~GY^jU5HT9@v+-0U%ZEC*UIigdr; z+~02isAl@b(F}K`D&0%>oeZ~Pn5mdZDqVIFfN;VA<#`j!uv)Rk*BO1^7QLC8=H|uu 
zPrn2K!fx&)>*B})0_F`cHw4}7CAXDG5EmBSje;w(d0km2F;eF#hXD+m-*gT!00!_n z(vFVoP$lg0W`(6a^(pIZ29ny=!W$eIn3J#rN{-l9?cs9 zT+e+kFUfwhKbCK4y|k>B^0?oWf8SyAR>afOvnHxnxoCdP{&xtK3Vd_K~8{ zx2*a6oA`z4=g*a=i%cSrzImBq?Yh$CM(!uD*OH{#fkud(880-ZD)J!{VtNR zAL3vbrQ^8o%=d8Gg?U-|C93+7fY)q8JeOsF|3C3l6qqWDwxt z4M4+vFP{O zwLNafU*3YH0aFV$Q1gjj=+`_y5jSP}0?;tIF=H@D`FJ>wZ$TRh-A!XUj|1#~=?b^c zo%6Ac1bQ*BuwxHzA0JKoV-gC;>6kd?oZu*!3Clz532e~1)A|_1h{r$q4iy`DFqXc7 z#IXYowtIYfLErbW#^4|(E9*pQAsYCeNY1#u9krzaXsmJ_#U&u<_jkI6AfurbbI~W2 z1RlSCsi-V!I=f-0zD=T|hAO|1= zjY^YiMbV)3$liELWZ#y71kmo_vvZGDMj)CJ2zpkFXEhjhA%^Tn^Yi(k$L51{PowHH zIwz5Q#tBBK1Qh`q;{U=~pgl3r2LV);LZ-mpsbd!4XF(U1gRzG)RPhI2!7T!KOux=f zv5+s9pt-C6$v@$P10d5KFsU1nTlAvCaA_M4@8nb1d$KoL$93N~ZhTNoW`(h=nLoZI zX+Teijs2Rz?=~;ELLDv8i7^|x7*STnBJ^})98z@1A+oqu`JUcocP5;=rh>$R2kAco z>6)2}8ygQVShMOgCCFyLmhc+2d)ld@sq_W}oulJd*T6`5ysj8Qmz+&zUu>q^#`-!5 z5g%xHRD6w-4faOh25^YzM1cVK{|SV|jLE%@2(S>JyMGSm?jO8};EsX(9iXAEzKGJK zuN{vdXP{FJAk*W8stm!>#%NgW66C&zTBU}hzxiDG0Cxv%JCCui-K;` zAT)P$p8kWtkR0r}g1nWEnYq+~NP6Fo+iDsO4iRltPjYh`Y>UAlVZxJO$~}HU=#3&p zjg&H;X^{=@@dw^|l{l)T{OOYeKopy1wkrC_tS0&6WL~dhOLBdrPXN{tDSp&YYQ~k8 zJ+Y^xtn3f^YjX4Q*4BH%K}LaDSy@SYCzQVW<3I>)D~n&SPVqi#1d zbg@W{Rf@YqQ`3Q+jHV&CzC}?&>R6J{Aplwo12?EzKrt|Q5PP_ z80IETtpmB|Wn~oFTNFTznhTYv4R$sm6wvw*#>xc#iKAV3r0@n(SqyX{1Rc_bp_Jq1 z3x$yT?G!E>B4`ANAYg+HKtvZCOk{#K3D_SouUe8Drlh3wkBs!Lb%mS(q3Jqcv)tT0 zKt)rOP9a} z0ZohhenCFb@Yn5wgTX3!IF7X8!%IIH1nrv&j}dod*Bb@-nVsP*t-II@JK$TpP2Ey&4rmNrPbDEdo>qGnFe|V-X}-V^ zx><#*TV@8L;a(7ffy=Uq=*)y-Q?Ksq?8KY$HyugKXZl=eJ^+yeI*x%0n@K-bx_}1` zV0$s6`M_PRa6vQJS`dDErH)zMJN|+yd4w-c~dgKTj`HM63~=5IQiQH@F8a)m)ZdP0H|tcfuK1IuocigU36vD z3dwLh^QppD&I&?Eo7k51{esa=>dLYR8c=K+@OPzr;MB;hZrOV6Db7; zDgjVcQG@tTNl6JUr9&`Cff!4ZWyKBfeZUD^(;Ot^-r%NkT7cf@=Bs%tz&lrZ2y+pj z5wIGFw0O4x3<=sjTII0>#N?n56>pKBkrWYuK!qv;%Ly(oXk7r zR)$69gWN@_YO6?*AZXSF6;Cbi@&faqr>h9A^r`=WX@cF1qSHTExu5v(v2y`nXk;Z4 zcAZbv$_ED>s*dfY5aiSERBv|oTUY}A_zREVS2%-rOU#n*gd5y65yvG{(7-n5Mj;?U 
z*99`8fqZl@7&v185Hk{NH-OrGRNpD_^n{qTqai%cPY%_!7gRs7Z^W#@%!0mx(PY#n$&`_6bAx2p_waBX;D&$F&Qce6&By`dTakM#cE!kscxNkxRJch=>|US)~b|9nQfo z=L)MmtA3_W7{)qq+e+{N0e~YJWtg`!1I4;)-JJL-C3vV?2<~j-2bn~~ElEUFPfTVD zem`GSt%W%N_$nos8y{i!19PVa2mquM=KqjAl1ii& z=y4*u0!ew=o3_OZQc3Y}_Ye$g|8)w) zz*fFa{hWFUkbh*AR$}+XQtZ4~g45SMCm|vt$F&Rb!tE_IxV?oMVQP83d}h4{NL41Y z1?Tg1IGJ66%SC#ml~f|D)PQ2W30i}hdjk>Cb-ZL^XcQ6zsibJ;D@QATB|3(w5FMz5 wL|p$hT^lSwh=|C&@o6;?5ivlBh^YSjKhCWeAoQqiEdT%j07*qoM6N<$f|Sqf9RL6T literal 0 HcmV?d00001 diff --git a/docs/compute/drivers/vultr.rst b/docs/compute/drivers/vultr.rst index 1ed09ad580..41428f4c69 100644 --- a/docs/compute/drivers/vultr.rst +++ b/docs/compute/drivers/vultr.rst @@ -3,6 +3,12 @@ Vultr Compute Driver Documentation `Vultr`_ is a public cloud provider based in mulitiple counties. +.. figure:: /_static/images/provider_logos/vultr.png + :align: center + :width: 200 + :target: http://www.vultr.com + + How to get API Key ------------------ From 8cabf0bb0602e80c94bb655450c256886649147f Mon Sep 17 00:00:00 2001 From: Matt Lehman Date: Wed, 12 Nov 2014 16:29:55 -0500 Subject: [PATCH 269/315] Add t2 nodes to us-west-1 region in the EC2 driver. Closes #388 Signed-off-by: Tomaz Muraus --- CHANGES.rst | 4 ++++ libcloud/compute/drivers/ec2.py | 5 ++++- libcloud/test/compute/test_ec2.py | 2 +- 3 files changed, 9 insertions(+), 2 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index 09c0212d84..fc4c273801 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -15,6 +15,10 @@ Compute (GITHUB-386) [ZuluPro] +- Add missing t2. instance types to the us-west-1 region in the EC2 driver. 
+ (GITHUB-388) + [Matt Lehman] + Changes with Apache Libcloud 0.16.0 ----------------------------------- diff --git a/libcloud/compute/drivers/ec2.py b/libcloud/compute/drivers/ec2.py index 87e6afaa55..04ab11b7a3 100644 --- a/libcloud/compute/drivers/ec2.py +++ b/libcloud/compute/drivers/ec2.py @@ -448,7 +448,10 @@ 'r3.xlarge', 'r3.2xlarge', 'r3.4xlarge', - 'r3.8xlarge' + 'r3.8xlarge', + 't2.micro', + 't2.small', + 't2.medium' ] }, # US West (Oregon) Region diff --git a/libcloud/test/compute/test_ec2.py b/libcloud/test/compute/test_ec2.py index 4a2a2e6081..8dfaac065e 100644 --- a/libcloud/test/compute/test_ec2.py +++ b/libcloud/test/compute/test_ec2.py @@ -384,7 +384,7 @@ def test_list_sizes(self): self.assertTrue('cc2.8xlarge' in ids) self.assertTrue('cr1.8xlarge' in ids) elif region_name == 'us-west-1': - self.assertEqual(len(sizes), 29) + self.assertEqual(len(sizes), 32) if region_name == 'us-west-2': self.assertEqual(len(sizes), 29) elif region_name == 'ap-southeast-1': From 680e23b3b2c690841d11b304c29ac68cf05f7c96 Mon Sep 17 00:00:00 2001 From: Roeland Kuipers Date: Fri, 7 Nov 2014 14:18:24 +0100 Subject: [PATCH 270/315] CLOUDSTACK: Add option to expunge vm on destroy Signed-off-by: Sebastien Goasguen This closes #382 --- CHANGES.rst | 4 ++++ libcloud/compute/drivers/cloudstack.py | 16 ++++++++++++++-- libcloud/test/compute/test_cloudstack.py | 5 +++++ 3 files changed, 23 insertions(+), 2 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index fc4c273801..44bc03f3f5 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -19,6 +19,10 @@ Compute (GITHUB-388) [Matt Lehman] +- Add option to expunge VM on destroy in CloudStack driver. 
+ (GITHUB-382) + [Roeland Kuipers] + Changes with Apache Libcloud 0.16.0 ----------------------------------- diff --git a/libcloud/compute/drivers/cloudstack.py b/libcloud/compute/drivers/cloudstack.py index 28fc144e25..7b4a8e2f86 100644 --- a/libcloud/compute/drivers/cloudstack.py +++ b/libcloud/compute/drivers/cloudstack.py @@ -1202,15 +1202,27 @@ def _create_args_to_params(self, node, **kwargs): return server_params - def destroy_node(self, node): + def destroy_node(self, node, ex_expunge=False): """ @inherits: :class:`NodeDriver.reboot_node` :type node: :class:`CloudStackNode` + :keyword ex_expunge: If true is passed, the vm is expunged + immediately. False by default. + :type ex_expunge: ``bool`` + :rtype: ``bool`` """ + + args = { + 'id': node.id, + } + + if ex_expunge: + args['expunge'] = ex_expunge + self._async_request(command='destroyVirtualMachine', - params={'id': node.id}, + params=args, method='GET') return True diff --git a/libcloud/test/compute/test_cloudstack.py b/libcloud/test/compute/test_cloudstack.py index d9fd43448d..49da109a3c 100644 --- a/libcloud/test/compute/test_cloudstack.py +++ b/libcloud/test/compute/test_cloudstack.py @@ -587,6 +587,11 @@ def test_destroy_node(self): res = node.destroy() self.assertTrue(res) + def test_expunge_node(self): + node = self.driver.list_nodes()[0] + res = self.driver.destroy_node(node, ex_expunge=True) + self.assertTrue(res) + def test_reboot_node(self): node = self.driver.list_nodes()[0] res = node.reboot() From 71bf9a340bcb91a781d1674e88b6384110b980f1 Mon Sep 17 00:00:00 2001 From: Loic Lambiel Date: Thu, 13 Nov 2014 10:51:28 +0100 Subject: [PATCH 271/315] LIBCLOUD-634 Cloudstack: get template size in list_images extra attributes Signed-off-by: Sebastien Goasguen This closes #389 --- CHANGES.rst | 4 ++++ libcloud/compute/drivers/cloudstack.py | 1 + 2 files changed, 5 insertions(+) diff --git a/CHANGES.rst b/CHANGES.rst index 44bc03f3f5..0c1975d230 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -23,6 +23,10 
@@ Compute (GITHUB-382) [Roeland Kuipers] +- Add extra attribute in list_images for CloudStack driver. + (GITHUB-389) + [Loic Lambiel] + Changes with Apache Libcloud 0.16.0 ----------------------------------- diff --git a/libcloud/compute/drivers/cloudstack.py b/libcloud/compute/drivers/cloudstack.py index 7b4a8e2f86..50af6d1b6b 100644 --- a/libcloud/compute/drivers/cloudstack.py +++ b/libcloud/compute/drivers/cloudstack.py @@ -982,6 +982,7 @@ def list_images(self, location=None): extra={ 'hypervisor': img['hypervisor'], 'format': img['format'], + 'size': img['size'], 'os': img['ostypename'], 'displaytext': img['displaytext']})) return images From d84f7f83ba67aa5e12d485511cfeea2c41e3e50d Mon Sep 17 00:00:00 2001 From: Sijis Aviles Date: Tue, 18 Nov 2014 15:19:05 -0600 Subject: [PATCH 272/315] fixed word typo Signed-off-by: Sebastien Goasguen --- libcloud/compute/drivers/ec2.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libcloud/compute/drivers/ec2.py b/libcloud/compute/drivers/ec2.py index 04ab11b7a3..7018b68499 100644 --- a/libcloud/compute/drivers/ec2.py +++ b/libcloud/compute/drivers/ec2.py @@ -1935,7 +1935,7 @@ class EC2SubnetAssociation(object): def __init__(self, id, route_table_id, subnet_id, main=False): """ - :param id: The ID of the subent association in the VPC. + :param id: The ID of the subnet association in the VPC. :type id: ``str`` :param route_table_id: The ID of a route table in the VPC. From d7c8671cf28eb02a5d73382fe5dd1ebeb95a4431 Mon Sep 17 00:00:00 2001 From: Itxaka Serrano Date: Sun, 12 Oct 2014 01:05:42 +0200 Subject: [PATCH 273/315] Ad ex_security_group_ids argument to the create_node method in the EC driver in order to be able to launch nodes with security groups on a VPC. 
Closes #373 Signed-off-by: Tomaz Muraus --- CHANGES.rst | 5 +++++ libcloud/compute/drivers/ec2.py | 18 ++++++++++++++++++ libcloud/test/compute/test_ec2.py | 27 +++++++++++++++++++++++++++ 3 files changed, 50 insertions(+) diff --git a/CHANGES.rst b/CHANGES.rst index 0c1975d230..5a6f1be82c 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -27,6 +27,11 @@ Compute (GITHUB-389) [Loic Lambiel] +- Add ``ex_security_group_ids`` argument to the create_node method in the + EC2 driver. This way users can launch VPC nodes with security groups. + (GITHUB-373) + [Itxaka Serrano] + Changes with Apache Libcloud 0.16.0 ----------------------------------- diff --git a/libcloud/compute/drivers/ec2.py b/libcloud/compute/drivers/ec2.py index 7018b68499..2274c5e58b 100644 --- a/libcloud/compute/drivers/ec2.py +++ b/libcloud/compute/drivers/ec2.py @@ -2138,6 +2138,10 @@ def create_node(self, **kwargs): assign to the node. :type ex_security_groups: ``list`` + :keyword ex_security_group_ids: A list of ids of security groups to + assign to the node.[for VPC nodes only] + :type ex_security_group_ids: ``list`` + :keyword ex_metadata: Key/Value metadata to associate with a node :type ex_metadata: ``dict`` @@ -2190,6 +2194,20 @@ def create_node(self, **kwargs): params['SecurityGroup.%d' % (sig + 1,)] =\ security_groups[sig] + if 'ex_security_group_ids' in kwargs and 'ex_subnet' not in kwargs: + raise ValueError('You can only supply ex_security_group_ids' + ' combinated with ex_subnet') + + security_group_ids = kwargs.get('ex_security_group_ids', None) + + if security_group_ids: + if not isinstance(security_group_ids, (tuple, list)): + security_group_ids = [security_group_ids] + + for sig in range(len(security_group_ids)): + params['SecurityGroupId.%d' % (sig + 1,)] =\ + security_group_ids[sig] + if 'location' in kwargs: availability_zone = getattr(kwargs['location'], 'availability_zone', None) diff --git a/libcloud/test/compute/test_ec2.py b/libcloud/test/compute/test_ec2.py index 
8dfaac065e..ce84912bfa 100644 --- a/libcloud/test/compute/test_ec2.py +++ b/libcloud/test/compute/test_ec2.py @@ -34,6 +34,7 @@ from libcloud.compute.drivers.ec2 import IdempotentParamError from libcloud.compute.drivers.ec2 import REGION_DETAILS from libcloud.compute.drivers.ec2 import ExEC2AvailabilityZone +from libcloud.compute.drivers.ec2 import EC2NetworkSubnet from libcloud.compute.base import Node, NodeImage, NodeSize, NodeLocation from libcloud.compute.base import StorageVolume, VolumeSnapshot from libcloud.compute.types import KeyPairDoesNotExistError @@ -877,6 +878,25 @@ def test_create_node_ex_security_groups(self): ex_securitygroup=security_groups, ex_security_groups=security_groups) + def test_create_node_ex_security_group_ids(self): + EC2MockHttp.type = 'ex_security_group_ids' + + image = NodeImage(id='ami-be3adfd7', + name=self.image_name, + driver=self.driver) + size = NodeSize('m1.small', 'Small Instance', None, None, None, None, + driver=self.driver) + + subnet = EC2NetworkSubnet(12345, "test_subnet", "pending") + security_groups = ['sg-1aa11a1a', 'sg-2bb22b2b'] + + self.driver.create_node(name='foo', image=image, size=size, + ex_security_group_ids=security_groups, + ex_subnet=subnet) + self.assertRaises(ValueError, self.driver.create_node, + name='foo', image=image, size=size, + ex_security_group_ids=security_groups) + def test_ex_get_metadata_for_node(self): image = NodeImage(id='ami-be3adfd7', name=self.image_name, @@ -1195,6 +1215,13 @@ def _ex_security_groups_RunInstances(self, method, url, body, headers): body = self.fixtures.load('run_instances.xml') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) + def _ex_security_group_ids_RunInstances(self, method, url, body, headers): + self.assertUrlContainsQueryParams(url, {'SecurityGroupId.1': 'sg-1aa11a1a'}) + self.assertUrlContainsQueryParams(url, {'SecurityGroupId.2': 'sg-2bb22b2b'}) + + body = self.fixtures.load('run_instances.xml') + return (httplib.OK, body, {}, 
httplib.responses[httplib.OK]) + def _create_ex_blockdevicemappings_RunInstances(self, method, url, body, headers): expected_params = { 'BlockDeviceMapping.1.DeviceName': '/dev/sda1', From 5fb73ba18327d249b33e58c81deeec1c79620846 Mon Sep 17 00:00:00 2001 From: Eric Johnson Date: Thu, 13 Nov 2014 20:59:51 +0000 Subject: [PATCH 274/315] improved static address coverage Closes #390 Signed-off-by: Tomaz Muraus --- libcloud/compute/drivers/gce.py | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/libcloud/compute/drivers/gce.py b/libcloud/compute/drivers/gce.py index 91f4629a63..a773e4d3c3 100644 --- a/libcloud/compute/drivers/gce.py +++ b/libcloud/compute/drivers/gce.py @@ -937,7 +937,8 @@ def ex_list_zones(self): list_zones = [self._to_zone(z) for z in response['items']] return list_zones - def ex_create_address(self, name, region=None, address=None): + def ex_create_address(self, name, region=None, address=None, + description=None): """ Create a static address in a region. @@ -951,6 +952,9 @@ def ex_create_address(self, name, region=None, address=None): (e.g. 'xxx.xxx.xxx.xxx') :type address: ``str`` or ``None`` + :keyword description: Optional descriptive comment. + :type description: ``str`` or ``None`` + :return: Static Address object :rtype: :class:`GCEAddress` """ @@ -963,6 +967,8 @@ def ex_create_address(self, name, region=None, address=None): address_data = {'name': name} if address: address_data['address'] = address + if description: + address_data['description'] = description request = '/regions/%s/addresses' % (region.name) self.connection.async_request(request, method='POST', data=address_data) @@ -2145,11 +2151,14 @@ def ex_destroy_address(self, address): Destroy a static address. 
:param address: Address object to destroy - :type address: :class:`GCEAddress` + :type address: ``str`` or :class:`GCEAddress` :return: True if successful :rtype: ``bool`` """ + if not hasattr(address, 'name'): + address = self.ex_get_address(address) + request = '/regions/%s/addresses/%s' % (address.region.name, address.name) @@ -3285,6 +3294,9 @@ def _to_address(self, address): extra['selfLink'] = address.get('selfLink') extra['status'] = address.get('status') + extra['description'] = address.get('description', None) + if address.get('users', None) is not None: + extra['users'] = address.get('users') extra['creationTimestamp'] = address.get('creationTimestamp') return GCEAddress(id=address['id'], name=address['name'], From 4ee55d2c0a2bd1226012a5d25464ff139ecfa800 Mon Sep 17 00:00:00 2001 From: Matt Baldwin Date: Wed, 30 Apr 2014 22:08:23 -0700 Subject: [PATCH 275/315] Adding compute support for Microsoft Azure. The azure_compute.py driver can be used to crete, destroy, and so forth. --- libcloud/compute/drivers/azure_compute.py | 1011 +++++++++++++++++++++ libcloud/compute/providers.py | 2 + libcloud/compute/types.py | 2 + 3 files changed, 1015 insertions(+) create mode 100644 libcloud/compute/drivers/azure_compute.py diff --git a/libcloud/compute/drivers/azure_compute.py b/libcloud/compute/drivers/azure_compute.py new file mode 100644 index 0000000000..476593a69c --- /dev/null +++ b/libcloud/compute/drivers/azure_compute.py @@ -0,0 +1,1011 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Azure driver +""" +import uuid +import re +import time +import collections +import random +import sys +import os +import copy + +#import azure +#import azure.servicemanagement +#from azure.servicemanagement import ServiceManagementService +#from azure.servicemanagement import WindowsConfigurationSet, ConfigurationSet, LinuxConfigurationSet +#from azure.servicemanagement import ConfigurationSetInputEndpoint + +from azure import * +from azure.servicemanagement import * + +from libcloud.compute.providers import Provider +from libcloud.compute.base import Node, NodeDriver, NodeLocation, NodeSize +from libcloud.compute.base import NodeImage, StorageVolume, VolumeSnapshot +from libcloud.compute.base import KeyPair, NodeAuthPassword +from libcloud.compute.types import NodeState, KeyPairDoesNotExistError +from libcloud.common.base import ConnectionUserAndKey + +""" +Sizes must be hardcoded, because Microsoft doesn't provide an API to fetch them. 
+From http://msdn.microsoft.com/en-us/library/windowsazure/dn197896.aspx +""" +AZURE_COMPUTE_INSTANCE_TYPES = { + 'A0': { + 'id': 'A0', + 'name': 'ExtraSmall Instance', + 'ram': 768, + 'disk': 127, + 'bandwidth': None, + 'price': '0.02', + 'max_data_disks': 1, + 'cores': 'Shared' + }, + 'A1': { + 'id': 'A1', + 'name': 'Small Instance', + 'ram': 1792, + 'disk': 127, + 'bandwidth': None, + 'price': '0.09', + 'max_data_disks': 2, + 'cores': 1 + }, + 'A2': { + 'id': 'A2', + 'name': 'Medium Instance', + 'ram': 3584, + 'disk': 127, + 'bandwidth': None, + 'price': '0.18', + 'max_data_disks': 4, + 'cores': 2 + }, + 'A3': { + 'id': 'A3', + 'name': 'Large Instance', + 'ram': 7168, + 'disk': 127, + 'bandwidth': None, + 'price': '0.36', + 'max_data_disks': 8, + 'cores': 4 + }, + 'A4': { + 'id': 'A4', + 'name': 'ExtraLarge Instance', + 'ram': 14336, + 'disk': 127, + 'bandwidth': None, + 'price': '0.72', + 'max_data_disks': 16, + 'cores': 8 + }, + 'A5': { + 'id': 'A5', + 'name': 'Memory Intensive Instance', + 'ram': 14336, + 'disk': 127, + 'bandwidth': None, + 'price': '0.40', + 'max_data_disks': 4, + 'cores': 2 + }, + 'A6': { + 'id': 'A6', + 'name': 'A6 Instance', + 'ram': 28672, + 'disk': 127, + 'bandwidth': None, + 'price': '0.80', + 'max_data_disks': 8, + 'cores': 4 + }, + 'A7': { + 'id': 'A7', + 'name': 'A7 Instance', + 'ram': 57344, + 'disk': 127, + 'bandwidth': None, + 'price': '1.60', + 'max_data_disks': 16, + 'cores': 8 + } +} + +subscription_id = "aff4792f-fc2c-4fa8-88f4-bab437747469" +certificate_path = "/Users/baldwin/.azure/managementCertificate.pem" + +class AzureConnection(ConnectionUserAndKey): + """AzureConnection + + Connection class for Azure Compute Driver. 
+ """ + + + #def __init__(self, user_id, key, secure): + # super(AzureConnection, self).__init__(user_id, key, secure=secure, **kwargs) + +class AzureNodeDriver(NodeDriver): + + _instance_types = AZURE_COMPUTE_INSTANCE_TYPES + _blob_url = ".blob.core.windows.net" + features = {'create_node': ['password']} + service_location = collections.namedtuple('service_location',['is_affinity_group', 'service_location']) + sms = ServiceManagementService(subscription_id, certificate_path) + + def list_sizes(self): + """ + Lists all sizes + + :rtype: ``list`` of :class:`NodeSize` + """ + sizes = [] + + for key, values in self._instance_types.items(): + node_size = self._to_node_size(copy.deepcopy(values)) + sizes.append(node_size) + + return sizes + + def list_images(self): + """ + Lists all images + + :rtype: ``list`` of :class:`NodeImage` + """ + sms = ServiceManagementService(subscription_id, certificate_path) + + data = sms.list_os_images() + + return [self._to_image(i) for i in data] + + def list_locations(self): + """ + Lists all locations + + :rtype: ``list`` of :class:`NodeLocation` + """ + sms = ServiceManagementService(subscription_id, certificate_path) + + data = sms.list_locations() + + return [self._to_location(l) for l in data] + + def list_nodes(self, ex_cloud_service_name=None): + """ + List all nodes + + ex_cloud_service_name parameter is used to scope the request + to a specific Cloud Service. This is a required parameter as + nodes cannot exist outside of a Cloud Service nor be shared + between a Cloud Service within Azure. 
+ + :param ex_cloud_service_name: Cloud Service name + :type ex_cloud_service_name: ``str`` + + :rtype: ``list`` of :class:`Node` + """ + if not ex_cloud_service_name: + raise ValueError("ex_cloud_service_name is required.") + + sms = ServiceManagementService(subscription_id, certificate_path) + + data = sms.get_hosted_service_properties(service_name=ex_cloud_service_name,embed_detail=True) + + try: + return [self._to_node(n) for n in data.deployments[0].role_instance_list] + except IndexError: + return None + + def reboot_node(self, node, ex_cloud_service_name=None, ex_deployment_slot=None): + """ + Reboots a node. + + ex_cloud_service_name parameter is used to scope the request + to a specific Cloud Service. This is a required parameter as + nodes cannot exist outside of a Cloud Service nor be shared + between a Cloud Service within Azure. + + :param ex_cloud_service_name: Cloud Service name + :type ex_cloud_service_name: ``str`` + + :param ex_deployment_slot: Options are "production" (default) + or "staging". (Optional) + :type ex_deployment_slot: ``str`` + + :rtype: ``bool`` + """ + sms = ServiceManagementService(subscription_id, certificate_path) + + if not ex_cloud_service_name: + raise ValueError("ex_cloud_service_name is required.") + + if not ex_deployment_slot: + ex_deployment_slot = "production" + + _deployment_name = self._get_deployment(service_name=ex_cloud_service_name,deployment_slot=ex_deployment_slot).name + + try: + result = sms.reboot_role_instance( + service_name=ex_cloud_service_name, + deployment_name=_deployment_name, + role_instance_name=node.id + ) + if result.request_id: + return True + else: + return False + except Exception: + return False + + def list_volumes(self, node=None): + """ + Lists volumes of the disks in the image repository that are + associated with the specified subscription. + + Pass Node object to scope the list of volumes to a single + instance.
+ + :rtype: ``list`` of :class:`StorageVolume` + """ + + sms = ServiceManagementService(subscription_id, certificate_path) + + data = sms.list_disks() + + volumes = [self._to_volume(volume=v,node=node) for v in data] + + return volumes + + def create_volume(self, **kwargs): + """ + Adds a data disk to a virtual machine. + + """ + + name = kwargs['name'] # Role Name in Azure + size = kwargs['size'] + + if "ex_cloud_service_name" in kwargs: + ex_cloud_service_name = kwargs['ex_cloud_service_name'] + else: + raise + + if "ex_storage_service_name" in kwargs: + ex_storage_service_name = kwargs['ex_storage_service_name'] + else: + # create a new storage service name based upon the cloud service name. + ex_storage_service_name = ex_cloud_service_name + + if "ex_deployment_slot" in kwargs: + ex_deployment_slot = kwargs['ex_deployment_slot'] + else: + ex_deployment_slot = "production" + + if "ex_disk_lun" in kwargs: + ex_disk_lun = kwargs['ex_disk_lun'] + else: + # should identify the best candidate + ex_disk_lun = 1 + + if "ex_host_caching" in kwargs: + ex_host_caching = kwargs['ex_host_caching'] + else: + ex_host_caching = "ReadWrite" + + _deployment_name = self._get_deployment(service_name=ex_cloud_service_name,deployment_slot=ex_deployment_slot).name + + blob_url = "http://" + ex_storage_service_name + self._blob_url + disk_name = "{0}-{1}-LUN{2}-{3}.vhd".format(ex_storage_service_name,name,ex_disk_lun,time.strftime("%Y-%m-%d")) # Azure's pattern in the UI. + media_link = blob_url + "/vhds/" + disk_name + + response = self.sms.add_data_disk(service_name=ex_cloud_service_name, + deployment_name=_deployment_name, + role_name=name, + lun=ex_disk_lun, + host_caching=ex_host_caching, + logical_disk_size_in_gb=size, + media_link=media_link) + print('Response' + response.request_id) + return + + def detach_volume(self, **kwargs): + '''detach_volume + + Removes a volume from ''name''. + + The node will continue to maintain a lease to the blob. 
+ ''' + name = kwargs['name'] + volume_id = kwargs['volume_id'] + + if "ex_cloud_service_name" in kwargs: + ex_cloud_service_name = kwargs['ex_cloud_service_name'] + else: + raise + + if "ex_deployment_slot" in kwargs: + ex_deployment_slot = kwargs['ex_deployment_slot'] + else: + ex_deployment_slot = "production" + + #if "ex_disk_lun" in kwargs: + # ex_disk_lun = kwargs['ex_disk_lun'] + #else: + # raise + + _deployment_name = self._get_deployment(service_name=ex_cloud_service_name,deployment_slot=ex_deployment_slot).name + + response = self.sms.delete_data_disk(service_name=ex_cloud_service_name,deployment_name=_deployment_name,role_name=name,lun=volume_id) + print('Detach Volume Response') + print response + return + + def attach_volume(self, **kwargs): + #name = kwargs['name'] # Role Name in Azure + + if "ex_cloud_service_name" in kwargs: + ex_cloud_service_name = kwargs['ex_cloud_service_name'] + else: + raise + + if "ex_storage_service_name" in kwargs: + ex_storage_service_name = kwargs['ex_storage_service_name'] + else: + # create a new storage service name based upon the cloud service name. + ex_storage_service_name = ex_cloud_service_name + + if "ex_deployment_slot" in kwargs: + ex_deployment_slot = kwargs['ex_deployment_slot'] + else: + ex_deployment_slot = "production" + + if "ex_disk_lun" in kwargs: + ex_disk_lun = kwargs['ex_disk_lun'] + else: + # should identify the best candidate + ex_disk_lun = 1 + + if "ex_host_caching" in kwargs: + ex_host_caching = kwargs['ex_host_caching'] + else: + ex_host_caching = "ReadWrite" + + _deployment_name = self._get_deployment(service_name=ex_cloud_service_name,deployment_slot=ex_deployment_slot).name + + #blob_url = "http://" + ex_storage_service_name + ".blob.core.windows.net" + #disk_name = "{0}-{1}-LUN{2}-{3}.vhd".format(ex_storage_service_name,name,ex_disk_lun,time.strftime("%Y-%m-%d")) # Azure's pattern in the UI. 
+ #media_link = blob_url + "/vhds/" + disk_name + media_link = "http://spcwestus.blob.core.windows.net/vhds/SPCWestUS-node001-LUN1-2014-03-19.vhd" + + #response = self.sms.add_disk(has_operating_system=False, label="test", media_link=media_link) + response = self.sms.update_data_disk(service_name=ex_cloud_service_name, + deployment_name=_deployment_name, + role_name="node001", + disk_name="SPCWestUS-node001-1-201403191752470817", + lun=1, + host_caching="ReadWrite", + media_link=media_link) + + #response = self.sms.add_data_disk(service_name=ex_cloud_service_name, + # deployment_name=_deployment_name, + # role_name="node001", + # lun=1, + # media_link=media_link, + # disk_name="testtest", + # host_caching="ReadWrite", + # source_media_link=media_link) + + print response.request_id + + operation_status = self.sms.get_operation_status(response.request_id) + print('Request ID: ' + response.request_id) + print('Op Stats: ' + operation_status.status) + + timeout = 60 * 5 + waittime = 0 + interval = 5 + + while operation_status.status == "InProgress" and waittime < timeout: + operation_status = self.sms.get_operation_status(response.request_id) + print('Operation Status: ' + operation_status.status) + if operation_status.status == "Succeeded": + break + + waittime += interval + time.sleep(interval) + + if operation_status.status == "Failed": + raise Exception(operation_status.error.message) + + return + + def destroy_volume(self, **kwargs): + disk_name = kwargs['disk_name'] + + response = self.sms.delete_disk(disk_name=disk_name) + + return + + def create_node(self, ex_cloud_service_name=None, **kwargs): + """Create Azure Virtual Machine + + Reference: http://bit.ly/1fIsCb7 [www.windowsazure.com/en-us/documentation/] + + We default to: + + + 3389/TCP - RDP - 1st Microsoft instance. + + RANDOM/TCP - RDP - All succeeding Microsoft instances. + + + 22/TCP - SSH - 1st Linux instance + + RANDOM/TCP - SSH - All succeeding Linux instances. 
+ + The above replicates the standard behavior of the Azure UI. + You can retrieve the assigned ports to each instance by + using the following private function: + + _get_endpoint_ports(service_name) + Returns public,private port key pair. + + @inherits: :class:`NodeDriver.create_node` + + :keyword ex_cloud_service_name: Required. Name of the Azure Cloud Service. + :type ex_cloud_service_name: ``str`` + + :keyword ex_storage_service_name: Optional: Name of the Azure Storage Service. + :type ex_cloud_service_name: ``str`` + + :keyword ex_deployment_name: Optional. The name of the deployment. + If this is not passed in we default to + using the Cloud Service name. + :type ex_deployment_name: ``str`` + + :keyword ex_deployment_slot: Optional: Valid values: production|staging. + Defaults to production. + :type ex_cloud_service_name: ``str`` + + :keyword ex_linux_user_id: Optional. Defaults to 'azureuser'. + :type ex_cloud_service_name: ``str`` + + """ + name = kwargs['name'] + size = kwargs['size'] + image = kwargs['image'] + + password = None + auth = self._get_and_check_auth(kwargs["auth"]) + password = auth.password + + sms = ServiceManagementService(subscription_id, certificate_path) + + if not ex_cloud_service_name: + raise ValueError("ex_cloud_service_name is required.") + + if "ex_deployment_slot" in kwargs: + ex_deployment_slot = kwargs['ex_deployment_slot'] + else: + ex_deployment_slot = "production" # We assume production if this is not provided. + + if "ex_linux_user_id" in kwargs: + ex_linux_user_id = kwargs['ex_linux_user_id'] + else: + # This mimics the Azure UI behavior. + ex_linux_user_id = "azureuser" + + node_list = self.list_nodes(ex_cloud_service_name=ex_cloud_service_name) + network_config = ConfigurationSet() + network_config.configuration_set_type = 'NetworkConfiguration' + + # We do this because we need to pass a Configuration to the + # method. This will be either Linux or Windows. 
+ if re.search("Win|SQL|SharePoint|Visual|Dynamics|DynGP|BizTalk", image, re.I): + machine_config = WindowsConfigurationSet(name, password) + machine_config.domain_join = None + + if node_list is None: + port = "3389" + else: + port = random.randint(41952,65535) + endpoints = self._get_deployment(service_name=ex_cloud_service_name,deployment_slot=ex_deployment_slot) + + for instances in endpoints.role_instance_list: + ports = [] + for ep in instances.instance_endpoints: + ports += [ep.public_port] + + while port in ports: + port = random.randint(41952,65535) + + endpoint = ConfigurationSetInputEndpoint( + name='Remote Desktop', + protocol='tcp', + port=port, + local_port='3389', + load_balanced_endpoint_set_name=None, + enable_direct_server_return=False + ) + else: + if node_list is None: + port = "22" + else: + port = random.randint(41952,65535) + endpoints = self._get_deployment(service_name=ex_cloud_service_name,deployment_slot=ex_deployment_slot) + + for instances in endpoints.role_instance_list: + ports = [] + for ep in instances.instance_endpoints: + ports += [ep.public_port] + + while port in ports: + port = random.randint(41952,65535) + + endpoint = ConfigurationSetInputEndpoint( + name='SSH', + protocol='tcp', + port=port, + local_port='22', + load_balanced_endpoint_set_name=None, + enable_direct_server_return=False + ) + machine_config = LinuxConfigurationSet(name, ex_linux_user_id, password, False) + + network_config.input_endpoints.input_endpoints.append(endpoint) + + _storage_location = self._get_cloud_service_location(service_name=ex_cloud_service_name) + + # OK, bit annoying here. You must create a deployment before + # you can create an instance; however, the deployment function + # creates the first instance, but all subsequent instances + # must be created using the add_role function. + # + # So, yeah, annoying. + if node_list is None: + # This is the first node in this cloud service. 
+ if "ex_storage_service_name" in kwargs: + ex_storage_service_name = kwargs['ex_storage_service_name'] + else: + ex_storage_service_name = ex_cloud_service_name + ex_storage_service_name = re.sub(ur'[\W_]+', u'', ex_storage_service_name.lower(), flags=re.UNICODE) + if self._is_storage_service_unique(service_name=ex_storage_service_name): + self._create_storage_account( + service_name=ex_storage_service_name, + location=_storage_location.service_location, + is_affinity_group=_storage_location.is_affinity_group + ) + + if "ex_deployment_name" in kwargs: + ex_deployment_name = kwargs['ex_deployment_name'] + else: + ex_deployment_name = ex_cloud_service_name + + blob_url = "http://" + ex_storage_service_name + ".blob.core.windows.net" + disk_name = "{0}-{1}-{2}.vhd".format(ex_cloud_service_name,name,time.strftime("%Y-%m-%d")) # Azure's pattern in the UI. + media_link = blob_url + "/vhds/" + disk_name + disk_config = OSVirtualHardDisk(image, media_link) + + result = sms.create_virtual_machine_deployment( + service_name=ex_cloud_service_name, + deployment_name=ex_deployment_name, + deployment_slot=ex_deployment_slot, + label=name, + role_name=name, + system_config=machine_config, + os_virtual_hard_disk=disk_config, + network_config=network_config, + role_size=size + ) + else: + _deployment_name = self._get_deployment(service_name=ex_cloud_service_name,deployment_slot=ex_deployment_slot).name + + if "ex_storage_service_name" in kwargs: + ex_storage_service_name = kwargs['ex_storage_service_name'] + else: + ex_storage_service_name = ex_cloud_service_name + ex_storage_service_name = re.sub(ur'[\W_]+', u'', ex_storage_service_name.lower(), flags=re.UNICODE) + + if self._is_storage_service_unique(service_name=ex_storage_service_name): + self._create_storage_account( + service_name=ex_storage_service_name, + location=_storage_location.service_location, + is_affinity_group=_storage_location.is_affinity_group + ) + + blob_url = "http://" + ex_storage_service_name + 
".blob.core.windows.net" + disk_name = "{0}-{1}-{2}.vhd".format(ex_cloud_service_name,name,time.strftime("%Y-%m-%d")) + media_link = blob_url + "/vhds/" + disk_name + disk_config = OSVirtualHardDisk(image, media_link) + + result = self.sms.add_role( + service_name=ex_cloud_service_name, + deployment_name=_deployment_name, + role_name=name, + system_config=machine_config, + os_virtual_hard_disk=disk_config, + network_config=network_config, + role_size=size + ) + + return Node( + id=name, + name=name, + state=NodeState.PENDING, + public_ips=[], + private_ips=[], + driver=self.connection.driver + ) + + #operation_status = self.sms.get_operation_status(result.request_id) + + #timeout = 60 * 5 + #waittime = 0 + #interval = 5 + + #while operation_status.status == "InProgress" and waittime < timeout: + # operation_status = self.sms.get_operation_status(result.request_id) + # if operation_status.status == "Succeeded": + # break + + # waittime += interval + # time.sleep(interval) + + #if operation_status.status == "Failed": + # raise Exception(operation_status.error.message) + #return + + def destroy_node(self, node, ex_cloud_service_name=None, ex_deployment_slot=None): + """Remove Azure Virtual Machine + + This removes the instance, but does not + remove the disk. You will need to use destroy_volume. + Azure sometimes has an issue where it will hold onto + a blob lease for an extended amount of time. + + :keyword ex_cloud_service_name: Required. Name of the Azure Cloud Service. + :type ex_cloud_service_name: ``str`` + + :keyword ex_deployment_slot: Optional: The name of the deployment + slot. If this is not passed in we + default to production. 
+ :type ex_deployment_slot: ``str`` + """ + + if not ex_cloud_service_name: + raise ValueError("ex_cloud_service_name is required.") + + if not ex_deployment_slot: + ex_deployment_slot = "production" + + _deployment = self._get_deployment(service_name=ex_cloud_service_name,deployment_slot=ex_deployment_slot) + _deployment_name = _deployment.name + + _server_deployment_count = len(_deployment.role_instance_list) + + sms = ServiceManagementService(subscription_id, certificate_path) + + for r in _deployment.role_list: + if r.role_name == node.id: + print "Hello" + print r.data_virtual_hard_disk + + #print _deployment.role_list[0].role_name + + return True + + if _server_deployment_count > 1: + data = sms.delete_role(service_name=ex_cloud_service_name, deployment_name=_deployment_name,role_name=node.id) + + # We need to wait before we can remove the disk itself. + #operation_status = sms.get_operation_status(data.request_id) + + #timeout = 60 * 5 + #waittime = 0 + #interval = 5 # Creating a storage service should take no longer than 30 seconds. + + #while operation_status.status == "InProgress" and waittime < timeout: + # status = sms.get_operation_status(data.request_id) + # if operation_status.status == "Succeeded": + # break + + # waittime += interval + # time.sleep(interval) + + #_delete_disk = sms.delete_data_disk(service_name=ex_cloud_service_name,deployment_name=_deployment_name,role_name=node.id,lun=0) + else: + data = sms.delete_deployment(service_name=ex_cloud_service_name, deployment_name=_deployment_name) + + operation_status = sms.get_operation_status(data.request_id) + + timeout = 60 * 5 + waittime = 0 + interval = 5 # Creating a storage service should take no longer than 30 seconds. 
+ + while operation_status.status == "InProgress" and waittime < timeout: + status = sms.get_operation_status(data.request_id) + if operation_status.status == "Succeeded": + break + + waittime += interval + time.sleep(interval) + + _delete_disk = sms.delete_data_disk(service_name=ex_cloud_service_name,deployment_name=_deployment_name,role_name=node.id,lun=0) + return True + + """ Functions not implemented + """ + def create_volume_snapshot(self): + raise NotImplementedError( + 'You cannot create snapshots of ' + 'Azure VMs at this time.') + +# def attach_volume(self): +# raise NotImplementedError( +# 'attach_volume is not supported ' +# 'at this time.') + + + """ External Functions + + """ + + def ex_list_storage_services(self, **kwargs): + res = self.sms.get_storage_account_properties(kwargs['service_name']) + print(res.storage_service_properties.status) + return + + """Private Functions + + """ + + def _to_node(self, data): + """ + Convert the data from an Azure response object into a Node + """ + + if len(data.instance_endpoints) >= 1: + public_ip = data.instance_endpoints[0].vip + else: + public_ip = [] + + for port in data.instance_endpoints: + if port.name == 'Remote Desktop': + remote_desktop_port = port.public_port + + return Node( + id=data.instance_name, + name=data.instance_name, + state=data.instance_status, + public_ips=[public_ip], + private_ips=[data.ip_address], + driver=self.connection.driver, + extra={ + 'remote_desktop_port': remote_desktop_port, + 'power_state': data.power_state, + 'instance_size': data.instance_size}) + + def _to_location(self, data): + """ + Convert the data from an Azure response object into a location + """ + country = data.display_name + + if "Asia" in data.display_name: + country = "Asia" + + if "Europe" in data.display_name: + country = "Europe" + + if "US" in data.display_name: + country = "US" + + if "Japan" in data.display_name: + country = "Japan" + + if "Brazil" in data.display_name: + country = "Brazil" + + return
NodeLocation( + id=data.name, + name=data.display_name, + country=country, + driver=self.connection.driver) + + def _to_node_size(self, data): + """ + Convert the AZURE_COMPUTE_INSTANCE_TYPES into NodeSize + """ + + return NodeSize( + id=data["id"], + name=data["name"], + ram=data["ram"], + disk=data["disk"], + bandwidth=data["bandwidth"], + price=data["price"], + driver=self.connection.driver, + extra={ + 'max_data_disks' : data["max_data_disks"], + 'cores' : data["cores"] + }) + + def _to_image(self, data): + + return NodeImage( + id=data.name, + name=data.label + + + , + driver=self.connection.driver, + extra={ + 'os' : data.os, + 'category' : data.category, + 'description' : data.description, + 'location' : data.location, + 'affinity_group' : data.affinity_group, + 'media_link' : data.media_link + }) + + def _to_volume(self, volume, node): + + if node: + if hasattr(volume.attached_to, 'role_name'): + if volume.attached_to.role_name == node.id: + extra = {} + extra['affinity_group'] = volume.affinity_group + if hasattr(volume.attached_to, 'hosted_service_name'): + extra['hosted_service_name'] = volume.attached_to.hosted_service_name + if hasattr(volume.attached_to, 'role_name'): + extra['role_name'] = volume.attached_to.role_name + if hasattr(volume.attached_to, 'deployment_name'): + extra['deployment_name'] = volume.attached_to.deployment_name + extra['os'] = volume.os + extra['location'] = volume.location + extra['media_link'] = volume.media_link + extra['source_image_name'] = volume.source_image_name + + return StorageVolume(id=volume.name, + name=volume.name, + size=int(volume.logical_disk_size_in_gb), + driver=self.connection.driver, + extra=extra) + else: + extra = {} + extra['affinity_group'] = volume.affinity_group + if hasattr(volume.attached_to, 'hosted_service_name'): + extra['hosted_service_name'] = volume.attached_to.hosted_service_name + if hasattr(volume.attached_to, 'role_name'): + extra['role_name'] = volume.attached_to.role_name + if 
hasattr(volume.attached_to, 'deployment_name'): + extra['deployment_name'] = volume.attached_to.deployment_name + extra['os'] = volume.os + extra['location'] = volume.location + extra['media_link'] = volume.media_link + extra['source_image_name'] = volume.source_image_name + + return StorageVolume(id=volume.name, + name=volume.name, + size=int(volume.logical_disk_size_in_gb), + driver=self.connection.driver, + extra=extra) + + def _get_deployment(self, **kwargs): + _service_name = kwargs['service_name'] + _deployment_slot = kwargs['deployment_slot'] + + sms = ServiceManagementService(subscription_id, certificate_path) + + return sms.get_deployment_by_slot(service_name=_service_name,deployment_slot=_deployment_slot) + + def _get_cloud_service_location(self, service_name=None): + + if not service_name: + raise ValueError("service_name is required.") + + sms = ServiceManagementService(subscription_id, certificate_path) + + res = sms.get_hosted_service_properties(service_name=service_name,embed_detail=False) + + _affinity_group = res.hosted_service_properties.affinity_group + _cloud_service_location = res.hosted_service_properties.location + + if _affinity_group is not None: + return self.service_location(True, _affinity_group) + elif _cloud_service_location is not None: + return self.service_location(False, _cloud_service_location) + else: + return None + + def _is_storage_service_unique(self, service_name=None): + if not service_name: + raise ValueError("service_name is required.") + + sms = ServiceManagementService(subscription_id, certificate_path) + + _check_availability = sms.check_storage_account_name_availability(service_name=service_name) + + return _check_availability.result + + def _create_storage_account(self, **kwargs): + sms = ServiceManagementService(subscription_id, certificate_path) + + if kwargs['is_affinity_group'] is True: + result = sms.create_storage_account( + service_name=kwargs['service_name'], + description=kwargs['service_name'], + 
label=kwargs['service_name'], + affinity_group=kwargs['location']) + else: + result = sms.create_storage_account( + service_name=kwargs['service_name'], + description=kwargs['service_name'], + label=kwargs['service_name'], + location=kwargs['location']) + + # We need to wait for this to be created before we can + # create the storage container and the instance. + + operation_status = sms.get_operation_status(result.request_id) + + timeout = 60 * 5 + waittime = 0 + interval = 5 + + while operation_status.status == "InProgress" and waittime < timeout: + operation_status = sms.get_operation_status(result.request_id) + if operation_status.status == "Succeeded": + break + + waittime += interval + time.sleep(interval) + return + + # items below this line are being refactored + + def _get_available_lun(self, **kwargs): + service_name = kwargs['service_name'] + + response = self.sms.get_hosted_service_properties(service_name=service_name,embed_detail=True) + deployments = response.deployments[0].role_instance_list + roles = response.deployments[0].role_list.roles + print roles + + for r in roles: + print('#############') + print r + print r.__dict__ + print('#############') + print r + print dir(r) + +# d = res.deployments[0].role_list.roles[0].__dict__ +# print d + #for i in res.deployments[0].role_list: + # l = dir(i) + # print l + + #_affinity_group = res.hosted_service_properties.affinity_group + #_cloud_service_location = res.hosted_service_properties.location + + #if _affinity_group is not None: + # return service_location(True, _affinity_group) + #elif _cloud_service_location is not None: + # return service_location(False, _cloud_service_location) + #else: + # return None + + return \ No newline at end of file diff --git a/libcloud/compute/providers.py b/libcloud/compute/providers.py index bdceafc896..4e1b6c6664 100644 --- a/libcloud/compute/providers.py +++ b/libcloud/compute/providers.py @@ -153,6 +153,8 @@ ('libcloud.compute.drivers.profitbricks', 
'ProfitBricksNodeDriver'), Provider.VULTR: ('libcloud.compute.drivers.vultr', 'VultrNodeDriver'), + Provider.AZURE_COMPUTE: + ('libcloud.compute.drivers.azure_compute', 'AzureNodeDriver'), # Deprecated Provider.CLOUDSIGMA_US: diff --git a/libcloud/compute/types.py b/libcloud/compute/types.py index c21a4d17b1..472ded716b 100644 --- a/libcloud/compute/types.py +++ b/libcloud/compute/types.py @@ -78,6 +78,7 @@ class Provider(object): :cvar OUTSCALE_INC: Outscale INC driver. :cvar PROFIT_BRICKS: ProfitBricks driver. :cvar VULTR: vultr driver. + :cvar AZURE: Azure driver. """ DUMMY = 'dummy' EC2 = 'ec2_us_east' @@ -126,6 +127,7 @@ class Provider(object): VSPHERE = 'vsphere' PROFIT_BRICKS = 'profitbricks' VULTR = 'vultr' + AZURE_COMPUTE = 'azure_compute' # OpenStack based providers HPCLOUD = 'hpcloud' From 9a726c235082fb2c87e736713a5bb0e401c61ccd Mon Sep 17 00:00:00 2001 From: Matt Baldwin Date: Wed, 30 Apr 2014 22:18:01 -0700 Subject: [PATCH 276/315] Updated azure_compute.py --- libcloud/compute/drivers/azure_compute.py | 297 +++------------------- 1 file changed, 30 insertions(+), 267 deletions(-) diff --git a/libcloud/compute/drivers/azure_compute.py b/libcloud/compute/drivers/azure_compute.py index 476593a69c..248ce9f9a5 100644 --- a/libcloud/compute/drivers/azure_compute.py +++ b/libcloud/compute/drivers/azure_compute.py @@ -13,7 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. """ -Azure driver +Azure Compute driver """ import uuid import re @@ -41,7 +41,7 @@ from libcloud.common.base import ConnectionUserAndKey """ -Sizes must be hardcoded, because Microsoft doesn't provide an API to fetch them. +Sizes must be hardcoded because Microsoft doesn't provide an API to fetch them. 
From http://msdn.microsoft.com/en-us/library/windowsazure/dn197896.aspx """ AZURE_COMPUTE_INSTANCE_TYPES = { @@ -272,176 +272,6 @@ def list_volumes(self, node=None): return volumes - def create_volume(self, **kwargs): - """ - Adds a data disk to a virtual machine. - - """ - - name = kwargs['name'] # Role Name in Azure - size = kwargs['size'] - - if "ex_cloud_service_name" in kwargs: - ex_cloud_service_name = kwargs['ex_cloud_service_name'] - else: - raise - - if "ex_storage_service_name" in kwargs: - ex_storage_service_name = kwargs['ex_storage_service_name'] - else: - # create a new storage service name based upon the cloud service name. - ex_storage_service_name = ex_cloud_service_name - - if "ex_deployment_slot" in kwargs: - ex_deployment_slot = kwargs['ex_deployment_slot'] - else: - ex_deployment_slot = "production" - - if "ex_disk_lun" in kwargs: - ex_disk_lun = kwargs['ex_disk_lun'] - else: - # should identify the best candidate - ex_disk_lun = 1 - - if "ex_host_caching" in kwargs: - ex_host_caching = kwargs['ex_host_caching'] - else: - ex_host_caching = "ReadWrite" - - _deployment_name = self._get_deployment(service_name=ex_cloud_service_name,deployment_slot=ex_deployment_slot).name - - blob_url = "http://" + ex_storage_service_name + self._blob_url - disk_name = "{0}-{1}-LUN{2}-{3}.vhd".format(ex_storage_service_name,name,ex_disk_lun,time.strftime("%Y-%m-%d")) # Azure's pattern in the UI. - media_link = blob_url + "/vhds/" + disk_name - - response = self.sms.add_data_disk(service_name=ex_cloud_service_name, - deployment_name=_deployment_name, - role_name=name, - lun=ex_disk_lun, - host_caching=ex_host_caching, - logical_disk_size_in_gb=size, - media_link=media_link) - print('Response' + response.request_id) - return - - def detach_volume(self, **kwargs): - '''detach_volume - - Removes a volume from ''name''. - - The node will continue to maintain a lease to the blob. 
- ''' - name = kwargs['name'] - volume_id = kwargs['volume_id'] - - if "ex_cloud_service_name" in kwargs: - ex_cloud_service_name = kwargs['ex_cloud_service_name'] - else: - raise - - if "ex_deployment_slot" in kwargs: - ex_deployment_slot = kwargs['ex_deployment_slot'] - else: - ex_deployment_slot = "production" - - #if "ex_disk_lun" in kwargs: - # ex_disk_lun = kwargs['ex_disk_lun'] - #else: - # raise - - _deployment_name = self._get_deployment(service_name=ex_cloud_service_name,deployment_slot=ex_deployment_slot).name - - response = self.sms.delete_data_disk(service_name=ex_cloud_service_name,deployment_name=_deployment_name,role_name=name,lun=volume_id) - print('Detach Volume Response') - print response - return - - def attach_volume(self, **kwargs): - #name = kwargs['name'] # Role Name in Azure - - if "ex_cloud_service_name" in kwargs: - ex_cloud_service_name = kwargs['ex_cloud_service_name'] - else: - raise - - if "ex_storage_service_name" in kwargs: - ex_storage_service_name = kwargs['ex_storage_service_name'] - else: - # create a new storage service name based upon the cloud service name. - ex_storage_service_name = ex_cloud_service_name - - if "ex_deployment_slot" in kwargs: - ex_deployment_slot = kwargs['ex_deployment_slot'] - else: - ex_deployment_slot = "production" - - if "ex_disk_lun" in kwargs: - ex_disk_lun = kwargs['ex_disk_lun'] - else: - # should identify the best candidate - ex_disk_lun = 1 - - if "ex_host_caching" in kwargs: - ex_host_caching = kwargs['ex_host_caching'] - else: - ex_host_caching = "ReadWrite" - - _deployment_name = self._get_deployment(service_name=ex_cloud_service_name,deployment_slot=ex_deployment_slot).name - - #blob_url = "http://" + ex_storage_service_name + ".blob.core.windows.net" - #disk_name = "{0}-{1}-LUN{2}-{3}.vhd".format(ex_storage_service_name,name,ex_disk_lun,time.strftime("%Y-%m-%d")) # Azure's pattern in the UI. 
- #media_link = blob_url + "/vhds/" + disk_name - media_link = "http://spcwestus.blob.core.windows.net/vhds/SPCWestUS-node001-LUN1-2014-03-19.vhd" - - #response = self.sms.add_disk(has_operating_system=False, label="test", media_link=media_link) - response = self.sms.update_data_disk(service_name=ex_cloud_service_name, - deployment_name=_deployment_name, - role_name="node001", - disk_name="SPCWestUS-node001-1-201403191752470817", - lun=1, - host_caching="ReadWrite", - media_link=media_link) - - #response = self.sms.add_data_disk(service_name=ex_cloud_service_name, - # deployment_name=_deployment_name, - # role_name="node001", - # lun=1, - # media_link=media_link, - # disk_name="testtest", - # host_caching="ReadWrite", - # source_media_link=media_link) - - print response.request_id - - operation_status = self.sms.get_operation_status(response.request_id) - print('Request ID: ' + response.request_id) - print('Op Stats: ' + operation_status.status) - - timeout = 60 * 5 - waittime = 0 - interval = 5 - - while operation_status.status == "InProgress" and waittime < timeout: - operation_status = self.sms.get_operation_status(response.request_id) - print('Operation Status: ' + operation_status.status) - if operation_status.status == "Succeeded": - break - - waittime += interval - time.sleep(interval) - - if operation_status.status == "Failed": - raise Exception(operation_status.error.message) - - return - - def destroy_volume(self, **kwargs): - disk_name = kwargs['disk_name'] - - response = self.sms.delete_disk(disk_name=disk_name) - - return - def create_node(self, ex_cloud_service_name=None, **kwargs): """Create Azure Virtual Machine @@ -697,53 +527,18 @@ def destroy_node(self, node, ex_cloud_service_name=None, ex_deployment_slot=None sms = ServiceManagementService(subscription_id, certificate_path) - for r in _deployment.role_list: - if r.role_name == node.id: - print "Hello" - print r.data_virtual_hard_disk - - #print _deployment.role_list[0].role_name - - return True 
- - if _server_deployment_count > 1: - data = sms.delete_role(service_name=ex_cloud_service_name, deployment_name=_deployment_name,role_name=node.id) - - # We need to wait before we can remove the disk itself. - #operation_status = sms.get_operation_status(data.request_id) - - #timeout = 60 * 5 - #waittime = 0 - #interval = 5 # Creating a storage service should take no longer than 30 seconds. - - #while operation_status.status == "InProgress" and waittime < timeout: - # status = sms.get_operation_status(data.request_id) - # if operation_status.status == "Succeeded": - # break - - # waittime += interval - # time.sleep(interval) - - #_delete_disk = sms.delete_data_disk(service_name=ex_cloud_service_name,deployment_name=_deployment_name,role_name=node.id,lun=0) - else: - data = sms.delete_deployment(service_name=ex_cloud_service_name, deployment_name=_deployment_name) - - operation_status = sms.get_operation_status(data.request_id) - - timeout = 60 * 5 - waittime = 0 - interval = 5 # Creating a storage service should take no longer than 30 seconds. 
- - while operation_status.status == "InProgress" and waittime < timeout: - status = sms.get_operation_status(data.request_id) - if operation_status.status == "Succeeded": - break - - waittime += interval - time.sleep(interval) - - _delete_disk = sms.delete_data_disk(service_name=ex_cloud_service_name,deployment_name=_deployment_name,role_name=node.id,lun=0) - return True + try: + if _server_deployment_count > 1: + data = sms.delete_role(service_name=ex_cloud_service_name, + deployment_name=_deployment_name, + role_name=node.id, + delete_attached_disks=True) + return True + else: + data = sms.delete_deployment(service_name=ex_cloud_service_name,deployment_name=_deployment_name,delete_attached_disks=True) + return True + except Exception: + return False """ Functions not implemented """ @@ -752,23 +547,27 @@ def create_volume_snapshot(self): 'You cannot create snapshots of ' 'Azure VMs at this time.') -# def attach_volume(self): -# raise NotImplementedError( -# 'attach_volume is not supported ' -# 'at this time.') - + def attach_volume(self): + raise NotImplementedError( + 'attach_volume is not supported ' + 'at this time.') - """ External Functions + def create_volume(self): + raise NotImplementedError( + 'create_volume is not supported ' + 'at this time.') - """ + def detach_volume(self): + raise NotImplementedError( + 'detach_volume is not supported ' + 'at this time.') - def ex_list_storage_services(self, **kwargs): - res = self.sms.get_storage_account_properties(kwargs['service_name']) - print(res.storage_service_properties.status) - return + def destroy_volume(self): + raise NotImplementedError( + 'destroy_volume is not supported ' + 'at this time.') """Private Functions - """ def _to_node(self, data): @@ -972,40 +771,4 @@ def _create_storage_account(self, **kwargs): waittime += interval time.sleep(interval) - return - - # items below this line are being refactored - - def _get_available_lun(self, **kwargs): - service_name = kwargs['service_name'] - - response 
= self.sms.get_hosted_service_properties(service_name=service_name,embed_detail=True) - deployments = response.deployments[0].role_instance_list - roles = response.deployments[0].role_list.roles - print roles - - for r in roles: - print('#############') - print r - print r.__dict__ - print('#############') - print r - print dir(r) - -# d = res.deployments[0].role_list.roles[0].__dict__ -# print d - #for i in res.deployments[0].role_list: - # l = dir(i) - # print l - - #_affinity_group = res.hosted_service_properties.affinity_group - #_cloud_service_location = res.hosted_service_properties.location - - #if _affinity_group is not None: - # return service_location(True, _affinity_group) - #elif _cloud_service_location is not None: - # return service_location(False, _cloud_service_location) - #else: - # return None - return \ No newline at end of file From 8d005b1c3548f5c4a3b6993420afdfce5014e22b Mon Sep 17 00:00:00 2001 From: Matt Baldwin Date: Mon, 19 May 2014 20:51:38 -0700 Subject: [PATCH 277/315] I've re-factored the code with code pulled from the Azure SDK for Python. This is being done to eliminate the requirement for the Azure library to be pre-installed on the system. Users can now simply load up libcloud and be on their merry way with no external requirements. --- libcloud/compute/drivers/azure_compute.py | 1519 ++++++++++++++++++++- 1 file changed, 1469 insertions(+), 50 deletions(-) diff --git a/libcloud/compute/drivers/azure_compute.py b/libcloud/compute/drivers/azure_compute.py index 248ce9f9a5..f0a3e4427c 100644 --- a/libcloud/compute/drivers/azure_compute.py +++ b/libcloud/compute/drivers/azure_compute.py @@ -14,6 +14,7 @@ # limitations under the License. 
""" Azure Compute driver +Version: 1.0 """ import uuid import re @@ -23,12 +24,7 @@ import sys import os import copy - -#import azure -#import azure.servicemanagement -#from azure.servicemanagement import ServiceManagementService -#from azure.servicemanagement import WindowsConfigurationSet, ConfigurationSet, LinuxConfigurationSet -#from azure.servicemanagement import ConfigurationSetInputEndpoint +import base64 from azure import * from azure.servicemanagement import * @@ -40,6 +36,36 @@ from libcloud.compute.types import NodeState, KeyPairDoesNotExistError from libcloud.common.base import ConnectionUserAndKey +from datetime import datetime +from xml.dom import minidom +from xml.sax.saxutils import escape as xml_escape +from httplib import ( + HTTPSConnection, + HTTP_PORT, + HTTPS_PORT, + ) + +if sys.version_info < (3,): + from urllib2 import quote as url_quote + from urllib2 import unquote as url_unquote + _strtype = basestring +else: + from urllib.parse import quote as url_quote + from urllib.parse import unquote as url_unquote + _strtype = str + +if sys.version_info < (3,): + _unicode_type = unicode + + def _str(value): + if isinstance(value, unicode): + return value.encode('utf-8') + + return str(value) +else: + _str = str + _unicode_type = str + """ Sizes must be hardcoded because Microsoft doesn't provide an API to fetch them. 
From http://msdn.microsoft.com/en-us/library/windowsazure/dn197896.aspx @@ -127,18 +153,1007 @@ } } +_KNOWN_SERIALIZATION_XFORMS = { + 'include_apis': 'IncludeAPIs', + 'message_id': 'MessageId', + 'content_md5': 'Content-MD5', + 'last_modified': 'Last-Modified', + 'cache_control': 'Cache-Control', + 'account_admin_live_email_id': 'AccountAdminLiveEmailId', + 'service_admin_live_email_id': 'ServiceAdminLiveEmailId', + 'subscription_id': 'SubscriptionID', + 'fqdn': 'FQDN', + 'private_id': 'PrivateID', + 'os_virtual_hard_disk': 'OSVirtualHardDisk', + 'logical_disk_size_in_gb': 'LogicalDiskSizeInGB', + 'logical_size_in_gb': 'LogicalSizeInGB', + 'os': 'OS', + 'persistent_vm_downtime_info': 'PersistentVMDowntimeInfo', + 'copy_id': 'CopyId', + } + subscription_id = "aff4792f-fc2c-4fa8-88f4-bab437747469" certificate_path = "/Users/baldwin/.azure/managementCertificate.pem" +azure_service_management_host = 'management.core.windows.net' + +__version__ = '0.8.0' +_USER_AGENT_STRING = 'pyazure/' + __version__ +X_MS_VERSION = '2012-03-01' + + class AzureConnection(ConnectionUserAndKey): - """AzureConnection + """AzureConnection + + Connection class for Azure Compute Driver. 
+ """ + +""" +XML Serializer +""" +class AzureXmlSerializer(): + + @staticmethod + def create_storage_service_input_to_xml(service_name, description, label, + affinity_group, location, + geo_replication_enabled, + extended_properties): + return AzureXmlSerializer.doc_from_data( + 'CreateStorageServiceInput', + [('ServiceName', service_name), + ('Description', description), + ('Label', label, _encode_base64), + ('AffinityGroup', affinity_group), + ('Location', location), + ('GeoReplicationEnabled', geo_replication_enabled, _lower)], + extended_properties) + + @staticmethod + def update_storage_service_input_to_xml(description, label, + geo_replication_enabled, + extended_properties): + return AzureXmlSerializer.doc_from_data( + 'UpdateStorageServiceInput', + [('Description', description), + ('Label', label, _encode_base64), + ('GeoReplicationEnabled', geo_replication_enabled, _lower)], + extended_properties) + + @staticmethod + def regenerate_keys_to_xml(key_type): + return AzureXmlSerializer.doc_from_data('RegenerateKeys', + [('KeyType', key_type)]) + + @staticmethod + def update_hosted_service_to_xml(label, description, extended_properties): + return AzureXmlSerializer.doc_from_data('UpdateHostedService', + [('Label', label, _encode_base64), + ('Description', description)], + extended_properties) + + @staticmethod + def create_hosted_service_to_xml(service_name, label, description, + location, affinity_group, + extended_properties): + return AzureXmlSerializer.doc_from_data( + 'CreateHostedService', + [('ServiceName', service_name), + ('Label', label, _encode_base64), + ('Description', description), + ('Location', location), + ('AffinityGroup', affinity_group)], + extended_properties) + + @staticmethod + def create_deployment_to_xml(name, package_url, label, configuration, + start_deployment, treat_warnings_as_error, + extended_properties): + return AzureXmlSerializer.doc_from_data( + 'CreateDeployment', + [('Name', name), + ('PackageUrl', package_url), + 
('Label', label, _encode_base64), + ('Configuration', configuration), + ('StartDeployment', + start_deployment, _lower), + ('TreatWarningsAsError', treat_warnings_as_error, _lower)], + extended_properties) + + @staticmethod + def swap_deployment_to_xml(production, source_deployment): + return AzureXmlSerializer.doc_from_data( + 'Swap', + [('Production', production), + ('SourceDeployment', source_deployment)]) + + @staticmethod + def update_deployment_status_to_xml(status): + return AzureXmlSerializer.doc_from_data( + 'UpdateDeploymentStatus', + [('Status', status)]) + + @staticmethod + def change_deployment_to_xml(configuration, treat_warnings_as_error, mode, + extended_properties): + return AzureXmlSerializer.doc_from_data( + 'ChangeConfiguration', + [('Configuration', configuration), + ('TreatWarningsAsError', treat_warnings_as_error, _lower), + ('Mode', mode)], + extended_properties) + + @staticmethod + def upgrade_deployment_to_xml(mode, package_url, configuration, label, + role_to_upgrade, force, extended_properties): + return AzureXmlSerializer.doc_from_data( + 'UpgradeDeployment', + [('Mode', mode), + ('PackageUrl', package_url), + ('Configuration', configuration), + ('Label', label, _encode_base64), + ('RoleToUpgrade', role_to_upgrade), + ('Force', force, _lower)], + extended_properties) + + @staticmethod + def rollback_upgrade_to_xml(mode, force): + return AzureXmlSerializer.doc_from_data( + 'RollbackUpdateOrUpgrade', + [('Mode', mode), + ('Force', force, _lower)]) + + @staticmethod + def walk_upgrade_domain_to_xml(upgrade_domain): + return AzureXmlSerializer.doc_from_data( + 'WalkUpgradeDomain', + [('UpgradeDomain', upgrade_domain)]) + + @staticmethod + def certificate_file_to_xml(data, certificate_format, password): + return AzureXmlSerializer.doc_from_data( + 'CertificateFile', + [('Data', data), + ('CertificateFormat', certificate_format), + ('Password', password)]) + + @staticmethod + def create_affinity_group_to_xml(name, label, description, 
location): + return AzureXmlSerializer.doc_from_data( + 'CreateAffinityGroup', + [('Name', name), + ('Label', label, _encode_base64), + ('Description', description), + ('Location', location)]) + + @staticmethod + def update_affinity_group_to_xml(label, description): + return AzureXmlSerializer.doc_from_data( + 'UpdateAffinityGroup', + [('Label', label, _encode_base64), + ('Description', description)]) + + @staticmethod + def subscription_certificate_to_xml(public_key, thumbprint, data): + return AzureXmlSerializer.doc_from_data( + 'SubscriptionCertificate', + [('SubscriptionCertificatePublicKey', public_key), + ('SubscriptionCertificateThumbprint', thumbprint), + ('SubscriptionCertificateData', data)]) + + @staticmethod + def os_image_to_xml(label, media_link, name, os): + return AzureXmlSerializer.doc_from_data( + 'OSImage', + [('Label', label), + ('MediaLink', media_link), + ('Name', name), + ('OS', os)]) + + @staticmethod + def data_virtual_hard_disk_to_xml(host_caching, disk_label, disk_name, lun, + logical_disk_size_in_gb, media_link, + source_media_link): + return AzureXmlSerializer.doc_from_data( + 'DataVirtualHardDisk', + [('HostCaching', host_caching), + ('DiskLabel', disk_label), + ('DiskName', disk_name), + ('Lun', lun), + ('LogicalDiskSizeInGB', logical_disk_size_in_gb), + ('MediaLink', media_link), + ('SourceMediaLink', source_media_link)]) + + @staticmethod + def disk_to_xml(has_operating_system, label, media_link, name, os): + return AzureXmlSerializer.doc_from_data( + 'Disk', + [('HasOperatingSystem', has_operating_system, _lower), + ('Label', label), + ('MediaLink', media_link), + ('Name', name), + ('OS', os)]) + + @staticmethod + def restart_role_operation_to_xml(): + return AzureXmlSerializer.doc_from_xml( + 'RestartRoleOperation', + 'RestartRoleOperation') + + @staticmethod + def shutdown_role_operation_to_xml(): + return AzureXmlSerializer.doc_from_xml( + 'ShutdownRoleOperation', + 'ShutdownRoleOperation') + + @staticmethod + def 
start_role_operation_to_xml(): + return AzureXmlSerializer.doc_from_xml( + 'StartRoleOperation', + 'StartRoleOperation') + + @staticmethod + def windows_configuration_to_xml(configuration): + xml = AzureXmlSerializer.data_to_xml( + [('ConfigurationSetType', configuration.configuration_set_type), + ('ComputerName', configuration.computer_name), + ('AdminPassword', configuration.admin_password), + ('ResetPasswordOnFirstLogon', + configuration.reset_password_on_first_logon, + _lower), + ('EnableAutomaticUpdates', + configuration.enable_automatic_updates, + _lower), + ('TimeZone', configuration.time_zone)]) + + if configuration.domain_join is not None: + xml += '' + xml += '' + xml += AzureXmlSerializer.data_to_xml( + [('Domain', configuration.domain_join.credentials.domain), + ('Username', configuration.domain_join.credentials.username), + ('Password', configuration.domain_join.credentials.password)]) + xml += '' + xml += AzureXmlSerializer.data_to_xml( + [('JoinDomain', configuration.domain_join.join_domain), + ('MachineObjectOU', + configuration.domain_join.machine_object_ou)]) + xml += '' + if configuration.stored_certificate_settings is not None: + xml += '' + for cert in configuration.stored_certificate_settings: + xml += '' + xml += AzureXmlSerializer.data_to_xml( + [('StoreLocation', cert.store_location), + ('StoreName', cert.store_name), + ('Thumbprint', cert.thumbprint)]) + xml += '' + xml += '' + return xml + + @staticmethod + def linux_configuration_to_xml(configuration): + xml = AzureXmlSerializer.data_to_xml( + [('ConfigurationSetType', configuration.configuration_set_type), + ('HostName', configuration.host_name), + ('UserName', configuration.user_name), + ('UserPassword', configuration.user_password), + ('DisableSshPasswordAuthentication', + configuration.disable_ssh_password_authentication, + _lower)]) + + if configuration.ssh is not None: + xml += '' + xml += '' + for key in configuration.ssh.public_keys: + xml += '' + xml += 
AzureXmlSerializer.data_to_xml( + [('Fingerprint', key.fingerprint), + ('Path', key.path)]) + xml += '' + xml += '' + xml += '' + for key in configuration.ssh.key_pairs: + xml += '' + xml += AzureXmlSerializer.data_to_xml( + [('Fingerprint', key.fingerprint), + ('Path', key.path)]) + xml += '' + xml += '' + xml += '' + return xml + + @staticmethod + def network_configuration_to_xml(configuration): + xml = AzureXmlSerializer.data_to_xml( + [('ConfigurationSetType', configuration.configuration_set_type)]) + xml += '' + for endpoint in configuration.input_endpoints: + xml += '' + xml += AzureXmlSerializer.data_to_xml( + [('LoadBalancedEndpointSetName', + endpoint.load_balanced_endpoint_set_name), + ('LocalPort', endpoint.local_port), + ('Name', endpoint.name), + ('Port', endpoint.port)]) + + if endpoint.load_balancer_probe.path or\ + endpoint.load_balancer_probe.port or\ + endpoint.load_balancer_probe.protocol: + xml += '' + xml += AzureXmlSerializer.data_to_xml( + [('Path', endpoint.load_balancer_probe.path), + ('Port', endpoint.load_balancer_probe.port), + ('Protocol', endpoint.load_balancer_probe.protocol)]) + xml += '' + + xml += AzureXmlSerializer.data_to_xml( + [('Protocol', endpoint.protocol), + ('EnableDirectServerReturn', + endpoint.enable_direct_server_return, + _lower)]) + + xml += '' + xml += '' + xml += '' + for name in configuration.subnet_names: + xml += AzureXmlSerializer.data_to_xml([('SubnetName', name)]) + xml += '' + return xml + + @staticmethod + def role_to_xml(availability_set_name, data_virtual_hard_disks, + network_configuration_set, os_virtual_hard_disk, role_name, + role_size, role_type, system_configuration_set): + xml = AzureXmlSerializer.data_to_xml([('RoleName', role_name), + ('RoleType', role_type)]) + + xml += '' + + if system_configuration_set is not None: + xml += '' + if isinstance(system_configuration_set, WindowsConfigurationSet): + xml += AzureXmlSerializer.windows_configuration_to_xml( + system_configuration_set) + elif 
isinstance(system_configuration_set, LinuxConfigurationSet): + xml += AzureXmlSerializer.linux_configuration_to_xml( + system_configuration_set) + xml += '' + + if network_configuration_set is not None: + xml += '' + xml += AzureXmlSerializer.network_configuration_to_xml( + network_configuration_set) + xml += '' + + xml += '' + + if availability_set_name is not None: + xml += AzureXmlSerializer.data_to_xml( + [('AvailabilitySetName', availability_set_name)]) + + if data_virtual_hard_disks is not None: + xml += '' + for hd in data_virtual_hard_disks: + xml += '' + xml += AzureXmlSerializer.data_to_xml( + [('HostCaching', hd.host_caching), + ('DiskLabel', hd.disk_label), + ('DiskName', hd.disk_name), + ('Lun', hd.lun), + ('LogicalDiskSizeInGB', hd.logical_disk_size_in_gb), + ('MediaLink', hd.media_link)]) + xml += '' + xml += '' + + if os_virtual_hard_disk is not None: + xml += '' + xml += AzureXmlSerializer.data_to_xml( + [('HostCaching', os_virtual_hard_disk.host_caching), + ('DiskLabel', os_virtual_hard_disk.disk_label), + ('DiskName', os_virtual_hard_disk.disk_name), + ('MediaLink', os_virtual_hard_disk.media_link), + ('SourceImageName', os_virtual_hard_disk.source_image_name)]) + xml += '' + + if role_size is not None: + xml += AzureXmlSerializer.data_to_xml([('RoleSize', role_size)]) + + return xml + + @staticmethod + def add_role_to_xml(role_name, system_configuration_set, + os_virtual_hard_disk, role_type, + network_configuration_set, availability_set_name, + data_virtual_hard_disks, role_size): + xml = AzureXmlSerializer.role_to_xml( + availability_set_name, + data_virtual_hard_disks, + network_configuration_set, + os_virtual_hard_disk, + role_name, + role_size, + role_type, + system_configuration_set) + return AzureXmlSerializer.doc_from_xml('PersistentVMRole', xml) + + @staticmethod + def update_role_to_xml(role_name, os_virtual_hard_disk, role_type, + network_configuration_set, availability_set_name, + data_virtual_hard_disks, role_size): + xml = 
AzureXmlSerializer.role_to_xml( + availability_set_name, + data_virtual_hard_disks, + network_configuration_set, + os_virtual_hard_disk, + role_name, + role_size, + role_type, + None) + return AzureXmlSerializer.doc_from_xml('PersistentVMRole', xml) + + @staticmethod + def capture_role_to_xml(post_capture_action, target_image_name, + target_image_label, provisioning_configuration): + xml = AzureXmlSerializer.data_to_xml( + [('OperationType', 'CaptureRoleOperation'), + ('PostCaptureAction', post_capture_action)]) + + if provisioning_configuration is not None: + xml += '' + if isinstance(provisioning_configuration, WindowsConfigurationSet): + xml += AzureXmlSerializer.windows_configuration_to_xml( + provisioning_configuration) + elif isinstance(provisioning_configuration, LinuxConfigurationSet): + xml += AzureXmlSerializer.linux_configuration_to_xml( + provisioning_configuration) + xml += '' + + xml += AzureXmlSerializer.data_to_xml( + [('TargetImageLabel', target_image_label), + ('TargetImageName', target_image_name)]) + + return AzureXmlSerializer.doc_from_xml('CaptureRoleOperation', xml) + + @staticmethod + def virtual_machine_deployment_to_xml(deployment_name, deployment_slot, + label, role_name, + system_configuration_set, + os_virtual_hard_disk, role_type, + network_configuration_set, + availability_set_name, + data_virtual_hard_disks, role_size, + virtual_network_name): + xml = AzureXmlSerializer.data_to_xml([('Name', deployment_name), + ('DeploymentSlot', deployment_slot), + ('Label', label)]) + xml += '' + xml += '' + xml += AzureXmlSerializer.role_to_xml( + availability_set_name, + data_virtual_hard_disks, + network_configuration_set, + os_virtual_hard_disk, + role_name, + role_size, + role_type, + system_configuration_set) + xml += '' + xml += '' + + if virtual_network_name is not None: + xml += AzureXmlSerializer.data_to_xml( + [('VirtualNetworkName', virtual_network_name)]) + + return AzureXmlSerializer.doc_from_xml('Deployment', xml) + + @staticmethod + 
def data_to_xml(data): + '''Creates an xml fragment from the specified data. + data: Array of tuples, where first: xml element name + second: xml element text + third: conversion function + ''' + xml = '' + for element in data: + name = element[0] + val = element[1] + if len(element) > 2: + converter = element[2] + else: + converter = None + + if val is not None: + if converter is not None: + text = _str(converter(_str(val))) + else: + text = _str(val) + + xml += ''.join(['<', name, '>', text, '']) + return xml + + @staticmethod + def doc_from_xml(document_element_name, inner_xml): + '''Wraps the specified xml in an xml root element with default azure + namespaces''' + xml = ''.join(['<', document_element_name, + ' xmlns:i="http://www.w3.org/2001/XMLSchema-instance"', + ' xmlns="http://schemas.microsoft.com/windowsazure">']) + xml += inner_xml + xml += ''.join(['']) + return xml + + @staticmethod + def doc_from_data(document_element_name, data, extended_properties=None): + xml = AzureXmlSerializer.data_to_xml(data) + if extended_properties is not None: + xml += AzureXmlSerializer.extended_properties_dict_to_xml_fragment( + extended_properties) + return AzureXmlSerializer.doc_from_xml(document_element_name, xml) + + @staticmethod + def extended_properties_dict_to_xml_fragment(extended_properties): + xml = '' + if extended_properties is not None and len(extended_properties) > 0: + xml += '' + for key, val in extended_properties.items(): + xml += ''.join(['', + '', + _str(key), + '', + '', + _str(val), + '', + '']) + xml += '' + return xm + +""" +Data Classes +""" + +class WindowsAzureData(object): + + ''' This is the base of data class. + It is only used to check whether it is instance or not. 
''' + pass + +class OSVirtualHardDisk(WindowsAzureData): + + def __init__(self, source_image_name=None, media_link=None, + host_caching=None, disk_label=None, disk_name=None): + self.source_image_name = source_image_name + self.media_link = media_link + self.host_caching = host_caching + self.disk_label = disk_label + self.disk_name = disk_name + self.os = u'' # undocumented, not used when adding a role + +class LinuxConfigurationSet(WindowsAzureData): + + def __init__(self, host_name=None, user_name=None, user_password=None, + disable_ssh_password_authentication=None): + self.configuration_set_type = u'LinuxProvisioningConfiguration' + self.host_name = host_name + self.user_name = user_name + self.user_password = user_password + self.disable_ssh_password_authentication =\ + disable_ssh_password_authentication + self.ssh = SSH() + +class WindowsConfigurationSet(WindowsAzureData): + + def __init__(self, computer_name=None, admin_password=None, + reset_password_on_first_logon=None, + enable_automatic_updates=None, time_zone=None): + self.configuration_set_type = u'WindowsProvisioningConfiguration' + self.computer_name = computer_name + self.admin_password = admin_password + self.reset_password_on_first_logon = reset_password_on_first_logon + self.enable_automatic_updates = enable_automatic_updates + self.time_zone = time_zone + self.domain_join = DomainJoin() + self.stored_certificate_settings = StoredCertificateSettings() + +class SSH(WindowsAzureData): + + def __init__(self): + self.public_keys = PublicKeys() + self.key_pairs = KeyPairs() + +class PublicKeys(WindowsAzureData): + + def __init__(self): + self.public_keys = _list_of(PublicKey) + + def __iter__(self): + return iter(self.public_keys) + + def __len__(self): + return len(self.public_keys) + + def __getitem__(self, index): + return self.public_keys[index] + +class PublicKey(WindowsAzureData): + + def __init__(self, fingerprint=u'', path=u''): + self.fingerprint = fingerprint + self.path = path + +class 
KeyPairs(WindowsAzureData): + + def __init__(self): + self.key_pairs = _list_of(KeyPair) + + def __iter__(self): + return iter(self.key_pairs) + + def __len__(self): + return len(self.key_pairs) + + def __getitem__(self, index): + return self.key_pairs[index] + +class KeyPair(WindowsAzureData): + + def __init__(self, fingerprint=u'', path=u''): + self.fingerprint = fingerprint + self.path = path + +class LoadBalancerProbe(WindowsAzureData): + + def __init__(self): + self.path = u'' + self.port = u'' + self.protocol = u'' + +class ConfigurationSet(WindowsAzureData): + + def __init__(self): + self.configuration_set_type = u'' + self.role_type = u'' + self.input_endpoints = ConfigurationSetInputEndpoints() + self.subnet_names = _scalar_list_of(str, 'SubnetName') + +class ConfigurationSetInputEndpoints(WindowsAzureData): + + def __init__(self): + self.input_endpoints = _list_of( + ConfigurationSetInputEndpoint, 'InputEndpoint') + + def __iter__(self): + return iter(self.input_endpoints) + + def __len__(self): + return len(self.input_endpoints) + + def __getitem__(self, index): + return self.input_endpoints[index] + +class ConfigurationSetInputEndpoint(WindowsAzureData): + + def __init__(self, name=u'', protocol=u'', port=u'', local_port=u'', + load_balanced_endpoint_set_name=u'', + enable_direct_server_return=False): + self.enable_direct_server_return = enable_direct_server_return + self.load_balanced_endpoint_set_name = load_balanced_endpoint_set_name + self.local_port = local_port + self.name = name + self.port = port + self.load_balancer_probe = LoadBalancerProbe() + self.protocol = protocol + +class Locations(WindowsAzureData): + + def __init__(self): + self.locations = _list_of(Location) + + def __iter__(self): + return iter(self.locations) + + def __len__(self): + return len(self.locations) + + def __getitem__(self, index): + return self.locations[index] + +class Location(WindowsAzureData): + + def __init__(self): + self.name = u'' + self.display_name = u'' + 
self.available_services = _scalar_list_of(str, 'AvailableService') + +class Images(WindowsAzureData): + + def __init__(self): + self.images = _list_of(OSImage) + + def __iter__(self): + return iter(self.images) + + def __len__(self): + return len(self.images) + + def __getitem__(self, index): + return self.images[index] + +class HostedServices(WindowsAzureData): + + def __init__(self): + self.hosted_services = _list_of(HostedService) + + def __iter__(self): + return iter(self.hosted_services) + + def __len__(self): + return len(self.hosted_services) + + def __getitem__(self, index): + return self.hosted_services[index] + +class HostedService(WindowsAzureData): + + def __init__(self): + self.url = u'' + self.service_name = u'' + self.hosted_service_properties = HostedServiceProperties() + self.deployments = Deployments() + +class HostedServiceProperties(WindowsAzureData): + + def __init__(self): + self.description = u'' + self.location = u'' + self.affinity_group = u'' + self.label = _Base64String() + self.status = u'' + self.date_created = u'' + self.date_last_modified = u'' + self.extended_properties = _dict_of( + 'ExtendedProperty', 'Name', 'Value') + +class Deployments(WindowsAzureData): + + def __init__(self): + self.deployments = _list_of(Deployment) + + def __iter__(self): + return iter(self.deployments) + + def __len__(self): + return len(self.deployments) + + def __getitem__(self, index): + return self.deployments[index] + +class Deployment(WindowsAzureData): + + def __init__(self): + self.name = u'' + self.deployment_slot = u'' + self.private_id = u'' + self.status = u'' + self.label = _Base64String() + self.url = u'' + self.configuration = _Base64String() + self.role_instance_list = RoleInstanceList() + self.upgrade_status = UpgradeStatus() + self.upgrade_domain_count = u'' + self.role_list = RoleList() + self.sdk_version = u'' + self.input_endpoint_list = InputEndpoints() + self.locked = False + self.rollback_allowed = False + 
self.persistent_vm_downtime_info = PersistentVMDowntimeInfo() + self.created_time = u'' + self.last_modified_time = u'' + self.extended_properties = _dict_of( + 'ExtendedProperty', 'Name', 'Value') + +class RoleInstanceList(WindowsAzureData): + + def __init__(self): + self.role_instances = _list_of(RoleInstance) + + def __iter__(self): + return iter(self.role_instances) + + def __len__(self): + return len(self.role_instances) + + def __getitem__(self, index): + return self.role_instances[index] + +class RoleInstance(WindowsAzureData): + + def __init__(self): + self.role_name = u'' + self.instance_name = u'' + self.instance_status = u'' + self.instance_upgrade_domain = 0 + self.instance_fault_domain = 0 + self.instance_size = u'' + self.instance_state_details = u'' + self.instance_error_code = u'' + self.ip_address = u'' + self.instance_endpoints = InstanceEndpoints() + self.power_state = u'' + self.fqdn = u'' + self.host_name = u'' + +class InstanceEndpoints(WindowsAzureData): + + def __init__(self): + self.instance_endpoints = _list_of(InstanceEndpoint) + + def __iter__(self): + return iter(self.instance_endpoints) + + def __len__(self): + return len(self.instance_endpoints) + + def __getitem__(self, index): + return self.instance_endpoints[index] + +class InstanceEndpoint(WindowsAzureData): + + def __init__(self): + self.name = u'' + self.vip = u'' + self.public_port = u'' + self.local_port = u'' + self.protocol = u'' + +class InputEndpoints(WindowsAzureData): + + def __init__(self): + self.input_endpoints = _list_of(InputEndpoint) + + def __iter__(self): + return iter(self.input_endpoints) - Connection class for Azure Compute Driver. 
- """ + def __len__(self): + return len(self.input_endpoints) + def __getitem__(self, index): + return self.input_endpoints[index] - #def __init__(self, user_id, key, secure): - # super(AzureConnection, self).__init__(user_id, key, secure=secure, **kwargs) +class InputEndpoint(WindowsAzureData): + + def __init__(self): + self.role_name = u'' + self.vip = u'' + self.port = u'' + +class RoleList(WindowsAzureData): + + def __init__(self): + self.roles = _list_of(Role) + + def __iter__(self): + return iter(self.roles) + + def __len__(self): + return len(self.roles) + + def __getitem__(self, index): + return self.roles[index] + +class Role(WindowsAzureData): + + def __init__(self): + self.role_name = u'' + self.os_version = u'' + +class PersistentVMDowntimeInfo(WindowsAzureData): + + def __init__(self): + self.start_time = u'' + self.end_time = u'' + self.status = u'' + +class AsynchronousOperationResult(WindowsAzureData): + + def __init__(self, request_id=None): + self.request_id = request_id + +class AzureHTTPRequest(object): + + '''Represents an HTTP Request. An HTTP Request consists of the following + attributes: + + host: the host name to connect to + method: the method to use to connect (string such as GET, POST, PUT, etc.) + path: the uri fragment + query: query parameters specified as a list of (name, value) pairs + headers: header values specified as (name, value) pairs + body: the body of the request. + protocol_override: + specify to use this protocol instead of the global one stored in + _HTTPClient. + ''' + + def __init__(self): + self.host = '' + self.method = '' + self.path = '' + self.query = [] # list of (name, value) + self.headers = [] # list of (header name, header value) + self.body = '' + self.protocol_override = None + +class AzureHTTPResponse(object): + + """Represents a response from an HTTP request. 
An HTTPResponse has the + following attributes: + + status: the status code of the response + message: the message + headers: the returned headers, as a list of (name, value) pairs + body: the body of the response + """ + + def __init__(self, status, message, headers, body): + self.status = status + self.message = message + self.headers = headers + self.body = body + +class AzureHTTPError(Exception): + + ''' HTTP Exception when response status code >= 300 ''' + + def __init__(self, status, message, respheader, respbody): + '''Creates a new HTTPError with the specified status, message, + response headers and body''' + self.status = status + self.respheader = respheader + self.respbody = respbody + Exception.__init__(self, message) + +""" +Helper classes. +""" + +class _Base64String(str): + pass + +class _list_of(list): + + """a list which carries with it the type that's expected to go in it. + Used for deserializaion and construction of the lists""" + + def __init__(self, list_type, xml_element_name=None): + self.list_type = list_type + if xml_element_name is None: + self.xml_element_name = list_type.__name__ + else: + self.xml_element_name = xml_element_name + super(_list_of, self).__init__() + +class _scalar_list_of(list): + + """a list of scalar types which carries with it the type that's + expected to go in it along with its xml element name. + Used for deserializaion and construction of the lists""" + + def __init__(self, list_type, xml_element_name): + self.list_type = list_type + self.xml_element_name = xml_element_name + super(_scalar_list_of, self).__init__() + +class _dict_of(dict): + + """a dict which carries with it the xml element names for key,val. 
+ Used for deserializaion and construction of the lists""" + + def __init__(self, pair_xml_element_name, key_xml_element_name, + value_xml_element_name): + self.pair_xml_element_name = pair_xml_element_name + self.key_xml_element_name = key_xml_element_name + self.value_xml_element_name = value_xml_element_name + super(_dict_of, self).__init__() class AzureNodeDriver(NodeDriver): @@ -167,10 +1182,8 @@ def list_images(self): Lists all images :rtype: ``list`` of :class:`NodeImage` - """ - sms = ServiceManagementService(subscription_id, certificate_path) - - data = sms.list_os_images() + """ + data = self._perform_get(self._get_image_path(), Images) return [self._to_image(i) for i in data] @@ -180,9 +1193,7 @@ def list_locations(self): :rtype: ``list`` of :class:`NodeLocation` """ - sms = ServiceManagementService(subscription_id, certificate_path) - - data = sms.list_locations() + data = self._perform_get('/' + subscription_id + '/locations', Locations) return [self._to_location(l) for l in data] @@ -203,9 +1214,10 @@ def list_nodes(self, ex_cloud_service_name=None): if not ex_cloud_service_name: raise ValueError("ex_cloud_service_name is required.") - sms = ServiceManagementService(subscription_id, certificate_path) - - data = sms.get_hosted_service_properties(service_name=ex_cloud_service_name,embed_detail=True) + data = self._perform_get( + self._get_hosted_service_path(ex_cloud_service_name) + + '?embed-detail=True', + HostedService) try: return [self._to_node(n) for n in data.deployments[0].role_instance_list] @@ -230,7 +1242,6 @@ def reboot_node(self, node, ex_cloud_service_name=None, ex_deployment_slot=None) :rtype: ``list`` of :class:`Node` """ - sms = ServiceManagementService(subscription_id, certificate_path) if not ex_cloud_service_name: raise ValueError("ex_cloud_service_name is required.") @@ -239,18 +1250,20 @@ def reboot_node(self, node, ex_cloud_service_name=None, ex_deployment_slot=None) ex_deployment_slot = "production" _deployment_name = 
self._get_deployment(service_name=ex_cloud_service_name,deployment_slot=ex_deployment_slot).name + print _deployment_name try: - result = sms.reboot_role_instance( - service_name=ex_cloud_service_name, - deployment_name=_deployment_name, - role_instance_name=node.id - ) + result = self._perform_post( + self._get_deployment_path_using_name( + ex_cloud_service_name, _deployment_name) + \ + '/roleinstances/' + _str(node.id) + \ + '?comp=reboot', '', async=True) if result.request_id: return True else: return False - except Exception: + except Exception, e: + print e return False def list_volumes(self, node=None): @@ -479,24 +1492,6 @@ def create_node(self, ex_cloud_service_name=None, **kwargs): driver=self.connection.driver ) - #operation_status = self.sms.get_operation_status(result.request_id) - - #timeout = 60 * 5 - #waittime = 0 - #interval = 5 - - #while operation_status.status == "InProgress" and waittime < timeout: - # operation_status = self.sms.get_operation_status(result.request_id) - # if operation_status.status == "Succeeded": - # break - - # waittime += interval - # time.sleep(interval) - - #if operation_status.status == "Failed": - # raise Exception(operation_status.error.message) - #return - def destroy_node(self, node, ex_cloud_service_name=None, ex_deployment_slot=None): """Remove Azure Virtual Machine @@ -583,7 +1578,16 @@ def _to_node(self, data): for port in data.instance_endpoints: if port.name == 'Remote Desktop': remote_desktop_port = port.public_port + else: + remote_desktop_port = [] + + if port.name == "SSH": + ssh_port = port.public_port + else: + ssh_port = [] + # When servers are Linux, this fails due to the remote_desktop_port + # therefore we need to add a check in here. 
return Node( id=data.instance_name, name=data.instance_name, @@ -593,6 +1597,7 @@ def _to_node(self, data): driver=self.connection.driver, extra={ 'remote_desktop_port': remote_desktop_port, + 'ssh_port': ssh_port, 'power_state': data.power_state, 'instance_size': data.instance_size}) @@ -706,9 +1711,10 @@ def _get_deployment(self, **kwargs): _service_name = kwargs['service_name'] _deployment_slot = kwargs['deployment_slot'] - sms = ServiceManagementService(subscription_id, certificate_path) - - return sms.get_deployment_by_slot(service_name=_service_name,deployment_slot=_deployment_slot) + return self._perform_get( + self._get_deployment_path_using_slot( + _service_name, _deployment_slot), + Deployment) def _get_cloud_service_location(self, service_name=None): @@ -771,4 +1777,417 @@ def _create_storage_account(self, **kwargs): waittime += interval time.sleep(interval) - return \ No newline at end of file + return + + def _perform_get(self, path, response_type): + request = AzureHTTPRequest() + request.method = 'GET' + request.host = azure_service_management_host + request.path = path + request.path, request.query = self._update_request_uri_query(request) + request.headers = self._update_management_header(request) + response = self._perform_request(request) + + if response_type is not None: + return self._parse_response(response, response_type) + + return response + + def _perform_post(self, path, body, response_type=None, async=False): + request = AzureHTTPRequest() + request.method = 'POST' + request.host = azure_service_management_host + request.path = path + request.body = self._get_request_body(body) + request.path, request.query = self._update_request_uri_query(request) + request.headers = self._update_management_header(request) + response = self._perform_request(request) + + if response_type is not None: + return _parse_response(response, response_type) + + if async: + return self._parse_response_for_async_op(response) + + return None + + def 
_perform_request(self, request): + + connection = self.get_connection() + + try: + connection.putrequest(request.method, request.path) + + self.send_request_headers(connection, request.headers) + self.send_request_body(connection, request.body) + + resp = connection.getresponse() + status = int(resp.status) + message = resp.reason + respheader = headers = resp.getheaders() + + # for consistency across platforms, make header names lowercase + for i, value in enumerate(headers): + headers[i] = (value[0].lower(), value[1]) + + respbody = None + if resp.length is None: + respbody = resp.read() + elif resp.length > 0: + respbody = resp.read(resp.length) + + response = AzureHTTPResponse( + int(resp.status), resp.reason, headers, respbody) + if status >= 300: + raise AzureHTTPError(status, message, + respheader, respbody) + + return response + + finally: + connection.close() + + def _update_request_uri_query(self, request): + '''pulls the query string out of the URI and moves it into + the query portion of the request object. If there are already + query parameters on the request the parameters in the URI will + appear after the existing parameters''' + + if '?' in request.path: + request.path, _, query_string = request.path.partition('?') + if query_string: + query_params = query_string.split('&') + for query in query_params: + if '=' in query: + name, _, value = query.partition('=') + request.query.append((name, value)) + + request.path = url_quote(request.path, '/()$=\',') + + # add encoded queries to request.path. + if request.query: + request.path += '?' + for name, value in request.query: + if value is not None: + request.path += name + '=' + url_quote(value, '/()$=\',') + '&' + request.path = request.path[:-1] + + return request.path, request.query + + def _update_management_header(self, request): + ''' Add additional headers for management. 
''' + + if request.method in ['PUT', 'POST', 'MERGE', 'DELETE']: + request.headers.append(('Content-Length', str(len(request.body)))) + + # append additional headers base on the service + request.headers.append(('x-ms-version', X_MS_VERSION)) + + # if it is not GET or HEAD request, must set content-type. + if not request.method in ['GET', 'HEAD']: + for name, _ in request.headers: + if 'content-type' == name.lower(): + break + else: + request.headers.append( + ('Content-Type', + 'application/atom+xml;type=entry;charset=utf-8')) + + return request.headers + + def send_request_headers(self, connection, request_headers): + for name, value in request_headers: + if value: + connection.putheader(name, value) + + connection.putheader('User-Agent', _USER_AGENT_STRING) + connection.endheaders() + + def send_request_body(self, connection, request_body): + if request_body: + assert isinstance(request_body, bytes) + connection.send(request_body) + elif (not isinstance(connection, HTTPSConnection) and + not isinstance(connection, HTTPConnection)): + connection.send(None) + + def _parse_response(self, response, return_type): + ''' + Parse the HTTPResponse's body and fill all the data into a class of + return_type. 
+ ''' + return self._parse_response_body_from_xml_text(response.body, return_type) + + def _parse_response_body_from_xml_text(self, respbody, return_type): + ''' + parse the xml and fill all the data into a class of return_type + ''' + doc = minidom.parseString(respbody) + return_obj = return_type() + for node in self._get_child_nodes(doc, return_type.__name__): + self._fill_data_to_return_object(node, return_obj) + + return return_obj + + def _get_child_nodes(self, node, tagName): + return [childNode for childNode in node.getElementsByTagName(tagName) + if childNode.parentNode == node] + + def _fill_data_to_return_object(self, node, return_obj): + members = dict(vars(return_obj)) + for name, value in members.items(): + if isinstance(value, _list_of): + setattr(return_obj, + name, + self._fill_list_of(node, + value.list_type, + value.xml_element_name)) + elif isinstance(value, _scalar_list_of): + setattr(return_obj, + name, + self._fill_scalar_list_of(node, + value.list_type, + self._get_serialization_name(name), + value.xml_element_name)) + elif isinstance(value, _dict_of): + setattr(return_obj, + name, + self._fill_dict_of(node, + self._get_serialization_name(name), + value.pair_xml_element_name, + value.key_xml_element_name, + value.value_xml_element_name)) + elif isinstance(value, WindowsAzureData): + setattr(return_obj, + name, + self._fill_instance_child(node, name, value.__class__)) + elif isinstance(value, dict): + setattr(return_obj, + name, + self._fill_dict(node, self._get_serialization_name(name))) + elif isinstance(value, _Base64String): + value = self._fill_data_minidom(node, name, '') + if value is not None: + value = self._decode_base64_to_text(value) + # always set the attribute, so we don't end up returning an object + # with type _Base64String + setattr(return_obj, name, value) + else: + value = self._fill_data_minidom(node, name, value) + if value is not None: + setattr(return_obj, name, value) + + def _fill_list_of(self, xmldoc, element_type, 
xml_element_name): + xmlelements = self._get_child_nodes(xmldoc, xml_element_name) + return [self._parse_response_body_from_xml_node(xmlelement, element_type) \ + for xmlelement in xmlelements] + + def _parse_response_body_from_xml_node(self, node, return_type): + ''' + parse the xml and fill all the data into a class of return_type + ''' + return_obj = return_type() + self._fill_data_to_return_object(node, return_obj) + + return return_obj + + def _fill_scalar_list_of(self, xmldoc, element_type, parent_xml_element_name, + xml_element_name): + xmlelements = self._get_child_nodes(xmldoc, parent_xml_element_name) + if xmlelements: + xmlelements = self._get_child_nodes(xmlelements[0], xml_element_name) + return [self._get_node_value(xmlelement, element_type) \ + for xmlelement in xmlelements] + + def _get_node_value(self, xmlelement, data_type): + value = xmlelement.firstChild.nodeValue + if data_type is datetime: + return _to_datetime(value) + elif data_type is bool: + return value.lower() != 'false' + else: + return data_type(value) + + def _get_serialization_name(self,element_name): + """converts a Python name into a serializable name""" + known = _KNOWN_SERIALIZATION_XFORMS.get(element_name) + if known is not None: + return known + + if element_name.startswith('x_ms_'): + return element_name.replace('_', '-') + if element_name.endswith('_id'): + element_name = element_name.replace('_id', 'ID') + for name in ['content_', 'last_modified', 'if_', 'cache_control']: + if element_name.startswith(name): + element_name = element_name.replace('_', '-_') + + return ''.join(name.capitalize() for name in element_name.split('_')) + + def _fill_dict_of(self, xmldoc, parent_xml_element_name, pair_xml_element_name, + key_xml_element_name, value_xml_element_name): + return_obj = {} + + xmlelements = self._get_child_nodes(xmldoc, parent_xml_element_name) + if xmlelements: + xmlelements = self._get_child_nodes(xmlelements[0], pair_xml_element_name) + for pair in xmlelements: + keys 
= self._get_child_nodes(pair, key_xml_element_name) + values = self._get_child_nodes(pair, value_xml_element_name) + if keys and values: + key = keys[0].firstChild.nodeValue + value = values[0].firstChild.nodeValue + return_obj[key] = value + + return return_obj + + def _fill_instance_child(self, xmldoc, element_name, return_type): + '''Converts a child of the current dom element to the specified type. + ''' + xmlelements = self._get_child_nodes( + xmldoc, self._get_serialization_name(element_name)) + + if not xmlelements: + return None + + return_obj = return_type() + self._fill_data_to_return_object(xmlelements[0], return_obj) + + return return_obj + + def _fill_dict(self, xmldoc, element_name): + xmlelements = self._get_child_nodes(xmldoc, element_name) + if xmlelements: + return_obj = {} + for child in xmlelements[0].childNodes: + if child.firstChild: + return_obj[child.nodeName] = child.firstChild.nodeValue + return return_obj + + def _encode_base64(dself, ata): + if isinstance(data, _unicode_type): + data = data.encode('utf-8') + encoded = base64.b64encode(data) + return encoded.decode('utf-8') + + def _decode_base64_to_bytes(self, data): + if isinstance(data, _unicode_type): + data = data.encode('utf-8') + return base64.b64decode(data) + + def _decode_base64_to_text(self, data): + decoded_bytes = self._decode_base64_to_bytes(data) + return decoded_bytes.decode('utf-8') + + def _fill_data_minidom(self, xmldoc, element_name, data_member): + xmlelements = self._get_child_nodes( + xmldoc, self._get_serialization_name(element_name)) + + if not xmlelements or not xmlelements[0].childNodes: + return None + + value = xmlelements[0].firstChild.nodeValue + + if data_member is None: + return value + elif isinstance(data_member, datetime): + return self._to_datetime(value) + elif type(data_member) is bool: + return value.lower() != 'false' + else: + return type(data_member)(value) + + def _to_datetime(self, strtime): + return datetime.strptime(strtime, 
"%Y-%m-%dT%H:%M:%S.%f") + + def _get_request_body(self, request_body): + if request_body is None: + return b'' + + if isinstance(request_body, WindowsAzureData): + request_body = _convert_class_to_xml(request_body) + + if isinstance(request_body, bytes): + return request_body + + if isinstance(request_body, _unicode_type): + return request_body.encode('utf-8') + + request_body = str(request_body) + if isinstance(request_body, _unicode_type): + return request_body.encode('utf-8') + + return request_body + + def _convert_class_to_xml(self, source, xml_prefix=True): + if source is None: + return '' + + xmlstr = '' + if xml_prefix: + xmlstr = '' + + if isinstance(source, list): + for value in source: + xmlstr += _convert_class_to_xml(value, False) + elif isinstance(source, WindowsAzureData): + class_name = source.__class__.__name__ + xmlstr += '<' + class_name + '>' + for name, value in vars(source).items(): + if value is not None: + if isinstance(value, list) or \ + isinstance(value, WindowsAzureData): + xmlstr += _convert_class_to_xml(value, False) + else: + xmlstr += ('<' + self._get_serialization_name(name) + '>' + + xml_escape(str(value)) + '') + xmlstr += '' + return xmlstr + + def _parse_response_for_async_op(self, response): + if response is None: + return None + + result = AsynchronousOperationResult() + if response.headers: + for name, value in response.headers: + if name.lower() == 'x-ms-request-id': + result.request_id = value + + return result + + def _get_deployment_path_using_name(self, service_name, + deployment_name=None): + return self._get_path('services/hostedservices/' + _str(service_name) + + '/deployments', deployment_name) + + def _get_path(self, resource, name): + path = '/' + subscription_id + '/' + resource + if name is not None: + path += '/' + _str(name) + return path + + def _lower(self, text): + return text.lower() + + def _get_image_path(self, image_name=None): + return self._get_path('services/images', image_name) + + def 
_get_hosted_service_path(self, service_name=None): + return self._get_path('services/hostedservices', service_name) + + def _get_deployment_path_using_slot(self, service_name, slot=None): + return self._get_path('services/hostedservices/' + _str(service_name) + + '/deploymentslots', slot) + + def get_connection(self): + certificate_path = "/Users/baldwin/.azure/managementCertificate.pem" + port = HTTPS_PORT + + connection = HTTPSConnection( + azure_service_management_host, + int(port), + cert_file=certificate_path) + + return connection \ No newline at end of file From 16dfe96bc1c23f6ea73dd3bd8ac8f553fb59665e Mon Sep 17 00:00:00 2001 From: Matt Baldwin Date: Tue, 20 May 2014 12:18:22 -0700 Subject: [PATCH 278/315] Re-factored list_volumes. --- libcloud/compute/drivers/azure_compute.py | 3262 +++++++++++---------- 1 file changed, 1634 insertions(+), 1628 deletions(-) diff --git a/libcloud/compute/drivers/azure_compute.py b/libcloud/compute/drivers/azure_compute.py index f0a3e4427c..75b7e7fe5e 100644 --- a/libcloud/compute/drivers/azure_compute.py +++ b/libcloud/compute/drivers/azure_compute.py @@ -188,2006 +188,2012 @@ class AzureConnection(ConnectionUserAndKey): Connection class for Azure Compute Driver. 
""" -""" -XML Serializer -""" -class AzureXmlSerializer(): +class AzureNodeDriver(NodeDriver): + + _instance_types = AZURE_COMPUTE_INSTANCE_TYPES + _blob_url = ".blob.core.windows.net" + features = {'create_node': ['password']} + service_location = collections.namedtuple('service_location',['is_affinity_group', 'service_location']) + sms = ServiceManagementService(subscription_id, certificate_path) - @staticmethod - def create_storage_service_input_to_xml(service_name, description, label, - affinity_group, location, - geo_replication_enabled, - extended_properties): - return AzureXmlSerializer.doc_from_data( - 'CreateStorageServiceInput', - [('ServiceName', service_name), - ('Description', description), - ('Label', label, _encode_base64), - ('AffinityGroup', affinity_group), - ('Location', location), - ('GeoReplicationEnabled', geo_replication_enabled, _lower)], - extended_properties) + def list_sizes(self): + """ + Lists all sizes - @staticmethod - def update_storage_service_input_to_xml(description, label, - geo_replication_enabled, - extended_properties): - return AzureXmlSerializer.doc_from_data( - 'UpdateStorageServiceInput', - [('Description', description), - ('Label', label, _encode_base64), - ('GeoReplicationEnabled', geo_replication_enabled, _lower)], - extended_properties) + :rtype: ``list`` of :class:`NodeSize` + """ + sizes = [] - @staticmethod - def regenerate_keys_to_xml(key_type): - return AzureXmlSerializer.doc_from_data('RegenerateKeys', - [('KeyType', key_type)]) + for key, values in self._instance_types.items(): + node_size = self._to_node_size(copy.deepcopy(values)) + sizes.append(node_size) - @staticmethod - def update_hosted_service_to_xml(label, description, extended_properties): - return AzureXmlSerializer.doc_from_data('UpdateHostedService', - [('Label', label, _encode_base64), - ('Description', description)], - extended_properties) + return sizes - @staticmethod - def create_hosted_service_to_xml(service_name, label, description, - 
location, affinity_group, - extended_properties): - return AzureXmlSerializer.doc_from_data( - 'CreateHostedService', - [('ServiceName', service_name), - ('Label', label, _encode_base64), - ('Description', description), - ('Location', location), - ('AffinityGroup', affinity_group)], - extended_properties) + def list_images(self): + """ + Lists all images - @staticmethod - def create_deployment_to_xml(name, package_url, label, configuration, - start_deployment, treat_warnings_as_error, - extended_properties): - return AzureXmlSerializer.doc_from_data( - 'CreateDeployment', - [('Name', name), - ('PackageUrl', package_url), - ('Label', label, _encode_base64), - ('Configuration', configuration), - ('StartDeployment', - start_deployment, _lower), - ('TreatWarningsAsError', treat_warnings_as_error, _lower)], - extended_properties) + :rtype: ``list`` of :class:`NodeImage` + """ + data = self._perform_get(self._get_image_path(), Images) - @staticmethod - def swap_deployment_to_xml(production, source_deployment): - return AzureXmlSerializer.doc_from_data( - 'Swap', - [('Production', production), - ('SourceDeployment', source_deployment)]) + return [self._to_image(i) for i in data] - @staticmethod - def update_deployment_status_to_xml(status): - return AzureXmlSerializer.doc_from_data( - 'UpdateDeploymentStatus', - [('Status', status)]) + def list_locations(self): + """ + Lists all locations - @staticmethod - def change_deployment_to_xml(configuration, treat_warnings_as_error, mode, - extended_properties): - return AzureXmlSerializer.doc_from_data( - 'ChangeConfiguration', - [('Configuration', configuration), - ('TreatWarningsAsError', treat_warnings_as_error, _lower), - ('Mode', mode)], - extended_properties) + :rtype: ``list`` of :class:`NodeLocation` + """ + data = self._perform_get('/' + subscription_id + '/locations', Locations) - @staticmethod - def upgrade_deployment_to_xml(mode, package_url, configuration, label, - role_to_upgrade, force, extended_properties): - 
return AzureXmlSerializer.doc_from_data( - 'UpgradeDeployment', - [('Mode', mode), - ('PackageUrl', package_url), - ('Configuration', configuration), - ('Label', label, _encode_base64), - ('RoleToUpgrade', role_to_upgrade), - ('Force', force, _lower)], - extended_properties) + return [self._to_location(l) for l in data] - @staticmethod - def rollback_upgrade_to_xml(mode, force): - return AzureXmlSerializer.doc_from_data( - 'RollbackUpdateOrUpgrade', - [('Mode', mode), - ('Force', force, _lower)]) + def list_nodes(self, ex_cloud_service_name=None): + """ + List all nodes - @staticmethod - def walk_upgrade_domain_to_xml(upgrade_domain): - return AzureXmlSerializer.doc_from_data( - 'WalkUpgradeDomain', - [('UpgradeDomain', upgrade_domain)]) + ex_cloud_service_name parameter is used to scope the request + to a specific Cloud Service. This is a required parameter as + nodes cannot exist outside of a Cloud Service nor be shared + between a Cloud Service within Azure. - @staticmethod - def certificate_file_to_xml(data, certificate_format, password): - return AzureXmlSerializer.doc_from_data( - 'CertificateFile', - [('Data', data), - ('CertificateFormat', certificate_format), - ('Password', password)]) + :param ex_cloud_service_name: Cloud Service name + :type ex_cloud_service_name: ``str`` - @staticmethod - def create_affinity_group_to_xml(name, label, description, location): - return AzureXmlSerializer.doc_from_data( - 'CreateAffinityGroup', - [('Name', name), - ('Label', label, _encode_base64), - ('Description', description), - ('Location', location)]) + :rtype: ``list`` of :class:`Node` + """ + if not ex_cloud_service_name: + raise ValueError("ex_cloud_service_name is required.") - @staticmethod - def update_affinity_group_to_xml(label, description): - return AzureXmlSerializer.doc_from_data( - 'UpdateAffinityGroup', - [('Label', label, _encode_base64), - ('Description', description)]) + data = self._perform_get( + self._get_hosted_service_path(ex_cloud_service_name) + 
+ '?embed-detail=True', + HostedService) - @staticmethod - def subscription_certificate_to_xml(public_key, thumbprint, data): - return AzureXmlSerializer.doc_from_data( - 'SubscriptionCertificate', - [('SubscriptionCertificatePublicKey', public_key), - ('SubscriptionCertificateThumbprint', thumbprint), - ('SubscriptionCertificateData', data)]) + try: + return [self._to_node(n) for n in data.deployments[0].role_instance_list] + except IndexError: + return None - @staticmethod - def os_image_to_xml(label, media_link, name, os): - return AzureXmlSerializer.doc_from_data( - 'OSImage', - [('Label', label), - ('MediaLink', media_link), - ('Name', name), - ('OS', os)]) + def reboot_node(self, node, ex_cloud_service_name=None, ex_deployment_slot=None): + """ + Reboots a node. - @staticmethod - def data_virtual_hard_disk_to_xml(host_caching, disk_label, disk_name, lun, - logical_disk_size_in_gb, media_link, - source_media_link): - return AzureXmlSerializer.doc_from_data( - 'DataVirtualHardDisk', - [('HostCaching', host_caching), - ('DiskLabel', disk_label), - ('DiskName', disk_name), - ('Lun', lun), - ('LogicalDiskSizeInGB', logical_disk_size_in_gb), - ('MediaLink', media_link), - ('SourceMediaLink', source_media_link)]) + ex_cloud_service_name parameter is used to scope the request + to a specific Cloud Service. This is a required parameter as + nodes cannot exist outside of a Cloud Service nor be shared + between a Cloud Service within Azure. 
- @staticmethod - def disk_to_xml(has_operating_system, label, media_link, name, os): - return AzureXmlSerializer.doc_from_data( - 'Disk', - [('HasOperatingSystem', has_operating_system, _lower), - ('Label', label), - ('MediaLink', media_link), - ('Name', name), - ('OS', os)]) + :param ex_cloud_service_name: Cloud Service name + :type ex_cloud_service_name: ``str`` - @staticmethod - def restart_role_operation_to_xml(): - return AzureXmlSerializer.doc_from_xml( - 'RestartRoleOperation', - 'RestartRoleOperation') + :param ex_deployment_name: Options are "production" (default) + or "Staging". (Optional) + :type ex_deployment_name: ``str`` - @staticmethod - def shutdown_role_operation_to_xml(): - return AzureXmlSerializer.doc_from_xml( - 'ShutdownRoleOperation', - 'ShutdownRoleOperation') + :rtype: ``list`` of :class:`Node` + """ - @staticmethod - def start_role_operation_to_xml(): - return AzureXmlSerializer.doc_from_xml( - 'StartRoleOperation', - 'StartRoleOperation') + if not ex_cloud_service_name: + raise ValueError("ex_cloud_service_name is required.") - @staticmethod - def windows_configuration_to_xml(configuration): - xml = AzureXmlSerializer.data_to_xml( - [('ConfigurationSetType', configuration.configuration_set_type), - ('ComputerName', configuration.computer_name), - ('AdminPassword', configuration.admin_password), - ('ResetPasswordOnFirstLogon', - configuration.reset_password_on_first_logon, - _lower), - ('EnableAutomaticUpdates', - configuration.enable_automatic_updates, - _lower), - ('TimeZone', configuration.time_zone)]) + if not ex_deployment_slot: + ex_deployment_slot = "production" - if configuration.domain_join is not None: - xml += '' - xml += '' - xml += AzureXmlSerializer.data_to_xml( - [('Domain', configuration.domain_join.credentials.domain), - ('Username', configuration.domain_join.credentials.username), - ('Password', configuration.domain_join.credentials.password)]) - xml += '' - xml += AzureXmlSerializer.data_to_xml( - [('JoinDomain', 
configuration.domain_join.join_domain), - ('MachineObjectOU', - configuration.domain_join.machine_object_ou)]) - xml += '' - if configuration.stored_certificate_settings is not None: - xml += '' - for cert in configuration.stored_certificate_settings: - xml += '' - xml += AzureXmlSerializer.data_to_xml( - [('StoreLocation', cert.store_location), - ('StoreName', cert.store_name), - ('Thumbprint', cert.thumbprint)]) - xml += '' - xml += '' - return xml + _deployment_name = self._get_deployment(service_name=ex_cloud_service_name,deployment_slot=ex_deployment_slot).name + print _deployment_name - @staticmethod - def linux_configuration_to_xml(configuration): - xml = AzureXmlSerializer.data_to_xml( - [('ConfigurationSetType', configuration.configuration_set_type), - ('HostName', configuration.host_name), - ('UserName', configuration.user_name), - ('UserPassword', configuration.user_password), - ('DisableSshPasswordAuthentication', - configuration.disable_ssh_password_authentication, - _lower)]) + try: + result = self._perform_post( + self._get_deployment_path_using_name( + ex_cloud_service_name, _deployment_name) + \ + '/roleinstances/' + _str(node.id) + \ + '?comp=reboot', '', async=True) + if result.request_id: + return True + else: + return False + except Exception, e: + return False - if configuration.ssh is not None: - xml += '' - xml += '' - for key in configuration.ssh.public_keys: - xml += '' - xml += AzureXmlSerializer.data_to_xml( - [('Fingerprint', key.fingerprint), - ('Path', key.path)]) - xml += '' - xml += '' - xml += '' - for key in configuration.ssh.key_pairs: - xml += '' - xml += AzureXmlSerializer.data_to_xml( - [('Fingerprint', key.fingerprint), - ('Path', key.path)]) - xml += '' - xml += '' - xml += '' - return xml + def list_volumes(self, node=None): + """ + Lists volumes of the disks in the image repository that are + associated with the specificed subscription. 
- @staticmethod - def network_configuration_to_xml(configuration): - xml = AzureXmlSerializer.data_to_xml( - [('ConfigurationSetType', configuration.configuration_set_type)]) - xml += '' - for endpoint in configuration.input_endpoints: - xml += '' - xml += AzureXmlSerializer.data_to_xml( - [('LoadBalancedEndpointSetName', - endpoint.load_balanced_endpoint_set_name), - ('LocalPort', endpoint.local_port), - ('Name', endpoint.name), - ('Port', endpoint.port)]) + Pass Node object to scope the list of volumes to a single + instance. - if endpoint.load_balancer_probe.path or\ - endpoint.load_balancer_probe.port or\ - endpoint.load_balancer_probe.protocol: - xml += '' - xml += AzureXmlSerializer.data_to_xml( - [('Path', endpoint.load_balancer_probe.path), - ('Port', endpoint.load_balancer_probe.port), - ('Protocol', endpoint.load_balancer_probe.protocol)]) - xml += '' + :rtype: ``list`` of :class:`StorageVolume` + """ - xml += AzureXmlSerializer.data_to_xml( - [('Protocol', endpoint.protocol), - ('EnableDirectServerReturn', - endpoint.enable_direct_server_return, - _lower)]) + data = self._perform_get(self._get_disk_path(), Disks) - xml += '' - xml += '' - xml += '' - for name in configuration.subnet_names: - xml += AzureXmlSerializer.data_to_xml([('SubnetName', name)]) - xml += '' - return xml + volumes = [self._to_volume(volume=v,node=node) for v in data] - @staticmethod - def role_to_xml(availability_set_name, data_virtual_hard_disks, - network_configuration_set, os_virtual_hard_disk, role_name, - role_size, role_type, system_configuration_set): - xml = AzureXmlSerializer.data_to_xml([('RoleName', role_name), - ('RoleType', role_type)]) + return volumes - xml += '' + def create_node(self, ex_cloud_service_name=None, **kwargs): + """Create Azure Virtual Machine - if system_configuration_set is not None: - xml += '' - if isinstance(system_configuration_set, WindowsConfigurationSet): - xml += AzureXmlSerializer.windows_configuration_to_xml( - system_configuration_set) - 
elif isinstance(system_configuration_set, LinuxConfigurationSet): - xml += AzureXmlSerializer.linux_configuration_to_xml( - system_configuration_set) - xml += '' + Reference: http://bit.ly/1fIsCb7 [www.windowsazure.com/en-us/documentation/] - if network_configuration_set is not None: - xml += '' - xml += AzureXmlSerializer.network_configuration_to_xml( - network_configuration_set) - xml += '' + We default to: - xml += '' + + 3389/TCP - RDP - 1st Microsoft instance. + + RANDOM/TCP - RDP - All succeeding Microsoft instances. - if availability_set_name is not None: - xml += AzureXmlSerializer.data_to_xml( - [('AvailabilitySetName', availability_set_name)]) + + 22/TCP - SSH - 1st Linux instance + + RANDOM/TCP - SSH - All succeeding Linux instances. - if data_virtual_hard_disks is not None: - xml += '' - for hd in data_virtual_hard_disks: - xml += '' - xml += AzureXmlSerializer.data_to_xml( - [('HostCaching', hd.host_caching), - ('DiskLabel', hd.disk_label), - ('DiskName', hd.disk_name), - ('Lun', hd.lun), - ('LogicalDiskSizeInGB', hd.logical_disk_size_in_gb), - ('MediaLink', hd.media_link)]) - xml += '' - xml += '' + The above replicates the standard behavior of the Azure UI. + You can retrieve the assigned ports to each instance by + using the following private function: - if os_virtual_hard_disk is not None: - xml += '' - xml += AzureXmlSerializer.data_to_xml( - [('HostCaching', os_virtual_hard_disk.host_caching), - ('DiskLabel', os_virtual_hard_disk.disk_label), - ('DiskName', os_virtual_hard_disk.disk_name), - ('MediaLink', os_virtual_hard_disk.media_link), - ('SourceImageName', os_virtual_hard_disk.source_image_name)]) - xml += '' + _get_endpoint_ports(service_name) + Returns public,private port key pair. - if role_size is not None: - xml += AzureXmlSerializer.data_to_xml([('RoleSize', role_size)]) + @inherits: :class:`NodeDriver.create_node` - return xml + :keyword ex_cloud_service_name: Required. Name of the Azure Cloud Service. 
+ :type ex_cloud_service_name: ``str`` - @staticmethod - def add_role_to_xml(role_name, system_configuration_set, - os_virtual_hard_disk, role_type, - network_configuration_set, availability_set_name, - data_virtual_hard_disks, role_size): - xml = AzureXmlSerializer.role_to_xml( - availability_set_name, - data_virtual_hard_disks, - network_configuration_set, - os_virtual_hard_disk, - role_name, - role_size, - role_type, - system_configuration_set) - return AzureXmlSerializer.doc_from_xml('PersistentVMRole', xml) + :keyword ex_storage_service_name: Optional: Name of the Azure Storage Service. + :type ex_cloud_service_name: ``str`` - @staticmethod - def update_role_to_xml(role_name, os_virtual_hard_disk, role_type, - network_configuration_set, availability_set_name, - data_virtual_hard_disks, role_size): - xml = AzureXmlSerializer.role_to_xml( - availability_set_name, - data_virtual_hard_disks, - network_configuration_set, - os_virtual_hard_disk, - role_name, - role_size, - role_type, - None) - return AzureXmlSerializer.doc_from_xml('PersistentVMRole', xml) + :keyword ex_deployment_name: Optional. The name of the deployment. + If this is not passed in we default to + using the Cloud Service name. + :type ex_deployment_name: ``str`` - @staticmethod - def capture_role_to_xml(post_capture_action, target_image_name, - target_image_label, provisioning_configuration): - xml = AzureXmlSerializer.data_to_xml( - [('OperationType', 'CaptureRoleOperation'), - ('PostCaptureAction', post_capture_action)]) + :keyword ex_deployment_slot: Optional: Valid values: production|staging. + Defaults to production. 
+ :type ex_cloud_service_name: ``str`` - if provisioning_configuration is not None: - xml += '' - if isinstance(provisioning_configuration, WindowsConfigurationSet): - xml += AzureXmlSerializer.windows_configuration_to_xml( - provisioning_configuration) - elif isinstance(provisioning_configuration, LinuxConfigurationSet): - xml += AzureXmlSerializer.linux_configuration_to_xml( - provisioning_configuration) - xml += '' + :keyword ex_linux_user_id: Optional. Defaults to 'azureuser'. + :type ex_cloud_service_name: ``str`` - xml += AzureXmlSerializer.data_to_xml( - [('TargetImageLabel', target_image_label), - ('TargetImageName', target_image_name)]) + """ + name = kwargs['name'] + size = kwargs['size'] + image = kwargs['image'] - return AzureXmlSerializer.doc_from_xml('CaptureRoleOperation', xml) + password = None + auth = self._get_and_check_auth(kwargs["auth"]) + password = auth.password - @staticmethod - def virtual_machine_deployment_to_xml(deployment_name, deployment_slot, - label, role_name, - system_configuration_set, - os_virtual_hard_disk, role_type, - network_configuration_set, - availability_set_name, - data_virtual_hard_disks, role_size, - virtual_network_name): - xml = AzureXmlSerializer.data_to_xml([('Name', deployment_name), - ('DeploymentSlot', deployment_slot), - ('Label', label)]) - xml += '' - xml += '' - xml += AzureXmlSerializer.role_to_xml( - availability_set_name, - data_virtual_hard_disks, - network_configuration_set, - os_virtual_hard_disk, - role_name, - role_size, - role_type, - system_configuration_set) - xml += '' - xml += '' - - if virtual_network_name is not None: - xml += AzureXmlSerializer.data_to_xml( - [('VirtualNetworkName', virtual_network_name)]) - - return AzureXmlSerializer.doc_from_xml('Deployment', xml) + sms = ServiceManagementService(subscription_id, certificate_path) - @staticmethod - def data_to_xml(data): - '''Creates an xml fragment from the specified data. 
- data: Array of tuples, where first: xml element name - second: xml element text - third: conversion function - ''' - xml = '' - for element in data: - name = element[0] - val = element[1] - if len(element) > 2: - converter = element[2] - else: - converter = None + if not ex_cloud_service_name: + raise ValueError("ex_cloud_service_name is required.") - if val is not None: - if converter is not None: - text = _str(converter(_str(val))) - else: - text = _str(val) + if "ex_deployment_slot" in kwargs: + ex_deployment_slot = kwargs['ex_deployment_slot'] + else: + ex_deployment_slot = "production" # We assume production if this is not provided. - xml += ''.join(['<', name, '>', text, '']) - return xml + if "ex_linux_user_id" in kwargs: + ex_linux_user_id = kwargs['ex_linux_user_id'] + else: + # This mimics the Azure UI behavior. + ex_linux_user_id = "azureuser" - @staticmethod - def doc_from_xml(document_element_name, inner_xml): - '''Wraps the specified xml in an xml root element with default azure - namespaces''' - xml = ''.join(['<', document_element_name, - ' xmlns:i="http://www.w3.org/2001/XMLSchema-instance"', - ' xmlns="http://schemas.microsoft.com/windowsazure">']) - xml += inner_xml - xml += ''.join(['']) - return xml + node_list = self.list_nodes(ex_cloud_service_name=ex_cloud_service_name) + network_config = ConfigurationSet() + network_config.configuration_set_type = 'NetworkConfiguration' - @staticmethod - def doc_from_data(document_element_name, data, extended_properties=None): - xml = AzureXmlSerializer.data_to_xml(data) - if extended_properties is not None: - xml += AzureXmlSerializer.extended_properties_dict_to_xml_fragment( - extended_properties) - return AzureXmlSerializer.doc_from_xml(document_element_name, xml) + # We do this because we need to pass a Configuration to the + # method. This will be either Linux or Windows. 
+ if re.search("Win|SQL|SharePoint|Visual|Dynamics|DynGP|BizTalk", image, re.I): + machine_config = WindowsConfigurationSet(name, password) + machine_config.domain_join = None - @staticmethod - def extended_properties_dict_to_xml_fragment(extended_properties): - xml = '' - if extended_properties is not None and len(extended_properties) > 0: - xml += '' - for key, val in extended_properties.items(): - xml += ''.join(['', - '', - _str(key), - '', - '', - _str(val), - '', - '']) - xml += '' - return xm + if node_list is None: + port = "3389" + else: + port = random.randint(41952,65535) + endpoints = self._get_deployment(service_name=ex_cloud_service_name,deployment_slot=ex_deployment_slot) -""" -Data Classes -""" + for instances in endpoints.role_instance_list: + ports = [] + for ep in instances.instance_endpoints: + ports += [ep.public_port] -class WindowsAzureData(object): + while port in ports: + port = random.randint(41952,65535) - ''' This is the base of data class. - It is only used to check whether it is instance or not. 
''' - pass + endpoint = ConfigurationSetInputEndpoint( + name='Remote Desktop', + protocol='tcp', + port=port, + local_port='3389', + load_balanced_endpoint_set_name=None, + enable_direct_server_return=False + ) + else: + if node_list is None: + port = "22" + else: + port = random.randint(41952,65535) + endpoints = self._get_deployment(service_name=ex_cloud_service_name,deployment_slot=ex_deployment_slot) -class OSVirtualHardDisk(WindowsAzureData): + for instances in endpoints.role_instance_list: + ports = [] + for ep in instances.instance_endpoints: + ports += [ep.public_port] - def __init__(self, source_image_name=None, media_link=None, - host_caching=None, disk_label=None, disk_name=None): - self.source_image_name = source_image_name - self.media_link = media_link - self.host_caching = host_caching - self.disk_label = disk_label - self.disk_name = disk_name - self.os = u'' # undocumented, not used when adding a role + while port in ports: + port = random.randint(41952,65535) -class LinuxConfigurationSet(WindowsAzureData): + endpoint = ConfigurationSetInputEndpoint( + name='SSH', + protocol='tcp', + port=port, + local_port='22', + load_balanced_endpoint_set_name=None, + enable_direct_server_return=False + ) + machine_config = LinuxConfigurationSet(name, ex_linux_user_id, password, False) - def __init__(self, host_name=None, user_name=None, user_password=None, - disable_ssh_password_authentication=None): - self.configuration_set_type = u'LinuxProvisioningConfiguration' - self.host_name = host_name - self.user_name = user_name - self.user_password = user_password - self.disable_ssh_password_authentication =\ - disable_ssh_password_authentication - self.ssh = SSH() + network_config.input_endpoints.input_endpoints.append(endpoint) -class WindowsConfigurationSet(WindowsAzureData): + _storage_location = self._get_cloud_service_location(service_name=ex_cloud_service_name) + + # OK, bit annoying here. 
You must create a deployment before + # you can create an instance; however, the deployment function + # creates the first instance, but all subsequent instances + # must be created using the add_role function. + # + # So, yeah, annoying. + if node_list is None: + # This is the first node in this cloud service. + if "ex_storage_service_name" in kwargs: + ex_storage_service_name = kwargs['ex_storage_service_name'] + else: + ex_storage_service_name = ex_cloud_service_name + ex_storage_service_name = re.sub(ur'[\W_]+', u'', ex_storage_service_name.lower(), flags=re.UNICODE) + if self._is_storage_service_unique(service_name=ex_storage_service_name): + self._create_storage_account( + service_name=ex_storage_service_name, + location=_storage_location.service_location, + is_affinity_group=_storage_location.is_affinity_group + ) - def __init__(self, computer_name=None, admin_password=None, - reset_password_on_first_logon=None, - enable_automatic_updates=None, time_zone=None): - self.configuration_set_type = u'WindowsProvisioningConfiguration' - self.computer_name = computer_name - self.admin_password = admin_password - self.reset_password_on_first_logon = reset_password_on_first_logon - self.enable_automatic_updates = enable_automatic_updates - self.time_zone = time_zone - self.domain_join = DomainJoin() - self.stored_certificate_settings = StoredCertificateSettings() + if "ex_deployment_name" in kwargs: + ex_deployment_name = kwargs['ex_deployment_name'] + else: + ex_deployment_name = ex_cloud_service_name -class SSH(WindowsAzureData): + blob_url = "http://" + ex_storage_service_name + ".blob.core.windows.net" + disk_name = "{0}-{1}-{2}.vhd".format(ex_cloud_service_name,name,time.strftime("%Y-%m-%d")) # Azure's pattern in the UI. 
+ media_link = blob_url + "/vhds/" + disk_name + disk_config = OSVirtualHardDisk(image, media_link) + + result = sms.create_virtual_machine_deployment( + service_name=ex_cloud_service_name, + deployment_name=ex_deployment_name, + deployment_slot=ex_deployment_slot, + label=name, + role_name=name, + system_config=machine_config, + os_virtual_hard_disk=disk_config, + network_config=network_config, + role_size=size + ) + else: + _deployment_name = self._get_deployment(service_name=ex_cloud_service_name,deployment_slot=ex_deployment_slot).name - def __init__(self): - self.public_keys = PublicKeys() - self.key_pairs = KeyPairs() + if "ex_storage_service_name" in kwargs: + ex_storage_service_name = kwargs['ex_storage_service_name'] + else: + ex_storage_service_name = ex_cloud_service_name + ex_storage_service_name = re.sub(ur'[\W_]+', u'', ex_storage_service_name.lower(), flags=re.UNICODE) + + if self._is_storage_service_unique(service_name=ex_storage_service_name): + self._create_storage_account( + service_name=ex_storage_service_name, + location=_storage_location.service_location, + is_affinity_group=_storage_location.is_affinity_group + ) -class PublicKeys(WindowsAzureData): + blob_url = "http://" + ex_storage_service_name + ".blob.core.windows.net" + disk_name = "{0}-{1}-{2}.vhd".format(ex_cloud_service_name,name,time.strftime("%Y-%m-%d")) + media_link = blob_url + "/vhds/" + disk_name + disk_config = OSVirtualHardDisk(image, media_link) - def __init__(self): - self.public_keys = _list_of(PublicKey) + result = self.sms.add_role( + service_name=ex_cloud_service_name, + deployment_name=_deployment_name, + role_name=name, + system_config=machine_config, + os_virtual_hard_disk=disk_config, + network_config=network_config, + role_size=size + ) - def __iter__(self): - return iter(self.public_keys) + return Node( + id=name, + name=name, + state=NodeState.PENDING, + public_ips=[], + private_ips=[], + driver=self.connection.driver + ) - def __len__(self): - return 
len(self.public_keys) + def destroy_node(self, node, ex_cloud_service_name=None, ex_deployment_slot=None): + """Remove Azure Virtual Machine - def __getitem__(self, index): - return self.public_keys[index] + This removes the instance, but does not + remove the disk. You will need to use destroy_volume. + Azure sometimes has an issue where it will hold onto + a blob lease for an extended amount of time. -class PublicKey(WindowsAzureData): + :keyword ex_cloud_service_name: Required. Name of the Azure Cloud Service. + :type ex_cloud_service_name: ``str`` - def __init__(self, fingerprint=u'', path=u''): - self.fingerprint = fingerprint - self.path = path + :keyword ex_deployment_slot: Optional: The name of the deployment + slot. If this is not passed in we + default to production. + :type ex_deployment_slot: ``str`` + """ -class KeyPairs(WindowsAzureData): + if not ex_cloud_service_name: + raise ValueError("ex_cloud_service_name is required.") - def __init__(self): - self.key_pairs = _list_of(KeyPair) + if not ex_deployment_slot: + ex_deployment_slot = "production" - def __iter__(self): - return iter(self.key_pairs) + _deployment = self._get_deployment(service_name=ex_cloud_service_name,deployment_slot=ex_deployment_slot) + _deployment_name = _deployment.name - def __len__(self): - return len(self.key_pairs) + _server_deployment_count = len(_deployment.role_instance_list) - def __getitem__(self, index): - return self.key_pairs[index] + sms = ServiceManagementService(subscription_id, certificate_path) -class KeyPair(WindowsAzureData): + try: + if _server_deployment_count > 1: + data = sms.delete_role(service_name=ex_cloud_service_name, + deployment_name=_deployment_name, + role_name=node.id, + delete_attached_disks=True) + return True + else: + data = sms.delete_deployment(service_name=ex_cloud_service_name,deployment_name=_deployment_name,delete_attached_disks=True) + return True + except Exception: + return False - def __init__(self, fingerprint=u'', path=u''): - 
self.fingerprint = fingerprint - self.path = path + """ Functions not implemented + """ + def create_volume_snapshot(self): + raise NotImplementedError( + 'You cannot create snapshots of ' + 'Azure VMs at this time.') -class LoadBalancerProbe(WindowsAzureData): + def attach_volume(self): + raise NotImplementedError( + 'attach_volume is not supported ' + 'at this time.') - def __init__(self): - self.path = u'' - self.port = u'' - self.protocol = u'' + def create_volume(self): + raise NotImplementedError( + 'create_volume is not supported ' + 'at this time.') -class ConfigurationSet(WindowsAzureData): + def detach_volume(self): + raise NotImplementedError( + 'detach_volume is not supported ' + 'at this time.') - def __init__(self): - self.configuration_set_type = u'' - self.role_type = u'' - self.input_endpoints = ConfigurationSetInputEndpoints() - self.subnet_names = _scalar_list_of(str, 'SubnetName') + def destroy_volume(self): + raise NotImplementedError( + 'destroy_volume is not supported ' + 'at this time.') -class ConfigurationSetInputEndpoints(WindowsAzureData): + """Private Functions + """ - def __init__(self): - self.input_endpoints = _list_of( - ConfigurationSetInputEndpoint, 'InputEndpoint') + def _to_node(self, data): + """ + Convert the data from a Azure response object into a Node + """ - def __iter__(self): - return iter(self.input_endpoints) + if len(data.instance_endpoints) >= 1: + public_ip = data.instance_endpoints[0].vip + else: + public_ip = [] - def __len__(self): - return len(self.input_endpoints) + for port in data.instance_endpoints: + if port.name == 'Remote Desktop': + remote_desktop_port = port.public_port + else: + remote_desktop_port = [] - def __getitem__(self, index): - return self.input_endpoints[index] + if port.name == "SSH": + ssh_port = port.public_port + else: + ssh_port = [] -class ConfigurationSetInputEndpoint(WindowsAzureData): + # When servers are Linux, this fails due to the remote_desktop_port + # therefore we need to add a 
check in here. + return Node( + id=data.instance_name, + name=data.instance_name, + state=data.instance_status, + public_ips=[public_ip], + private_ips=[data.ip_address], + driver=self.connection.driver, + extra={ + 'remote_desktop_port': remote_desktop_port, + 'ssh_port': ssh_port, + 'power_state': data.power_state, + 'instance_size': data.instance_size}) - def __init__(self, name=u'', protocol=u'', port=u'', local_port=u'', - load_balanced_endpoint_set_name=u'', - enable_direct_server_return=False): - self.enable_direct_server_return = enable_direct_server_return - self.load_balanced_endpoint_set_name = load_balanced_endpoint_set_name - self.local_port = local_port - self.name = name - self.port = port - self.load_balancer_probe = LoadBalancerProbe() - self.protocol = protocol + def _to_location(self, data): + """ + Convert the data from a Azure resonse object into a location + """ + country = data.display_name -class Locations(WindowsAzureData): + if "Asia" in data.display_name: + country = "Asia" - def __init__(self): - self.locations = _list_of(Location) + if "Europe" in data.display_name: + country = "Europe" - def __iter__(self): - return iter(self.locations) + if "US" in data.display_name: + country = "US" - def __len__(self): - return len(self.locations) + if "Japan" in data.display_name: + country = "Japan" - def __getitem__(self, index): - return self.locations[index] + if "Brazil" in data.display_name: + country = "Brazil" -class Location(WindowsAzureData): + return NodeLocation( + id=data.name, + name=data.display_name, + country=country, + driver=self.connection.driver) - def __init__(self): - self.name = u'' - self.display_name = u'' - self.available_services = _scalar_list_of(str, 'AvailableService') + def _to_node_size(self, data): + """ + Convert the AZURE_COMPUTE_INSTANCE_TYPES into NodeSize + """ + + return NodeSize( + id=data["id"], + name=data["name"], + ram=data["ram"], + disk=data["disk"], + bandwidth=data["bandwidth"], + 
price=data["price"], + driver=self.connection.driver, + extra={ + 'max_data_disks' : data["max_data_disks"], + 'cores' : data["cores"] + }) -class Images(WindowsAzureData): + def _to_image(self, data): - def __init__(self): - self.images = _list_of(OSImage) + return NodeImage( + id=data.name, + name=data.label, + driver=self.connection.driver, + extra={ + 'os' : data.os, + 'category' : data.category, + 'description' : data.description, + 'location' : data.location, + 'affinity_group' : data.affinity_group, + 'media_link' : data.media_link + }) - def __iter__(self): - return iter(self.images) + def _to_volume(self, volume, node): - def __len__(self): - return len(self.images) + if node: + if hasattr(volume.attached_to, 'role_name'): + if volume.attached_to.role_name == node.id: + extra = {} + extra['affinity_group'] = volume.affinity_group + if hasattr(volume.attached_to, 'hosted_service_name'): + extra['hosted_service_name'] = volume.attached_to.hosted_service_name + if hasattr(volume.attached_to, 'role_name'): + extra['role_name'] = volume.attached_to.role_name + if hasattr(volume.attached_to, 'deployment_name'): + extra['deployment_name'] = volume.attached_to.deployment_name + extra['os'] = volume.os + extra['location'] = volume.location + extra['media_link'] = volume.media_link + extra['source_image_name'] = volume.source_image_name - def __getitem__(self, index): - return self.images[index] + return StorageVolume(id=volume.name, + name=volume.name, + size=int(volume.logical_disk_size_in_gb), + driver=self.connection.driver, + extra=extra) + else: + extra = {} + extra['affinity_group'] = volume.affinity_group + if hasattr(volume.attached_to, 'hosted_service_name'): + extra['hosted_service_name'] = volume.attached_to.hosted_service_name + if hasattr(volume.attached_to, 'role_name'): + extra['role_name'] = volume.attached_to.role_name + if hasattr(volume.attached_to, 'deployment_name'): + extra['deployment_name'] = volume.attached_to.deployment_name + extra['os'] 
= volume.os + extra['location'] = volume.location + extra['media_link'] = volume.media_link + extra['source_image_name'] = volume.source_image_name -class HostedServices(WindowsAzureData): - - def __init__(self): - self.hosted_services = _list_of(HostedService) + return StorageVolume(id=volume.name, + name=volume.name, + size=int(volume.logical_disk_size_in_gb), + driver=self.connection.driver, + extra=extra) - def __iter__(self): - return iter(self.hosted_services) + def _get_deployment(self, **kwargs): + _service_name = kwargs['service_name'] + _deployment_slot = kwargs['deployment_slot'] - def __len__(self): - return len(self.hosted_services) + return self._perform_get( + self._get_deployment_path_using_slot( + _service_name, _deployment_slot), + Deployment) - def __getitem__(self, index): - return self.hosted_services[index] + def _get_cloud_service_location(self, service_name=None): -class HostedService(WindowsAzureData): + if not service_name: + raise ValueError("service_name is required.") - def __init__(self): - self.url = u'' - self.service_name = u'' - self.hosted_service_properties = HostedServiceProperties() - self.deployments = Deployments() + sms = ServiceManagementService(subscription_id, certificate_path) -class HostedServiceProperties(WindowsAzureData): + res = sms.get_hosted_service_properties(service_name=service_name,embed_detail=False) - def __init__(self): - self.description = u'' - self.location = u'' - self.affinity_group = u'' - self.label = _Base64String() - self.status = u'' - self.date_created = u'' - self.date_last_modified = u'' - self.extended_properties = _dict_of( - 'ExtendedProperty', 'Name', 'Value') + _affinity_group = res.hosted_service_properties.affinity_group + _cloud_service_location = res.hosted_service_properties.location -class Deployments(WindowsAzureData): + if _affinity_group is not None: + return self.service_location(True, _affinity_group) + elif _cloud_service_location is not None: + return 
self.service_location(False, _cloud_service_location) + else: + return None - def __init__(self): - self.deployments = _list_of(Deployment) + def _is_storage_service_unique(self, service_name=None): + if not service_name: + raise ValueError("service_name is required.") - def __iter__(self): - return iter(self.deployments) + sms = ServiceManagementService(subscription_id, certificate_path) + + _check_availability = sms.check_storage_account_name_availability(service_name=service_name) + + return _check_availability.result - def __len__(self): - return len(self.deployments) + def _create_storage_account(self, **kwargs): + sms = ServiceManagementService(subscription_id, certificate_path) - def __getitem__(self, index): - return self.deployments[index] + if kwargs['is_affinity_group'] is True: + result = sms.create_storage_account( + service_name=kwargs['service_name'], + description=kwargs['service_name'], + label=kwargs['service_name'], + affinity_group=kwargs['location']) + else: + result = sms.create_storage_account( + service_name=kwargs['service_name'], + description=kwargs['service_name'], + label=kwargs['service_name'], + location=kwargs['location']) -class Deployment(WindowsAzureData): + # We need to wait for this to be created before we can + # create the storage container and the instance. 
- def __init__(self): - self.name = u'' - self.deployment_slot = u'' - self.private_id = u'' - self.status = u'' - self.label = _Base64String() - self.url = u'' - self.configuration = _Base64String() - self.role_instance_list = RoleInstanceList() - self.upgrade_status = UpgradeStatus() - self.upgrade_domain_count = u'' - self.role_list = RoleList() - self.sdk_version = u'' - self.input_endpoint_list = InputEndpoints() - self.locked = False - self.rollback_allowed = False - self.persistent_vm_downtime_info = PersistentVMDowntimeInfo() - self.created_time = u'' - self.last_modified_time = u'' - self.extended_properties = _dict_of( - 'ExtendedProperty', 'Name', 'Value') + operation_status = sms.get_operation_status(result.request_id) -class RoleInstanceList(WindowsAzureData): + timeout = 60 * 5 + waittime = 0 + interval = 5 - def __init__(self): - self.role_instances = _list_of(RoleInstance) + while operation_status.status == "InProgress" and waittime < timeout: + operation_status = sms.get_operation_status(result.request_id) + if operation_status.status == "Succeeded": + break - def __iter__(self): - return iter(self.role_instances) + waittime += interval + time.sleep(interval) + return - def __len__(self): - return len(self.role_instances) + def _perform_get(self, path, response_type): + request = AzureHTTPRequest() + request.method = 'GET' + request.host = azure_service_management_host + request.path = path + request.path, request.query = self._update_request_uri_query(request) + request.headers = self._update_management_header(request) + response = self._perform_request(request) - def __getitem__(self, index): - return self.role_instances[index] + if response_type is not None: + return self._parse_response(response, response_type) -class RoleInstance(WindowsAzureData): + return response - def __init__(self): - self.role_name = u'' - self.instance_name = u'' - self.instance_status = u'' - self.instance_upgrade_domain = 0 - self.instance_fault_domain = 0 - 
self.instance_size = u'' - self.instance_state_details = u'' - self.instance_error_code = u'' - self.ip_address = u'' - self.instance_endpoints = InstanceEndpoints() - self.power_state = u'' - self.fqdn = u'' - self.host_name = u'' + def _perform_post(self, path, body, response_type=None, async=False): + request = AzureHTTPRequest() + request.method = 'POST' + request.host = azure_service_management_host + request.path = path + request.body = self._get_request_body(body) + request.path, request.query = self._update_request_uri_query(request) + request.headers = self._update_management_header(request) + response = self._perform_request(request) -class InstanceEndpoints(WindowsAzureData): + if response_type is not None: + return _parse_response(response, response_type) - def __init__(self): - self.instance_endpoints = _list_of(InstanceEndpoint) + if async: + return self._parse_response_for_async_op(response) - def __iter__(self): - return iter(self.instance_endpoints) + return None - def __len__(self): - return len(self.instance_endpoints) + def _perform_request(self, request): - def __getitem__(self, index): - return self.instance_endpoints[index] + connection = self.get_connection() -class InstanceEndpoint(WindowsAzureData): + try: + connection.putrequest(request.method, request.path) - def __init__(self): - self.name = u'' - self.vip = u'' - self.public_port = u'' - self.local_port = u'' - self.protocol = u'' + self.send_request_headers(connection, request.headers) + self.send_request_body(connection, request.body) -class InputEndpoints(WindowsAzureData): + resp = connection.getresponse() + status = int(resp.status) + message = resp.reason + respheader = headers = resp.getheaders() - def __init__(self): - self.input_endpoints = _list_of(InputEndpoint) + # for consistency across platforms, make header names lowercase + for i, value in enumerate(headers): + headers[i] = (value[0].lower(), value[1]) - def __iter__(self): - return iter(self.input_endpoints) + respbody 
= None + if resp.length is None: + respbody = resp.read() + elif resp.length > 0: + respbody = resp.read(resp.length) - def __len__(self): - return len(self.input_endpoints) + response = AzureHTTPResponse( + int(resp.status), resp.reason, headers, respbody) + if status >= 300: + raise AzureHTTPError(status, message, + respheader, respbody) - def __getitem__(self, index): - return self.input_endpoints[index] + return response -class InputEndpoint(WindowsAzureData): + finally: + connection.close() - def __init__(self): - self.role_name = u'' - self.vip = u'' - self.port = u'' + def _update_request_uri_query(self, request): + '''pulls the query string out of the URI and moves it into + the query portion of the request object. If there are already + query parameters on the request the parameters in the URI will + appear after the existing parameters''' -class RoleList(WindowsAzureData): + if '?' in request.path: + request.path, _, query_string = request.path.partition('?') + if query_string: + query_params = query_string.split('&') + for query in query_params: + if '=' in query: + name, _, value = query.partition('=') + request.query.append((name, value)) - def __init__(self): - self.roles = _list_of(Role) + request.path = url_quote(request.path, '/()$=\',') - def __iter__(self): - return iter(self.roles) + # add encoded queries to request.path. + if request.query: + request.path += '?' + for name, value in request.query: + if value is not None: + request.path += name + '=' + url_quote(value, '/()$=\',') + '&' + request.path = request.path[:-1] - def __len__(self): - return len(self.roles) + return request.path, request.query - def __getitem__(self, index): - return self.roles[index] + def _update_management_header(self, request): + ''' Add additional headers for management. 
''' -class Role(WindowsAzureData): + if request.method in ['PUT', 'POST', 'MERGE', 'DELETE']: + request.headers.append(('Content-Length', str(len(request.body)))) - def __init__(self): - self.role_name = u'' - self.os_version = u'' + # append additional headers base on the service + request.headers.append(('x-ms-version', X_MS_VERSION)) -class PersistentVMDowntimeInfo(WindowsAzureData): + # if it is not GET or HEAD request, must set content-type. + if not request.method in ['GET', 'HEAD']: + for name, _ in request.headers: + if 'content-type' == name.lower(): + break + else: + request.headers.append( + ('Content-Type', + 'application/atom+xml;type=entry;charset=utf-8')) - def __init__(self): - self.start_time = u'' - self.end_time = u'' - self.status = u'' + return request.headers -class AsynchronousOperationResult(WindowsAzureData): + def send_request_headers(self, connection, request_headers): + for name, value in request_headers: + if value: + connection.putheader(name, value) - def __init__(self, request_id=None): - self.request_id = request_id + connection.putheader('User-Agent', _USER_AGENT_STRING) + connection.endheaders() -class AzureHTTPRequest(object): + def send_request_body(self, connection, request_body): + if request_body: + assert isinstance(request_body, bytes) + connection.send(request_body) + elif (not isinstance(connection, HTTPSConnection) and + not isinstance(connection, HTTPConnection)): + connection.send(None) - '''Represents an HTTP Request. An HTTP Request consists of the following - attributes: + def _parse_response(self, response, return_type): + ''' + Parse the HTTPResponse's body and fill all the data into a class of + return_type. + ''' + return self._parse_response_body_from_xml_text(response.body, return_type) - host: the host name to connect to - method: the method to use to connect (string such as GET, POST, PUT, etc.) 
- path: the uri fragment - query: query parameters specified as a list of (name, value) pairs - headers: header values specified as (name, value) pairs - body: the body of the request. - protocol_override: - specify to use this protocol instead of the global one stored in - _HTTPClient. - ''' + def _parse_response_body_from_xml_text(self, respbody, return_type): + ''' + parse the xml and fill all the data into a class of return_type + ''' + doc = minidom.parseString(respbody) + return_obj = return_type() + for node in self._get_child_nodes(doc, return_type.__name__): + self._fill_data_to_return_object(node, return_obj) - def __init__(self): - self.host = '' - self.method = '' - self.path = '' - self.query = [] # list of (name, value) - self.headers = [] # list of (header name, header value) - self.body = '' - self.protocol_override = None + return return_obj -class AzureHTTPResponse(object): + def _get_child_nodes(self, node, tagName): + return [childNode for childNode in node.getElementsByTagName(tagName) + if childNode.parentNode == node] - """Represents a response from an HTTP request. 
An HTTPResponse has the - following attributes: + def _fill_data_to_return_object(self, node, return_obj): + members = dict(vars(return_obj)) + for name, value in members.items(): + if isinstance(value, _list_of): + setattr(return_obj, + name, + self._fill_list_of(node, + value.list_type, + value.xml_element_name)) + elif isinstance(value, _scalar_list_of): + setattr(return_obj, + name, + self._fill_scalar_list_of(node, + value.list_type, + self._get_serialization_name(name), + value.xml_element_name)) + elif isinstance(value, _dict_of): + setattr(return_obj, + name, + self._fill_dict_of(node, + self._get_serialization_name(name), + value.pair_xml_element_name, + value.key_xml_element_name, + value.value_xml_element_name)) + elif isinstance(value, WindowsAzureData): + setattr(return_obj, + name, + self._fill_instance_child(node, name, value.__class__)) + elif isinstance(value, dict): + setattr(return_obj, + name, + self._fill_dict(node, self._get_serialization_name(name))) + elif isinstance(value, _Base64String): + value = self._fill_data_minidom(node, name, '') + if value is not None: + value = self._decode_base64_to_text(value) + # always set the attribute, so we don't end up returning an object + # with type _Base64String + setattr(return_obj, name, value) + else: + value = self._fill_data_minidom(node, name, value) + if value is not None: + setattr(return_obj, name, value) - status: the status code of the response - message: the message - headers: the returned headers, as a list of (name, value) pairs - body: the body of the response - """ + def _fill_list_of(self, xmldoc, element_type, xml_element_name): + xmlelements = self._get_child_nodes(xmldoc, xml_element_name) + return [self._parse_response_body_from_xml_node(xmlelement, element_type) \ + for xmlelement in xmlelements] - def __init__(self, status, message, headers, body): - self.status = status - self.message = message - self.headers = headers - self.body = body + def 
_parse_response_body_from_xml_node(self, node, return_type): + ''' + parse the xml and fill all the data into a class of return_type + ''' + return_obj = return_type() + self._fill_data_to_return_object(node, return_obj) -class AzureHTTPError(Exception): + return return_obj - ''' HTTP Exception when response status code >= 300 ''' + def _fill_scalar_list_of(self, xmldoc, element_type, parent_xml_element_name, + xml_element_name): + xmlelements = self._get_child_nodes(xmldoc, parent_xml_element_name) + if xmlelements: + xmlelements = self._get_child_nodes(xmlelements[0], xml_element_name) + return [self._get_node_value(xmlelement, element_type) \ + for xmlelement in xmlelements] - def __init__(self, status, message, respheader, respbody): - '''Creates a new HTTPError with the specified status, message, - response headers and body''' - self.status = status - self.respheader = respheader - self.respbody = respbody - Exception.__init__(self, message) + def _get_node_value(self, xmlelement, data_type): + value = xmlelement.firstChild.nodeValue + if data_type is datetime: + return _to_datetime(value) + elif data_type is bool: + return value.lower() != 'false' + else: + return data_type(value) -""" -Helper classes. -""" + def _get_serialization_name(self,element_name): + """converts a Python name into a serializable name""" + known = _KNOWN_SERIALIZATION_XFORMS.get(element_name) + if known is not None: + return known -class _Base64String(str): - pass + if element_name.startswith('x_ms_'): + return element_name.replace('_', '-') + if element_name.endswith('_id'): + element_name = element_name.replace('_id', 'ID') + for name in ['content_', 'last_modified', 'if_', 'cache_control']: + if element_name.startswith(name): + element_name = element_name.replace('_', '-_') -class _list_of(list): + return ''.join(name.capitalize() for name in element_name.split('_')) - """a list which carries with it the type that's expected to go in it. 
- Used for deserializaion and construction of the lists""" + def _fill_dict_of(self, xmldoc, parent_xml_element_name, pair_xml_element_name, + key_xml_element_name, value_xml_element_name): + return_obj = {} - def __init__(self, list_type, xml_element_name=None): - self.list_type = list_type - if xml_element_name is None: - self.xml_element_name = list_type.__name__ - else: - self.xml_element_name = xml_element_name - super(_list_of, self).__init__() + xmlelements = self._get_child_nodes(xmldoc, parent_xml_element_name) + if xmlelements: + xmlelements = self._get_child_nodes(xmlelements[0], pair_xml_element_name) + for pair in xmlelements: + keys = self._get_child_nodes(pair, key_xml_element_name) + values = self._get_child_nodes(pair, value_xml_element_name) + if keys and values: + key = keys[0].firstChild.nodeValue + value = values[0].firstChild.nodeValue + return_obj[key] = value -class _scalar_list_of(list): + return return_obj - """a list of scalar types which carries with it the type that's - expected to go in it along with its xml element name. - Used for deserializaion and construction of the lists""" + def _fill_instance_child(self, xmldoc, element_name, return_type): + '''Converts a child of the current dom element to the specified type. + ''' + xmlelements = self._get_child_nodes( + xmldoc, self._get_serialization_name(element_name)) - def __init__(self, list_type, xml_element_name): - self.list_type = list_type - self.xml_element_name = xml_element_name - super(_scalar_list_of, self).__init__() + if not xmlelements: + return None -class _dict_of(dict): + return_obj = return_type() + self._fill_data_to_return_object(xmlelements[0], return_obj) - """a dict which carries with it the xml element names for key,val. 
- Used for deserializaion and construction of the lists""" + return return_obj - def __init__(self, pair_xml_element_name, key_xml_element_name, - value_xml_element_name): - self.pair_xml_element_name = pair_xml_element_name - self.key_xml_element_name = key_xml_element_name - self.value_xml_element_name = value_xml_element_name - super(_dict_of, self).__init__() + def _fill_dict(self, xmldoc, element_name): + xmlelements = self._get_child_nodes(xmldoc, element_name) + if xmlelements: + return_obj = {} + for child in xmlelements[0].childNodes: + if child.firstChild: + return_obj[child.nodeName] = child.firstChild.nodeValue + return return_obj -class AzureNodeDriver(NodeDriver): - - _instance_types = AZURE_COMPUTE_INSTANCE_TYPES - _blob_url = ".blob.core.windows.net" - features = {'create_node': ['password']} - service_location = collections.namedtuple('service_location',['is_affinity_group', 'service_location']) - sms = ServiceManagementService(subscription_id, certificate_path) + def _encode_base64(self, data): + if isinstance(data, _unicode_type): + data = data.encode('utf-8') + encoded = base64.b64encode(data) + return encoded.decode('utf-8') - def list_sizes(self): - """ - Lists all sizes + def _decode_base64_to_bytes(self, data): + if isinstance(data, _unicode_type): + data = data.encode('utf-8') + return base64.b64decode(data) - :rtype: ``list`` of :class:`NodeSize` - """ - sizes = [] + def _decode_base64_to_text(self, data): + decoded_bytes = self._decode_base64_to_bytes(data) + return decoded_bytes.decode('utf-8') - for key, values in self._instance_types.items(): - node_size = self._to_node_size(copy.deepcopy(values)) - sizes.append(node_size) + def _fill_data_minidom(self, xmldoc, element_name, data_member): + xmlelements = self._get_child_nodes( + xmldoc, self._get_serialization_name(element_name)) - return sizes + if not xmlelements or not xmlelements[0].childNodes: + return None - def list_images(self): - """ - Lists all images + value = 
xmlelements[0].firstChild.nodeValue - :rtype: ``list`` of :class:`NodeImage` - """ - data = self._perform_get(self._get_image_path(), Images) + if data_member is None: + return value + elif isinstance(data_member, datetime): + return self._to_datetime(value) + elif type(data_member) is bool: + return value.lower() != 'false' + else: + return type(data_member)(value) - return [self._to_image(i) for i in data] + def _to_datetime(self, strtime): + return datetime.strptime(strtime, "%Y-%m-%dT%H:%M:%S.%f") - def list_locations(self): - """ - Lists all locations + def _get_request_body(self, request_body): + if request_body is None: + return b'' - :rtype: ``list`` of :class:`NodeLocation` - """ - data = self._perform_get('/' + subscription_id + '/locations', Locations) + if isinstance(request_body, WindowsAzureData): + request_body = _convert_class_to_xml(request_body) - return [self._to_location(l) for l in data] + if isinstance(request_body, bytes): + return request_body - def list_nodes(self, ex_cloud_service_name=None): - """ - List all nodes + if isinstance(request_body, _unicode_type): + return request_body.encode('utf-8') - ex_cloud_service_name parameter is used to scope the request - to a specific Cloud Service. This is a required parameter as - nodes cannot exist outside of a Cloud Service nor be shared - between a Cloud Service within Azure. 
+ request_body = str(request_body) + if isinstance(request_body, _unicode_type): + return request_body.encode('utf-8') - :param ex_cloud_service_name: Cloud Service name - :type ex_cloud_service_name: ``str`` + return request_body - :rtype: ``list`` of :class:`Node` - """ - if not ex_cloud_service_name: - raise ValueError("ex_cloud_service_name is required.") + def _convert_class_to_xml(self, source, xml_prefix=True): + if source is None: + return '' - data = self._perform_get( - self._get_hosted_service_path(ex_cloud_service_name) + - '?embed-detail=True', - HostedService) + xmlstr = '' + if xml_prefix: + xmlstr = '' - try: - return [self._to_node(n) for n in data.deployments[0].role_instance_list] - except IndexError: + if isinstance(source, list): + for value in source: + xmlstr += _convert_class_to_xml(value, False) + elif isinstance(source, WindowsAzureData): + class_name = source.__class__.__name__ + xmlstr += '<' + class_name + '>' + for name, value in vars(source).items(): + if value is not None: + if isinstance(value, list) or \ + isinstance(value, WindowsAzureData): + xmlstr += _convert_class_to_xml(value, False) + else: + xmlstr += ('<' + self._get_serialization_name(name) + '>' + + xml_escape(str(value)) + '') + xmlstr += '' + return xmlstr + + def _parse_response_for_async_op(self, response): + if response is None: return None - def reboot_node(self, node, ex_cloud_service_name=None, ex_deployment_slot=None): - """ - Reboots a node. + result = AsynchronousOperationResult() + if response.headers: + for name, value in response.headers: + if name.lower() == 'x-ms-request-id': + result.request_id = value - ex_cloud_service_name parameter is used to scope the request - to a specific Cloud Service. This is a required parameter as - nodes cannot exist outside of a Cloud Service nor be shared - between a Cloud Service within Azure. 
+ return result - :param ex_cloud_service_name: Cloud Service name - :type ex_cloud_service_name: ``str`` + def _get_deployment_path_using_name(self, service_name, + deployment_name=None): + return self._get_path('services/hostedservices/' + _str(service_name) + + '/deployments', deployment_name) - :param ex_deployment_name: Options are "production" (default) - or "Staging". (Optional) - :type ex_deployment_name: ``str`` + def _get_path(self, resource, name): + path = '/' + subscription_id + '/' + resource + if name is not None: + path += '/' + _str(name) + return path - :rtype: ``list`` of :class:`Node` - """ + def _lower(self, text): + return text.lower() - if not ex_cloud_service_name: - raise ValueError("ex_cloud_service_name is required.") + def _get_image_path(self, image_name=None): + return self._get_path('services/images', image_name) - if not ex_deployment_slot: - ex_deployment_slot = "production" + def _get_hosted_service_path(self, service_name=None): + return self._get_path('services/hostedservices', service_name) - _deployment_name = self._get_deployment(service_name=ex_cloud_service_name,deployment_slot=ex_deployment_slot).name - print _deployment_name + def _get_deployment_path_using_slot(self, service_name, slot=None): + return self._get_path('services/hostedservices/' + _str(service_name) + + '/deploymentslots', slot) - try: - result = self._perform_post( - self._get_deployment_path_using_name( - ex_cloud_service_name, _deployment_name) + \ - '/roleinstances/' + _str(node.id) + \ - '?comp=reboot', '', async=True) - if result.request_id: - return True - else: - return False - except Exception, e: - print e - return False + def _get_disk_path(self, disk_name=None): + return self._get_path('services/disks', disk_name) - def list_volumes(self, node=None): - """ - Lists volumes of the disks in the image repository that are - associated with the specificed subscription. 
+ def get_connection(self): + certificate_path = "/Users/baldwin/.azure/managementCertificate.pem" + port = HTTPS_PORT - Pass Node object to scope the list of volumes to a single - instance. + connection = HTTPSConnection( + azure_service_management_host, + int(port), + cert_file=certificate_path) - :rtype: ``list`` of :class:`StorageVolume` - """ + return connection - sms = ServiceManagementService(subscription_id, certificate_path) +""" +XML Serializer +""" +class AzureXmlSerializer(): - data = sms.list_disks() + @staticmethod + def create_storage_service_input_to_xml(service_name, description, label, + affinity_group, location, + geo_replication_enabled, + extended_properties): + return AzureXmlSerializer.doc_from_data( + 'CreateStorageServiceInput', + [('ServiceName', service_name), + ('Description', description), + ('Label', label, _encode_base64), + ('AffinityGroup', affinity_group), + ('Location', location), + ('GeoReplicationEnabled', geo_replication_enabled, _lower)], + extended_properties) - volumes = [self._to_volume(volume=v,node=node) for v in data] + @staticmethod + def update_storage_service_input_to_xml(description, label, + geo_replication_enabled, + extended_properties): + return AzureXmlSerializer.doc_from_data( + 'UpdateStorageServiceInput', + [('Description', description), + ('Label', label, _encode_base64), + ('GeoReplicationEnabled', geo_replication_enabled, _lower)], + extended_properties) - return volumes + @staticmethod + def regenerate_keys_to_xml(key_type): + return AzureXmlSerializer.doc_from_data('RegenerateKeys', + [('KeyType', key_type)]) - def create_node(self, ex_cloud_service_name=None, **kwargs): - """Create Azure Virtual Machine + @staticmethod + def update_hosted_service_to_xml(label, description, extended_properties): + return AzureXmlSerializer.doc_from_data('UpdateHostedService', + [('Label', label, _encode_base64), + ('Description', description)], + extended_properties) - Reference: http://bit.ly/1fIsCb7 
[www.windowsazure.com/en-us/documentation/] + @staticmethod + def create_hosted_service_to_xml(service_name, label, description, + location, affinity_group, + extended_properties): + return AzureXmlSerializer.doc_from_data( + 'CreateHostedService', + [('ServiceName', service_name), + ('Label', label, _encode_base64), + ('Description', description), + ('Location', location), + ('AffinityGroup', affinity_group)], + extended_properties) - We default to: + @staticmethod + def create_deployment_to_xml(name, package_url, label, configuration, + start_deployment, treat_warnings_as_error, + extended_properties): + return AzureXmlSerializer.doc_from_data( + 'CreateDeployment', + [('Name', name), + ('PackageUrl', package_url), + ('Label', label, _encode_base64), + ('Configuration', configuration), + ('StartDeployment', + start_deployment, _lower), + ('TreatWarningsAsError', treat_warnings_as_error, _lower)], + extended_properties) - + 3389/TCP - RDP - 1st Microsoft instance. - + RANDOM/TCP - RDP - All succeeding Microsoft instances. + @staticmethod + def swap_deployment_to_xml(production, source_deployment): + return AzureXmlSerializer.doc_from_data( + 'Swap', + [('Production', production), + ('SourceDeployment', source_deployment)]) - + 22/TCP - SSH - 1st Linux instance - + RANDOM/TCP - SSH - All succeeding Linux instances. + @staticmethod + def update_deployment_status_to_xml(status): + return AzureXmlSerializer.doc_from_data( + 'UpdateDeploymentStatus', + [('Status', status)]) - The above replicates the standard behavior of the Azure UI. 
- You can retrieve the assigned ports to each instance by - using the following private function: + @staticmethod + def change_deployment_to_xml(configuration, treat_warnings_as_error, mode, + extended_properties): + return AzureXmlSerializer.doc_from_data( + 'ChangeConfiguration', + [('Configuration', configuration), + ('TreatWarningsAsError', treat_warnings_as_error, _lower), + ('Mode', mode)], + extended_properties) - _get_endpoint_ports(service_name) - Returns public,private port key pair. + @staticmethod + def upgrade_deployment_to_xml(mode, package_url, configuration, label, + role_to_upgrade, force, extended_properties): + return AzureXmlSerializer.doc_from_data( + 'UpgradeDeployment', + [('Mode', mode), + ('PackageUrl', package_url), + ('Configuration', configuration), + ('Label', label, _encode_base64), + ('RoleToUpgrade', role_to_upgrade), + ('Force', force, _lower)], + extended_properties) - @inherits: :class:`NodeDriver.create_node` + @staticmethod + def rollback_upgrade_to_xml(mode, force): + return AzureXmlSerializer.doc_from_data( + 'RollbackUpdateOrUpgrade', + [('Mode', mode), + ('Force', force, _lower)]) - :keyword ex_cloud_service_name: Required. Name of the Azure Cloud Service. - :type ex_cloud_service_name: ``str`` + @staticmethod + def walk_upgrade_domain_to_xml(upgrade_domain): + return AzureXmlSerializer.doc_from_data( + 'WalkUpgradeDomain', + [('UpgradeDomain', upgrade_domain)]) - :keyword ex_storage_service_name: Optional: Name of the Azure Storage Service. - :type ex_cloud_service_name: ``str`` + @staticmethod + def certificate_file_to_xml(data, certificate_format, password): + return AzureXmlSerializer.doc_from_data( + 'CertificateFile', + [('Data', data), + ('CertificateFormat', certificate_format), + ('Password', password)]) - :keyword ex_deployment_name: Optional. The name of the deployment. - If this is not passed in we default to - using the Cloud Service name. 
- :type ex_deployment_name: ``str`` + @staticmethod + def create_affinity_group_to_xml(name, label, description, location): + return AzureXmlSerializer.doc_from_data( + 'CreateAffinityGroup', + [('Name', name), + ('Label', label, _encode_base64), + ('Description', description), + ('Location', location)]) - :keyword ex_deployment_slot: Optional: Valid values: production|staging. - Defaults to production. - :type ex_cloud_service_name: ``str`` + @staticmethod + def update_affinity_group_to_xml(label, description): + return AzureXmlSerializer.doc_from_data( + 'UpdateAffinityGroup', + [('Label', label, _encode_base64), + ('Description', description)]) - :keyword ex_linux_user_id: Optional. Defaults to 'azureuser'. - :type ex_cloud_service_name: ``str`` + @staticmethod + def subscription_certificate_to_xml(public_key, thumbprint, data): + return AzureXmlSerializer.doc_from_data( + 'SubscriptionCertificate', + [('SubscriptionCertificatePublicKey', public_key), + ('SubscriptionCertificateThumbprint', thumbprint), + ('SubscriptionCertificateData', data)]) - """ - name = kwargs['name'] - size = kwargs['size'] - image = kwargs['image'] + @staticmethod + def os_image_to_xml(label, media_link, name, os): + return AzureXmlSerializer.doc_from_data( + 'OSImage', + [('Label', label), + ('MediaLink', media_link), + ('Name', name), + ('OS', os)]) - password = None - auth = self._get_and_check_auth(kwargs["auth"]) - password = auth.password + @staticmethod + def data_virtual_hard_disk_to_xml(host_caching, disk_label, disk_name, lun, + logical_disk_size_in_gb, media_link, + source_media_link): + return AzureXmlSerializer.doc_from_data( + 'DataVirtualHardDisk', + [('HostCaching', host_caching), + ('DiskLabel', disk_label), + ('DiskName', disk_name), + ('Lun', lun), + ('LogicalDiskSizeInGB', logical_disk_size_in_gb), + ('MediaLink', media_link), + ('SourceMediaLink', source_media_link)]) - sms = ServiceManagementService(subscription_id, certificate_path) + @staticmethod + def 
disk_to_xml(has_operating_system, label, media_link, name, os): + return AzureXmlSerializer.doc_from_data( + 'Disk', + [('HasOperatingSystem', has_operating_system, _lower), + ('Label', label), + ('MediaLink', media_link), + ('Name', name), + ('OS', os)]) - if not ex_cloud_service_name: - raise ValueError("ex_cloud_service_name is required.") + @staticmethod + def restart_role_operation_to_xml(): + return AzureXmlSerializer.doc_from_xml( + 'RestartRoleOperation', + 'RestartRoleOperation') - if "ex_deployment_slot" in kwargs: - ex_deployment_slot = kwargs['ex_deployment_slot'] - else: - ex_deployment_slot = "production" # We assume production if this is not provided. + @staticmethod + def shutdown_role_operation_to_xml(): + return AzureXmlSerializer.doc_from_xml( + 'ShutdownRoleOperation', + 'ShutdownRoleOperation') - if "ex_linux_user_id" in kwargs: - ex_linux_user_id = kwargs['ex_linux_user_id'] - else: - # This mimics the Azure UI behavior. - ex_linux_user_id = "azureuser" + @staticmethod + def start_role_operation_to_xml(): + return AzureXmlSerializer.doc_from_xml( + 'StartRoleOperation', + 'StartRoleOperation') - node_list = self.list_nodes(ex_cloud_service_name=ex_cloud_service_name) - network_config = ConfigurationSet() - network_config.configuration_set_type = 'NetworkConfiguration' + @staticmethod + def windows_configuration_to_xml(configuration): + xml = AzureXmlSerializer.data_to_xml( + [('ConfigurationSetType', configuration.configuration_set_type), + ('ComputerName', configuration.computer_name), + ('AdminPassword', configuration.admin_password), + ('ResetPasswordOnFirstLogon', + configuration.reset_password_on_first_logon, + _lower), + ('EnableAutomaticUpdates', + configuration.enable_automatic_updates, + _lower), + ('TimeZone', configuration.time_zone)]) - # We do this because we need to pass a Configuration to the - # method. This will be either Linux or Windows. 
- if re.search("Win|SQL|SharePoint|Visual|Dynamics|DynGP|BizTalk", image, re.I): - machine_config = WindowsConfigurationSet(name, password) - machine_config.domain_join = None + if configuration.domain_join is not None: + xml += '' + xml += '' + xml += AzureXmlSerializer.data_to_xml( + [('Domain', configuration.domain_join.credentials.domain), + ('Username', configuration.domain_join.credentials.username), + ('Password', configuration.domain_join.credentials.password)]) + xml += '' + xml += AzureXmlSerializer.data_to_xml( + [('JoinDomain', configuration.domain_join.join_domain), + ('MachineObjectOU', + configuration.domain_join.machine_object_ou)]) + xml += '' + if configuration.stored_certificate_settings is not None: + xml += '' + for cert in configuration.stored_certificate_settings: + xml += '' + xml += AzureXmlSerializer.data_to_xml( + [('StoreLocation', cert.store_location), + ('StoreName', cert.store_name), + ('Thumbprint', cert.thumbprint)]) + xml += '' + xml += '' + return xml - if node_list is None: - port = "3389" - else: - port = random.randint(41952,65535) - endpoints = self._get_deployment(service_name=ex_cloud_service_name,deployment_slot=ex_deployment_slot) + @staticmethod + def linux_configuration_to_xml(configuration): + xml = AzureXmlSerializer.data_to_xml( + [('ConfigurationSetType', configuration.configuration_set_type), + ('HostName', configuration.host_name), + ('UserName', configuration.user_name), + ('UserPassword', configuration.user_password), + ('DisableSshPasswordAuthentication', + configuration.disable_ssh_password_authentication, + _lower)]) - for instances in endpoints.role_instance_list: - ports = [] - for ep in instances.instance_endpoints: - ports += [ep.public_port] + if configuration.ssh is not None: + xml += '' + xml += '' + for key in configuration.ssh.public_keys: + xml += '' + xml += AzureXmlSerializer.data_to_xml( + [('Fingerprint', key.fingerprint), + ('Path', key.path)]) + xml += '' + xml += '' + xml += '' + for key in 
configuration.ssh.key_pairs: + xml += '' + xml += AzureXmlSerializer.data_to_xml( + [('Fingerprint', key.fingerprint), + ('Path', key.path)]) + xml += '' + xml += '' + xml += '' + return xml - while port in ports: - port = random.randint(41952,65535) + @staticmethod + def network_configuration_to_xml(configuration): + xml = AzureXmlSerializer.data_to_xml( + [('ConfigurationSetType', configuration.configuration_set_type)]) + xml += '' + for endpoint in configuration.input_endpoints: + xml += '' + xml += AzureXmlSerializer.data_to_xml( + [('LoadBalancedEndpointSetName', + endpoint.load_balanced_endpoint_set_name), + ('LocalPort', endpoint.local_port), + ('Name', endpoint.name), + ('Port', endpoint.port)]) - endpoint = ConfigurationSetInputEndpoint( - name='Remote Desktop', - protocol='tcp', - port=port, - local_port='3389', - load_balanced_endpoint_set_name=None, - enable_direct_server_return=False - ) - else: - if node_list is None: - port = "22" - else: - port = random.randint(41952,65535) - endpoints = self._get_deployment(service_name=ex_cloud_service_name,deployment_slot=ex_deployment_slot) + if endpoint.load_balancer_probe.path or\ + endpoint.load_balancer_probe.port or\ + endpoint.load_balancer_probe.protocol: + xml += '' + xml += AzureXmlSerializer.data_to_xml( + [('Path', endpoint.load_balancer_probe.path), + ('Port', endpoint.load_balancer_probe.port), + ('Protocol', endpoint.load_balancer_probe.protocol)]) + xml += '' - for instances in endpoints.role_instance_list: - ports = [] - for ep in instances.instance_endpoints: - ports += [ep.public_port] + xml += AzureXmlSerializer.data_to_xml( + [('Protocol', endpoint.protocol), + ('EnableDirectServerReturn', + endpoint.enable_direct_server_return, + _lower)]) - while port in ports: - port = random.randint(41952,65535) + xml += '' + xml += '' + xml += '' + for name in configuration.subnet_names: + xml += AzureXmlSerializer.data_to_xml([('SubnetName', name)]) + xml += '' + return xml - endpoint = 
ConfigurationSetInputEndpoint( - name='SSH', - protocol='tcp', - port=port, - local_port='22', - load_balanced_endpoint_set_name=None, - enable_direct_server_return=False - ) - machine_config = LinuxConfigurationSet(name, ex_linux_user_id, password, False) + @staticmethod + def role_to_xml(availability_set_name, data_virtual_hard_disks, + network_configuration_set, os_virtual_hard_disk, role_name, + role_size, role_type, system_configuration_set): + xml = AzureXmlSerializer.data_to_xml([('RoleName', role_name), + ('RoleType', role_type)]) - network_config.input_endpoints.input_endpoints.append(endpoint) + xml += '' - _storage_location = self._get_cloud_service_location(service_name=ex_cloud_service_name) - - # OK, bit annoying here. You must create a deployment before - # you can create an instance; however, the deployment function - # creates the first instance, but all subsequent instances - # must be created using the add_role function. - # - # So, yeah, annoying. - if node_list is None: - # This is the first node in this cloud service. 
- if "ex_storage_service_name" in kwargs: - ex_storage_service_name = kwargs['ex_storage_service_name'] - else: - ex_storage_service_name = ex_cloud_service_name - ex_storage_service_name = re.sub(ur'[\W_]+', u'', ex_storage_service_name.lower(), flags=re.UNICODE) - if self._is_storage_service_unique(service_name=ex_storage_service_name): - self._create_storage_account( - service_name=ex_storage_service_name, - location=_storage_location.service_location, - is_affinity_group=_storage_location.is_affinity_group - ) + if system_configuration_set is not None: + xml += '' + if isinstance(system_configuration_set, WindowsConfigurationSet): + xml += AzureXmlSerializer.windows_configuration_to_xml( + system_configuration_set) + elif isinstance(system_configuration_set, LinuxConfigurationSet): + xml += AzureXmlSerializer.linux_configuration_to_xml( + system_configuration_set) + xml += '' - if "ex_deployment_name" in kwargs: - ex_deployment_name = kwargs['ex_deployment_name'] - else: - ex_deployment_name = ex_cloud_service_name + if network_configuration_set is not None: + xml += '' + xml += AzureXmlSerializer.network_configuration_to_xml( + network_configuration_set) + xml += '' - blob_url = "http://" + ex_storage_service_name + ".blob.core.windows.net" - disk_name = "{0}-{1}-{2}.vhd".format(ex_cloud_service_name,name,time.strftime("%Y-%m-%d")) # Azure's pattern in the UI. 
- media_link = blob_url + "/vhds/" + disk_name - disk_config = OSVirtualHardDisk(image, media_link) - - result = sms.create_virtual_machine_deployment( - service_name=ex_cloud_service_name, - deployment_name=ex_deployment_name, - deployment_slot=ex_deployment_slot, - label=name, - role_name=name, - system_config=machine_config, - os_virtual_hard_disk=disk_config, - network_config=network_config, - role_size=size - ) - else: - _deployment_name = self._get_deployment(service_name=ex_cloud_service_name,deployment_slot=ex_deployment_slot).name + xml += '' - if "ex_storage_service_name" in kwargs: - ex_storage_service_name = kwargs['ex_storage_service_name'] - else: - ex_storage_service_name = ex_cloud_service_name - ex_storage_service_name = re.sub(ur'[\W_]+', u'', ex_storage_service_name.lower(), flags=re.UNICODE) - - if self._is_storage_service_unique(service_name=ex_storage_service_name): - self._create_storage_account( - service_name=ex_storage_service_name, - location=_storage_location.service_location, - is_affinity_group=_storage_location.is_affinity_group - ) + if availability_set_name is not None: + xml += AzureXmlSerializer.data_to_xml( + [('AvailabilitySetName', availability_set_name)]) - blob_url = "http://" + ex_storage_service_name + ".blob.core.windows.net" - disk_name = "{0}-{1}-{2}.vhd".format(ex_cloud_service_name,name,time.strftime("%Y-%m-%d")) - media_link = blob_url + "/vhds/" + disk_name - disk_config = OSVirtualHardDisk(image, media_link) + if data_virtual_hard_disks is not None: + xml += '' + for hd in data_virtual_hard_disks: + xml += '' + xml += AzureXmlSerializer.data_to_xml( + [('HostCaching', hd.host_caching), + ('DiskLabel', hd.disk_label), + ('DiskName', hd.disk_name), + ('Lun', hd.lun), + ('LogicalDiskSizeInGB', hd.logical_disk_size_in_gb), + ('MediaLink', hd.media_link)]) + xml += '' + xml += '' - result = self.sms.add_role( - service_name=ex_cloud_service_name, - deployment_name=_deployment_name, - role_name=name, - 
system_config=machine_config, - os_virtual_hard_disk=disk_config, - network_config=network_config, - role_size=size - ) + if os_virtual_hard_disk is not None: + xml += '' + xml += AzureXmlSerializer.data_to_xml( + [('HostCaching', os_virtual_hard_disk.host_caching), + ('DiskLabel', os_virtual_hard_disk.disk_label), + ('DiskName', os_virtual_hard_disk.disk_name), + ('MediaLink', os_virtual_hard_disk.media_link), + ('SourceImageName', os_virtual_hard_disk.source_image_name)]) + xml += '' - return Node( - id=name, - name=name, - state=NodeState.PENDING, - public_ips=[], - private_ips=[], - driver=self.connection.driver - ) + if role_size is not None: + xml += AzureXmlSerializer.data_to_xml([('RoleSize', role_size)]) - def destroy_node(self, node, ex_cloud_service_name=None, ex_deployment_slot=None): - """Remove Azure Virtual Machine + return xml - This removes the instance, but does not - remove the disk. You will need to use destroy_volume. - Azure sometimes has an issue where it will hold onto - a blob lease for an extended amount of time. + @staticmethod + def add_role_to_xml(role_name, system_configuration_set, + os_virtual_hard_disk, role_type, + network_configuration_set, availability_set_name, + data_virtual_hard_disks, role_size): + xml = AzureXmlSerializer.role_to_xml( + availability_set_name, + data_virtual_hard_disks, + network_configuration_set, + os_virtual_hard_disk, + role_name, + role_size, + role_type, + system_configuration_set) + return AzureXmlSerializer.doc_from_xml('PersistentVMRole', xml) - :keyword ex_cloud_service_name: Required. Name of the Azure Cloud Service. 
- :type ex_cloud_service_name: ``str`` + @staticmethod + def update_role_to_xml(role_name, os_virtual_hard_disk, role_type, + network_configuration_set, availability_set_name, + data_virtual_hard_disks, role_size): + xml = AzureXmlSerializer.role_to_xml( + availability_set_name, + data_virtual_hard_disks, + network_configuration_set, + os_virtual_hard_disk, + role_name, + role_size, + role_type, + None) + return AzureXmlSerializer.doc_from_xml('PersistentVMRole', xml) - :keyword ex_deployment_slot: Optional: The name of the deployment - slot. If this is not passed in we - default to production. - :type ex_deployment_slot: ``str`` - """ + @staticmethod + def capture_role_to_xml(post_capture_action, target_image_name, + target_image_label, provisioning_configuration): + xml = AzureXmlSerializer.data_to_xml( + [('OperationType', 'CaptureRoleOperation'), + ('PostCaptureAction', post_capture_action)]) - if not ex_cloud_service_name: - raise ValueError("ex_cloud_service_name is required.") + if provisioning_configuration is not None: + xml += '' + if isinstance(provisioning_configuration, WindowsConfigurationSet): + xml += AzureXmlSerializer.windows_configuration_to_xml( + provisioning_configuration) + elif isinstance(provisioning_configuration, LinuxConfigurationSet): + xml += AzureXmlSerializer.linux_configuration_to_xml( + provisioning_configuration) + xml += '' - if not ex_deployment_slot: - ex_deployment_slot = "production" + xml += AzureXmlSerializer.data_to_xml( + [('TargetImageLabel', target_image_label), + ('TargetImageName', target_image_name)]) - _deployment = self._get_deployment(service_name=ex_cloud_service_name,deployment_slot=ex_deployment_slot) - _deployment_name = _deployment.name + return AzureXmlSerializer.doc_from_xml('CaptureRoleOperation', xml) - _server_deployment_count = len(_deployment.role_instance_list) + @staticmethod + def virtual_machine_deployment_to_xml(deployment_name, deployment_slot, + label, role_name, + system_configuration_set, + 
os_virtual_hard_disk, role_type, + network_configuration_set, + availability_set_name, + data_virtual_hard_disks, role_size, + virtual_network_name): + xml = AzureXmlSerializer.data_to_xml([('Name', deployment_name), + ('DeploymentSlot', deployment_slot), + ('Label', label)]) + xml += '' + xml += '' + xml += AzureXmlSerializer.role_to_xml( + availability_set_name, + data_virtual_hard_disks, + network_configuration_set, + os_virtual_hard_disk, + role_name, + role_size, + role_type, + system_configuration_set) + xml += '' + xml += '' - sms = ServiceManagementService(subscription_id, certificate_path) + if virtual_network_name is not None: + xml += AzureXmlSerializer.data_to_xml( + [('VirtualNetworkName', virtual_network_name)]) - try: - if _server_deployment_count > 1: - data = sms.delete_role(service_name=ex_cloud_service_name, - deployment_name=_deployment_name, - role_name=node.id, - delete_attached_disks=True) - return True + return AzureXmlSerializer.doc_from_xml('Deployment', xml) + + @staticmethod + def data_to_xml(data): + '''Creates an xml fragment from the specified data. 
+ data: Array of tuples, where first: xml element name + second: xml element text + third: conversion function + ''' + xml = '' + for element in data: + name = element[0] + val = element[1] + if len(element) > 2: + converter = element[2] else: - data = sms.delete_deployment(service_name=ex_cloud_service_name,deployment_name=_deployment_name,delete_attached_disks=True) - return True - except Exception: - return False + converter = None - """ Functions not implemented - """ - def create_volume_snapshot(self): - raise NotImplementedError( - 'You cannot create snapshots of ' - 'Azure VMs at this time.') + if val is not None: + if converter is not None: + text = _str(converter(_str(val))) + else: + text = _str(val) - def attach_volume(self): - raise NotImplementedError( - 'attach_volume is not supported ' - 'at this time.') + xml += ''.join(['<', name, '>', text, '']) + return xml - def create_volume(self): - raise NotImplementedError( - 'create_volume is not supported ' - 'at this time.') + @staticmethod + def doc_from_xml(document_element_name, inner_xml): + '''Wraps the specified xml in an xml root element with default azure + namespaces''' + xml = ''.join(['<', document_element_name, + ' xmlns:i="http://www.w3.org/2001/XMLSchema-instance"', + ' xmlns="http://schemas.microsoft.com/windowsazure">']) + xml += inner_xml + xml += ''.join(['']) + return xml - def detach_volume(self): - raise NotImplementedError( - 'detach_volume is not supported ' - 'at this time.') + @staticmethod + def doc_from_data(document_element_name, data, extended_properties=None): + xml = AzureXmlSerializer.data_to_xml(data) + if extended_properties is not None: + xml += AzureXmlSerializer.extended_properties_dict_to_xml_fragment( + extended_properties) + return AzureXmlSerializer.doc_from_xml(document_element_name, xml) - def destroy_volume(self): - raise NotImplementedError( - 'destroy_volume is not supported ' - 'at this time.') + @staticmethod + def 
extended_properties_dict_to_xml_fragment(extended_properties): + xml = '' + if extended_properties is not None and len(extended_properties) > 0: + xml += '' + for key, val in extended_properties.items(): + xml += ''.join(['', + '', + _str(key), + '', + '', + _str(val), + '', + '']) + xml += '' + return xm - """Private Functions - """ +""" +Data Classes +""" - def _to_node(self, data): - """ - Convert the data from a Azure response object into a Node - """ +class WindowsAzureData(object): - if len(data.instance_endpoints) >= 1: - public_ip = data.instance_endpoints[0].vip - else: - public_ip = [] + ''' This is the base of data class. + It is only used to check whether it is instance or not. ''' + pass - for port in data.instance_endpoints: - if port.name == 'Remote Desktop': - remote_desktop_port = port.public_port - else: - remote_desktop_port = [] +class OSVirtualHardDisk(WindowsAzureData): - if port.name == "SSH": - ssh_port = port.public_port - else: - ssh_port = [] + def __init__(self, source_image_name=None, media_link=None, + host_caching=None, disk_label=None, disk_name=None): + self.source_image_name = source_image_name + self.media_link = media_link + self.host_caching = host_caching + self.disk_label = disk_label + self.disk_name = disk_name + self.os = u'' # undocumented, not used when adding a role - # When servers are Linux, this fails due to the remote_desktop_port - # therefore we need to add a check in here. 
- return Node( - id=data.instance_name, - name=data.instance_name, - state=data.instance_status, - public_ips=[public_ip], - private_ips=[data.ip_address], - driver=self.connection.driver, - extra={ - 'remote_desktop_port': remote_desktop_port, - 'ssh_port': ssh_port, - 'power_state': data.power_state, - 'instance_size': data.instance_size}) +class LinuxConfigurationSet(WindowsAzureData): + + def __init__(self, host_name=None, user_name=None, user_password=None, + disable_ssh_password_authentication=None): + self.configuration_set_type = u'LinuxProvisioningConfiguration' + self.host_name = host_name + self.user_name = user_name + self.user_password = user_password + self.disable_ssh_password_authentication =\ + disable_ssh_password_authentication + self.ssh = SSH() - def _to_location(self, data): - """ - Convert the data from a Azure resonse object into a location - """ - country = data.display_name +class WindowsConfigurationSet(WindowsAzureData): - if "Asia" in data.display_name: - country = "Asia" + def __init__(self, computer_name=None, admin_password=None, + reset_password_on_first_logon=None, + enable_automatic_updates=None, time_zone=None): + self.configuration_set_type = u'WindowsProvisioningConfiguration' + self.computer_name = computer_name + self.admin_password = admin_password + self.reset_password_on_first_logon = reset_password_on_first_logon + self.enable_automatic_updates = enable_automatic_updates + self.time_zone = time_zone + self.domain_join = DomainJoin() + self.stored_certificate_settings = StoredCertificateSettings() - if "Europe" in data.display_name: - country = "Europe" +class SSH(WindowsAzureData): - if "US" in data.display_name: - country = "US" + def __init__(self): + self.public_keys = PublicKeys() + self.key_pairs = KeyPairs() - if "Japan" in data.display_name: - country = "Japan" +class PublicKeys(WindowsAzureData): - if "Brazil" in data.display_name: - country = "Brazil" + def __init__(self): + self.public_keys = _list_of(PublicKey) 
- return NodeLocation( - id=data.name, - name=data.display_name, - country=country, - driver=self.connection.driver) + def __iter__(self): + return iter(self.public_keys) - def _to_node_size(self, data): - """ - Convert the AZURE_COMPUTE_INSTANCE_TYPES into NodeSize - """ - - return NodeSize( - id=data["id"], - name=data["name"], - ram=data["ram"], - disk=data["disk"], - bandwidth=data["bandwidth"], - price=data["price"], - driver=self.connection.driver, - extra={ - 'max_data_disks' : data["max_data_disks"], - 'cores' : data["cores"] - }) + def __len__(self): + return len(self.public_keys) - def _to_image(self, data): + def __getitem__(self, index): + return self.public_keys[index] - return NodeImage( - id=data.name, - name=data.label +class PublicKey(WindowsAzureData): + def __init__(self, fingerprint=u'', path=u''): + self.fingerprint = fingerprint + self.path = path - , - driver=self.connection.driver, - extra={ - 'os' : data.os, - 'category' : data.category, - 'description' : data.description, - 'location' : data.location, - 'affinity_group' : data.affinity_group, - 'media_link' : data.media_link - }) +class KeyPairs(WindowsAzureData): - def _to_volume(self, volume, node): + def __init__(self): + self.key_pairs = _list_of(KeyPair) - if node: - if hasattr(volume.attached_to, 'role_name'): - if volume.attached_to.role_name == node.id: - extra = {} - extra['affinity_group'] = volume.affinity_group - if hasattr(volume.attached_to, 'hosted_service_name'): - extra['hosted_service_name'] = volume.attached_to.hosted_service_name - if hasattr(volume.attached_to, 'role_name'): - extra['role_name'] = volume.attached_to.role_name - if hasattr(volume.attached_to, 'deployment_name'): - extra['deployment_name'] = volume.attached_to.deployment_name - extra['os'] = volume.os - extra['location'] = volume.location - extra['media_link'] = volume.media_link - extra['source_image_name'] = volume.source_image_name + def __iter__(self): + return iter(self.key_pairs) - return 
StorageVolume(id=volume.name, - name=volume.name, - size=int(volume.logical_disk_size_in_gb), - driver=self.connection.driver, - extra=extra) - else: - extra = {} - extra['affinity_group'] = volume.affinity_group - if hasattr(volume.attached_to, 'hosted_service_name'): - extra['hosted_service_name'] = volume.attached_to.hosted_service_name - if hasattr(volume.attached_to, 'role_name'): - extra['role_name'] = volume.attached_to.role_name - if hasattr(volume.attached_to, 'deployment_name'): - extra['deployment_name'] = volume.attached_to.deployment_name - extra['os'] = volume.os - extra['location'] = volume.location - extra['media_link'] = volume.media_link - extra['source_image_name'] = volume.source_image_name + def __len__(self): + return len(self.key_pairs) - return StorageVolume(id=volume.name, - name=volume.name, - size=int(volume.logical_disk_size_in_gb), - driver=self.connection.driver, - extra=extra) + def __getitem__(self, index): + return self.key_pairs[index] - def _get_deployment(self, **kwargs): - _service_name = kwargs['service_name'] - _deployment_slot = kwargs['deployment_slot'] +class KeyPair(WindowsAzureData): - return self._perform_get( - self._get_deployment_path_using_slot( - _service_name, _deployment_slot), - Deployment) + def __init__(self, fingerprint=u'', path=u''): + self.fingerprint = fingerprint + self.path = path - def _get_cloud_service_location(self, service_name=None): +class LoadBalancerProbe(WindowsAzureData): - if not service_name: - raise ValueError("service_name is required.") + def __init__(self): + self.path = u'' + self.port = u'' + self.protocol = u'' - sms = ServiceManagementService(subscription_id, certificate_path) +class ConfigurationSet(WindowsAzureData): - res = sms.get_hosted_service_properties(service_name=service_name,embed_detail=False) + def __init__(self): + self.configuration_set_type = u'' + self.role_type = u'' + self.input_endpoints = ConfigurationSetInputEndpoints() + self.subnet_names = _scalar_list_of(str, 
'SubnetName') - _affinity_group = res.hosted_service_properties.affinity_group - _cloud_service_location = res.hosted_service_properties.location +class ConfigurationSetInputEndpoints(WindowsAzureData): - if _affinity_group is not None: - return self.service_location(True, _affinity_group) - elif _cloud_service_location is not None: - return self.service_location(False, _cloud_service_location) - else: - return None + def __init__(self): + self.input_endpoints = _list_of( + ConfigurationSetInputEndpoint, 'InputEndpoint') - def _is_storage_service_unique(self, service_name=None): - if not service_name: - raise ValueError("service_name is required.") + def __iter__(self): + return iter(self.input_endpoints) - sms = ServiceManagementService(subscription_id, certificate_path) - - _check_availability = sms.check_storage_account_name_availability(service_name=service_name) - - return _check_availability.result + def __len__(self): + return len(self.input_endpoints) - def _create_storage_account(self, **kwargs): - sms = ServiceManagementService(subscription_id, certificate_path) + def __getitem__(self, index): + return self.input_endpoints[index] - if kwargs['is_affinity_group'] is True: - result = sms.create_storage_account( - service_name=kwargs['service_name'], - description=kwargs['service_name'], - label=kwargs['service_name'], - affinity_group=kwargs['location']) - else: - result = sms.create_storage_account( - service_name=kwargs['service_name'], - description=kwargs['service_name'], - label=kwargs['service_name'], - location=kwargs['location']) +class ConfigurationSetInputEndpoint(WindowsAzureData): - # We need to wait for this to be created before we can - # create the storage container and the instance. 
+ def __init__(self, name=u'', protocol=u'', port=u'', local_port=u'', + load_balanced_endpoint_set_name=u'', + enable_direct_server_return=False): + self.enable_direct_server_return = enable_direct_server_return + self.load_balanced_endpoint_set_name = load_balanced_endpoint_set_name + self.local_port = local_port + self.name = name + self.port = port + self.load_balancer_probe = LoadBalancerProbe() + self.protocol = protocol - operation_status = sms.get_operation_status(result.request_id) +class Locations(WindowsAzureData): - timeout = 60 * 5 - waittime = 0 - interval = 5 + def __init__(self): + self.locations = _list_of(Location) - while operation_status.status == "InProgress" and waittime < timeout: - operation_status = sms.get_operation_status(result.request_id) - if operation_status.status == "Succeeded": - break + def __iter__(self): + return iter(self.locations) - waittime += interval - time.sleep(interval) - return + def __len__(self): + return len(self.locations) - def _perform_get(self, path, response_type): - request = AzureHTTPRequest() - request.method = 'GET' - request.host = azure_service_management_host - request.path = path - request.path, request.query = self._update_request_uri_query(request) - request.headers = self._update_management_header(request) - response = self._perform_request(request) + def __getitem__(self, index): + return self.locations[index] - if response_type is not None: - return self._parse_response(response, response_type) +class Location(WindowsAzureData): - return response + def __init__(self): + self.name = u'' + self.display_name = u'' + self.available_services = _scalar_list_of(str, 'AvailableService') - def _perform_post(self, path, body, response_type=None, async=False): - request = AzureHTTPRequest() - request.method = 'POST' - request.host = azure_service_management_host - request.path = path - request.body = self._get_request_body(body) - request.path, request.query = self._update_request_uri_query(request) - 
request.headers = self._update_management_header(request) - response = self._perform_request(request) +class Images(WindowsAzureData): - if response_type is not None: - return _parse_response(response, response_type) + def __init__(self): + self.images = _list_of(OSImage) - if async: - return self._parse_response_for_async_op(response) + def __iter__(self): + return iter(self.images) - return None + def __len__(self): + return len(self.images) - def _perform_request(self, request): + def __getitem__(self, index): + return self.images[index] - connection = self.get_connection() +class HostedServices(WindowsAzureData): - try: - connection.putrequest(request.method, request.path) + def __init__(self): + self.hosted_services = _list_of(HostedService) - self.send_request_headers(connection, request.headers) - self.send_request_body(connection, request.body) + def __iter__(self): + return iter(self.hosted_services) - resp = connection.getresponse() - status = int(resp.status) - message = resp.reason - respheader = headers = resp.getheaders() + def __len__(self): + return len(self.hosted_services) - # for consistency across platforms, make header names lowercase - for i, value in enumerate(headers): - headers[i] = (value[0].lower(), value[1]) + def __getitem__(self, index): + return self.hosted_services[index] - respbody = None - if resp.length is None: - respbody = resp.read() - elif resp.length > 0: - respbody = resp.read(resp.length) +class HostedService(WindowsAzureData): - response = AzureHTTPResponse( - int(resp.status), resp.reason, headers, respbody) - if status >= 300: - raise AzureHTTPError(status, message, - respheader, respbody) + def __init__(self): + self.url = u'' + self.service_name = u'' + self.hosted_service_properties = HostedServiceProperties() + self.deployments = Deployments() - return response +class HostedServiceProperties(WindowsAzureData): - finally: - connection.close() + def __init__(self): + self.description = u'' + self.location = u'' + 
self.affinity_group = u'' + self.label = _Base64String() + self.status = u'' + self.date_created = u'' + self.date_last_modified = u'' + self.extended_properties = _dict_of( + 'ExtendedProperty', 'Name', 'Value') - def _update_request_uri_query(self, request): - '''pulls the query string out of the URI and moves it into - the query portion of the request object. If there are already - query parameters on the request the parameters in the URI will - appear after the existing parameters''' +class Deployments(WindowsAzureData): - if '?' in request.path: - request.path, _, query_string = request.path.partition('?') - if query_string: - query_params = query_string.split('&') - for query in query_params: - if '=' in query: - name, _, value = query.partition('=') - request.query.append((name, value)) + def __init__(self): + self.deployments = _list_of(Deployment) - request.path = url_quote(request.path, '/()$=\',') + def __iter__(self): + return iter(self.deployments) - # add encoded queries to request.path. - if request.query: - request.path += '?' - for name, value in request.query: - if value is not None: - request.path += name + '=' + url_quote(value, '/()$=\',') + '&' - request.path = request.path[:-1] + def __len__(self): + return len(self.deployments) - return request.path, request.query + def __getitem__(self, index): + return self.deployments[index] - def _update_management_header(self, request): - ''' Add additional headers for management. 
''' +class Deployment(WindowsAzureData): - if request.method in ['PUT', 'POST', 'MERGE', 'DELETE']: - request.headers.append(('Content-Length', str(len(request.body)))) + def __init__(self): + self.name = u'' + self.deployment_slot = u'' + self.private_id = u'' + self.status = u'' + self.label = _Base64String() + self.url = u'' + self.configuration = _Base64String() + self.role_instance_list = RoleInstanceList() + self.upgrade_status = UpgradeStatus() + self.upgrade_domain_count = u'' + self.role_list = RoleList() + self.sdk_version = u'' + self.input_endpoint_list = InputEndpoints() + self.locked = False + self.rollback_allowed = False + self.persistent_vm_downtime_info = PersistentVMDowntimeInfo() + self.created_time = u'' + self.last_modified_time = u'' + self.extended_properties = _dict_of( + 'ExtendedProperty', 'Name', 'Value') - # append additional headers base on the service - request.headers.append(('x-ms-version', X_MS_VERSION)) +class RoleInstanceList(WindowsAzureData): - # if it is not GET or HEAD request, must set content-type. 
- if not request.method in ['GET', 'HEAD']: - for name, _ in request.headers: - if 'content-type' == name.lower(): - break - else: - request.headers.append( - ('Content-Type', - 'application/atom+xml;type=entry;charset=utf-8')) + def __init__(self): + self.role_instances = _list_of(RoleInstance) - return request.headers + def __iter__(self): + return iter(self.role_instances) - def send_request_headers(self, connection, request_headers): - for name, value in request_headers: - if value: - connection.putheader(name, value) + def __len__(self): + return len(self.role_instances) - connection.putheader('User-Agent', _USER_AGENT_STRING) - connection.endheaders() + def __getitem__(self, index): + return self.role_instances[index] - def send_request_body(self, connection, request_body): - if request_body: - assert isinstance(request_body, bytes) - connection.send(request_body) - elif (not isinstance(connection, HTTPSConnection) and - not isinstance(connection, HTTPConnection)): - connection.send(None) +class RoleInstance(WindowsAzureData): - def _parse_response(self, response, return_type): - ''' - Parse the HTTPResponse's body and fill all the data into a class of - return_type. 
- ''' - return self._parse_response_body_from_xml_text(response.body, return_type) + def __init__(self): + self.role_name = u'' + self.instance_name = u'' + self.instance_status = u'' + self.instance_upgrade_domain = 0 + self.instance_fault_domain = 0 + self.instance_size = u'' + self.instance_state_details = u'' + self.instance_error_code = u'' + self.ip_address = u'' + self.instance_endpoints = InstanceEndpoints() + self.power_state = u'' + self.fqdn = u'' + self.host_name = u'' - def _parse_response_body_from_xml_text(self, respbody, return_type): - ''' - parse the xml and fill all the data into a class of return_type - ''' - doc = minidom.parseString(respbody) - return_obj = return_type() - for node in self._get_child_nodes(doc, return_type.__name__): - self._fill_data_to_return_object(node, return_obj) +class InstanceEndpoints(WindowsAzureData): - return return_obj + def __init__(self): + self.instance_endpoints = _list_of(InstanceEndpoint) - def _get_child_nodes(self, node, tagName): - return [childNode for childNode in node.getElementsByTagName(tagName) - if childNode.parentNode == node] + def __iter__(self): + return iter(self.instance_endpoints) - def _fill_data_to_return_object(self, node, return_obj): - members = dict(vars(return_obj)) - for name, value in members.items(): - if isinstance(value, _list_of): - setattr(return_obj, - name, - self._fill_list_of(node, - value.list_type, - value.xml_element_name)) - elif isinstance(value, _scalar_list_of): - setattr(return_obj, - name, - self._fill_scalar_list_of(node, - value.list_type, - self._get_serialization_name(name), - value.xml_element_name)) - elif isinstance(value, _dict_of): - setattr(return_obj, - name, - self._fill_dict_of(node, - self._get_serialization_name(name), - value.pair_xml_element_name, - value.key_xml_element_name, - value.value_xml_element_name)) - elif isinstance(value, WindowsAzureData): - setattr(return_obj, - name, - self._fill_instance_child(node, name, value.__class__)) - elif 
isinstance(value, dict): - setattr(return_obj, - name, - self._fill_dict(node, self._get_serialization_name(name))) - elif isinstance(value, _Base64String): - value = self._fill_data_minidom(node, name, '') - if value is not None: - value = self._decode_base64_to_text(value) - # always set the attribute, so we don't end up returning an object - # with type _Base64String - setattr(return_obj, name, value) - else: - value = self._fill_data_minidom(node, name, value) - if value is not None: - setattr(return_obj, name, value) + def __len__(self): + return len(self.instance_endpoints) - def _fill_list_of(self, xmldoc, element_type, xml_element_name): - xmlelements = self._get_child_nodes(xmldoc, xml_element_name) - return [self._parse_response_body_from_xml_node(xmlelement, element_type) \ - for xmlelement in xmlelements] + def __getitem__(self, index): + return self.instance_endpoints[index] - def _parse_response_body_from_xml_node(self, node, return_type): - ''' - parse the xml and fill all the data into a class of return_type - ''' - return_obj = return_type() - self._fill_data_to_return_object(node, return_obj) +class InstanceEndpoint(WindowsAzureData): - return return_obj + def __init__(self): + self.name = u'' + self.vip = u'' + self.public_port = u'' + self.local_port = u'' + self.protocol = u'' - def _fill_scalar_list_of(self, xmldoc, element_type, parent_xml_element_name, - xml_element_name): - xmlelements = self._get_child_nodes(xmldoc, parent_xml_element_name) - if xmlelements: - xmlelements = self._get_child_nodes(xmlelements[0], xml_element_name) - return [self._get_node_value(xmlelement, element_type) \ - for xmlelement in xmlelements] +class InputEndpoints(WindowsAzureData): - def _get_node_value(self, xmlelement, data_type): - value = xmlelement.firstChild.nodeValue - if data_type is datetime: - return _to_datetime(value) - elif data_type is bool: - return value.lower() != 'false' - else: - return data_type(value) + def __init__(self): + 
self.input_endpoints = _list_of(InputEndpoint) - def _get_serialization_name(self,element_name): - """converts a Python name into a serializable name""" - known = _KNOWN_SERIALIZATION_XFORMS.get(element_name) - if known is not None: - return known + def __iter__(self): + return iter(self.input_endpoints) - if element_name.startswith('x_ms_'): - return element_name.replace('_', '-') - if element_name.endswith('_id'): - element_name = element_name.replace('_id', 'ID') - for name in ['content_', 'last_modified', 'if_', 'cache_control']: - if element_name.startswith(name): - element_name = element_name.replace('_', '-_') + def __len__(self): + return len(self.input_endpoints) - return ''.join(name.capitalize() for name in element_name.split('_')) + def __getitem__(self, index): + return self.input_endpoints[index] - def _fill_dict_of(self, xmldoc, parent_xml_element_name, pair_xml_element_name, - key_xml_element_name, value_xml_element_name): - return_obj = {} +class InputEndpoint(WindowsAzureData): - xmlelements = self._get_child_nodes(xmldoc, parent_xml_element_name) - if xmlelements: - xmlelements = self._get_child_nodes(xmlelements[0], pair_xml_element_name) - for pair in xmlelements: - keys = self._get_child_nodes(pair, key_xml_element_name) - values = self._get_child_nodes(pair, value_xml_element_name) - if keys and values: - key = keys[0].firstChild.nodeValue - value = values[0].firstChild.nodeValue - return_obj[key] = value + def __init__(self): + self.role_name = u'' + self.vip = u'' + self.port = u'' - return return_obj +class RoleList(WindowsAzureData): - def _fill_instance_child(self, xmldoc, element_name, return_type): - '''Converts a child of the current dom element to the specified type. 
- ''' - xmlelements = self._get_child_nodes( - xmldoc, self._get_serialization_name(element_name)) + def __init__(self): + self.roles = _list_of(Role) - if not xmlelements: - return None + def __iter__(self): + return iter(self.roles) - return_obj = return_type() - self._fill_data_to_return_object(xmlelements[0], return_obj) + def __len__(self): + return len(self.roles) - return return_obj + def __getitem__(self, index): + return self.roles[index] - def _fill_dict(self, xmldoc, element_name): - xmlelements = self._get_child_nodes(xmldoc, element_name) - if xmlelements: - return_obj = {} - for child in xmlelements[0].childNodes: - if child.firstChild: - return_obj[child.nodeName] = child.firstChild.nodeValue - return return_obj +class Role(WindowsAzureData): - def _encode_base64(dself, ata): - if isinstance(data, _unicode_type): - data = data.encode('utf-8') - encoded = base64.b64encode(data) - return encoded.decode('utf-8') + def __init__(self): + self.role_name = u'' + self.os_version = u'' - def _decode_base64_to_bytes(self, data): - if isinstance(data, _unicode_type): - data = data.encode('utf-8') - return base64.b64decode(data) +class PersistentVMDowntimeInfo(WindowsAzureData): - def _decode_base64_to_text(self, data): - decoded_bytes = self._decode_base64_to_bytes(data) - return decoded_bytes.decode('utf-8') + def __init__(self): + self.start_time = u'' + self.end_time = u'' + self.status = u'' - def _fill_data_minidom(self, xmldoc, element_name, data_member): - xmlelements = self._get_child_nodes( - xmldoc, self._get_serialization_name(element_name)) +class AsynchronousOperationResult(WindowsAzureData): - if not xmlelements or not xmlelements[0].childNodes: - return None + def __init__(self, request_id=None): + self.request_id = request_id - value = xmlelements[0].firstChild.nodeValue +class Disks(WindowsAzureData): - if data_member is None: - return value - elif isinstance(data_member, datetime): - return self._to_datetime(value) - elif type(data_member) is 
bool: - return value.lower() != 'false' - else: - return type(data_member)(value) + def __init__(self): + self.disks = _list_of(Disk) - def _to_datetime(self, strtime): - return datetime.strptime(strtime, "%Y-%m-%dT%H:%M:%S.%f") + def __iter__(self): + return iter(self.disks) - def _get_request_body(self, request_body): - if request_body is None: - return b'' + def __len__(self): + return len(self.disks) - if isinstance(request_body, WindowsAzureData): - request_body = _convert_class_to_xml(request_body) + def __getitem__(self, index): + return self.disks[index] - if isinstance(request_body, bytes): - return request_body +class Disk(WindowsAzureData): - if isinstance(request_body, _unicode_type): - return request_body.encode('utf-8') + def __init__(self): + self.affinity_group = u'' + self.attached_to = AttachedTo() + self.has_operating_system = u'' + self.is_corrupted = u'' + self.location = u'' + self.logical_disk_size_in_gb = 0 + self.label = u'' + self.media_link = u'' + self.name = u'' + self.os = u'' + self.source_image_name = u'' - request_body = str(request_body) - if isinstance(request_body, _unicode_type): - return request_body.encode('utf-8') +class AttachedTo(WindowsAzureData): - return request_body + def __init__(self): + self.hosted_service_name = u'' + self.deployment_name = u'' + self.role_name = u'' - def _convert_class_to_xml(self, source, xml_prefix=True): - if source is None: - return '' +class AzureHTTPRequest(object): + def __init__(self): + self.host = '' + self.method = '' + self.path = '' + self.query = [] # list of (name, value) + self.headers = [] # list of (header name, header value) + self.body = '' + self.protocol_override = None - xmlstr = '' - if xml_prefix: - xmlstr = '' +class AzureHTTPResponse(object): + def __init__(self, status, message, headers, body): + self.status = status + self.message = message + self.headers = headers + self.body = body - if isinstance(source, list): - for value in source: - xmlstr += 
_convert_class_to_xml(value, False) - elif isinstance(source, WindowsAzureData): - class_name = source.__class__.__name__ - xmlstr += '<' + class_name + '>' - for name, value in vars(source).items(): - if value is not None: - if isinstance(value, list) or \ - isinstance(value, WindowsAzureData): - xmlstr += _convert_class_to_xml(value, False) - else: - xmlstr += ('<' + self._get_serialization_name(name) + '>' + - xml_escape(str(value)) + '') - xmlstr += '' - return xmlstr +class AzureHTTPError(Exception): + def __init__(self, status, message, respheader, respbody): + '''Creates a new HTTPError with the specified status, message, + response headers and body''' + self.status = status + self.respheader = respheader + self.respbody = respbody + Exception.__init__(self, message) - def _parse_response_for_async_op(self, response): - if response is None: - return None +""" +Helpers - result = AsynchronousOperationResult() - if response.headers: - for name, value in response.headers: - if name.lower() == 'x-ms-request-id': - result.request_id = value +""" - return result +class _Base64String(str): + pass - def _get_deployment_path_using_name(self, service_name, - deployment_name=None): - return self._get_path('services/hostedservices/' + _str(service_name) + - '/deployments', deployment_name) +class _list_of(list): - def _get_path(self, resource, name): - path = '/' + subscription_id + '/' + resource - if name is not None: - path += '/' + _str(name) - return path + """a list which carries with it the type that's expected to go in it. 
+ Used for deserializaion and construction of the lists""" - def _lower(self, text): - return text.lower() + def __init__(self, list_type, xml_element_name=None): + self.list_type = list_type + if xml_element_name is None: + self.xml_element_name = list_type.__name__ + else: + self.xml_element_name = xml_element_name + super(_list_of, self).__init__() - def _get_image_path(self, image_name=None): - return self._get_path('services/images', image_name) +class _scalar_list_of(list): - def _get_hosted_service_path(self, service_name=None): - return self._get_path('services/hostedservices', service_name) + """a list of scalar types which carries with it the type that's + expected to go in it along with its xml element name. + Used for deserializaion and construction of the lists""" - def _get_deployment_path_using_slot(self, service_name, slot=None): - return self._get_path('services/hostedservices/' + _str(service_name) + - '/deploymentslots', slot) + def __init__(self, list_type, xml_element_name): + self.list_type = list_type + self.xml_element_name = xml_element_name + super(_scalar_list_of, self).__init__() - def get_connection(self): - certificate_path = "/Users/baldwin/.azure/managementCertificate.pem" - port = HTTPS_PORT +class _dict_of(dict): - connection = HTTPSConnection( - azure_service_management_host, - int(port), - cert_file=certificate_path) + """a dict which carries with it the xml element names for key,val. 
+ Used for deserializaion and construction of the lists""" - return connection \ No newline at end of file + def __init__(self, pair_xml_element_name, key_xml_element_name, + value_xml_element_name): + self.pair_xml_element_name = pair_xml_element_name + self.key_xml_element_name = key_xml_element_name + self.value_xml_element_name = value_xml_element_name + super(_dict_of, self).__init__() \ No newline at end of file From c2c894fb2ef349e1e731ab5355c17354f485e07c Mon Sep 17 00:00:00 2001 From: Matt Baldwin Date: Tue, 20 May 2014 12:52:04 -0700 Subject: [PATCH 279/315] Re-factored destroy_node to not rely on the azure sdk library. --- libcloud/compute/drivers/azure_compute.py | 46 ++++++++++++++++++----- 1 file changed, 36 insertions(+), 10 deletions(-) diff --git a/libcloud/compute/drivers/azure_compute.py b/libcloud/compute/drivers/azure_compute.py index 75b7e7fe5e..e644acb429 100644 --- a/libcloud/compute/drivers/azure_compute.py +++ b/libcloud/compute/drivers/azure_compute.py @@ -179,7 +179,7 @@ def _str(value): __version__ = '0.8.0' _USER_AGENT_STRING = 'pyazure/' + __version__ -X_MS_VERSION = '2012-03-01' +X_MS_VERSION = '2013-08-01' class AzureConnection(ConnectionUserAndKey): @@ -550,19 +550,26 @@ def destroy_node(self, node, ex_cloud_service_name=None, ex_deployment_slot=None _server_deployment_count = len(_deployment.role_instance_list) - sms = ServiceManagementService(subscription_id, certificate_path) - try: if _server_deployment_count > 1: - data = sms.delete_role(service_name=ex_cloud_service_name, - deployment_name=_deployment_name, - role_name=node.id, - delete_attached_disks=True) + path = self._get_role_path(ex_cloud_service_name, _deployment_name, node.id) + path += '?comp=media' # forces deletion of attached disks + + data = self._perform_delete(path, async=True) + return True else: - data = sms.delete_deployment(service_name=ex_cloud_service_name,deployment_name=_deployment_name,delete_attached_disks=True) + path = 
self._get_deployment_path_using_name( + ex_cloud_service_name, + _deployment_name) + + path += '?comp=media' + + data = self._perform_delete(path,async=True) + return True - except Exception: + except Exception, e: + print e return False """ Functions not implemented @@ -831,13 +838,27 @@ def _perform_post(self, path, body, response_type=None, async=False): response = self._perform_request(request) if response_type is not None: - return _parse_response(response, response_type) + return self._parse_response(response, response_type) if async: return self._parse_response_for_async_op(response) return None + def _perform_delete(self, path, async=False): + request = AzureHTTPRequest() + request.method = 'DELETE' + request.host = azure_service_management_host + request.path = path + request.path, request.query = self._update_request_uri_query(request) + request.headers = self._update_management_header(request) + response = self._perform_request(request) + + if async: + return self._parse_response_for_async_op(response) + + return None + def _perform_request(self, request): connection = self.get_connection() @@ -1211,6 +1232,11 @@ def _get_deployment_path_using_slot(self, service_name, slot=None): def _get_disk_path(self, disk_name=None): return self._get_path('services/disks', disk_name) + def _get_role_path(self, service_name, deployment_name, role_name=None): + return self._get_path('services/hostedservices/' + _str(service_name) + + '/deployments/' + deployment_name + + '/roles', role_name) + def get_connection(self): certificate_path = "/Users/baldwin/.azure/managementCertificate.pem" port = HTTPS_PORT From d58683e521228494abd3bbdd45aec64c9e2f5976 Mon Sep 17 00:00:00 2001 From: Matt Baldwin Date: Tue, 20 May 2014 15:06:25 -0700 Subject: [PATCH 280/315] Re-factored create_node to not rely on the azure sdk library. All functions are now refactored and the import of that library is not required anymore. Onto testing. 
--- libcloud/compute/drivers/azure_compute.py | 166 +++++++++++++++------- 1 file changed, 114 insertions(+), 52 deletions(-) diff --git a/libcloud/compute/drivers/azure_compute.py b/libcloud/compute/drivers/azure_compute.py index e644acb429..8fd82e8175 100644 --- a/libcloud/compute/drivers/azure_compute.py +++ b/libcloud/compute/drivers/azure_compute.py @@ -26,9 +26,6 @@ import copy import base64 -from azure import * -from azure.servicemanagement import * - from libcloud.compute.providers import Provider from libcloud.compute.base import Node, NodeDriver, NodeLocation, NodeSize from libcloud.compute.base import NodeImage, StorageVolume, VolumeSnapshot @@ -194,7 +191,6 @@ class AzureNodeDriver(NodeDriver): _blob_url = ".blob.core.windows.net" features = {'create_node': ['password']} service_location = collections.namedtuple('service_location',['is_affinity_group', 'service_location']) - sms = ServiceManagementService(subscription_id, certificate_path) def list_sizes(self): """ @@ -364,8 +360,6 @@ def create_node(self, ex_cloud_service_name=None, **kwargs): auth = self._get_and_check_auth(kwargs["auth"]) password = auth.password - sms = ServiceManagementService(subscription_id, certificate_path) - if not ex_cloud_service_name: raise ValueError("ex_cloud_service_name is required.") @@ -470,18 +464,23 @@ def create_node(self, ex_cloud_service_name=None, **kwargs): disk_name = "{0}-{1}-{2}.vhd".format(ex_cloud_service_name,name,time.strftime("%Y-%m-%d")) # Azure's pattern in the UI. 
media_link = blob_url + "/vhds/" + disk_name disk_config = OSVirtualHardDisk(image, media_link) - - result = sms.create_virtual_machine_deployment( - service_name=ex_cloud_service_name, - deployment_name=ex_deployment_name, - deployment_slot=ex_deployment_slot, - label=name, - role_name=name, - system_config=machine_config, - os_virtual_hard_disk=disk_config, - network_config=network_config, - role_size=size - ) + + result = self._perform_post( + self._get_deployment_path_using_name(ex_cloud_service_name), + AzureXmlSerializer.virtual_machine_deployment_to_xml( + ex_deployment_name, + ex_deployment_slot, + name, + name, + machine_config, + disk_config, + 'PersistentVMRole', + network_config, + None, + None, + size, + None), + async=True) else: _deployment_name = self._get_deployment(service_name=ex_cloud_service_name,deployment_slot=ex_deployment_slot).name @@ -503,15 +502,19 @@ def create_node(self, ex_cloud_service_name=None, **kwargs): media_link = blob_url + "/vhds/" + disk_name disk_config = OSVirtualHardDisk(image, media_link) - result = self.sms.add_role( - service_name=ex_cloud_service_name, - deployment_name=_deployment_name, - role_name=name, - system_config=machine_config, - os_virtual_hard_disk=disk_config, - network_config=network_config, - role_size=size - ) + result = self._perform_post( + self._get_role_path(ex_cloud_service_name, + _deployment_name), + AzureXmlSerializer.add_role_to_xml( + name, # role_name + machine_config, # system_config + disk_config, # os_virtual_hard_disk + 'PersistentVMRole', # role_type + network_config, # network_config + None, # availability_set_name + None, # data_virtual_hard_disks + size), # role_size + async=True) return Node( id=name, @@ -755,9 +758,10 @@ def _get_cloud_service_location(self, service_name=None): if not service_name: raise ValueError("service_name is required.") - sms = ServiceManagementService(subscription_id, certificate_path) - - res = 
sms.get_hosted_service_properties(service_name=service_name,embed_detail=False) + res = self._perform_get( + self._get_hosted_service_path(service_name) + + '?embed-detail=False', + HostedService) _affinity_group = res.hosted_service_properties.affinity_group _cloud_service_location = res.hosted_service_properties.location @@ -773,39 +777,51 @@ def _is_storage_service_unique(self, service_name=None): if not service_name: raise ValueError("service_name is required.") - sms = ServiceManagementService(subscription_id, certificate_path) - - _check_availability = sms.check_storage_account_name_availability(service_name=service_name) - + _check_availability = self._perform_get( + self._get_storage_service_path() + + '/operations/isavailable/' + + _str(service_name) + '', + AvailabilityResponse) + return _check_availability.result def _create_storage_account(self, **kwargs): - sms = ServiceManagementService(subscription_id, certificate_path) - if kwargs['is_affinity_group'] is True: - result = sms.create_storage_account( - service_name=kwargs['service_name'], - description=kwargs['service_name'], - label=kwargs['service_name'], - affinity_group=kwargs['location']) + result = self._perform_post( + self._get_storage_service_path(), + AzureXmlSerializer.create_storage_service_input_to_xml( + kwargs['service_name'], + kwargs['service_name'], + kwargs['service_name'], + kwargs['location'], + None, # Location + True, # geo_replication_enabled + None), # extended_properties + async=True) else: - result = sms.create_storage_account( - service_name=kwargs['service_name'], - description=kwargs['service_name'], - label=kwargs['service_name'], - location=kwargs['location']) + result = self._perform_post( + self._get_storage_service_path(), + AzureXmlSerializer.create_storage_service_input_to_xml( + kwargs['service_name'], + kwargs['service_name'], + kwargs['service_name'], + None, # Affinity Group + kwargs['location'], # Location + True, # geo_replication_enabled + None), # 
extended_properties + async=True) # We need to wait for this to be created before we can # create the storage container and the instance. - operation_status = sms.get_operation_status(result.request_id) + operation_status = self._get_operation_status(result.request_id) timeout = 60 * 5 waittime = 0 interval = 5 while operation_status.status == "InProgress" and waittime < timeout: - operation_status = sms.get_operation_status(result.request_id) + operation_status = self._get_operation_status(result.request_id) if operation_status.status == "Succeeded": break @@ -813,6 +829,11 @@ def _create_storage_account(self, **kwargs): time.sleep(interval) return + def _get_operation_status(self, request_id): + return self._perform_get( + '/' + subscription_id + '/operations/' + _str(request_id), + Operation) + def _perform_get(self, path, response_type): request = AzureHTTPRequest() request.method = 'GET' @@ -1216,9 +1237,6 @@ def _get_path(self, resource, name): path += '/' + _str(name) return path - def _lower(self, text): - return text.lower() - def _get_image_path(self, image_name=None): return self._get_path('services/images', image_name) @@ -1237,6 +1255,9 @@ def _get_role_path(self, service_name, deployment_name, role_name=None): '/deployments/' + deployment_name + '/roles', role_name) + def _get_storage_service_path(self, service_name=None): + return self._get_path('services/storageservices', service_name) + def get_connection(self): certificate_path = "/Users/baldwin/.azure/managementCertificate.pem" port = HTTPS_PORT @@ -1251,6 +1272,9 @@ def get_connection(self): """ XML Serializer """ +def _lower(text): + return text.lower() + class AzureXmlSerializer(): @staticmethod @@ -1488,6 +1512,9 @@ def windows_configuration_to_xml(configuration): ('Thumbprint', cert.thumbprint)]) xml += '' xml += '' + + #xml += AzureXmlSerializer.data_to_xml( + # [('AdminUsername', configuration.admin_user_name)]) return xml @staticmethod @@ -1765,7 +1792,7 @@ def 
extended_properties_dict_to_xml_fragment(extended_properties): '', '']) xml += '' - return xm + return xml """ Data Classes @@ -1867,6 +1894,20 @@ def __init__(self): self.port = u'' self.protocol = u'' +class ConfigurationSets(WindowsAzureData): + + def __init__(self): + self.configuration_sets = _list_of(ConfigurationSet) + + def __iter__(self): + return iter(self.configuration_sets) + + def __len__(self): + return len(self.configuration_sets) + + def __getitem__(self, index): + return self.configuration_sets[index] + class ConfigurationSet(WindowsAzureData): def __init__(self): @@ -2011,6 +2052,13 @@ def __init__(self): self.extended_properties = _dict_of( 'ExtendedProperty', 'Name', 'Value') +class UpgradeStatus(WindowsAzureData): + + def __init__(self): + self.upgrade_type = u'' + self.current_upgrade_domain_state = u'' + self.current_upgrade_domain = u'' + class RoleInstanceList(WindowsAzureData): def __init__(self): @@ -2154,6 +2202,20 @@ def __init__(self): self.deployment_name = u'' self.role_name = u'' +class OperationError(WindowsAzureData): + + def __init__(self): + self.code = u'' + self.message = u'' + +class Operation(WindowsAzureData): + + def __init__(self): + self.id = u'' + self.status = u'' + self.http_status_code = u'' + self.error = OperationError() + class AzureHTTPRequest(object): def __init__(self): self.host = '' From e09449dd29921d92a6ab372e4afc6a8d54cbb784 Mon Sep 17 00:00:00 2001 From: Matt Baldwin Date: Tue, 20 May 2014 15:07:56 -0700 Subject: [PATCH 281/315] incremented the version --- libcloud/compute/drivers/azure_compute.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libcloud/compute/drivers/azure_compute.py b/libcloud/compute/drivers/azure_compute.py index 8fd82e8175..e5e0cb0f4b 100644 --- a/libcloud/compute/drivers/azure_compute.py +++ b/libcloud/compute/drivers/azure_compute.py @@ -14,7 +14,7 @@ # limitations under the License. 
""" Azure Compute driver -Version: 1.0 +Version: 2.0 """ import uuid import re From dff91c830fcf9a48c63fef477300cdf8b37b2e4f Mon Sep 17 00:00:00 2001 From: Matt Baldwin Date: Tue, 20 May 2014 19:31:44 -0700 Subject: [PATCH 282/315] Added Libcloud HTTP Error responses, node status map, cleaning things up. --- libcloud/compute/drivers/azure_compute.py | 247 ++++++++++++++++++---- 1 file changed, 205 insertions(+), 42 deletions(-) diff --git a/libcloud/compute/drivers/azure_compute.py b/libcloud/compute/drivers/azure_compute.py index e5e0cb0f4b..a20fb22d1a 100644 --- a/libcloud/compute/drivers/azure_compute.py +++ b/libcloud/compute/drivers/azure_compute.py @@ -12,9 +12,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -""" -Azure Compute driver -Version: 2.0 +"""Azure Compute driver + """ import uuid import re @@ -31,6 +30,7 @@ from libcloud.compute.base import NodeImage, StorageVolume, VolumeSnapshot from libcloud.compute.base import KeyPair, NodeAuthPassword from libcloud.compute.types import NodeState, KeyPairDoesNotExistError +from libcloud.common.types import LibcloudError from libcloud.common.base import ConnectionUserAndKey from datetime import datetime @@ -38,7 +38,6 @@ from xml.sax.saxutils import escape as xml_escape from httplib import ( HTTPSConnection, - HTTP_PORT, HTTPS_PORT, ) @@ -169,16 +168,16 @@ def _str(value): 'copy_id': 'CopyId', } +# To be removed once auth has been refactored. 
subscription_id = "aff4792f-fc2c-4fa8-88f4-bab437747469" certificate_path = "/Users/baldwin/.azure/managementCertificate.pem" azure_service_management_host = 'management.core.windows.net' -__version__ = '0.8.0' -_USER_AGENT_STRING = 'pyazure/' + __version__ +__version__ = '1.0.0' +_USER_AGENT_STRING = 'libcloudazurecompute/' + __version__ X_MS_VERSION = '2013-08-01' - class AzureConnection(ConnectionUserAndKey): """AzureConnection @@ -192,6 +191,26 @@ class AzureNodeDriver(NodeDriver): features = {'create_node': ['password']} service_location = collections.namedtuple('service_location',['is_affinity_group', 'service_location']) + NODE_STATE_MAP = { + 'RoleStateUnknown': NodeState.UNKNOWN, + 'CreatingVM': NodeState.PENDING, + 'StartingVM': NodeState.PENDING, + 'CreatingRole': NodeState.PENDING, + 'StartingRole': NodeState.PENDING, + 'ReadyRole': NodeState.RUNNING, + 'BusyRole': NodeState.PENDING, + 'StoppingRole': NodeState.PENDING, + 'StoppingVM': NodeState.PENDING, + 'DeletingVM': NodeState.PENDING, + 'StoppedVM': NodeState.STOPPED, + 'RestartingRole': NodeState.REBOOTING, + 'CyclingRole': NodeState.TERMINATED, + 'FailedStartingRole': NodeState.TERMINATED, + 'FailedStartingVM': NodeState.TERMINATED, + 'UnresponsiveRole': NodeState.TERMINATED, + 'StoppedDeallocated': NodeState.TERMINATED, + } + def list_sizes(self): """ Lists all sizes @@ -337,7 +356,7 @@ def create_node(self, ex_cloud_service_name=None, **kwargs): :type ex_cloud_service_name: ``str`` :keyword ex_storage_service_name: Optional: Name of the Azure Storage Service. - :type ex_cloud_service_name: ``str`` + :type ex_storage_service_name: ``str`` :keyword ex_deployment_name: Optional. The name of the deployment. If this is not passed in we default to @@ -346,10 +365,10 @@ def create_node(self, ex_cloud_service_name=None, **kwargs): :keyword ex_deployment_slot: Optional: Valid values: production|staging. Defaults to production. 
- :type ex_cloud_service_name: ``str`` + :type ex_deployment_slot: ``str`` - :keyword ex_linux_user_id: Optional. Defaults to 'azureuser'. - :type ex_cloud_service_name: ``str`` + :keyword ex_admin_user_id: Optional. Defaults to 'azureuser'. + :type ex_admin_user_id: ``str`` """ name = kwargs['name'] @@ -368,11 +387,11 @@ def create_node(self, ex_cloud_service_name=None, **kwargs): else: ex_deployment_slot = "production" # We assume production if this is not provided. - if "ex_linux_user_id" in kwargs: - ex_linux_user_id = kwargs['ex_linux_user_id'] + if "ex_admin_user_id" in kwargs: + ex_admin_user_id = kwargs['ex_admin_user_id'] else: # This mimics the Azure UI behavior. - ex_linux_user_id = "azureuser" + ex_admin_user_id = "azureuser" node_list = self.list_nodes(ex_cloud_service_name=ex_cloud_service_name) network_config = ConfigurationSet() @@ -381,7 +400,7 @@ def create_node(self, ex_cloud_service_name=None, **kwargs): # We do this because we need to pass a Configuration to the # method. This will be either Linux or Windows. if re.search("Win|SQL|SharePoint|Visual|Dynamics|DynGP|BizTalk", image, re.I): - machine_config = WindowsConfigurationSet(name, password) + machine_config = WindowsConfigurationSet(computer_name=name, admin_password=password, admin_user_name=ex_admin_user_id) machine_config.domain_join = None if node_list is None: @@ -429,7 +448,7 @@ def create_node(self, ex_cloud_service_name=None, **kwargs): load_balanced_endpoint_set_name=None, enable_direct_server_return=False ) - machine_config = LinuxConfigurationSet(name, ex_linux_user_id, password, False) + machine_config = LinuxConfigurationSet(name, ex_admin_user_id, password, False) network_config.input_endpoints.input_endpoints.append(endpoint) @@ -626,12 +645,11 @@ def _to_node(self, data): else: ssh_port = [] - # When servers are Linux, this fails due to the remote_desktop_port - # therefore we need to add a check in here. 
return Node( - id=data.instance_name, - name=data.instance_name, - state=data.instance_status, + id=data.role_name, + name=data.role_name, + state=self.NODE_STATE_MAP.get( + data.instance_status, NodeState.UNKNOWN), public_ips=[public_ip], private_ips=[data.ip_address], driver=self.connection.driver, @@ -908,8 +926,8 @@ def _perform_request(self, request): response = AzureHTTPResponse( int(resp.status), resp.reason, headers, respbody) if status >= 300: - raise AzureHTTPError(status, message, - respheader, respbody) + raise LibcloudError('Message: %s, Body: %s, Status code: %d' % (message, respbody, status), + driver=self) return response @@ -1269,8 +1287,9 @@ def get_connection(self): return connection -""" -XML Serializer +"""XML Serializer + +Borrowed from the Azure SDK for Python. """ def _lower(text): return text.lower() @@ -1513,8 +1532,8 @@ def windows_configuration_to_xml(configuration): xml += '' xml += '' - #xml += AzureXmlSerializer.data_to_xml( - # [('AdminUsername', configuration.admin_user_name)]) + xml += AzureXmlSerializer.data_to_xml( + [('AdminUsername', configuration.admin_user_name)]) return xml @staticmethod @@ -1794,8 +1813,9 @@ def extended_properties_dict_to_xml_fragment(extended_properties): xml += '' return xml -""" -Data Classes +"""Data Classes + +Borrowed from the Azure SDK for Python. 
""" class WindowsAzureData(object): @@ -1831,16 +1851,66 @@ class WindowsConfigurationSet(WindowsAzureData): def __init__(self, computer_name=None, admin_password=None, reset_password_on_first_logon=None, - enable_automatic_updates=None, time_zone=None): + enable_automatic_updates=None, time_zone=None, admin_user_name=None): self.configuration_set_type = u'WindowsProvisioningConfiguration' self.computer_name = computer_name self.admin_password = admin_password self.reset_password_on_first_logon = reset_password_on_first_logon self.enable_automatic_updates = enable_automatic_updates self.time_zone = time_zone + self.admin_user_name = admin_user_name self.domain_join = DomainJoin() self.stored_certificate_settings = StoredCertificateSettings() +class DomainJoin(WindowsAzureData): + + def __init__(self): + self.credentials = Credentials() + self.join_domain = u'' + self.machine_object_ou = u'' + +class Credentials(WindowsAzureData): + + def __init__(self): + self.domain = u'' + self.username = u'' + self.password = u'' + +class StoredCertificateSettings(WindowsAzureData): + + def __init__(self): + self.stored_certificate_settings = _list_of(CertificateSetting) + + def __iter__(self): + return iter(self.stored_certificate_settings) + + def __len__(self): + return len(self.stored_certificate_settings) + + def __getitem__(self, index): + return self.stored_certificate_settings[index] + +class CertificateSetting(WindowsAzureData): + + ''' + Initializes a certificate setting. + + thumbprint: + Specifies the thumbprint of the certificate to be provisioned. The + thumbprint must specify an existing service certificate. + store_name: + Specifies the name of the certificate store from which retrieve + certificate. + store_location: + Specifies the target certificate store location on the virtual machine. + The only supported value is LocalMachine. 
+ ''' + + def __init__(self, thumbprint=u'', store_name=u'', store_location=u''): + self.thumbprint = thumbprint + self.store_name = store_name + self.store_location = store_location + class SSH(WindowsAzureData): def __init__(self): @@ -1979,6 +2049,20 @@ def __len__(self): def __getitem__(self, index): return self.images[index] +class OSImage(WindowsAzureData): + + def __init__(self): + self.affinity_group = u'' + self.category = u'' + self.location = u'' + self.logical_size_in_gb = 0 + self.label = u'' + self.media_link = u'' + self.name = u'' + self.os = u'' + self.eula = u'' + self.description = u'' + class HostedServices(WindowsAzureData): def __init__(self): @@ -2216,6 +2300,96 @@ def __init__(self): self.http_status_code = u'' self.error = OperationError() +class OperatingSystem(WindowsAzureData): + + def __init__(self): + self.version = u'' + self.label = _Base64String() + self.is_default = True + self.is_active = True + self.family = 0 + self.family_label = _Base64String() + +class OperatingSystems(WindowsAzureData): + + def __init__(self): + self.operating_systems = _list_of(OperatingSystem) + + def __iter__(self): + return iter(self.operating_systems) + + def __len__(self): + return len(self.operating_systems) + + def __getitem__(self, index): + return self.operating_systems[index] + +class OperatingSystemFamily(WindowsAzureData): + + def __init__(self): + self.name = u'' + self.label = _Base64String() + self.operating_systems = OperatingSystems() + +class OperatingSystemFamilies(WindowsAzureData): + + def __init__(self): + self.operating_system_families = _list_of(OperatingSystemFamily) + + def __iter__(self): + return iter(self.operating_system_families) + + def __len__(self): + return len(self.operating_system_families) + + def __getitem__(self, index): + return self.operating_system_families[index] + +class Subscription(WindowsAzureData): + + def __init__(self): + self.subscription_id = u'' + self.subscription_name = u'' + self.subscription_status = 
u'' + self.account_admin_live_email_id = u'' + self.service_admin_live_email_id = u'' + self.max_core_count = 0 + self.max_storage_accounts = 0 + self.max_hosted_services = 0 + self.current_core_count = 0 + self.current_hosted_services = 0 + self.current_storage_accounts = 0 + self.max_virtual_network_sites = 0 + self.max_local_network_sites = 0 + self.max_dns_servers = 0 + +class AvailabilityResponse(WindowsAzureData): + + def __init__(self): + self.result = False + +class SubscriptionCertificates(WindowsAzureData): + + def __init__(self): + self.subscription_certificates = _list_of(SubscriptionCertificate) + + def __iter__(self): + return iter(self.subscription_certificates) + + def __len__(self): + return len(self.subscription_certificates) + + def __getitem__(self, index): + return self.subscription_certificates[index] + +class SubscriptionCertificate(WindowsAzureData): + + def __init__(self): + self.subscription_certificate_public_key = u'' + self.subscription_certificate_thumbprint = u'' + self.subscription_certificate_data = u'' + self.created = u'' + class AzureHTTPRequest(object): def __init__(self): self.host = '' @@ -2233,18 +2407,7 @@ def __init__(self, status, message, headers, body): self.headers = headers self.body = body -class AzureHTTPError(Exception): - def __init__(self, status, message, respheader, respbody): - '''Creates a new HTTPError with the specified status, message, - response headers and body''' - self.status = status - self.respheader = respheader - self.respbody = respbody - Exception.__init__(self, message) - -""" -Helpers - +"""Helper Functions """ class _Base64String(str): From 4519b7cb5aec3ffdb807690571ddd385466b2a84 Mon Sep 17 00:00:00 2001 From: Matt Baldwin Date: Wed, 21 May 2014 13:03:29 -0700 Subject: [PATCH 283/315] general clean-up --- libcloud/compute/drivers/azure_compute.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/libcloud/compute/drivers/azure_compute.py 
b/libcloud/compute/drivers/azure_compute.py index a20fb22d1a..6e713dbcc6 100644 --- a/libcloud/compute/drivers/azure_compute.py +++ b/libcloud/compute/drivers/azure_compute.py @@ -298,7 +298,6 @@ def reboot_node(self, node, ex_cloud_service_name=None, ex_deployment_slot=None) ex_deployment_slot = "production" _deployment_name = self._get_deployment(service_name=ex_cloud_service_name,deployment_slot=ex_deployment_slot).name - print _deployment_name try: result = self._perform_post( @@ -591,7 +590,6 @@ def destroy_node(self, node, ex_cloud_service_name=None, ex_deployment_slot=None return True except Exception, e: - print e return False """ Functions not implemented @@ -652,7 +650,7 @@ def _to_node(self, data): data.instance_status, NodeState.UNKNOWN), public_ips=[public_ip], private_ips=[data.ip_address], - driver=self.connection.driver, + driver=AzureNodeDriver, extra={ 'remote_desktop_port': remote_desktop_port, 'ssh_port': ssh_port, From d75fbd7bbb816d5e003fa0185fc39ffdde983449 Mon Sep 17 00:00:00 2001 From: Matt Baldwin Date: Wed, 21 May 2014 13:17:07 -0700 Subject: [PATCH 284/315] general clean-up --- libcloud/compute/drivers/azure_compute.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/libcloud/compute/drivers/azure_compute.py b/libcloud/compute/drivers/azure_compute.py index 6e713dbcc6..8597579e3a 100644 --- a/libcloud/compute/drivers/azure_compute.py +++ b/libcloud/compute/drivers/azure_compute.py @@ -62,6 +62,16 @@ def _str(value): _str = str _unicode_type = str +__version__ = '1.0.0' + +# To be removed once auth has been refactored. +subscription_id = "aff4792f-fc2c-4fa8-88f4-bab437747469" +certificate_path = "/Users/baldwin/.azure/managementCertificate.pem" + +azure_service_management_host = 'management.core.windows.net' +_USER_AGENT_STRING = 'libcloudazurecompute/' + __version__ +X_MS_VERSION = '2013-08-01' + """ Sizes must be hardcoded because Microsoft doesn't provide an API to fetch them. 
From http://msdn.microsoft.com/en-us/library/windowsazure/dn197896.aspx @@ -168,16 +178,6 @@ def _str(value): 'copy_id': 'CopyId', } -# To be removed once auth has been refactored. -subscription_id = "aff4792f-fc2c-4fa8-88f4-bab437747469" -certificate_path = "/Users/baldwin/.azure/managementCertificate.pem" - -azure_service_management_host = 'management.core.windows.net' - -__version__ = '1.0.0' -_USER_AGENT_STRING = 'libcloudazurecompute/' + __version__ -X_MS_VERSION = '2013-08-01' - class AzureConnection(ConnectionUserAndKey): """AzureConnection From 2c0691e46c0aa1a9b7a00019c1b753e2082e76ea Mon Sep 17 00:00:00 2001 From: davidcrossland Date: Fri, 23 May 2014 17:17:12 +0100 Subject: [PATCH 285/315] added connection --- libcloud/common/azure.py | 58 ++++++++++++- libcloud/common/base.py | 31 ++++++- .../drivers/{azure_compute.py => azure.py} | 84 +++++++------------ libcloud/compute/providers.py | 2 + libcloud/compute/types.py | 1 + 5 files changed, 121 insertions(+), 55 deletions(-) rename libcloud/compute/drivers/{azure_compute.py => azure.py} (97%) diff --git a/libcloud/common/azure.py b/libcloud/common/azure.py index 104cca817b..46ba62dfcd 100644 --- a/libcloud/common/azure.py +++ b/libcloud/common/azure.py @@ -14,6 +14,7 @@ # limitations under the License. 
import copy +import os import time import base64 import hmac @@ -31,7 +32,7 @@ from libcloud.common.types import InvalidCredsError from libcloud.common.types import LibcloudError, MalformedResponseError -from libcloud.common.base import ConnectionUserAndKey, RawResponse +from libcloud.common.base import ConnectionUserAndKey, RawResponse, CertificateConnection from libcloud.common.base import XmlResponse # Azure API version @@ -187,3 +188,58 @@ def _get_azure_auth_signature(self, method, headers, params, ) return 'SharedKey %s:%s' % (self.user_id, b64_hmac.decode('utf-8')) + +class AzureBaseDriver(object): + name = "Microsoft Azure Service Management API" + +class AzureServiceManagementConnection(CertificateConnection): + # This needs the following approach - + # 1. Make request using LibcloudHTTPSConnection which is a overloaded class which takes in a client certificate + # 2. Depending on the type of operation use a PollingConnection when the response id is returned + # 3. The Response can be used in an AzureServiceManagementResponse + """Authentication class for "Service Account" authentication.""" + driver = AzureBaseDriver + responseCls = AzureResponse + rawResponseCls = AzureRawResponse + name = 'Azure Service Management API Connection' + host = 'management.core.windows.net' + keyfile = "" + def __init__(self, subscription_id, key_file, *args, **kwargs): + """ + Check to see if PyCrypto is available, and convert key file path into a + key string if the key is in a file. + + :param user_id: Email address to be used for Service Account + authentication. + :type user_id: ``str`` + + :param key: The RSA Key or path to file containing the key. 
+ :type key: ``str`` + """ + + keypath = os.path.expanduser(key_file) + self.keyfile = keypath; + is_file_path = os.path.exists(keypath) and os.path.isfile(keypath) + if not is_file_path: + raise InvalidCredsError('pem file needed to authenticate to Microsoft Azure') + self.key_file = key_file + #if is_file_path: + # with open(keypath, 'r') as f: + # key = f.read() + super(AzureServiceManagementConnection, self).__init__( + subscription_id, key_file, *args, **kwargs) + + def add_default_headers(self, headers): + """ + @inherits: :class:`Connection.add_default_headers` + TODO: move to constant.. + """ + headers['x-ms-version'] = "2014-05-01" + headers['x-ms-date'] = time.strftime(AZURE_TIME_FORMAT, time.gmtime()) + #headers['host'] = self.host + return headers + + + + + diff --git a/libcloud/common/base.py b/libcloud/common/base.py index 75200b411e..212430ab43 100644 --- a/libcloud/common/base.py +++ b/libcloud/common/base.py @@ -539,7 +539,7 @@ def _tuple_from_url(self, url): return (host, port, secure, request_path) - def connect(self, host=None, port=None, base_url=None): + def connect(self, host=None, port=None, base_url=None, **kwargs): """ Establish a connection with the API server. 
@@ -565,7 +565,19 @@ def connect(self, host=None, port=None, base_url=None): host = host or self.host port = port or self.port - kwargs = {'host': host, 'port': int(port)} + if not hasattr(kwargs, 'host'): + kwargs.update({'host': host}) + + if not hasattr(kwargs, 'port'): + kwargs.update({'port': port}) + + if not hasattr(kwargs, 'key_file'): + kwargs.update({'key_file': self.key_file}) + + if not hasattr(kwargs, 'cert_file'): + kwargs.update({'cert_file': self.cert_file}) + + #kwargs = {'host': host, 'port': int(port)} # Timeout is only supported in Python 2.6 and later # http://docs.python.org/library/httplib.html#httplib.HTTPConnection @@ -936,6 +948,21 @@ def __init__(self, key, secure=True, host=None, port=None, url=None, proxy_url=proxy_url) self.key = key +class CertificateConnection(Connection): + """ + Base connection class which accepts a single ``key`` argument. + """ + def __init__(self, subscription_id, cert_file, secure=True, host=None, port=None, url=None, + timeout=None): + """ + Initialize `user_id` and `key`; set `secure` to an ``int`` based on + passed value. 
+ """ + super(CertificateConnection, self).__init__(secure=secure, host=host, + port=port, url=url, timeout=timeout) + + self.cert_file = cert_file + self.subscription_id = subscription_id class ConnectionUserAndKey(ConnectionKey): """ diff --git a/libcloud/compute/drivers/azure_compute.py b/libcloud/compute/drivers/azure.py similarity index 97% rename from libcloud/compute/drivers/azure_compute.py rename to libcloud/compute/drivers/azure.py index 8597579e3a..97bcc4b5c4 100644 --- a/libcloud/compute/drivers/azure_compute.py +++ b/libcloud/compute/drivers/azure.py @@ -24,6 +24,7 @@ import os import copy import base64 +from libcloud.common.azure import AzureServiceManagementConnection from libcloud.compute.providers import Provider from libcloud.compute.base import Node, NodeDriver, NodeLocation, NodeSize @@ -64,9 +65,6 @@ def _str(value): __version__ = '1.0.0' -# To be removed once auth has been refactored. -subscription_id = "aff4792f-fc2c-4fa8-88f4-bab437747469" -certificate_path = "/Users/baldwin/.azure/managementCertificate.pem" azure_service_management_host = 'management.core.windows.net' _USER_AGENT_STRING = 'libcloudazurecompute/' + __version__ @@ -178,14 +176,14 @@ def _str(value): 'copy_id': 'CopyId', } -class AzureConnection(ConnectionUserAndKey): - """AzureConnection - Connection class for Azure Compute Driver. 
- """ class AzureNodeDriver(NodeDriver): - + connectionCls = AzureServiceManagementConnection + name = "Azure Node Provider" + website = 'http://windowsazure.com' + type = Provider.AZURE + _instance_types = AZURE_COMPUTE_INSTANCE_TYPES _blob_url = ".blob.core.windows.net" features = {'create_node': ['password']} @@ -211,6 +209,16 @@ class AzureNodeDriver(NodeDriver): 'StoppedDeallocated': NodeState.TERMINATED, } + def __init__(self, subscription_id=None, key_file=None, **kwargs): + """ + subscription_id contains the Azure subscription id in the form of GUID + key_file contains the Azure X509 certificate in .pem form + """ + self.subscription_id = subscription_id + self.key_file = key_file + super(AzureNodeDriver, self).__init__(self.subscription_id, self.key_file, + secure=True, **kwargs) + def list_sizes(self): """ Lists all sizes @@ -241,7 +249,7 @@ def list_locations(self): :rtype: ``list`` of :class:`NodeLocation` """ - data = self._perform_get('/' + subscription_id + '/locations', Locations) + data = self._perform_get('/' + self.subscription_id + '/locations', Locations) return [self._to_location(l) for l in data] @@ -300,11 +308,8 @@ def reboot_node(self, node, ex_cloud_service_name=None, ex_deployment_slot=None) _deployment_name = self._get_deployment(service_name=ex_cloud_service_name,deployment_slot=ex_deployment_slot).name try: - result = self._perform_post( - self._get_deployment_path_using_name( - ex_cloud_service_name, _deployment_name) + \ - '/roleinstances/' + _str(node.id) + \ - '?comp=reboot', '', async=True) + result = self._perform_post(self._get_deployment_path_using_name(ex_cloud_service_name, _deployment_name) + '/roleinstances/' + _str(node.id) + '?comp=reboot' + , '', async=True) if result.request_id: return True else: @@ -898,39 +903,14 @@ def _perform_delete(self, path, async=False): def _perform_request(self, request): - connection = self.get_connection() - try: - connection.putrequest(request.method, request.path) - - 
self.send_request_headers(connection, request.headers) - self.send_request_body(connection, request.body) - - resp = connection.getresponse() - status = int(resp.status) - message = resp.reason - respheader = headers = resp.getheaders() - - # for consistency across platforms, make header names lowercase - for i, value in enumerate(headers): - headers[i] = (value[0].lower(), value[1]) + return self.connection.request(action="https://%s/%s" % (request.host, request.path), data=request.body, method=request.method) + except Exception, e: + print e.message - respbody = None - if resp.length is None: - respbody = resp.read() - elif resp.length > 0: - respbody = resp.read(resp.length) - response = AzureHTTPResponse( - int(resp.status), resp.reason, headers, respbody) - if status >= 300: - raise LibcloudError('Message: %s, Body: %s, Status code: %d' % (message, respbody, status), - driver=self) - return response - finally: - connection.close() def _update_request_uri_query(self, request): '''pulls the query string out of the URI and moves it into @@ -1236,7 +1216,7 @@ def _parse_response_for_async_op(self, response): result = AsynchronousOperationResult() if response.headers: - for name, value in response.headers: + for name, value in response.headers.items(): if name.lower() == 'x-ms-request-id': result.request_id = value @@ -1248,7 +1228,7 @@ def _get_deployment_path_using_name(self, service_name, '/deployments', deployment_name) def _get_path(self, resource, name): - path = '/' + subscription_id + '/' + resource + path = '/' + self.subscription_id + '/' + resource if name is not None: path += '/' + _str(name) return path @@ -1274,16 +1254,16 @@ def _get_role_path(self, service_name, deployment_name, role_name=None): def _get_storage_service_path(self, service_name=None): return self._get_path('services/storageservices', service_name) - def get_connection(self): - certificate_path = "/Users/baldwin/.azure/managementCertificate.pem" - port = HTTPS_PORT + #def 
get_connection(self): + # certificate_path = "/Users/baldwin/.azure/managementCertificate.pem" + # port = HTTPS_PORT - connection = HTTPSConnection( - azure_service_management_host, - int(port), - cert_file=certificate_path) + # connection = HTTPSConnection( + # azure_service_management_host, + # int(port), + # cert_file=certificate_path) - return connection + # return connection """XML Serializer diff --git a/libcloud/compute/providers.py b/libcloud/compute/providers.py index 4e1b6c6664..f17a5352af 100644 --- a/libcloud/compute/providers.py +++ b/libcloud/compute/providers.py @@ -27,6 +27,8 @@ "get_driver"] DRIVERS = { + Provider.AZURE: + ('libcloud.compute.drivers.azure', 'AzureNodeDriver'), Provider.DUMMY: ('libcloud.compute.drivers.dummy', 'DummyNodeDriver'), Provider.EC2_US_EAST: diff --git a/libcloud/compute/types.py b/libcloud/compute/types.py index 472ded716b..398a2694e2 100644 --- a/libcloud/compute/types.py +++ b/libcloud/compute/types.py @@ -80,6 +80,7 @@ class Provider(object): :cvar VULTR: vultr driver. :cvar AZURE: Azure driver. """ + AZURE = 'azure' DUMMY = 'dummy' EC2 = 'ec2_us_east' RACKSPACE = 'rackspace' From dde6370ff78d59d24d3daf57ac2b1918876f9346 Mon Sep 17 00:00:00 2001 From: davidcrossland Date: Tue, 27 May 2014 13:34:42 +0100 Subject: [PATCH 286/315] modified how headers are added to the request, ensured create_node works --- libcloud/compute/drivers/azure.py | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/libcloud/compute/drivers/azure.py b/libcloud/compute/drivers/azure.py index 97bcc4b5c4..6704618a21 100644 --- a/libcloud/compute/drivers/azure.py +++ b/libcloud/compute/drivers/azure.py @@ -552,7 +552,7 @@ def destroy_node(self, node, ex_cloud_service_name=None, ex_deployment_slot=None """Remove Azure Virtual Machine This removes the instance, but does not - remove the disk. You will need to use destroy_volume. + remove the disk. You will need to use destroy_volume. 
Azure sometimes has an issue where it will hold onto a blob lease for an extended amount of time. @@ -904,7 +904,7 @@ def _perform_delete(self, path, async=False): def _perform_request(self, request): try: - return self.connection.request(action="https://%s/%s" % (request.host, request.path), data=request.body, method=request.method) + return self.connection.request(action="https://%s%s" % (request.host, request.path), data=request.body, headers=request.headers, method=request.method) except Exception, e: print e.message @@ -943,20 +943,18 @@ def _update_management_header(self, request): ''' Add additional headers for management. ''' if request.method in ['PUT', 'POST', 'MERGE', 'DELETE']: - request.headers.append(('Content-Length', str(len(request.body)))) + request.headers['Content-Length'] = str(len(request.body)) # append additional headers base on the service - request.headers.append(('x-ms-version', X_MS_VERSION)) + #request.headers.append(('x-ms-version', X_MS_VERSION)) # if it is not GET or HEAD request, must set content-type. 
if not request.method in ['GET', 'HEAD']: - for name, _ in request.headers: - if 'content-type' == name.lower(): + for key in request.headers: + if 'content-type' == key.lower(): break else: - request.headers.append( - ('Content-Type', - 'application/atom+xml;type=entry;charset=utf-8')) + request.headers['Content-Type']='application/xml' return request.headers @@ -2374,7 +2372,7 @@ def __init__(self): self.method = '' self.path = '' self.query = [] # list of (name, value) - self.headers = [] # list of (header name, header value) + self.headers = {} # list of (header name, header value) self.body = '' self.protocol_override = None From 2310fa5a7513f455c3327adbf6e07d4984fc68ca Mon Sep 17 00:00:00 2001 From: davidcrossland Date: Tue, 27 May 2014 17:02:31 +0100 Subject: [PATCH 287/315] added delete unit tests --- libcloud/compute/drivers/azure.py | 18 ++- ...es_oddkinz1_deploymentslots_Production.xml | 1 + ...es_oddkinz2_deploymentslots_Production.xml | 1 + libcloud/test/compute/test_azure.py | 152 ++++++++++++++++++ 4 files changed, 168 insertions(+), 4 deletions(-) create mode 100644 libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_oddkinz1_deploymentslots_Production.xml create mode 100644 libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_oddkinz2_deploymentslots_Production.xml create mode 100644 libcloud/test/compute/test_azure.py diff --git a/libcloud/compute/drivers/azure.py b/libcloud/compute/drivers/azure.py index 6704618a21..f63073558e 100644 --- a/libcloud/compute/drivers/azure.py +++ b/libcloud/compute/drivers/azure.py @@ -15,6 +15,7 @@ """Azure Compute driver """ +import httplib import uuid import re import time @@ -571,12 +572,12 @@ def destroy_node(self, node, ex_cloud_service_name=None, ex_deployment_slot=None if not ex_deployment_slot: ex_deployment_slot = "production" - _deployment = 
self._get_deployment(service_name=ex_cloud_service_name,deployment_slot=ex_deployment_slot) - _deployment_name = _deployment.name + try: + _deployment = self._get_deployment(service_name=ex_cloud_service_name,deployment_slot=ex_deployment_slot) + _deployment_name = _deployment.name - _server_deployment_count = len(_deployment.role_instance_list) + _server_deployment_count = len(_deployment.role_instance_list) - try: if _server_deployment_count > 1: path = self._get_role_path(ex_cloud_service_name, _deployment_name, node.id) path += '?comp=media' # forces deletion of attached disks @@ -864,6 +865,11 @@ def _perform_get(self, path, response_type): request.headers = self._update_management_header(request) response = self._perform_request(request) + #prob not the best way to do this. but there is no point parsing + #the response as a particular type if it is not the expected type of response + if response.status >= 400: # or resp is not 'OK' or response.error is not None: + raise Exception(response.error) + if response_type is not None: return self._parse_response(response, response_type) @@ -896,6 +902,10 @@ def _perform_delete(self, path, async=False): request.headers = self._update_management_header(request) response = self._perform_request(request) + #ensure we raise an exception if the response was an error + if response.status >= 400: + raise Exception(response.error) + if async: return self._parse_response_for_async_op(response) diff --git a/libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_oddkinz1_deploymentslots_Production.xml b/libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_oddkinz1_deploymentslots_Production.xml new file mode 100644 index 0000000000..fa0aaa9db4 --- /dev/null +++ b/libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_oddkinz1_deploymentslots_Production.xml @@ -0,0 +1 @@ 
+dc01Production5a62b73e8a2e448398e3485a112c5232Runninghttp://dcoddkinztest01.cloudapp.net/PFNlcnZpY2VDb25maWd1cmF0aW9uIHhtbG5zOnhzZD0iaHR0cDovL3d3dy53My5vcmcvMjAwMS9YTUxTY2hlbWEiIHhtbG5zOnhzaT0iaHR0cDovL3d3dy53My5vcmcvMjAwMS9YTUxTY2hlbWEtaW5zdGFuY2UiIHhtbG5zPSJodHRwOi8vc2NoZW1hcy5taWNyb3NvZnQuY29tL1NlcnZpY2VIb3N0aW5nLzIwMDgvMTAvU2VydmljZUNvbmZpZ3VyYXRpb24iPg0KICA8Um9sZSBuYW1lPSJkYzAxIj4NCiAgICA8SW5zdGFuY2VzIGNvdW50PSIxIiAvPg0KICA8L1JvbGU+DQo8L1NlcnZpY2VDb25maWd1cmF0aW9uPg==dc01dc01ReadyRole00Small10.77.46.71SSH168.63.66.2042222tcpStarteddc017e1eac23032be4029fc02f676c866ddf1.02014-05-27T15:12:29ZUnknownNotReadyen-USVM Agent is unresponsive. Status was last reported at 2014-05-27T14:59:30Z.1dc01PersistentVMRoleNetworkConfiguration22SSH22tcp168.63.66.204falseReadWritedc01-dc01-0-201405271459270089http://mtlytics.blob.core.windows.net/vhds/dcoddkinztest01-dc01-2014-05-27.vhd5112500ae3b842c8b9c604889f8753c3__OpenLogic-CentOS-65-20140415LinuxSmalltruefalsefalse2014-05-27T14:59:23Z2014-05-27T15:12:27Z2014-04-11T18:28:18Z2014-04-13T18:28:18ZPersistentVMUpdateCompleted
168.63.66.204
truedc01ContractContract
dcoddkinztest01.f3.internal.cloudapp.net
\ No newline at end of file diff --git a/libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_oddkinz2_deploymentslots_Production.xml b/libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_oddkinz2_deploymentslots_Production.xml new file mode 100644 index 0000000000..de15d8bcce --- /dev/null +++ b/libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_oddkinz2_deploymentslots_Production.xml @@ -0,0 +1 @@ +dc03Productionf1f3169a845a4a2c9b76101934d84d72Runninghttp://oddkinz2.cloudapp.net/PFNlcnZpY2VDb25maWd1cmF0aW9uIHhtbG5zOnhzZD0iaHR0cDovL3d3dy53My5vcmcvMjAwMS9YTUxTY2hlbWEiIHhtbG5zOnhzaT0iaHR0cDovL3d3dy53My5vcmcvMjAwMS9YTUxTY2hlbWEtaW5zdGFuY2UiIHhtbG5zPSJodHRwOi8vc2NoZW1hcy5taWNyb3NvZnQuY29tL1NlcnZpY2VIb3N0aW5nLzIwMDgvMTAvU2VydmljZUNvbmZpZ3VyYXRpb24iPg0KICA8Um9sZSBuYW1lPSJkYzAzIj4NCiAgICA8SW5zdGFuY2VzIGNvdW50PSIxIiAvPg0KICA8L1JvbGU+DQogIDxSb2xlIG5hbWU9Im9kZGtpbnoxIj4NCiAgICA8SW5zdGFuY2VzIGNvdW50PSIxIiAvPg0KICA8L1JvbGU+DQo8L1NlcnZpY2VDb25maWd1cmF0aW9uPg==dc03dc03ReadyRole00Small100.92.66.69SSH191.235.135.622222tcpStarteddc03685099736b68f503fdbcb0164369b2471.02014-05-27T15:38:47ZUnknownNotReadyen-USStatus not available for role dc03.oddkinz1oddkinz1ReadyRole00Small100.92.60.91SSH191.235.135.626331322tcpStartedoddkinz10318c2b4abba6f83e3e24185c36f876e1.02014-05-27T15:38:47ZUnknownNotReadyen-USStatus not available for role 
oddkinz1.1dc03PersistentVMRoleNetworkConfiguration22SSH22tcp191.235.135.62falseReadWritedc03-dc03-0-201405271530050398http://mtlytics.blob.core.windows.net/vhds/oddkinz2-dc03-2014-05-27.vhd5112500ae3b842c8b9c604889f8753c3__OpenLogic-CentOS-65-20140415LinuxSmalltrueoddkinz1PersistentVMRoleNetworkConfiguration22SSH63313tcp191.235.135.62falseReadWritedc03-oddkinz1-0-201405271534090109http://mtlytics.blob.core.windows.net/vhds/oddkinz2-oddkinz1-2014-05-27.vhd5112500ae3b842c8b9c604889f8753c3__OpenLogic-CentOS-65-20140415LinuxSmalltruefalsefalse2014-05-27T15:30:01Z2014-05-27T15:38:45Z2014-04-01T23:49:45Z2014-04-03T23:49:45ZPersistentVMUpdateCompleted
191.235.135.62
truedc03ContractContract
oddkinz2.f5.internal.cloudapp.net
\ No newline at end of file diff --git a/libcloud/test/compute/test_azure.py b/libcloud/test/compute/test_azure.py new file mode 100644 index 0000000000..12b3fcf356 --- /dev/null +++ b/libcloud/test/compute/test_azure.py @@ -0,0 +1,152 @@ +import libcloud + +__author__ = 'david' + +import sys + +import httplib +import unittest +import urlparse +import libcloud.security +from libcloud.test import MockHttp +from libcloud.test.file_fixtures import ComputeFileFixtures +from libcloud.compute.types import Provider +from libcloud.compute.providers import get_driver + +class AzureNodeDriverTests(unittest.TestCase) : + + libcloud.security.VERIFY_SSL_CERT = False + + SUBSCRIPTION_ID = '5191b16a-673d-426c-8c55-fdd912858e4e' + KEY_FILE = 'C:\\Users\\david\\Desktop\\libcloud.pem' + + def setUp(self): + Azure = get_driver(Provider.AZURE) + Azure.connectionCls.conn_classes = (None, AzureMockHttp) + self.driver = Azure(self.SUBSCRIPTION_ID, self.KEY_FILE, pem_key_file = self.KEY_FILE ) + + def test_locations_returned_successfully(self): + locations = self.driver.list_locations() + self.assertEqual(len(locations), 7) + + locationNamesResult = list(a.name for a in locations) + locationNamesExpected = ['East Asia','Southeast Asia','North Europe','West Europe','East US','North Central US','West US'] + + self.assertListEqual(locationNamesResult, locationNamesExpected) + + matchedLocation = next(location for location in locations if location.name == 'Southeast Asia') + servicesResult = matchedLocation.services + servicesExpected = ['Compute','Storage','PersistentVMRole','HighMemory'] + self.assertListEqual(servicesResult, servicesExpected) + + vmRoleSizesResult = matchedLocation.vmRoleSizes + + vmRoleSizesExpected = ['A5','A6','A7','Basic_A0','Basic_A1','Basic_A2','Basic_A3','Basic_A4','ExtraLarge','ExtraSmall','Large','Medium','Small'] + self.assertListEqual(vmRoleSizesResult, vmRoleSizesExpected) + + def test_images_returned_successfully(self): + images = self.driver.list_images() + 
self.assertEquals(len(images), 212 ) + + def test_images_returned_successfully_filter_by_location(self): + images = self.driver.list_images("West US") + self.assertEquals(len(images), 206 ) + + def test_vmimages_returned_successfully(self): + vmimages = self.driver.list_nodes(cloudServiceName="oddkinz") + self.assertEqual(len(vmimages), 5) + + img0 = vmimages[0] + self.assertEquals(img0.id,"c3Rvcm0x") + self.assertEquals(img0.image,"Linux") + self.assertEquals(img0.location,"North Europe") + self.assertEquals(img0.name,"cloudredis") + self.assertListEqual(img0.public_ips,["100.86.90.81"]) + self.assertEquals(img0.serviceName,"oddkinz") + self.assertEquals(img0.size,"Medium") + self.assertEquals(img0.state,"ReadyRole") + self.assertEquals(img0.deploymentName,"storm1") + self.assertTrue(isinstance(img0.extra,dict)) + + def test_list_nodes_cloud_service_not_found(self): + with self.assertRaises(ValueError): + self.driver.list_nodes(cloudServiceName="424324") + + def test_vmimages_restart_node_success(self): + node = dict() + node["name"]="cloudredis" + node["serviceName"]="oddkinz" + node["deploymentName"]="storm1" + + result = self.driver.reboot_node(node) + + self.assertTrue(result) + + #simulating attempting to reboot a node that ifas already rebooting + def test_vmimages_restart_node_fail(self): + node = dict() + node["name"]="cloudredis" + node["serviceName"]="oddkinz" + node["deploymentName"]="oddkinz1" + + result = self.driver.reboot_node(node) + + self.assertFalse(result) + + def test_destroy_node_success_single_node_in_cloud_service(self): + + node = type('Node', (object,), dict(id="oddkinz1")) + + result = self.driver.destroy_node(node, ex_cloud_service_name="oddkinz1", ex_deployment_slot="Production") + self.assertTrue(result) + + def test_destroy_node_success_multiple_nodes_in_cloud_service(self): + + node = type('Node', (object,), dict(id="oddkinz1")) + + result = self.driver.destroy_node(node, ex_cloud_service_name="oddkinz2", 
ex_deployment_slot="Production") + self.assertTrue(result) + + def test_destroy_node_fail_node_does_not_exist(self): + + node = type('Node', (object,), dict(id="oddkinz2")) + + result = self.driver.destroy_node(node, ex_cloud_service_name="oddkinz2", ex_deployment_slot="Production") + self.assertFalse(result) + + def test_destroy_node_success_cloud_service_not_found(self): + + node = dict() + node["name"]="cloudredis" + + result = self.driver.destroy_node(node, ex_cloud_service_name="oddkinz2", ex_deployment_slot="Production" ) + + print result + +class AzureMockHttp(MockHttp): + + fixtures = ComputeFileFixtures('azure') + + def _5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_oddkinz1_deploymentslots_Production(self, method, url, body, headers): + if method == "GET": + body = self.fixtures.load('_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_oddkinz1_deploymentslots_Production.xml') + + return (httplib.OK, body, headers, httplib.responses[httplib.OK]) + + def _5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_oddkinz1_deployments_dc01(self, method, url, body, headers): + return (httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED]) + + def _5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_oddkinz2_deploymentslots_Production(self, method, url, body, headers): + if method == "GET": + body = self.fixtures.load('_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_oddkinz2_deploymentslots_Production.xml') + + return (httplib.OK, body, headers, httplib.responses[httplib.OK]) + + def _5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_oddkinz2_deployments_dc03_roles_oddkinz1(self, method, url, body, headers): + return (httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED]) + + def _5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_oddkinz2_deployments_dc03_roles_oddkinz2(self, method, url, body, headers): + return (httplib.NOT_FOUND, body, headers, 
httplib.responses[httplib.NOT_FOUND]) + +if __name__ == '__main__': + sys.exit(unittest.main()) From 5891cdf1e1095bcf080ab1d46d281607cd9184a3 Mon Sep 17 00:00:00 2001 From: davidcrossland Date: Wed, 28 May 2014 15:17:28 +0100 Subject: [PATCH 288/315] added additional unit tests, modified locations to return supported vm role sizes supported --- libcloud/compute/drivers/azure.py | 77 +++++++--- ..._673d_426c_8c55_fdd912858e4e_locations.xml | 1 + ...ervices_hostedservices_dcoddkinztest01.xml | 1 + ...01_deployments_dc03_roleinstances_dc13.xml | 1 + ...dkinztest01_deploymentslots_Production.xml | 1 + ...dkinztest02_deploymentslots_Production.xml | 1 + ...ervices_hostedservices_dcoddkinztest03.xml | 1 + ...dkinztest03_deploymentslots_Production.xml | 1 + ...ervices_hostedservices_dcoddkinztest04.xml | 1 + ...es_oddkinz5_deploymentslots_Production.xml | 1 + ...426c_8c55_fdd912858e4e_services_images.xml | 1 + libcloud/test/compute/test_azure.py | 144 ++++++++++++++---- 12 files changed, 179 insertions(+), 52 deletions(-) create mode 100644 libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_locations.xml create mode 100644 libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest01.xml create mode 100644 libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest01_deployments_dc03_roleinstances_dc13.xml create mode 100644 libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest01_deploymentslots_Production.xml create mode 100644 libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest02_deploymentslots_Production.xml create mode 100644 libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest03.xml create mode 100644 
libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest03_deploymentslots_Production.xml create mode 100644 libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest04.xml create mode 100644 libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_oddkinz5_deploymentslots_Production.xml create mode 100644 libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_images.xml diff --git a/libcloud/compute/drivers/azure.py b/libcloud/compute/drivers/azure.py index f63073558e..2c0a767307 100644 --- a/libcloud/compute/drivers/azure.py +++ b/libcloud/compute/drivers/azure.py @@ -234,7 +234,7 @@ def list_sizes(self): return sizes - def list_images(self): + def list_images(self, location=None): """ Lists all images @@ -242,7 +242,12 @@ def list_images(self): """ data = self._perform_get(self._get_image_path(), Images) - return [self._to_image(i) for i in data] + images = [self._to_image(i) for i in data] + + if location != None: + images = [image for image in images if location in image.extra["location"]] + + return images def list_locations(self): """ @@ -271,10 +276,14 @@ def list_nodes(self, ex_cloud_service_name=None): if not ex_cloud_service_name: raise ValueError("ex_cloud_service_name is required.") - data = self._perform_get( + response = self._perform_get( self._get_hosted_service_path(ex_cloud_service_name) + '?embed-detail=True', - HostedService) + None) + if response.status != 200 : + raise LibcloudError('Message: %s, Body: %s, Status code: %d' % (response.error, response.body, response.status), driver=self) + + data = self._parse_response(response, HostedService) try: return [self._to_node(n) for n in data.deployments[0].role_instance_list] @@ -311,6 +320,8 @@ def reboot_node(self, node, ex_cloud_service_name=None, ex_deployment_slot=None) try: result = 
self._perform_post(self._get_deployment_path_using_name(ex_cloud_service_name, _deployment_name) + '/roleinstances/' + _str(node.id) + '?comp=reboot' , '', async=True) + + if result.request_id: return True else: @@ -684,11 +695,13 @@ def _to_location(self, data): if "Brazil" in data.display_name: country = "Brazil" - return NodeLocation( + return AzureNodeLocation( id=data.name, name=data.display_name, country=country, - driver=self.connection.driver) + driver=self.connection.driver, + available_services =data.available_services, + virtual_machine_role_sizes = (data.compute_capabilities).virtual_machines_role_sizes) def _to_node_size(self, data): """ @@ -768,12 +781,17 @@ def _to_volume(self, volume, node): def _get_deployment(self, **kwargs): _service_name = kwargs['service_name'] - _deployment_slot = kwargs['deployment_slot'] + _deployment_slot = kwargs['deployment_slot'] - return self._perform_get( + response = self._perform_get( self._get_deployment_path_using_slot( - _service_name, _deployment_slot), - Deployment) + _service_name, _deployment_slot), None) + + if response.status != 200: + raise LibcloudError('Message: %s, Body: %s, Status code: %d' % (response.error, response.body, response.status), driver=self) + + return self._parse_response(response, Deployment) + def _get_cloud_service_location(self, service_name=None): @@ -865,11 +883,6 @@ def _perform_get(self, path, response_type): request.headers = self._update_management_header(request) response = self._perform_request(request) - #prob not the best way to do this. 
but there is no point parsing - #the response as a particular type if it is not the expected type of response - if response.status >= 400: # or resp is not 'OK' or response.error is not None: - raise Exception(response.error) - if response_type is not None: return self._parse_response(response, response_type) @@ -902,9 +915,8 @@ def _perform_delete(self, path, async=False): request.headers = self._update_management_header(request) response = self._perform_request(request) - #ensure we raise an exception if the response was an error - if response.status >= 400: - raise Exception(response.error) + if response.status != 202: + raise LibcloudError('Message: %s, Body: %s, Status code: %d' % (response.error, response.body, response.status), driver=self) if async: return self._parse_response_for_async_op(response) @@ -2020,6 +2032,19 @@ def __init__(self): self.name = u'' self.display_name = u'' self.available_services = _scalar_list_of(str, 'AvailableService') + self.compute_capabilities = ComputeCapability() + + + +class ComputeCapability(WindowsAzureData): + + def __init__(self): + self.virtual_machines_role_sizes = _scalar_list_of(str, 'RoleSize') + +class VirtualMachinesRoleSizes(WindowsAzureData): + + def __init__(self): + self.role_size = _scalar_list_of(str, 'RoleSize') class Images(WindowsAzureData): @@ -2433,4 +2458,18 @@ def __init__(self, pair_xml_element_name, key_xml_element_name, self.pair_xml_element_name = pair_xml_element_name self.key_xml_element_name = key_xml_element_name self.value_xml_element_name = value_xml_element_name - super(_dict_of, self).__init__() \ No newline at end of file + super(_dict_of, self).__init__() + +class AzureNodeLocation(NodeLocation): + + # we can also have something in here for available services which is an extra to the API with Azure + def __init__(self, id, name, country, driver, available_services, virtual_machine_role_sizes): + super(AzureNodeLocation, self).__init__(id, name, country, driver) + self.available_services 
= available_services + self.virtual_machine_role_sizes = virtual_machine_role_sizes + + def __repr__(self): + return (('') + % (self.id, self.name, self.country, + self.driver.name, ','.join(self.available_service), ','.join(self.virtual_machine_role_sizes))) \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_locations.xml b/libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_locations.xml new file mode 100644 index 0000000000..a7b496e221 --- /dev/null +++ b/libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_locations.xml @@ -0,0 +1 @@ +East AsiaEast AsiaComputeStoragePersistentVMRoleHighMemoryA5A6A7ExtraLargeExtraSmallLargeMediumSmallA5A6A7Basic_A0Basic_A1Basic_A2Basic_A3Basic_A4ExtraLargeExtraSmallLargeMediumSmallSoutheast AsiaSoutheast AsiaComputeStoragePersistentVMRoleHighMemoryA5A6A7ExtraLargeExtraSmallLargeMediumSmallA5A6A7Basic_A0Basic_A1Basic_A2Basic_A3Basic_A4ExtraLargeExtraSmallLargeMediumSmallNorth EuropeNorth EuropeComputeStoragePersistentVMRoleHighMemoryA5A6A7A8A9ExtraLargeExtraSmallLargeMediumSmallA5A6A7A8A9Basic_A0Basic_A1Basic_A2Basic_A3Basic_A4ExtraLargeExtraSmallLargeMediumSmallWest EuropeWest EuropeComputeStoragePersistentVMRoleHighMemoryA5A6A7A8A9ExtraLargeExtraSmallLargeMediumSmallA5A6A7A8A9Basic_A0Basic_A1Basic_A2Basic_A3Basic_A4ExtraLargeExtraSmallLargeMediumSmallEast USEast USComputeStoragePersistentVMRoleHighMemoryA5A6A7A8A9ExtraLargeExtraSmallLargeMediumSmallA5A6A7A8A9Basic_A0Basic_A1Basic_A2Basic_A3Basic_A4ExtraLargeExtraSmallLargeMediumSmallNorth Central USNorth Central USComputeStoragePersistentVMRoleHighMemoryA5A6A7A8A9ExtraLargeExtraSmallLargeMediumSmallA5A6A7A8A9Basic_A0Basic_A1Basic_A2Basic_A3Basic_A4ExtraLargeExtraSmallLargeMediumSmallWest USWest USComputeStoragePersistentVMRoleHighMemoryA5A6A7A8A9ExtraLargeExtraSmallLargeMediumSmallA5A6A7A8A9Basic_A0Basic_A1Basic_A2Basic_A3Basic_A4ExtraLargeExtraSmallLargeMediumSmall \ 
No newline at end of file diff --git a/libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest01.xml b/libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest01.xml new file mode 100644 index 0000000000..56f9d77334 --- /dev/null +++ b/libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest01.xml @@ -0,0 +1 @@ +https://management.core.windows.net/5191b16a-673d-426c-8c55-fdd912858e4e/services/hostedservices/dcoddkinztest01dcoddkinztest01North EuropeCreated2014-05-23T12:32:51Z2014-05-27T15:30:15Zdc03Productionf1f3169a845a4a2c9b76101934d84d72Runninghttp://dcoddkinztest01.cloudapp.net/PFNlcnZpY2VDb25maWd1cmF0aW9uIHhtbG5zOnhzZD0iaHR0cDovL3d3dy53My5vcmcvMjAwMS9YTUxTY2hlbWEiIHhtbG5zOnhzaT0iaHR0cDovL3d3dy53My5vcmcvMjAwMS9YTUxTY2hlbWEtaW5zdGFuY2UiIHhtbG5zPSJodHRwOi8vc2NoZW1hcy5taWNyb3NvZnQuY29tL1NlcnZpY2VIb3N0aW5nLzIwMDgvMTAvU2VydmljZUNvbmZpZ3VyYXRpb24iPg0KICA8Um9sZSBuYW1lPSJkYzAzIj4NCiAgICA8SW5zdGFuY2VzIGNvdW50PSIxIiAvPg0KICA8L1JvbGU+DQogIDxSb2xlIG5hbWU9Im9kZGtpbnoyIj4NCiAgICA8SW5zdGFuY2VzIGNvdW50PSIxIiAvPg0KICA8L1JvbGU+DQo8L1NlcnZpY2VDb25maWd1cmF0aW9uPg==dc03dc03ReadyRole00Small100.92.66.69SSH191.235.135.622222tcpStarteddc03685099736b68f503fdbcb0164369b2471.02014-05-28T10:09:42ZUnknownNotReadyen-USVM Agent is unresponsive. Status was last reported at 2014-05-27T15:30:08Z.oddkinz2oddkinz2ReadyRole00Small100.92.60.107SSH191.235.135.625770622tcpStartedoddkinz20c2115da20a7293aedf34bfe4dca9db21.02014-05-28T10:09:42ZUnknownNotReadyen-USVM Agent is unresponsive. 
Status was last reported at 2014-05-27T15:49:58Z.1dc03PersistentVMRoleNetworkConfiguration22SSH22tcp191.235.135.62falseReadWritedc03-dc03-0-201405271530050398http://mtlytics.blob.core.windows.net/vhds/dcoddkinztest01-dc03-2014-05-27.vhd5112500ae3b842c8b9c604889f8753c3__OpenLogic-CentOS-65-20140415LinuxSmalltrueoddkinz2PersistentVMRoleNetworkConfiguration22SSH57706tcp191.235.135.62falseReadWritedc03-oddkinz2-0-201405271549530290http://mtlytics.blob.core.windows.net/vhds/dcoddkinztest01-oddkinz2-2014-05-27.vhd5112500ae3b842c8b9c604889f8753c3__OpenLogic-CentOS-65-20140415LinuxSmalltruefalsefalse2014-05-27T15:30:01Z2014-05-28T10:09:41Z2014-04-01T23:49:45Z2014-04-03T23:49:45ZPersistentVMUpdateCompleted
191.235.135.62
truedc03ContractContract
dcoddkinztest01.f5.internal.cloudapp.net
\ No newline at end of file diff --git a/libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest01_deployments_dc03_roleinstances_dc13.xml b/libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest01_deployments_dc03_roleinstances_dc13.xml new file mode 100644 index 0000000000..d9e772093e --- /dev/null +++ b/libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest01_deployments_dc03_roleinstances_dc13.xml @@ -0,0 +1 @@ +ResourceNotFoundRole instance name is not valid. \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest01_deploymentslots_Production.xml b/libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest01_deploymentslots_Production.xml new file mode 100644 index 0000000000..0a0a7c43b9 --- /dev/null +++ b/libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest01_deploymentslots_Production.xml @@ -0,0 +1 @@ +dc03Productionf1f3169a845a4a2c9b76101934d84d72Runninghttp://dcoddkinztest01.cloudapp.net/PFNlcnZpY2VDb25maWd1cmF0aW9uIHhtbG5zOnhzZD0iaHR0cDovL3d3dy53My5vcmcvMjAwMS9YTUxTY2hlbWEiIHhtbG5zOnhzaT0iaHR0cDovL3d3dy53My5vcmcvMjAwMS9YTUxTY2hlbWEtaW5zdGFuY2UiIHhtbG5zPSJodHRwOi8vc2NoZW1hcy5taWNyb3NvZnQuY29tL1NlcnZpY2VIb3N0aW5nLzIwMDgvMTAvU2VydmljZUNvbmZpZ3VyYXRpb24iPg0KICA8Um9sZSBuYW1lPSJkYzAzIj4NCiAgICA8SW5zdGFuY2VzIGNvdW50PSIxIiAvPg0KICA8L1JvbGU+DQogIDxSb2xlIG5hbWU9Im9kZGtpbnoyIj4NCiAgICA8SW5zdGFuY2VzIGNvdW50PSIxIiAvPg0KICA8L1JvbGU+DQo8L1NlcnZpY2VDb25maWd1cmF0aW9uPg==dc03dc03ReadyRole00Small100.92.66.69SSH191.235.135.622222tcpStarteddc03685099736b68f503fdbcb0164369b2471.02014-05-28T09:20:27ZUnknownNotReadyen-USVM Agent is unresponsive. 
Status was last reported at 2014-05-27T15:30:08Z.oddkinz2oddkinz2ReadyRole00Small100.92.60.107SSH191.235.135.625770622tcpStartedoddkinz20c2115da20a7293aedf34bfe4dca9db21.02014-05-28T09:20:27ZUnknownNotReadyen-USVM Agent is unresponsive. Status was last reported at 2014-05-27T15:49:58Z.1dc03PersistentVMRoleNetworkConfiguration22SSH22tcp191.235.135.62falseReadWritedc03-dc03-0-201405271530050398http://mtlytics.blob.core.windows.net/vhds/dcoddkinztest01-dc03-2014-05-27.vhd5112500ae3b842c8b9c604889f8753c3__OpenLogic-CentOS-65-20140415LinuxSmalltrueoddkinz2PersistentVMRoleNetworkConfiguration22SSH57706tcp191.235.135.62falseReadWritedc03-oddkinz2-0-201405271549530290http://mtlytics.blob.core.windows.net/vhds/dcoddkinztest01-oddkinz2-2014-05-27.vhd5112500ae3b842c8b9c604889f8753c3__OpenLogic-CentOS-65-20140415LinuxSmalltruefalsefalse2014-05-27T15:30:01Z2014-05-28T09:20:26Z2014-04-01T23:49:45Z2014-04-03T23:49:45ZPersistentVMUpdateCompleted
191.235.135.62
truedc03ContractContract
dcoddkinztest01.f5.internal.cloudapp.net
\ No newline at end of file diff --git a/libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest02_deploymentslots_Production.xml b/libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest02_deploymentslots_Production.xml new file mode 100644 index 0000000000..35c40bde04 --- /dev/null +++ b/libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest02_deploymentslots_Production.xml @@ -0,0 +1 @@ +ResourceNotFoundNo deployments were found. \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest03.xml b/libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest03.xml new file mode 100644 index 0000000000..2e8e30f6f5 --- /dev/null +++ b/libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest03.xml @@ -0,0 +1 @@ +ResourceNotFoundThe hosted service does not exist. \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest03_deploymentslots_Production.xml b/libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest03_deploymentslots_Production.xml new file mode 100644 index 0000000000..35c40bde04 --- /dev/null +++ b/libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest03_deploymentslots_Production.xml @@ -0,0 +1 @@ +ResourceNotFoundNo deployments were found. 
\ No newline at end of file diff --git a/libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest04.xml b/libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest04.xml new file mode 100644 index 0000000000..2e8e30f6f5 --- /dev/null +++ b/libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest04.xml @@ -0,0 +1 @@ +ResourceNotFoundThe hosted service does not exist. \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_oddkinz5_deploymentslots_Production.xml b/libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_oddkinz5_deploymentslots_Production.xml new file mode 100644 index 0000000000..35c40bde04 --- /dev/null +++ b/libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_oddkinz5_deploymentslots_Production.xml @@ -0,0 +1 @@ +ResourceNotFoundNo deployments were found. 
\ No newline at end of file diff --git a/libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_images.xml b/libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_images.xml new file mode 100644 index 0000000000..5d31d5ef23 --- /dev/null +++ b/libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_images.xml @@ -0,0 +1 @@ +PublicEast Asia;Southeast Asia;North Europe;West Europe;East US;West US100b11de9248dd4d87b18621318e037d37__RightImage-CentOS-6.2-x64-v5.8.8.1Linuxhttp://support.rightscale.com/12-Guides/RightLink/RightLink_End_User_License_AgreeementCentOS 6.3 with RightLink 5.8.RightScale Linux v13false2012-08-28T00:00:00Zfalsehttp://www.rightscale.com/privacy_policy.phpRightScale with LinuxPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;West US100b11de9248dd4d87b18621318e037d37__RightImage-CentOS-6.3-x64-v5.8.8Linuxhttp://support.rightscale.com/12-Guides/RightLink/RightLink_End_User_License_AgreeementCentOS 6.2 with RightLink 5.8.RightScale Linux v13false2012-08-28T00:00:00Zfalsehttp://www.rightscale.com/privacy_policy.phpRightScale with LinuxPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;West US100b11de9248dd4d87b18621318e037d37__RightImage-CentOS-6.3-x64-v5.8.8.5Linuxhttp://support.rightscale.com/12-Guides/RightLink/RightLink_End_User_License_AgreeementCentOS 6.3 with RightLink 5.8.RightScale Linux v13false2012-10-12T00:00:00Zfalsehttp://www.rightscale.com/privacy_policy.phpRightScale with LinuxPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;West US100b11de9248dd4d87b18621318e037d37__RightImage-CentOS-6.3-x64-v5.8.8.6Linuxhttp://support.rightscale.com/12-Guides/RightLink/RightLink_End_User_License_AgreeementCentOS 6.3 with RightLink 5.8.RightScale Linux v13false2012-11-12T00:00:00Zfalsehttp://www.rightscale.com/privacy_policy.phpRightScale with LinuxPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;West 
US100b11de9248dd4d87b18621318e037d37__RightImage-CentOS-6.3-x64-v5.8.8.7Linuxhttp://support.rightscale.com/12-Guides/RightLink/RightLink_End_User_License_AgreeementCentOS 6.3 with RightLink 5.8.RightScale Linux v13false2012-01-15T00:00:00Zfalsehttp://www.rightscale.com/privacy_policy.phpRightScale with LinuxPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;West US100b11de9248dd4d87b18621318e037d37__RightImage-CentOS-6.3-x64-v5.8.8.8Linuxhttp://support.rightscale.com/12-Guides/RightLink/RightLink_End_User_License_AgreeementCentOS 6.3 with RightLink 5.8.RightScale Linux v13false2012-01-25T00:00:00Zfalsehttp://www.rightscale.com/privacy_policy.phpRightScale with LinuxPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;West US100b11de9248dd4d87b18621318e037d37__RightImage-CentOS-6.3-x64-v5.8.8.9Linuxhttp://support.rightscale.com/12-Guides/RightLink/RightLink_End_User_License_AgreeementCentOS 6.3 with RightLink 5.8.RightScale Linux v13false2013-03-01T00:00:00Zfalsehttp://www.rightscale.com/privacy_policy.phpRightScale with LinuxPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;West US100b11de9248dd4d87b18621318e037d37__RightImage-CentOS-6.4-x64-v13.4Linuxfalse2013-04-19T00:00:00ZfalseRightScale with LinuxPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;West US100b11de9248dd4d87b18621318e037d37__RightImage-CentOS-6.4-x64-v13.5.0.1Linuxhttp://support.rightscale.com/12-Guides/RightLink/RightLink_End_User_License_AgreeementCentOS 6.4 with RightLink 5.8.RightScale Linux v13false2013-07-11T00:00:00Zfalsehttp://www.rightscale.com/privacy_policy.phpRightScale with LinuxPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;West US100b11de9248dd4d87b18621318e037d37__RightImage-CentOS-6.4-x64-v13.5.0.2Linuxhttp://support.rightscale.com/12-Guides/RightLink/RightLink_End_User_License_AgreeementCentOS 6.4 with RightLink 5.8.RightScale Linux 
v13false2013-07-22T00:00:00Zfalsehttp://www.rightscale.com/privacy_policy.phpRightScale with LinuxPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;West US100b11de9248dd4d87b18621318e037d37__RightImage-CentOS-6.5-x64-v13.5.2Linuxhttp://support.rightscale.com/12-Guides/RightLink/RightLink_End_User_License_AgreeementCentOS 6.5 with RightLink 5.8.RightScale Linux v13false2013-12-26T00:00:00Zfalsehttp://www.rightscale.com/privacy_policy.phpRightScale with LinuxPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US100b11de9248dd4d87b18621318e037d37__RightImage-CentOS-6.5-x64-v13.5.3Linuxhttp://support.rightscale.com/12-Guides/RightLink/RightLink_End_User_License_AgreeementCentOS 6.5 with RightLink 5.8.RightScale Linux v13false2014-04-17T00:00:00Zfalsehttp://www.rightscale.com/privacy_policy.phpRightScale with LinuxPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;West US100b11de9248dd4d87b18621318e037d37__RightImage-Ubuntu-12.04-x64-v13.4Linuxfalse2013-04-19T00:00:00ZfalseRightScale with LinuxPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;West US100b11de9248dd4d87b18621318e037d37__RightImage-Ubuntu-12.04-x64-v13.5.0.1Linuxhttp://support.rightscale.com/12-Guides/RightLink/RightLink_End_User_License_AgreeementUbuntu 12.04 with RightLink 5.8.RightScale Linux v13false2013-07-11T00:00:00Zfalsehttp://www.rightscale.com/privacy_policy.phpRightScale with LinuxPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;West US100b11de9248dd4d87b18621318e037d37__RightImage-Ubuntu-12.04-x64-v13.5.2Linuxhttp://support.rightscale.com/12-Guides/RightLink/RightLink_End_User_License_AgreeementUbuntu 12.04 with RightLink 5.8.RightScale Linux v13false2013-12-26T00:00:00Zfalsehttp://www.rightscale.com/privacy_policy.phpRightScale with LinuxPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West 
US100b11de9248dd4d87b18621318e037d37__RightImage-Ubuntu-12.04-x64-v13.5.3Linuxhttp://support.rightscale.com/12-Guides/RightLink/RightLink_End_User_License_AgreeementUbuntu 12.04 with RightLink 5.8.RightScale Linux v13false2014-04-17T00:00:00Zfalsehttp://www.rightscale.com/privacy_policy.phpRightScale with LinuxPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;West US100b11de9248dd4d87b18621318e037d37__RightImage-Ubuntu-12.04-x64-v5.8.8Linuxhttp://support.rightscale.com/12-Guides/RightLink/RightLink_End_User_License_AgreeementUbuntu 12.04 with RightLink 5.8.RightScale Linux v13false2012-08-28T00:00:00Zfalsehttp://www.rightscale.com/privacy_policy.phpRightScale with LinuxPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;West US100b11de9248dd4d87b18621318e037d37__RightImage-Ubuntu-12.04-x64-v5.8.8.5Linuxhttp://support.rightscale.com/12-Guides/RightLink/RightLink_End_User_License_AgreeementUbuntu 12.04 with RightLink 5.8.RightScale Linux v13false2012-10-12T00:00:00Zfalsehttp://www.rightscale.com/privacy_policy.phpRightScale with LinuxPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;West US100b11de9248dd4d87b18621318e037d37__RightImage-Ubuntu-12.04-x64-v5.8.8.7Linuxhttp://support.rightscale.com/12-Guides/RightLink/RightLink_End_User_License_AgreeementUbuntu 12.04 with RightLink 5.8.RightScale Linux v13false2013-01-15T00:00:00Zfalsehttp://www.rightscale.com/privacy_policy.phpRightScale with LinuxPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;West US100b11de9248dd4d87b18621318e037d37__RightImage-Ubuntu-12.04-x64-v5.8.8.8Linuxhttp://support.rightscale.com/12-Guides/RightLink/RightLink_End_User_License_AgreeementUbuntu 12.04 with RightLink 5.8.RightScale Linux v13false2013-01-25T00:00:00Zfalsehttp://www.rightscale.com/privacy_policy.phpRightScale with LinuxPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West 
US1280c0083a6d9a24f2d91800e52cad83950__JDK-1.6.0_71-0514-Win-GAWindowshttp://go.microsoft.com/fwlink/?LinkId=321312[Java Platform|http://www.oracle.com/java|_blank], Standard Edition 6 (update 71) enables development of secure, portable, high-performance applications and includes a Java Development Kit (JDK), Java Runtime Environment (JRE), and tools for developing, debugging, and monitoring Java applications. WARNING: These older versions of the JRE and JDK are provided to help developers debug issues in older systems. They are not recommended for use in production. Minimum recommended virtual machine size for this image is [Small|http://go.microsoft.com/fwlink/?LinkID=309169|_blank]. [Learn More|http://www.windowsazure.com/en-us/documentation/articles/virtual-machines-java-run-tomcat-application-server/|_blank]JDK 6 on Windows Server 20122014-05-01T07:00:00ZtrueJava6_100.pnghttp://go.microsoft.com/fwlink/?LinkId=321694SmallMicrosoft Open Technologies, Inc.http://go.microsoft.com/fwlink/?LinkId=386544Java6_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US1280c0083a6d9a24f2d91800e52cad83950__JDK-1.7.0_51-0514-Win-GAWindowshttp://go.microsoft.com/fwlink/?LinkId=321310[Java Platform|http://www.oracle.com/java|_blank], Standard Edition 7 (update 51) enables development of secure, portable, high-performance applications and includes a Java Development Kit (JDK), Java Runtime Environment (JRE), and tools for developing, debugging, and monitoring Java applications. Minimum recommended virtual machine size for this image is [Small|http://go.microsoft.com/fwlink/?LinkID=309169|_blank]. 
[Learn More|http://www.windowsazure.com/en-us/documentation/articles/virtual-machines-java-run-tomcat-application-server|_blank]JDK 7 on Windows Server 20122014-05-01T07:00:00ZtrueJava7_100.pnghttp://go.microsoft.com/fwlink/?LinkId=321701SmallMicrosoft Open Technologies, Inc.http://go.microsoft.com/fwlink/?LinkID=386543Java7_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US1280c0083a6d9a24f2d91800e52cad83950__JDK-1.8.0-0514-Win-GAWindowshttp://go.microsoft.com/fwlink/?LinkId=321310[Java Platform|http://www.oracle.com/java|_blank], Standard Edition 8 enables development of secure, portable, high-performance applications and includes a Java Development Kit (JDK), Java Runtime Environment (JRE), and tools for developing, debugging, and monitoring Java applications. Minimum recommended virtual machine size for this image is [Small|http://go.microsoft.com/fwlink/?LinkID=309169|_blank].JDK 8 on Windows Server 2012 R22014-05-01T07:00:00ZtrueJava7_100.pnghttp://go.microsoft.com/fwlink/?LinkId=321701SmallMicrosoft Open Technologies, Inc.http://go.microsoft.com/fwlink/?LinkID=386543Java7_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US1280c0083a6d9a24f2d91800e52cad83950__Oracle-Database-11.2.0.4.0-EE-0514-Win-GAWindowshttp://go.microsoft.com/fwlink/?LinkId=321683[Oracle Database|http://www.oracle.com/database|_blank] 11g R2 Enterprise Edition (11.2.0.4.0) provides comprehensive features to easily manage the most demanding transaction processing, business intelligence, and content management applications. 
Minimum recommended virtual machine size for this image is [Small|http://go.microsoft.com/fwlink/?LinkID=309169|_blank].Oracle Database 11g R2 Enterprise Edition on Windows Server 2008 R22014-05-01T07:00:00ZtrueOracleDatabase12_100.pnghttp://go.microsoft.com/fwlink/?LinkId=321692SmallMicrosoft Open Technologies, Inc.http://go.microsoft.com/fwlink/?LinkId=386538OracleDatabase12_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US1280c0083a6d9a24f2d91800e52cad83950__Oracle-Database-11.2.0.4.0-EE-WebLogic-10.3.6-EE-JDK-1.7.0_51-0514-Win-GAWindowshttp://go.microsoft.com/fwlink/?LinkId=321684[Oracle Database|http://www.oracle.com/database|_blank] 11g R2 Enterprise Edition (11.2.0.4.0) provides comprehensive features to easily manage the most demanding transaction processing, business intelligence, and content management applications. [Oracle WebLogic Server|http://www.oracle.com/weblogicserver|_blank] 11g Enterprise Edition (10.3.6) is a leading Java application server for modern data centers. It takes full advantage of the latest hardware architectures including 64-bit addressable memory, multi-core computing systems and high-speed networks. 
Minimum recommended virtual machine size for this image is [Medium|http://go.microsoft.com/fwlink/?LinkID=309169|_blank].Oracle Database 11g R2 and WebLogic Server 11g Enterprise Edition on Windows Server 2008 R22014-05-01T07:00:00ZtrueOracleWeblogicDatabase12_100.pnghttp://go.microsoft.com/fwlink/?LinkId=321691MediumMicrosoft Open Technologies, Inc.http://go.microsoft.com/fwlink/?LinkId=386541OracleWeblogicDatabase12_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US1280c0083a6d9a24f2d91800e52cad83950__Oracle-Database-11.2.0.4.0-SE-0514-Win-GAWindowshttp://go.microsoft.com/fwlink/?LinkId=321686[Oracle Database|http://www.oracle.com/database|_blank] 11g R2 Standard Edition (11.2.0.4.0) is an affordable, full-featured data management solution that is ideal for midsize companies. Minimum recommended virtual machine size for this image is [Small|http://go.microsoft.com/fwlink/?LinkID=309169|_blank].Oracle Database 11g R2 Standard Edition on Windows Server 2008 R22014-05-01T07:00:00ZtrueOracleDatabase12_100.pnghttp://go.microsoft.com/fwlink/?LinkId=321689SmallMicrosoft Open Technologies, Inc.http://go.microsoft.com/fwlink/?LinkId=386537OracleDatabase12_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US1280c0083a6d9a24f2d91800e52cad83950__Oracle-Database-11.2.0.4.0-SE-WebLogic-10.3.6-SE-JDK-1.7.0_51-0514-Win-GAWindowshttp://go.microsoft.com/fwlink/?LinkId=321687[Oracle Database|http://www.oracle.com/database|_blank] 11g R2 Standard Edition (11.2.0.4.0) is an affordable, full-featured data management solution that is ideal for midsize companies. 
[Oracle WebLogic Server|http://www.oracle.com/weblogicserver|_blank] 11g Standard Edition (10.3.6) is a leading Java application server for enterprises of all sizes, providing developers with the tools and technologies to write enterprise applications and services quickly and operations teams with the administration capabilities to keep them up and running. Minimum recommended virtual machine size for this image is [Medium|http://go.microsoft.com/fwlink/?LinkID=309169|_blank].Oracle Database 11g R2 and WebLogic Server 11g Standard Edition on Windows Server 2008 R22014-05-01T07:00:00ZtrueOracleWeblogicDatabase12_100.pnghttp://go.microsoft.com/fwlink/?LinkId=321688MediumMicrosoft Open Technologies, Inc.http://go.microsoft.com/fwlink/?LinkId=386542OracleWeblogicDatabase12_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US1280c0083a6d9a24f2d91800e52cad83950__Oracle-Database-12.1.0.1.0-0514-EE-Win-GAWindowshttp://go.microsoft.com/fwlink/?LinkId=321314[Oracle Database|http://www.oracle.com/database|_blank] 12c Enterprise Edition (12.1.0.1.0) is a next-generation database designed for the cloud, providing a new multitenant architecture on top of a fast, scalable, reliable, and secure database platform. Minimum recommended virtual machine size for this image is [Small|http://go.microsoft.com/fwlink/?LinkID=309169|_blank]. 
[Learn More|http://msdn.microsoft.com/en-us/library/dn439775.aspx|_blank]Oracle Database 12c Enterprise Edition on Windows Server 20122014-05-01T07:00:00ZtrueOracleDatabase12_100.pnghttp://go.microsoft.com/fwlink/?LinkId=321699SmallMicrosoft Open Technologies, Inc.http://go.microsoft.com/fwlink/?LinkId=386538OracleDatabase12_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US1280c0083a6d9a24f2d91800e52cad83950__Oracle-Database-12.1.0.1.0-0514-SE-Win-GAWindowshttp://go.microsoft.com/fwlink/?LinkId=321317[Oracle Database|http://www.oracle.com/database|_blank] 12c Standard Edition (12.1.0.1.0) is an affordable, full-featured data management solution that is ideal for midsize companies. Minimum recommended virtual machine size for this image is [Small|http://go.microsoft.com/fwlink/?LinkID=309169|_blank]. [Learn More|http://msdn.microsoft.com/en-us/library/dn439775.aspx|_blank]Oracle Database 12c Standard Edition on Windows Server 20122014-05-01T07:00:00ZtrueOracleDatabase12_100.pnghttp://go.microsoft.com/fwlink/?LinkId=321696SmallMicrosoft Open Technologies, Inc.http://go.microsoft.com/fwlink/?LinkId=386537OracleDatabase12_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US1280c0083a6d9a24f2d91800e52cad83950__Oracle-Database-12.1.0.1.0-EE-WebLogic-12.1.2.0-EE-JDK-1.7.0_51-0514-Win-GAWindowshttp://go.microsoft.com/fwlink/?LinkId=321315[Oracle Database|http://www.oracle.com/database|_blank] 12c Enterprise Edition (12.1.0.1.0) is a next-generation database designed for the cloud, providing a new multitenant architecture on top of a fast, scalable, reliable, and secure database platform. [Oracle WebLogic Server|http://www.oracle.com/weblogicserver|_blank] 12c Enterprise Edition (12.1.2.0) is a leading Java EE application server. Minimum recommended virtual machine size for this image is [Medium|http://go.microsoft.com/fwlink/?LinkID=309169|_blank]. 
[Learn More|http://msdn.microsoft.com/en-us/library/dn466427.aspx|_blank]Oracle Database 12c and WebLogic Server 12c Enterprise Edition on Windows Server 20122014-05-01T07:00:00ZtrueOracleWeblogicDatabase12_100.pnghttp://go.microsoft.com/fwlink/?LinkId=321698MediumMicrosoft Open Technologies, Inc.http://go.microsoft.com/fwlink/?LinkId=386541OracleWeblogicDatabase12_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US1280c0083a6d9a24f2d91800e52cad83950__Oracle-Database-12.1.0.1.0-SE-WebLogic-12.1.2.0-SE-JDK-1.7.0_51-0514-Win-GAWindowshttp://go.microsoft.com/fwlink/?LinkId=321318[Oracle Database|http://www.oracle.com/database|_blank] 12c Standard Edition (12.1.0.1.0) is an affordable, full-featured data management solution that is ideal for midsize companies. [Oracle WebLogic Server|http://www.oracle.com/weblogicserver|_blank] 12c Standard Edition (12.1.2.0) is a leading Java EE application server. Minimum recommended virtual machine size for this image is [Medium|http://go.microsoft.com/fwlink/?LinkID=309169|_blank]. [Learn More|http://msdn.microsoft.com/en-us/library/dn466427.aspx|_blank]Oracle Database 12c and WebLogic Server 12c Standard Edition on Windows Server 20122014-05-01T07:00:00ZtrueOracleWeblogicDatabase12_100.pnghttp://go.microsoft.com/fwlink/?LinkId=321695MediumMicrosoft Open Technologies, Inc.http://go.microsoft.com/fwlink/?LinkId=386542OracleWeblogicDatabase12_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US1280c0083a6d9a24f2d91800e52cad83950__WebLogic-12.1.2.0-EE-JDK-1.7.0_51-0514-Win-GAWindowshttp://go.microsoft.com/fwlink/?LinkId=321313[Oracle WebLogic Server|http://www.oracle.com/weblogicserver|_blank] 12c Enterprise Edition (12.1.2.0) is a leading Java EE application server, delivering next-generation applications on a mission-critical cloud platform, with native cloud management, and integrated tools. 
Minimum recommended virtual machine size for this image is [Small|http://go.microsoft.com/fwlink/?LinkID=309169|_blank]. [Learn More|http://msdn.microsoft.com/en-us/library/dn439774.aspx|_blank]Oracle WebLogic Server 12c Enterprise Edition on Windows Server 20122014-05-01T07:00:00ZtrueOracleWeblogic12_100.pnghttp://go.microsoft.com/fwlink/?LinkId=321700SmallMicrosoft Open Technologies, Inc.http://go.microsoft.com/fwlink/?LinkId=386540OracleWeblogic12_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US1280c0083a6d9a24f2d91800e52cad83950__WebLogic-12.1.2.0-SE-JDK-1.7.0_51-0514-Win-GAWindowshttp://go.microsoft.com/fwlink/?LinkId=321316[Oracle WebLogic Server|http://www.oracle.com/weblogicserver|_blank] 12c Standard Edition (12.1.2.0) is a leading Java EE application server, delivering next-generation applications on a mission-critical cloud platform, with native cloud management, and integrated tools. Minimum recommended virtual machine size for this image is [Small|http://go.microsoft.com/fwlink/?LinkID=309169|_blank]. [Learn More|http://msdn.microsoft.com/en-us/library/dn439774.aspx|_blank]Oracle WebLogic Server 12c Standard Edition on Windows Server 20122014-05-01T07:00:00ZtrueOracleWeblogic12_100.pnghttp://go.microsoft.com/fwlink/?LinkId=321697SmallMicrosoft Open Technologies, Inc.http://go.microsoft.com/fwlink/?LinkId=386539OracleWeblogic12_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US1280c0083a6d9a24f2d91800e52cad83950__Weblogic-10.3.6-EE-JDK-1.7.0_51-0514-Win-GAWindowshttp://go.microsoft.com/fwlink/?LinkID=321682[Oracle WebLogic Server|http://www.oracle.com/weblogicserver|_blank] 11g Enterprise Edition (10.3.6) is a leading Java application server for modern data centers. It takes full advantage of the latest hardware architectures including 64-bit addressable memory, multi-core computing systems and high-speed networks. 
Minimum recommended virtual machine size for this image is [Small|http://go.microsoft.com/fwlink/?LinkID=309169|_blank]. [Learn More|http://msdn.microsoft.com/en-us/library/dn466428.aspx|_blank]Oracle WebLogic Server 11g Enterprise Edition on Windows Server 2008 R22014-05-01T07:00:00ZtrueOracleWeblogic11_100.pnghttp://go.microsoft.com/fwlink/?LinkId=321693SmallMicrosoft Open Technologies, Inc.http://go.microsoft.com/fwlink/?LinkId=386540OracleWeblogic11_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US1280c0083a6d9a24f2d91800e52cad83950__Weblogic-10.3.6-SE-JDK-1.7.0_51-0514-Win-GAWindowshttp://go.microsoft.com/fwlink/?LinkId=321685[Oracle WebLogic Server|http://www.oracle.com/weblogicserver|_blank] 11g Standard Edition (10.3.6) is a leading Java application server for enterprises of all sizes, providing developers with the tools and technologies to write enterprise applications and services quickly and operations teams with the administration capabilities to keep them up and running. Minimum recommended virtual machine size for this image is [Small|http://go.microsoft.com/fwlink/?LinkID=309169|_blank]. [Learn More|http://msdn.microsoft.com/en-us/library/dn466428.aspx|_blank]Oracle WebLogic Server 11g Standard Edition on Windows Server 2008 R22014-05-01T07:00:00ZtrueOracleWeblogic11_100.pnghttp://go.microsoft.com/fwlink/?LinkId=321690SmallMicrosoft Open Technologies, Inc.http://go.microsoft.com/fwlink/?LinkId=386539OracleWeblogic11_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US1272cdc6229df6344129ee553dd3499f0d3__BizTalk-Server-2013-EnterpriseWindowshttp://go.microsoft.com/fwlink/?LinkID=296354;http://go.microsoft.com/fwlink/?LinkID=131004This image contains the Enterprise edition of BizTalk Server 2013. Some BizTalk Server components like accelerators require additional setup before use. 
Medium is the recommended size for this image.trueBizTalkServer2013_100.pnghttp://go.microsoft.com/fwlink/?LinkID=131004MediumMicrosoft BizTalk Server Grouphttp://go.microsoft.com/fwlink/?LinkID=280328BizTalkServer2013_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US1272cdc6229df6344129ee553dd3499f0d3__BizTalk-Server-2013-StandardWindowshttp://go.microsoft.com/fwlink/?LinkID=296355;http://go.microsoft.com/fwlink/?LinkID=131004This image contains the Standard edition of BizTalk Server 2013. Some BizTalk Server components like accelerators require additional setup before use. Medium is the recommended size for this image.trueBizTalkServer2013_100.pnghttp://go.microsoft.com/fwlink/?LinkID=131004MediumMicrosoft BizTalk Server Grouphttp://go.microsoft.com/fwlink/?LinkID=280327BizTalkServer2013_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US1273a50f22b388a4ff7ab41029918570fa6__Windows-Server-2012-Essentials-20131018-enusWindowsThis image contains the Windows Server 2012 R2 Datacenter operating system with the Windows Server Essentials Experience role installed. The new Windows Server Essentials Experience server role on Windows Server 2012 R2 Datacenter includes features, such as Remote Web Access, that were previously available only in Windows Server Essentials. Before creating a virtual machine, you must configure a valid virtual network to use VPN connections. 
For more information about how to set up Windows Server Essentials Experience, see [here|http://go.microsoft.com/fwlink/?LinkId=322143].Windows Server Essentials Experience on Windows Server 2012 R22013-10-18T00:00:00ZfalseWindowsServer2012R2_100.pngMediumMicrosoft Windows Server Essentials GroupWindowsServer2012R2_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US1273a50f22b388a4ff7ab41029918570fa6__Windows-Server-2012-Essentials-20131127-enusWindowsThis image contains the Windows Server 2012 R2 Datacenter operating system with the Windows Server Essentials Experience role installed. The new Windows Server Essentials Experience server role on Windows Server 2012 R2 Datacenter includes features, such as Remote Web Access, that were previously available only in Windows Server Essentials. Before creating a virtual machine, you must configure a valid virtual network to use VPN connections. For more information about how to set up Windows Server Essentials Experience, see [here|http://go.microsoft.com/fwlink/?LinkId=322143].Windows Server Essentials Experience on Windows Server 2012 R22013-11-29T00:00:00ZfalseWindowsServer2012R2_100.pngMediumMicrosoft Windows Server Essentials GroupWindowsServer2012R2_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US1273a50f22b388a4ff7ab41029918570fa6__Windows-Server-2012-Essentials-20131217-enusWindowsThis image contains the Windows Server 2012 R2 Datacenter operating system with the Windows Server Essentials Experience role installed. The new Windows Server Essentials Experience server role on Windows Server 2012 R2 Datacenter includes features, such as Remote Web Access, that were previously available only in Windows Server Essentials. Before creating a virtual machine, you must configure a valid virtual network to use VPN connections. 
For more information about how to set up Windows Server Essentials Experience, see [here|http://go.microsoft.com/fwlink/?LinkId=322143].Windows Server Essentials Experience on Windows Server 2012 R22013-12-23T00:00:00ZfalseWindowsServer2012R2_100.pngMediumMicrosoft Windows Server Essentials GroupWindowsServer2012R2_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US1273a50f22b388a4ff7ab41029918570fa6__Windows-Server-2012-Essentials-20140213-enusWindowsThis image contains the Windows Server 2012 R2 Datacenter operating system with the Windows Server Essentials Experience role installed. The new Windows Server Essentials Experience server role on Windows Server 2012 R2 Datacenter includes features, such as Remote Web Access, that were previously available only in Windows Server Essentials. Before creating a virtual machine, you must configure a valid virtual network to use VPN connections. For more information about how to set up Windows Server Essentials Experience, see [here|http://go.microsoft.com/fwlink/?LinkId=322143].Windows Server Essentials Experience on Windows Server 2012 R22014-01-23T00:00:00ZfalseWindowsServer2012R2_100.pngMediumMicrosoft Windows Server Essentials GroupWindowsServer2012R2_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US1273a50f22b388a4ff7ab41029918570fa6__Windows-Server-2012-Essentials-20140306-enusWindowsThis image contains the Windows Server 2012 R2 Datacenter operating system with the Windows Server Essentials Experience role installed. The new Windows Server Essentials Experience server role on Windows Server 2012 R2 Datacenter includes features, such as Remote Web Access, that were previously available only in Windows Server Essentials. Before creating a virtual machine, you must configure a valid virtual network to use VPN connections. 
For more information about how to set up Windows Server Essentials Experience, see [here|http://go.microsoft.com/fwlink/?LinkId=322143].Windows Server Essentials Experience on Windows Server 2012 R22014-03-05T16:00:00ZfalseWindowsServer2012R2_100.pngMediumMicrosoft Windows Server Essentials GroupWindowsServer2012R2_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US1273a50f22b388a4ff7ab41029918570fa6__Windows-Server-2012-Essentials-20140327-enusWindowsThis image contains the Windows Server 2012 R2 Datacenter operating system with the Windows Server Essentials Experience role installed. The new Windows Server Essentials Experience server role on Windows Server 2012 R2 Datacenter includes features, such as Remote Web Access, that were previously available only in Windows Server Essentials. Before creating a virtual machine, you must configure a valid virtual network to use VPN connections. For more information about how to set up Windows Server Essentials Experience, see [here|http://go.microsoft.com/fwlink/?LinkId=322143].Windows Server Essentials Experience on Windows Server 2012 R22014-03-26T16:00:00ZfalseWindowsServer2012R2_100.pngMediumMicrosoft Windows Server Essentials GroupWindowsServer2012R2_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US305112500ae3b842c8b9c604889f8753c3__OpenLogic-CentOS-65-20140415Linuxhttp://www.openlogic.com/azure/service-agreement/This distribution of Linux is based on CentOS version 6.5 and is provided by OpenLogic. 
It contains an installation of the Basic Server packages.2014-04-15T00:00:00ZfalseCentOS6_100.pngOpenLogicCentOS6_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US128a699494373c04fc0bc8f2bb1389d6106__Win2K8R2SP1-Datacenter-201403.01-en.us-127GB.vhdWindowsWindows Server 2008 R2 is a multi-purpose server designed to increase the reliability and flexibility of your server or private cloud infrastructure, helping you to save time and reduce costs. It provides you with powerful tools to react to business needs with greater control and confidence.Windows Server 2008 R2 SP12014-03-17T00:00:00ZfalseWindowsServer2008R2_100.pngMicrosoft Windows Server GroupWindowsServer2008R2_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US128a699494373c04fc0bc8f2bb1389d6106__Win2K8R2SP1-Datacenter-201404.01-en.us-127GB.vhdWindowsWindows Server 2008 R2 is a multi-purpose server designed to increase the reliability and flexibility of your server or private cloud infrastructure, helping you to save time and reduce costs. It provides you with powerful tools to react to business needs with greater control and confidence.Windows Server 2008 R2 SP12014-04-17T00:00:00ZfalseWindowsServer2008R2_100.pngMicrosoft Windows Server GroupWindowsServer2008R2_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US128a699494373c04fc0bc8f2bb1389d6106__Windows-Server-2012-Datacenter-201403.01-en.us-127GB.vhdWindowsWindows Server 2012 incorporates Microsoft's experience building and operating public clouds, resulting in a dynamic, highly available server platform. 
It offers a scalable, dynamic and multi-tenant-aware infrastructure that helps securely connect across premises.Windows Server 2012 Datacenter2014-03-17T00:00:00ZfalseWindowsServer2012_100.pngMicrosoft Windows Server GroupWindowsServer2012_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US128a699494373c04fc0bc8f2bb1389d6106__Windows-Server-2012-Datacenter-201404.01-en.us-127GB.vhdWindowsWindows Server 2012 incorporates Microsoft's experience building and operating public clouds, resulting in a dynamic, highly available server platform. It offers a scalable, dynamic and multi-tenant-aware infrastructure that helps securely connect across premises.Windows Server 2012 Datacenter2014-04-17T00:00:00ZfalseWindowsServer2012_100.pngMicrosoft Windows Server GroupWindowsServer2012_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US128a699494373c04fc0bc8f2bb1389d6106__Windows-Server-2012-R2-201403.01-en.us-127GB.vhdWindowsAt the heart of the Microsoft Cloud OS vision, Windows Server 2012 R2 brings Microsoft's experience delivering global-scale cloud services into your infrastructure. It offers enterprise-class performance, flexibility for your applications and excellent economics for your datacenter and hybrid cloud environment.Windows Server 2012 R2 Datacenter2014-03-17T00:00:00ZfalseWindowsServer2012R2_100.pngMicrosoft Windows Server GroupWindowsServer2012R2_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US128a699494373c04fc0bc8f2bb1389d6106__Windows-Server-2012-R2-201404.01-en.us-127GB.vhdWindowsAt the heart of the Microsoft Cloud OS vision, Windows Server 2012 R2 brings Microsoft's experience delivering global-scale cloud services into your infrastructure. It offers enterprise-class performance, flexibility for your applications and excellent economics for your datacenter and hybrid cloud environment. 
This image includes Windows Server 2012 R2 Update.Windows Server 2012 R2 Datacenter2014-04-17T00:00:00ZfalseWindowsServer2012R2_100.pngMicrosoft Windows Server GroupWindowsServer2012R2_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;West US127ad072bd3082149369c449ba5832401ae__RdshOnWindowsServer2012R2.20140305.127GB.vhdWindowsThis image contains the Windows Server 2012 R2 operating system with the Remote Desktop Session Host (RD Session Host) role installed. This image has been pre-configured for Windows Azure. RD Session Host enables a server to host RemoteApp programs or session-based desktops.Windows Server Remote Desktop Session Host on Windows Server 2012 R2false2014-03-05T23:38:03.7394082ZfalseWindowsServer2012R2_100.pngLargeMicrosoft Windows Server Remote Desktop GroupWindowsServer2012R2_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;West US127ad072bd3082149369c449ba5832401ae__Windows-Server-RDSHwO13P-on-Windows-Server-2012-R2-20140421-1748WindowsThis image can be used by authorized Microsoft Service Providers only.Windows Server RDSHwO13P on Windows Server 2012 R2false2014-04-21T19:09:55.3775121ZfalseWindowsServer2012R2_100.pnghttp://www.windowsazure.com/en-us/support/legal/privacy-statementLargeMicrosoft Windows Server Remote Desktop GroupWindowsServer2012R2_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;West US127ad072bd3082149369c449ba5832401ae__Windows-Server-RDSHwO13P-on-Windows-Server-2012-R2-20140502-1817WindowsThis image can be used by authorized Microsoft Service Providers only.Windows Server RDSHwO13P on Windows Server 2012 R2false2014-05-02T19:00:47.4740507ZfalseWindowsServer2012R2_100.pnghttp://www.windowsazure.com/en-us/support/legal/privacy-statementLargeMicrosoft Windows Server Remote Desktop GroupWindowsServer2012R2_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;West 
US127ad072bd3082149369c449ba5832401ae__Windows-Server-RDSHwO13P-on-Windows-Server-2012-R2-20140509-1817WindowsThis image can be used by authorized Microsoft Service Providers only.Windows Server RDSHwO13P on Windows Server 2012 R2false2014-05-09T19:06:56.874678ZfalseWindowsServer2012R2_100.pnghttp://www.windowsazure.com/en-us/support/legal/privacy-statementLargeMicrosoft Windows Server Remote Desktop GroupWindowsServer2012R2_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;West US127ad072bd3082149369c449ba5832401ae__Windows-Server-RDSHwO13P-on-Windows-Server-2012-R2-20140512-2037WindowsThis image can be used by authorized Microsoft Service Providers only.Windows Server RDSHwO13P on Windows Server 2012 R2false2014-05-12T21:28:12.4214603ZfalseWindowsServer2012R2_100.pnghttp://www.windowsazure.com/en-us/support/legal/privacy-statementLargeMicrosoft Windows Server Remote Desktop GroupWindowsServer2012R2_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;West US127ad072bd3082149369c449ba5832401ae__Windows-Server-RDSHwO13P-on-Windows-Server-2012-R2-20140514-1702WindowsThis image can be used by authorized Microsoft Service Providers only.Windows Server RDSHwO13P on Windows Server 2012 R2false2014-05-14T17:59:34.290158ZfalseWindowsServer2012R2_100.pnghttp://www.windowsazure.com/en-us/support/legal/privacy-statementLargeMicrosoft Windows Server Remote Desktop GroupWindowsServer2012R2_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US128ad072bd3082149369c449ba5832401ae__Windows-Server-Remote-Desktop-Session-Host-on-Windows-Server-2012-R2-20140514-1852WindowsThis image contains the Windows Server 2012 R2 operating system with the Remote Desktop Session Host (RD Session Host) role installed. This image has been pre-configured for Windows Azure. 
RD Session Host enables a server to host RemoteApp programs or session-based desktops.Windows Server Remote Desktop Session Host on Windows Server 2012 R2false2014-05-14T19:33:36.5822032ZfalseWindowsServer2012R2_100.pngLargeMicrosoft Windows Server Remote Desktop GroupWindowsServer2012R2_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US30b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-12_04_2-LTS-amd64-server-20121218-en-us-30GBLinuxhttp://www.ubuntu.com/project/about-ubuntu/licensing;http://www.ubuntu.com/aboutus/privacypolicyUbuntu Server 12.10 (amd64 20121218) for Windows Azure. This image is DEPRECATED and was reached its END OF LIFE on 2014-04-18. This image is provided for archival purposes only. Please see [Ubuntu Release Wiki|https://wiki.ubuntu.com/Releases|_blank] for information about successor releases and the Ubuntu life-cycle.Ubuntu Server 12.10false2012-12-18T00:00:00ZfalseUbuntu-cof-100.pnghttp://www.ubuntu.com/aboutus/privacypolicyCanonicalUbuntu-cof-45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US30b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-12_04_2-LTS-amd64-server-20130225-en-us-30GBLinuxhttp://www.ubuntu.com/project/about-ubuntu/licensing;http://www.ubuntu.com/aboutus/privacypolicyUbuntu Server 12.04.3 LTS (amd64 20130225) for Windows Azure. Ubuntu Server is the world's most popular Linux for cloud environments. Updates and patches for Ubuntu 12.04.3 LTS will be available until 2017-04-26. Ubuntu Server is the perfect platform for all workloads from web applications to NoSQL databases and Hadoop. 
For more information see http://www.ubuntu.com/cloud and http://juju.ubuntu.com.Ubuntu Server 12.04 LTStrue2013-02-25T00:00:00ZfalseUbuntu-cof-100.pnghttp://www.ubuntu.com/aboutus/privacypolicyCanonicalUbuntu-cof-45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US30b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-12_04_2-LTS-amd64-server-20130325-en-us-30GBLinuxhttp://www.ubuntu.com/project/about-ubuntu/licensing;http://www.ubuntu.com/aboutus/privacypolicyUbuntu Server 12.04.3 LTS (amd64 20130325) for Windows Azure. Ubuntu Server is the world's most popular Linux for cloud environments. Updates and patches for Ubuntu 12.04.3 LTS will be available until 2017-04-26. Ubuntu Server is the perfect platform for all workloads from web applications to NoSQL databases and Hadoop. For more information see http://www.ubuntu.com/cloud and http://juju.ubuntu.com.Ubuntu Server 12.04 LTSfalse2013-03-25T00:00:00ZfalseUbuntu-cof-100.pnghttp://www.ubuntu.com/aboutus/privacypolicyCanonicalUbuntu-cof-45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US30b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-12_04_2-LTS-amd64-server-20130415-en-us-30GBLinuxhttp://www.ubuntu.com/project/about-ubuntu/licensing;http://www.ubuntu.com/aboutus/privacypolicyUbuntu Server 12.04.3 LTS (amd64 20130415) for Windows Azure. Ubuntu Server is the world's most popular Linux for cloud environments. Updates and patches for Ubuntu 12.04.3 LTS will be available until 2017-04-26. Ubuntu Server is the perfect platform for all workloads from web applications to NoSQL databases and Hadoop. 
For more information see http://www.ubuntu.com/cloud and http://juju.ubuntu.com.Ubuntu Server 12.04 LTSfalse2013-04-15T00:00:00ZfalseUbuntu-cof-100.pnghttp://www.ubuntu.com/aboutus/privacypolicyCanonicalUbuntu-cof-45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US30b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-12_04_2-LTS-amd64-server-20130516-en-us-30GBLinuxhttp://www.ubuntu.com/project/about-ubuntu/licensing;http://www.ubuntu.com/aboutus/privacypolicyUbuntu Server 12.04.3 LTS (amd64 20130516) for Windows Azure. Ubuntu Server is the world's most popular Linux for cloud environments. Updates and patches for Ubuntu 12.04.3 LTS will be available until 2017-04-26. Ubuntu Server is the perfect platform for all workloads from web applications to NoSQL databases and Hadoop. For more information see http://www.ubuntu.com/cloud and http://juju.ubuntu.com.Ubuntu Server 12.04 LTStrue2013-05-17T00:00:00ZfalseUbuntu-cof-100.pnghttp://www.ubuntu.com/aboutus/privacypolicyCanonicalUbuntu-cof-45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US30b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-12_04_2-LTS-amd64-server-20130527-en-us-30GBLinuxhttp://www.ubuntu.com/project/about-ubuntu/licensing;http://www.ubuntu.com/aboutus/privacypolicyUbuntu Server 12.04.3 LTS (amd64 20130527) for Windows Azure. Ubuntu Server is the world's most popular Linux for cloud environments. Updates and patches for Ubuntu 12.04.3 LTS will be available until 2017-04-26. Ubuntu Server is the perfect platform for all workloads from web applications to NoSQL databases and Hadoop. 
For more information see http://www.ubuntu.com/cloud and http://juju.ubuntu.com.Ubuntu Server 12.04 LTStrue2013-05-27T00:00:00ZfalseUbuntu-cof-100.pnghttp://www.ubuntu.com/aboutus/privacypolicyCanonicalUbuntu-cof-45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US30b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-12_04_2-LTS-amd64-server-20130603-en-us-30GBLinuxhttp://www.ubuntu.com/project/about-ubuntu/licensing;http://www.ubuntu.com/aboutus/privacypolicyUbuntu Server 12.04.3 LTS (amd64 20130603) for Windows Azure. Ubuntu Server is the world's most popular Linux for cloud environments. Updates and patches for Ubuntu 12.04.3 LTS will be available until 2017-04-26. Ubuntu Server is the perfect platform for all workloads from web applications to NoSQL databases and Hadoop. For more information see http://www.ubuntu.com/cloud and http://juju.ubuntu.com.Ubuntu Server 12.04 LTStrue2013-06-03T00:00:00ZfalseUbuntu-cof-100.pnghttp://www.ubuntu.com/aboutus/privacypolicyCanonicalUbuntu-cof-45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US30b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-12_04_2-LTS-amd64-server-20130624-en-us-30GBLinuxhttp://www.ubuntu.com/project/about-ubuntu/licensing;http://www.ubuntu.com/aboutus/privacypolicyUbuntu Server 12.04.3 LTS (amd64 20130624) for Windows Azure. Ubuntu Server is the world's most popular Linux for cloud environments. Updates and patches for Ubuntu 12.04.3 LTS will be available until 2017-04-26. Ubuntu Server is the perfect platform for all workloads from web applications to NoSQL databases and Hadoop. 
For more information see http://www.ubuntu.com/cloud and http://juju.ubuntu.com.Ubuntu Server 12.04 LTStrue2013-06-24T00:00:00ZfalseUbuntu-cof-100.pnghttp://www.ubuntu.com/aboutus/privacypolicyCanonicalUbuntu-cof-45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US30b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-12_04_3-LTS-amd64-server-20130827-en-us-30GBLinuxhttp://www.ubuntu.com/project/about-ubuntu/licensing;http://www.ubuntu.com/aboutus/privacypolicyUbuntu Server 12.04.3 LTS (amd64 20130827) for Windows Azure. Ubuntu Server is the world's most popular Linux for cloud environments. Updates and patches for Ubuntu 12.04.3 LTS will be available until 2017-04-26. Ubuntu Server is the perfect platform for all workloads from web applications to NoSQL databases and Hadoop. For more information see http://www.ubuntu.com/cloud and http://juju.ubuntu.com.Ubuntu Server 12.04 LTStrue2013-08-27T00:00:00ZfalseUbuntu-cof-100.pnghttp://www.ubuntu.com/aboutus/privacypolicyCanonicalUbuntu-cof-45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US30b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-12_04_3-LTS-amd64-server-20130909-en-us-30GBLinuxhttp://www.ubuntu.com/project/about-ubuntu/licensing;http://www.ubuntu.com/aboutus/privacypolicyUbuntu Server 12.04.3 LTS (amd64 20130909) for Windows Azure. Ubuntu Server is the world's most popular Linux for cloud environments. Updates and patches for Ubuntu 12.04.3 LTS will be available until 2017-04-26. Ubuntu Server is the perfect platform for all workloads from web applications to NoSQL databases and Hadoop. 
For more information see http://www.ubuntu.com/cloud and http://juju.ubuntu.com.Ubuntu Server 12.04 LTStrue2013-09-09T00:00:00ZfalseUbuntu-cof-100.pnghttp://www.ubuntu.com/aboutus/privacypolicyCanonicalUbuntu-cof-45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US30b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-12_04_3-LTS-amd64-server-20130916.1-en-us-30GBLinuxhttp://www.ubuntu.com/project/about-ubuntu/licensing;http://www.ubuntu.com/aboutus/privacypolicyUbuntu Server 12.04.3 LTS (amd64 20130916.1) for Windows Azure. Ubuntu Server is the world's most popular Linux for cloud environments. Updates and patches for Ubuntu 12.04.3 LTS will be available until 2017-04-26. Ubuntu Server is the perfect platform for all workloads from web applications to NoSQL databases and Hadoop. For more information see http://www.ubuntu.com/cloud and http://juju.ubuntu.com.Ubuntu Server 12.04 LTStrue2013-09-16T00:00:00ZfalseUbuntu-cof-100.pnghttp://www.ubuntu.com/aboutus/privacypolicyCanonicalUbuntu-cof-45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US30b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-12_04_3-LTS-amd64-server-20131003-en-us-30GBLinuxhttp://www.ubuntu.com/project/about-ubuntu/licensing;http://www.ubuntu.com/aboutus/privacypolicyUbuntu Server 12.04.3 LTS (amd64 20131003) for Windows Azure. Ubuntu Server is the world's most popular Linux for cloud environments. Updates and patches for Ubuntu 12.04.3 LTS will be available until 2017-04-26. Ubuntu Server is the perfect platform for all workloads from web applications to NoSQL databases and Hadoop. 
For more information see http://www.ubuntu.com/cloud and http://juju.ubuntu.com.Ubuntu Server 12.04 LTStrue2013-10-03T00:00:00ZfalseUbuntu-cof-100.pnghttp://www.ubuntu.com/aboutus/privacypolicyCanonicalUbuntu-cof-45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US30b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-12_04_3-LTS-amd64-server-20131024-en-us-30GBLinuxhttp://www.ubuntu.com/project/about-ubuntu/licensing;http://www.ubuntu.com/aboutus/privacypolicyUbuntu Server 12.04.3 LTS (amd64 20131024) for Windows Azure. Ubuntu Server is the world's most popular Linux for cloud environments. Updates and patches for Ubuntu 12.04.3 LTS will be available until 2017-04-26. Ubuntu Server is the perfect platform for all workloads from web applications to NoSQL databases and Hadoop. For more information see http://www.ubuntu.com/cloud and http://juju.ubuntu.com.Ubuntu Server 12.04 LTStrue2013-10-24T00:00:00ZfalseUbuntu-cof-100.pnghttp://www.ubuntu.com/aboutus/privacypolicyCanonicalUbuntu-cof-45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US30b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-12_04_3-LTS-amd64-server-20131111-en-us-30GBLinuxhttp://www.ubuntu.com/project/about-ubuntu/licensing;http://www.ubuntu.com/aboutus/privacypolicyUbuntu Server 12.04.3 LTS (amd64 20131111) for Windows Azure. Ubuntu Server is the world's most popular Linux for cloud environments. Updates and patches for Ubuntu 12.04.3 LTS will be available until 2017-04-26. Ubuntu Server is the perfect platform for all workloads from web applications to NoSQL databases and Hadoop. 
For more information see [Ubuntu Cloud|http://www.ubuntu.com/cloud|_blank] and [using Juju to deploy your workloads|http://juju.ubuntu.com|_blank].Ubuntu Server 12.04 LTStrue2013-11-11T00:00:00ZfalseUbuntu-cof-100.pnghttp://www.ubuntu.com/aboutus/privacypolicyCanonicalUbuntu-cof-45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US30b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-12_04_3-LTS-amd64-server-20131114-en-us-30GBLinuxhttp://www.ubuntu.com/project/about-ubuntu/licensing;http://www.ubuntu.com/aboutus/privacypolicyUbuntu Server 12.04.3 LTS (amd64 20131114) for Windows Azure. Ubuntu Server is the world's most popular Linux for cloud environments. Updates and patches for Ubuntu 12.04.3 LTS will be available until 2017-04-26. Ubuntu Server is the perfect platform for all workloads from web applications to NoSQL databases and Hadoop. For more information see [Ubuntu Cloud|http://www.ubuntu.com/cloud|_blank] and [using Juju to deploy your workloads|http://juju.ubuntu.com|_blank].Ubuntu Server 12.04 LTStrue2013-11-14T00:00:00ZfalseUbuntu-cof-100.pnghttp://www.ubuntu.com/aboutus/privacypolicyCanonicalUbuntu-cof-45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US30b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-12_04_3-LTS-amd64-server-20131205-en-us-30GBLinuxhttp://www.ubuntu.com/project/about-ubuntu/licensing;http://www.ubuntu.com/aboutus/privacypolicyUbuntu Server 12.04.3 LTS (amd64 20131205) for Windows Azure. Ubuntu Server is the world's most popular Linux for cloud environments. Updates and patches for Ubuntu 12.04.3 LTS will be available until 2017-04-26. Ubuntu Server is the perfect platform for all workloads from web applications to NoSQL databases and Hadoop. 
For more information see [Ubuntu Cloud|http://www.ubuntu.com/cloud|_blank] and [using Juju to deploy your workloads|http://juju.ubuntu.com|_blank].Ubuntu Server 12.04 LTStrue2013-12-05T00:00:00ZfalseUbuntu-cof-100.pnghttp://www.ubuntu.com/aboutus/privacypolicyCanonicalUbuntu-cof-45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US30b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-12_04_3-LTS-amd64-server-20140127-en-us-30GBLinuxhttp://www.ubuntu.com/project/about-ubuntu/licensing;http://www.ubuntu.com/aboutus/privacypolicyUbuntu Server 12.04.3 LTS (amd64 20140127) for Windows Azure. Ubuntu Server is the world's most popular Linux for cloud environments. Updates and patches for Ubuntu 12.04.3 LTS will be available until 2017-04-26. Ubuntu Server is the perfect platform for all workloads from web applications to NoSQL databases and Hadoop. For more information see [Ubuntu Cloud|http://www.ubuntu.com/cloud|_blank] and [using Juju to deploy your workloads|http://juju.ubuntu.com|_blank].Ubuntu Server 12.04 LTStrue2014-01-27T00:00:00ZfalseUbuntu-cof-100.pnghttp://www.ubuntu.com/aboutus/privacypolicyCanonicalUbuntu-cof-45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US30b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-12_04_3-LTS-amd64-server-20140130-en-us-30GBLinuxhttp://www.ubuntu.com/project/about-ubuntu/licensing;http://www.ubuntu.com/aboutus/privacypolicyUbuntu Server 12.04.3 LTS (amd64 20140130) for Windows Azure. Ubuntu Server is the world's most popular Linux for cloud environments. Updates and patches for Ubuntu 12.04.3 LTS will be available until 2017-04-26. Ubuntu Server is the perfect platform for all workloads from web applications to NoSQL databases and Hadoop. 
For more information see [Ubuntu Cloud|http://www.ubuntu.com/cloud|_blank] and [using Juju to deploy your workloads|http://juju.ubuntu.com|_blank].Ubuntu Server 12.04 LTStrue2014-01-30T00:00:00ZfalseUbuntu-cof-100.pnghttp://www.ubuntu.com/aboutus/privacypolicyCanonicalUbuntu-cof-45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US30b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-12_04_4-LTS-amd64-server-20140227-en-us-30GBLinuxhttp://www.ubuntu.com/project/about-ubuntu/licensing;http://www.ubuntu.com/aboutus/privacypolicyUbuntu Server 12.04.4 LTS (amd64 20140227) for Windows Azure. Ubuntu Server is the world's most popular Linux for cloud environments. Updates and patches for Ubuntu 12.04.4 LTS will be available until 2017-04-26. Ubuntu Server is the perfect platform for all workloads from web applications to NoSQL databases and Hadoop. For more information see [Ubuntu Cloud|http://www.ubuntu.com/cloud|_blank] and [using Juju to deploy your workloads|http://juju.ubuntu.com|_blank].Ubuntu Server 12.04 LTStrue2014-02-27T00:00:00ZfalseUbuntu-cof-100.pnghttp://www.ubuntu.com/aboutus/privacypolicyCanonicalUbuntu-cof-45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US30b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-12_04_4-LTS-amd64-server-20140408-en-us-30GBLinuxhttp://www.ubuntu.com/project/about-ubuntu/licensing;http://www.ubuntu.com/aboutus/privacypolicyUbuntu Server 12.04.4 LTS (amd64 20140408) for Windows Azure. Ubuntu Server is the world's most popular Linux for cloud environments. Updates and patches for Ubuntu 12.04.4 LTS will be available until 2017-04-26. Ubuntu Server is the perfect platform for all workloads from web applications to NoSQL databases and Hadoop. 
For more information see [Ubuntu Cloud|http://www.ubuntu.com/cloud|_blank] and [using Juju to deploy your workloads|http://juju.ubuntu.com|_blank].Ubuntu Server 12.04 LTStrue2014-04-08T00:00:00ZfalseUbuntu-cof-100.pnghttp://www.ubuntu.com/aboutus/privacypolicyCanonicalUbuntu-cof-45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US30b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-12_04_4-LTS-amd64-server-20140428-en-us-30GBLinuxhttp://www.ubuntu.com/project/about-ubuntu/licensing;http://www.ubuntu.com/aboutus/privacypolicyUbuntu Server 12.04.4 LTS (amd64 20140428) for Windows Azure. Ubuntu Server is the world's most popular Linux for cloud environments. Updates and patches for Ubuntu 12.04.4 LTS will be available until 2017-04-26. Ubuntu Server is the perfect platform for all workloads from web applications to NoSQL databases and Hadoop. For more information see [Ubuntu Cloud|http://www.ubuntu.com/cloud|_blank] and [using Juju to deploy your workloads|http://juju.ubuntu.com|_blank].Ubuntu Server 12.04 LTStrue2014-04-28T00:00:00ZfalseUbuntu-cof-100.pnghttp://www.ubuntu.com/aboutus/privacypolicyCanonicalUbuntu-cof-45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US30b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-12_04_4-LTS-amd64-server-20140514-en-us-30GBLinuxhttp://www.ubuntu.com/project/about-ubuntu/licensing;http://www.ubuntu.com/aboutus/privacypolicyUbuntu Server 12.04.4 LTS (amd64 20140514) for Windows Azure. Ubuntu Server is the world's most popular Linux for cloud environments. Updates and patches for Ubuntu 12.04.4 LTS will be available until 2017-04-26. Ubuntu Server is the perfect platform for all workloads from web applications to NoSQL databases and Hadoop. 
For more information see [Ubuntu Cloud|http://www.ubuntu.com/cloud|_blank] and [using Juju to deploy your workloads|http://juju.ubuntu.com|_blank].Ubuntu Server 12.04 LTStrue2014-05-15T00:00:00ZfalseUbuntu-cof-100.pnghttp://www.ubuntu.com/aboutus/privacypolicyCanonicalUbuntu-cof-45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US30b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-13_10-amd64-server-20130808-alpha3-en-us-30GBLinuxhttp://www.ubuntu.com/project/about-ubuntu/licensing;http://www.ubuntu.com/aboutus/privacypolicyUbuntu Server 13.10 (amd64 20130808) for Windows Azure. Ubuntu Server is the world's most popular Linux for cloud environments. Updates and patches for Ubuntu 13.10 will be available until 2014-07-17. Ubuntu Server is the perfect platform for all workloads from web applications to NoSQL databases and Hadoop. For more information see http://www.ubuntu.com/cloud and http://juju.ubuntu.com.Ubuntu Server 13.10false2013-08-08T00:00:00ZfalseUbuntu-cof-100.pnghttp://www.ubuntu.com/aboutus/privacypolicyCanonicalUbuntu-cof-45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US30b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-13_10-amd64-server-20130905-beta1-en-us-30GBLinuxhttp://www.ubuntu.com/project/about-ubuntu/licensing;http://www.ubuntu.com/aboutus/privacypolicyUbuntu Server 13.10 (amd64 20130905) for Windows Azure. Ubuntu Server is the world's most popular Linux for cloud environments. Updates and patches for Ubuntu 13.10 will be available until 2014-07-17. Ubuntu Server is the perfect platform for all workloads from web applications to NoSQL databases and Hadoop. 
For more information see http://www.ubuntu.com/cloud and http://juju.ubuntu.com.Ubuntu Server 13.10false2013-09-05T00:00:00ZfalseUbuntu-cof-100.pnghttp://www.ubuntu.com/aboutus/privacypolicyCanonicalUbuntu-cof-45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US30b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-13_10-amd64-server-20130925-beta2-en-us-30GBLinuxhttp://www.ubuntu.com/project/about-ubuntu/licensing;http://www.ubuntu.com/aboutus/privacypolicyUbuntu Server 13.10 (amd64 20130925) for Windows Azure. Ubuntu Server is the world's most popular Linux for cloud environments. Updates and patches for Ubuntu 13.10 will be available until 2014-07-17. Ubuntu Server is the perfect platform for all workloads from web applications to NoSQL databases and Hadoop. For more information see http://www.ubuntu.com/cloud and http://juju.ubuntu.com.Ubuntu Server 13.10false2013-09-26T00:00:00ZfalseUbuntu-cof-100.pnghttp://www.ubuntu.com/aboutus/privacypolicyCanonicalUbuntu-cof-45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US30b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-13_10-amd64-server-20131015-en-us-30GBLinuxhttp://www.ubuntu.com/project/about-ubuntu/licensing;http://www.ubuntu.com/aboutus/privacypolicyUbuntu Server 13.10 (amd64 20131015) for Windows Azure. Ubuntu Server is the world's most popular Linux for cloud environments. Updates and patches for Ubuntu 13.10 will be available until 2014-07-17. Ubuntu Server is the perfect platform for all workloads from web applications to NoSQL databases and Hadoop. 
For more information see http://www.ubuntu.com/cloud and http://juju.ubuntu.com.Ubuntu Server 13.10true2013-10-15T00:00:00ZfalseUbuntu-cof-100.pnghttp://www.ubuntu.com/aboutus/privacypolicyCanonicalUbuntu-cof-45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US30b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-13_10-amd64-server-20131113-en-us-30GBLinuxhttp://www.ubuntu.com/project/about-ubuntu/licensing;http://www.ubuntu.com/aboutus/privacypolicyUbuntu Server 13.10 (amd64 20131113) for Windows Azure. Ubuntu Server is the world's most popular Linux for cloud environments. Updates and patches for Ubuntu 13.10 will be available until 2014-07-17. Ubuntu Server is the perfect platform for all workloads from web applications to NoSQL databases and Hadoop. For more information see [Ubuntu Cloud|http://www.ubuntu.com/cloud|_blank] and [using Juju to deploy your workloads|http://juju.ubuntu.com|_blank].Ubuntu Server 13.10true2013-11-13T00:00:00ZfalseUbuntu-cof-100.pnghttp://www.ubuntu.com/aboutus/privacypolicyCanonicalUbuntu-cof-45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US30b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-13_10-amd64-server-20131204-en-us-30GBLinuxhttp://www.ubuntu.com/project/about-ubuntu/licensing;http://www.ubuntu.com/aboutus/privacypolicyUbuntu Server 13.10 (amd64 20131204) for Windows Azure. Ubuntu Server is the world's most popular Linux for cloud environments. Updates and patches for Ubuntu 13.10 will be available until 2014-07-17. Ubuntu Server is the perfect platform for all workloads from web applications to NoSQL databases and Hadoop. 
For more information see [Ubuntu Cloud|http://www.ubuntu.com/cloud|_blank] and [using Juju to deploy your workloads|http://juju.ubuntu.com|_blank].Ubuntu Server 13.10true2013-12-04T00:00:00ZfalseUbuntu-cof-100.pnghttp://www.ubuntu.com/aboutus/privacypolicyCanonicalUbuntu-cof-45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US30b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-13_10-amd64-server-20131215-en-us-30GBLinuxhttp://www.ubuntu.com/project/about-ubuntu/licensing;http://www.ubuntu.com/aboutus/privacypolicyUbuntu Server 13.10 (amd64 20131215) for Windows Azure. Ubuntu Server is the world's most popular Linux for cloud environments. Updates and patches for Ubuntu 13.10 will be available until 2014-07-17. Ubuntu Server is the perfect platform for all workloads from web applications to NoSQL databases and Hadoop. For more information see [Ubuntu Cloud|http://www.ubuntu.com/cloud|_blank] and [using Juju to deploy your workloads|http://juju.ubuntu.com|_blank].Ubuntu Server 13.10true2013-12-15T00:00:00ZfalseUbuntu-cof-100.pnghttp://www.ubuntu.com/aboutus/privacypolicyCanonicalUbuntu-cof-45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US30b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-13_10-amd64-server-20140108-en-us-30GBLinuxhttp://www.ubuntu.com/project/about-ubuntu/licensing;http://www.ubuntu.com/aboutus/privacypolicyUbuntu Server 13.10 (amd64 20140108) for Windows Azure. Ubuntu Server is the world's most popular Linux for cloud environments. Updates and patches for Ubuntu 13.10 will be available until 2014-07-17. Ubuntu Server is the perfect platform for all workloads from web applications to NoSQL databases and Hadoop. 
For more information see [Ubuntu Cloud|http://www.ubuntu.com/cloud|_blank] and [using Juju to deploy your workloads|http://juju.ubuntu.com|_blank].Ubuntu Server 13.10true2014-01-08T00:00:00ZfalseUbuntu-cof-100.pnghttp://www.ubuntu.com/aboutus/privacypolicyCanonicalUbuntu-cof-45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US30b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-13_10-amd64-server-20140112-en-us-30GBLinuxhttp://www.ubuntu.com/project/about-ubuntu/licensing;http://www.ubuntu.com/aboutus/privacypolicyUbuntu Server 13.10 (amd64 20140112) for Windows Azure. Ubuntu Server is the world's most popular Linux for cloud environments. Updates and patches for Ubuntu 13.10 will be available until 2014-07-17. Ubuntu Server is the perfect platform for all workloads from web applications to NoSQL databases and Hadoop. For more information see [Ubuntu Cloud|http://www.ubuntu.com/cloud|_blank] and [using Juju to deploy your workloads|http://juju.ubuntu.com|_blank].Ubuntu Server 13.10true2014-01-12T00:00:00ZfalseUbuntu-cof-100.pnghttp://www.ubuntu.com/aboutus/privacypolicyCanonicalUbuntu-cof-45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US30b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-13_10-amd64-server-20140119-en-us-30GBLinuxhttp://www.ubuntu.com/project/about-ubuntu/licensing;http://www.ubuntu.com/aboutus/privacypolicyUbuntu Server 13.10 (amd64 20140119) for Windows Azure. Ubuntu Server is the world's most popular Linux for cloud environments. Updates and patches for Ubuntu 13.10 will be available until 2014-07-17. Ubuntu Server is the perfect platform for all workloads from web applications to NoSQL databases and Hadoop. 
For more information see [Ubuntu Cloud|http://www.ubuntu.com/cloud|_blank] and [using Juju to deploy your workloads|http://juju.ubuntu.com|_blank].Ubuntu Server 13.10true2014-01-19T00:00:00ZfalseUbuntu-cof-100.pnghttp://www.ubuntu.com/aboutus/privacypolicyCanonicalUbuntu-cof-45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US30b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-13_10-amd64-server-20140129-en-us-30GBLinuxhttp://www.ubuntu.com/project/about-ubuntu/licensing;http://www.ubuntu.com/aboutus/privacypolicyUbuntu Server 13.10 (amd64 20140129) for Windows Azure. Ubuntu Server is the world's most popular Linux for cloud environments. Updates and patches for Ubuntu 13.10 will be available until 2014-07-17. Ubuntu Server is the perfect platform for all workloads from web applications to NoSQL databases and Hadoop. For more information see [Ubuntu Cloud|http://www.ubuntu.com/cloud|_blank] and [using Juju to deploy your workloads|http://juju.ubuntu.com|_blank].Ubuntu Server 13.10true2014-01-29T00:00:00ZfalseUbuntu-cof-100.pnghttp://www.ubuntu.com/aboutus/privacypolicyCanonicalUbuntu-cof-45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US30b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-13_10-amd64-server-20140202-en-us-30GBLinuxhttp://www.ubuntu.com/project/about-ubuntu/licensing;http://www.ubuntu.com/aboutus/privacypolicyUbuntu Server 13.10 (amd64 20140202) for Windows Azure. Ubuntu Server is the world's most popular Linux for cloud environments. Updates and patches for Ubuntu 13.10 will be available until 2014-07-17. Ubuntu Server is the perfect platform for all workloads from web applications to NoSQL databases and Hadoop. 
For more information see [Ubuntu Cloud|http://www.ubuntu.com/cloud|_blank] and [using Juju to deploy your workloads|http://juju.ubuntu.com|_blank].Ubuntu Server 13.10true2014-02-02T00:00:00ZfalseUbuntu-cof-100.pnghttp://www.ubuntu.com/aboutus/privacypolicyCanonicalUbuntu-cof-45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US30b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-13_10-amd64-server-20140212-en-us-30GBLinuxhttp://www.ubuntu.com/project/about-ubuntu/licensing;http://www.ubuntu.com/aboutus/privacypolicyUbuntu Server 13.10 (amd64 20140212) for Windows Azure. Ubuntu Server is the world's most popular Linux for cloud environments. Updates and patches for Ubuntu 13.10 will be available until 2014-07-17. Ubuntu Server is the perfect platform for all workloads from web applications to NoSQL databases and Hadoop. For more information see [Ubuntu Cloud|http://www.ubuntu.com/cloud|_blank] and [using Juju to deploy your workloads|http://juju.ubuntu.com|_blank].Ubuntu Server 13.10true2014-02-12T00:00:00ZfalseUbuntu-cof-100.pnghttp://www.ubuntu.com/aboutus/privacypolicyCanonicalUbuntu-cof-45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US30b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-13_10-amd64-server-20140226-en-us-30GBLinuxhttp://www.ubuntu.com/project/about-ubuntu/licensing;http://www.ubuntu.com/aboutus/privacypolicyUbuntu Server 13.10 (amd64 20140226) for Windows Azure. Ubuntu Server is the world's most popular Linux for cloud environments. Updates and patches for Ubuntu 13.10 will be available until 2014-07-17. Ubuntu Server is the perfect platform for all workloads from web applications to NoSQL databases and Hadoop. 
For more information see [Ubuntu Cloud|http://www.ubuntu.com/cloud|_blank] and [using Juju to deploy your workloads|http://juju.ubuntu.com|_blank].Ubuntu Server 13.10true2014-02-27T00:00:00ZfalseUbuntu-cof-100.pnghttp://www.ubuntu.com/aboutus/privacypolicyCanonicalUbuntu-cof-45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US30b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-13_10-amd64-server-20140409.1-en-us-30GBLinuxhttp://www.ubuntu.com/project/about-ubuntu/licensing;http://www.ubuntu.com/aboutus/privacypolicyUbuntu Server 13.10 (amd64 20140409.1) for Windows Azure. Ubuntu Server is the world's most popular Linux for cloud environments. Updates and patches for Ubuntu 13.10 will be available until 2014-07-17. Ubuntu Server is the perfect platform for all workloads from web applications to NoSQL databases and Hadoop. For more information see [Ubuntu Cloud|http://www.ubuntu.com/cloud|_blank] and [using Juju to deploy your workloads|http://juju.ubuntu.com|_blank].Ubuntu Server 13.10true2014-04-10T00:00:00ZfalseUbuntu-cof-100.pnghttp://www.ubuntu.com/aboutus/privacypolicyCanonicalUbuntu-cof-45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US30b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-13_10-amd64-server-20140427-en-us-30GBLinuxhttp://www.ubuntu.com/project/about-ubuntu/licensing;http://www.ubuntu.com/aboutus/privacypolicyUbuntu Server 13.10 (amd64 20140427) for Windows Azure. Ubuntu Server is the world's most popular Linux for cloud environments. Updates and patches for Ubuntu 13.10 will be available until 2014-07-17. Ubuntu Server is the perfect platform for all workloads from web applications to NoSQL databases and Hadoop. 
For more information see [Ubuntu Cloud|http://www.ubuntu.com/cloud|_blank] and [using Juju to deploy your workloads|http://juju.ubuntu.com|_blank].Ubuntu Server 13.10true2014-04-27T00:00:00ZfalseUbuntu-cof-100.pnghttp://www.ubuntu.com/aboutus/privacypolicyCanonicalUbuntu-cof-45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US30b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-13_10-amd64-server-20140507-en-us-30GBLinuxhttp://www.ubuntu.com/project/about-ubuntu/licensing;http://www.ubuntu.com/aboutus/privacypolicyUbuntu Server 13.10 (amd64 20140507) for Windows Azure. Ubuntu Server is the world's most popular Linux for cloud environments. Updates and patches for Ubuntu 13.10 will be available until 2014-07-17. Ubuntu Server is the perfect platform for all workloads from web applications to NoSQL databases and Hadoop. For more information see [Ubuntu Cloud|http://www.ubuntu.com/cloud|_blank] and [using Juju to deploy your workloads|http://juju.ubuntu.com|_blank].Ubuntu Server 13.10true2014-05-07T00:00:00ZfalseUbuntu-cof-100.pnghttp://www.ubuntu.com/aboutus/privacypolicyCanonicalUbuntu-cof-45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US30b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-14_04-LTS-amd64-server-20140122.1-alpha2-en-us-30GBLinuxhttp://www.ubuntu.com/project/about-ubuntu/licensing;http://www.ubuntu.com/aboutus/privacypolicyMILESTONE RELEASE: This is a milestone release and is considered experimental. This build is unsupported and is for development and preview reference only. Ubuntu Server 14.04 LTS (amd64 20140122.1) for Windows Azure. Ubuntu Server is the world's most popular Linux for cloud environments. Updates and patches for Ubuntu 14.04 LTS will be available until 2019-04-17. Ubuntu Server is the perfect platform for all workloads from web applications to NoSQL databases and Hadoop. 
For more information see [Ubuntu Cloud|http://www.ubuntu.com/cloud|_blank] and [using Juju to deploy your workloads|http://juju.ubuntu.com|_blank].Ubuntu Server 14.04 LTStrue2014-01-23T00:00:00ZfalseUbuntu-cof-100.pnghttp://www.ubuntu.com/aboutus/privacypolicyCanonicalUbuntu-cof-45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US30b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-14_04-LTS-amd64-server-20140226.1-beta1-en-us-30GBLinuxhttp://www.ubuntu.com/project/about-ubuntu/licensing;http://www.ubuntu.com/aboutus/privacypolicyMILESTONE RELEASE: This is a milestone release and is considered experimental. This build is unsupported and is for development and preview reference only. Ubuntu Server 14.04 LTS (amd64 20140226.1) for Windows Azure. Ubuntu Server is the world's most popular Linux for cloud environments. Updates and patches for Ubuntu 14.04 LTS will be available until 2019-04-17. Ubuntu Server is the perfect platform for all workloads from web applications to NoSQL databases and Hadoop. For more information see [Ubuntu Cloud|http://www.ubuntu.com/cloud|_blank] and [using Juju to deploy your workloads|http://juju.ubuntu.com|_blank].Ubuntu Server 14.04 LTStrue2014-02-28T00:00:00ZfalseUbuntu-cof-100.pnghttp://www.ubuntu.com/aboutus/privacypolicyCanonicalUbuntu-cof-45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US30b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-14_04-LTS-amd64-server-20140326-beta2-en-us-30GBLinuxhttp://www.ubuntu.com/project/about-ubuntu/licensing;http://www.ubuntu.com/aboutus/privacypolicyMILESTONE RELEASE: This is a milestone release and is considered experimental. This build is unsupported and is for development and preview reference only. Ubuntu Server 14.04 LTS (amd64 20140326) for Windows Azure. Ubuntu Server is the world's most popular Linux for cloud environments. Updates and patches for Ubuntu 14.04 LTS will be available until 2019-04-17. 
Ubuntu Server is the perfect platform for all workloads from web applications to NoSQL databases and Hadoop. For more information see [Ubuntu Cloud|http://www.ubuntu.com/cloud|_blank] and [using Juju to deploy your workloads|http://juju.ubuntu.com|_blank].Ubuntu Server 14.04 LTSfalse2014-03-27T00:00:00ZfalseUbuntu-cof-100.pnghttp://www.ubuntu.com/aboutus/privacypolicyCanonicalUbuntu-cof-45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US30b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-14_04-LTS-amd64-server-20140414-en-us-30GBLinuxhttp://www.ubuntu.com/project/about-ubuntu/licensing;http://www.ubuntu.com/aboutus/privacypolicyUbuntu Server 14.04 LTS (amd64 20140414) for Windows Azure. Ubuntu Server is the world's most popular Linux for cloud environments. Updates and patches for Ubuntu 14.04 LTS will be available until 2019-04-17. Ubuntu Server is the perfect platform for all workloads from web applications to NoSQL databases and Hadoop. For more information see [Ubuntu Cloud|http://www.ubuntu.com/cloud|_blank] and [using Juju to deploy your workloads|http://juju.ubuntu.com|_blank].Ubuntu Server 14.04 LTStrue2014-04-14T00:00:00ZfalseUbuntu-cof-100.pnghttp://www.ubuntu.com/aboutus/privacypolicyCanonicalUbuntu-cof-45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US30b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-14_04-LTS-amd64-server-20140416.1-en-us-30GBLinuxhttp://www.ubuntu.com/project/about-ubuntu/licensing;http://www.ubuntu.com/aboutus/privacypolicyUbuntu Server 14.04 LTS (amd64 20140416.1) for Windows Azure. Ubuntu Server is the world's most popular Linux for cloud environments. Updates and patches for Ubuntu 14.04 LTS will be available until 2019-04-17. Ubuntu Server is the perfect platform for all workloads from web applications to NoSQL databases and Hadoop. 
For more information see [Ubuntu Cloud|http://www.ubuntu.com/cloud|_blank] and [using Juju to deploy your workloads|http://juju.ubuntu.com|_blank].Ubuntu Server 14.04 LTStrue2014-04-17T00:00:00ZfalseUbuntu-cof-100.pnghttp://www.ubuntu.com/aboutus/privacypolicyCanonicalUbuntu-cof-45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;West US30b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu_DAILY_BUILD-precise-12_04_3-LTS-amd64-server-20131125-en-us-30GBLinuxhttp://www.ubuntu.com/project/about-ubuntu/licensing;http://www.ubuntu.com/aboutus/privacypolicyDAILY BUILD 20131125 of Ubuntu Server 12.04.3 LTS DAILY (amd64) for Windows Azure. Daily builds are up-to-date builds of the regular release images for Ubuntu Server. While every effort is made to make sure that these are production quality, these images come with no warranty. In the event of a support issue, you may be asked to update to an official released build. For more information see [Ubuntu Cloud|http://www.ubuntu.com/cloud|_blank] and [using Juju to deploy your workloads|http://juju.ubuntu.com|_blank]. Ubuntu Server 12.04 LTS DAILYfalse2013-11-25T00:00:00ZfalseUbuntu-cof-100.pnghttp://www.ubuntu.com/aboutus/privacypolicyCanonicalUbuntu-cof-45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US30b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu_DAILY_BUILD-precise-12_04_4-LTS-amd64-server-20140514-en-us-30GBLinuxhttp://www.ubuntu.com/project/about-ubuntu/licensing;http://www.ubuntu.com/aboutus/privacypolicyDAILY BUILD 20140514 of Ubuntu Server 12.04.4 LTS DAILY (amd64) for Windows Azure. Daily builds are up-to-date builds of the regular release images for Ubuntu Server. While every effort is made to make sure that these are production quality, these images come with no warranty. In the event of a support issue, you may be asked to update to an official released build. 
For more information see [Ubuntu Cloud|http://www.ubuntu.com/cloud|_blank] and [using Juju to deploy your workloads|http://juju.ubuntu.com|_blank]. Ubuntu Server 12.04 LTS DAILYfalse2014-05-15T00:00:00ZfalseUbuntu-cof-100.pnghttp://www.ubuntu.com/aboutus/privacypolicyCanonicalUbuntu-cof-45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US30b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu_DAILY_BUILD-precise-12_04_4-LTS-amd64-server-20140515-en-us-30GBLinuxhttp://www.ubuntu.com/project/about-ubuntu/licensing;http://www.ubuntu.com/aboutus/privacypolicyDAILY BUILD 20140515 of Ubuntu Server 12.04.4 LTS DAILY (amd64) for Windows Azure. Daily builds are up-to-date builds of the regular release images for Ubuntu Server. While every effort is made to make sure that these are production quality, these images come with no warranty. In the event of a support issue, you may be asked to update to an official released build. For more information see [Ubuntu Cloud|http://www.ubuntu.com/cloud|_blank] and [using Juju to deploy your workloads|http://juju.ubuntu.com|_blank]. Ubuntu Server 12.04 LTS DAILYfalse2014-05-15T00:00:00ZfalseUbuntu-cof-100.pnghttp://www.ubuntu.com/aboutus/privacypolicyCanonicalUbuntu-cof-45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US30b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu_DAILY_BUILD-precise-12_04_4-LTS-amd64-server-20140519-en-us-30GBLinuxhttp://www.ubuntu.com/project/about-ubuntu/licensing;http://www.ubuntu.com/aboutus/privacypolicyDAILY BUILD 20140519 of Ubuntu Server 12.04.4 LTS DAILY (amd64) for Windows Azure. Daily builds are up-to-date builds of the regular release images for Ubuntu Server. While every effort is made to make sure that these are production quality, these images come with no warranty. In the event of a support issue, you may be asked to update to an official released build. 
For more information see [Ubuntu Cloud|http://www.ubuntu.com/cloud|_blank] and [using Juju to deploy your workloads|http://juju.ubuntu.com|_blank]. Ubuntu Server 12.04 LTS DAILYfalse2014-05-19T00:00:00ZfalseUbuntu-cof-100.pnghttp://www.ubuntu.com/aboutus/privacypolicyCanonicalUbuntu-cof-45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US30b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu_DAILY_BUILD-precise-12_04_4-LTS-amd64-server-20140526-en-us-30GBLinuxhttp://www.ubuntu.com/project/about-ubuntu/licensing;http://www.ubuntu.com/aboutus/privacypolicyDAILY BUILD 20140526 of Ubuntu Server 12.04.4 LTS DAILY (amd64) for Windows Azure. Daily builds are up-to-date builds of the regular release images for Ubuntu Server. While every effort is made to make sure that these are production quality, these images come with no warranty. In the event of a support issue, you may be asked to update to an official released build. For more information see [Ubuntu Cloud|http://www.ubuntu.com/cloud|_blank] and [using Juju to deploy your workloads|http://juju.ubuntu.com|_blank]. Ubuntu Server 12.04 LTS DAILYfalse2014-05-26T00:00:00ZfalseUbuntu-cof-100.pnghttp://www.ubuntu.com/aboutus/privacypolicyCanonicalUbuntu-cof-45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;West US30b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu_DAILY_BUILD-saucy-13_10-amd64-server-20140507-en-us-30GBLinuxhttp://www.ubuntu.com/project/about-ubuntu/licensing;http://www.ubuntu.com/aboutus/privacypolicyDAILY BUILD 20140507 of Ubuntu Server 13.10 DAILY (amd64) for Windows Azure. Daily builds are up-to-date builds of the regular release images for Ubuntu Server. While every effort is made to make sure that these are production quality, these images come with no warranty. In the event of a support issue, you may be asked to update to an official released build. 
For more information see [Ubuntu Cloud|http://www.ubuntu.com/cloud|_blank] and [using Juju to deploy your workloads|http://juju.ubuntu.com|_blank]. Ubuntu Server 13.10 DAILYfalse2014-05-07T00:00:00ZfalseUbuntu-cof-100.pnghttp://www.ubuntu.com/aboutus/privacypolicyCanonicalUbuntu-cof-45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;West US30b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu_DAILY_BUILD-saucy-13_10-amd64-server-20140511-en-us-30GBLinuxhttp://www.ubuntu.com/project/about-ubuntu/licensing;http://www.ubuntu.com/aboutus/privacypolicyDAILY BUILD 20140511 of Ubuntu Server 13.10 DAILY (amd64) for Windows Azure. Daily builds are up-to-date builds of the regular release images for Ubuntu Server. While every effort is made to make sure that these are production quality, these images come with no warranty. In the event of a support issue, you may be asked to update to an official released build. For more information see [Ubuntu Cloud|http://www.ubuntu.com/cloud|_blank] and [using Juju to deploy your workloads|http://juju.ubuntu.com|_blank]. Ubuntu Server 13.10 DAILYfalse2014-05-11T00:00:00ZfalseUbuntu-cof-100.pnghttp://www.ubuntu.com/aboutus/privacypolicyCanonicalUbuntu-cof-45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US30b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu_DAILY_BUILD-saucy-13_10-amd64-server-20140514-en-us-30GBLinuxhttp://www.ubuntu.com/project/about-ubuntu/licensing;http://www.ubuntu.com/aboutus/privacypolicyDAILY BUILD 20140514 of Ubuntu Server 13.10 DAILY (amd64) for Windows Azure. Daily builds are up-to-date builds of the regular release images for Ubuntu Server. While every effort is made to make sure that these are production quality, these images come with no warranty. In the event of a support issue, you may be asked to update to an official released build. 
For more information see [Ubuntu Cloud|http://www.ubuntu.com/cloud|_blank] and [using Juju to deploy your workloads|http://juju.ubuntu.com|_blank]. Ubuntu Server 13.10 DAILYfalse2014-05-14T00:00:00ZfalseUbuntu-cof-100.pnghttp://www.ubuntu.com/aboutus/privacypolicyCanonicalUbuntu-cof-45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US30b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu_DAILY_BUILD-saucy-13_10-amd64-server-20140518-en-us-30GBLinuxhttp://www.ubuntu.com/project/about-ubuntu/licensing;http://www.ubuntu.com/aboutus/privacypolicyDAILY BUILD 20140518 of Ubuntu Server 13.10 DAILY (amd64) for Windows Azure. Daily builds are up-to-date builds of the regular release images for Ubuntu Server. While every effort is made to make sure that these are production quality, these images come with no warranty. In the event of a support issue, you may be asked to update to an official released build. For more information see [Ubuntu Cloud|http://www.ubuntu.com/cloud|_blank] and [using Juju to deploy your workloads|http://juju.ubuntu.com|_blank]. Ubuntu Server 13.10 DAILYfalse2014-05-18T00:00:00ZfalseUbuntu-cof-100.pnghttp://www.ubuntu.com/aboutus/privacypolicyCanonicalUbuntu-cof-45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US30b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu_DAILY_BUILD-saucy-13_10-amd64-server-20140521-en-us-30GBLinuxhttp://www.ubuntu.com/project/about-ubuntu/licensing;http://www.ubuntu.com/aboutus/privacypolicyDAILY BUILD 20140521 of Ubuntu Server 13.10 DAILY (amd64) for Windows Azure. Daily builds are up-to-date builds of the regular release images for Ubuntu Server. While every effort is made to make sure that these are production quality, these images come with no warranty. In the event of a support issue, you may be asked to update to an official released build. 
For more information see [Ubuntu Cloud|http://www.ubuntu.com/cloud|_blank] and [using Juju to deploy your workloads|http://juju.ubuntu.com|_blank]. Ubuntu Server 13.10 DAILYfalse2014-05-21T00:00:00ZfalseUbuntu-cof-100.pnghttp://www.ubuntu.com/aboutus/privacypolicyCanonicalUbuntu-cof-45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US30b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu_DAILY_BUILD-trusty-14_04-LTS-amd64-server-20140517.1-en-us-30GBLinuxhttp://www.ubuntu.com/project/about-ubuntu/licensing;http://www.ubuntu.com/aboutus/privacypolicyDAILY BUILD 20140517.1 of Ubuntu Server 14.04 LTS DAILY (amd64) for Windows Azure. Daily builds are up-to-date builds of the regular release images for Ubuntu Server. While every effort is made to make sure that these are production quality, these images come with no warranty. In the event of a support issue, you may be asked to update to an official released build. For more information see [Ubuntu Cloud|http://www.ubuntu.com/cloud|_blank] and [using Juju to deploy your workloads|http://juju.ubuntu.com|_blank]. Ubuntu Server 14.04 LTS DAILYfalse2014-05-17T00:00:00ZfalseUbuntu-cof-100.pnghttp://www.ubuntu.com/aboutus/privacypolicyCanonicalUbuntu-cof-45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US30b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu_DAILY_BUILD-trusty-14_04-LTS-amd64-server-20140519.1-en-us-30GBLinuxhttp://www.ubuntu.com/project/about-ubuntu/licensing;http://www.ubuntu.com/aboutus/privacypolicyDAILY BUILD 20140519.1 of Ubuntu Server 14.04 LTS DAILY (amd64) for Windows Azure. Daily builds are up-to-date builds of the regular release images for Ubuntu Server. While every effort is made to make sure that these are production quality, these images come with no warranty. In the event of a support issue, you may be asked to update to an official released build. 
For more information see [Ubuntu Cloud|http://www.ubuntu.com/cloud|_blank] and [using Juju to deploy your workloads|http://juju.ubuntu.com|_blank]. Ubuntu Server 14.04 LTS DAILYfalse2014-05-19T00:00:00ZfalseUbuntu-cof-100.pnghttp://www.ubuntu.com/aboutus/privacypolicyCanonicalUbuntu-cof-45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US30b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu_DAILY_BUILD-trusty-14_04-LTS-amd64-server-20140521.1-en-us-30GBLinuxhttp://www.ubuntu.com/project/about-ubuntu/licensing;http://www.ubuntu.com/aboutus/privacypolicyDAILY BUILD 20140521.1 of Ubuntu Server 14.04 LTS DAILY (amd64) for Windows Azure. Daily builds are up-to-date builds of the regular release images for Ubuntu Server. While every effort is made to make sure that these are production quality, these images come with no warranty. In the event of a support issue, you may be asked to update to an official released build. For more information see [Ubuntu Cloud|http://www.ubuntu.com/cloud|_blank] and [using Juju to deploy your workloads|http://juju.ubuntu.com|_blank]. Ubuntu Server 14.04 LTS DAILYfalse2014-05-21T00:00:00ZfalseUbuntu-cof-100.pnghttp://www.ubuntu.com/aboutus/privacypolicyCanonicalUbuntu-cof-45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US30b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu_DAILY_BUILD-trusty-14_04-LTS-amd64-server-20140524.1-en-us-30GBLinuxhttp://www.ubuntu.com/project/about-ubuntu/licensing;http://www.ubuntu.com/aboutus/privacypolicyDAILY BUILD 20140524.1 of Ubuntu Server 14.04 LTS DAILY (amd64) for Windows Azure. Daily builds are up-to-date builds of the regular release images for Ubuntu Server. While every effort is made to make sure that these are production quality, these images come with no warranty. In the event of a support issue, you may be asked to update to an official released build. 
For more information see [Ubuntu Cloud|http://www.ubuntu.com/cloud|_blank] and [using Juju to deploy your workloads|http://juju.ubuntu.com|_blank]. Ubuntu Server 14.04 LTS DAILYfalse2014-05-24T00:00:00ZfalseUbuntu-cof-100.pnghttp://www.ubuntu.com/aboutus/privacypolicyCanonicalUbuntu-cof-45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US30b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu_DAILY_BUILD-utopic-14_10-amd64-server-20140521-en-us-30GBLinuxhttp://www.ubuntu.com/project/about-ubuntu/licensing;http://www.ubuntu.com/aboutus/privacypolicyDAILY BUILD 20140521 of Ubuntu Server 14.10 DAILY (amd64) for Windows Azure. Daily builds are up-to-date builds of the regular release images for Ubuntu Server. While every effort is made to make sure that these are production quality, these images come with no warranty. In the event of a support issue, you may be asked to update to an official released build. For more information see [Ubuntu Cloud|http://www.ubuntu.com/cloud|_blank] and [using Juju to deploy your workloads|http://juju.ubuntu.com|_blank]. Ubuntu Server 14.10 DAILYfalse2014-05-21T00:00:00ZfalseUbuntu-cof-100.pnghttp://www.ubuntu.com/aboutus/privacypolicyCanonicalUbuntu-cof-45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US30b4590d9e3ed742e4a1d46e5424aa335e__SUSE-Linux-Enterprise-Server-11-SP3-Prio-v202Linuxhttp://www.novell.com/licensing/eula/SUSE Linux Enterprise Server Premium Image with PRIORITY Support: SUSE Linux Enterprise Server is a highly reliable, scalable and secure server operating system, built to power physical, virtual and cloud-based mission-critical workloads. With this affordable, interoperable and manageable open source foundation, enterprises can cost-effectively deliver core business services, enable secure networks and easily manage their heterogeneous IT resources, maximizing efficiency and value.  
Customization of images can be done at [http://susestudio.com|http://susestudio.com]. The Premium Image with PRIORITY support includes updates, patches, and support through 24x7 web, email, chat and phone from SUSE. VMs created from this image incur per-hour support fees, in addition to Azure platform fees. An Azure support plan is required (developer or above). Support incidents are initiated through Azure.SUSE Linux Enterprise Server 11 SP3 (Premium Image)2014-05-08T00:00:00ZtrueSuse11_100.pngSUSEhttp://go.microsoft.com/fwlink/?LinkId=299677Suse11_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US30b4590d9e3ed742e4a1d46e5424aa335e__SUSE-Linux-Enterprise-Server-11-SP3-SAP-CAL-v101Linuxhttp://www.novell.com/licensing/eula/The SUSE Linux Enterprise Server 11 SP3 for SAP Cloud Appliance Library image is the base image for SAP's Cloud Appliance Library. It will be used automatically when deploying an SAP Cloud Appliance Library instance and has the same features as the native SLES image, however customized to fit SAP's CAL needs. For further description and usage guide lines please refer to the description of the SUSE Linux Enterprise image.  Customization of images can be done at [http://susestudio.com|http://susestudio.com].SUSE Linux Enterprise Server 11 SP3 for SAP Cloud Appliance Library2014-05-14T00:00:00ZfalseSuse11_100.pngSUSESuse11_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US30b4590d9e3ed742e4a1d46e5424aa335e__SUSE-Linux-Enterprise-Server-11-SP3-v202Linuxhttp://www.novell.com/licensing/eula/SUSE Linux Enterprise Server is a highly reliable, scalable and secure server operating system, built to power physical, virtual and cloud-based mission-critical workloads. 
With this affordable, interoperable and manageable open source foundation, enterprises can cost-effectively deliver core business services, enable secure networks and easily manage their heterogeneous IT resources, maximizing efficiency and value.   Customization of images can be done at [http://susestudio.com|http://susestudio.com].SUSE Linux Enterprise Server 11 SP32014-01-16T00:00:00ZfalseSuse11_100.pngSUSESuse11_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US30b4590d9e3ed742e4a1d46e5424aa335e__openSUSE-13.1-v101LinuxopenSUSE 13.1 brings updated desktop environments and software, lot of polishing, a brand new KDE theme, complete systemd integration and many other features. Customization of these images can be done at http://susestudio.comopenSUSE 13.12014-04-15T00:00:00ZfalseOpenSuse12_100.pngSUSEOpenSuse12_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;West US30bd507d3a70934695bc2128e3e5a255ba__RightImage-Windows-2008R2-SP1-x64-iis75-v5.8.8Windowshttp://support.rightscale.com/12-Guides/RightLink/RightLink_End_User_License_AgreeementWindows 2012 with IIS 7.5 and RightLink 5.8.RightScale Windows v13false2012-08-28T00:00:00Zfalsehttp://www.rightscale.com/privacy_policy.phpRightScale with Windows ServerPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;West US30bd507d3a70934695bc2128e3e5a255ba__RightImage-Windows-2008R2-SP1-x64-iis75-v5.8.8.11Windowshttp://support.rightscale.com/12-Guides/RightLink/RightLink_End_User_License_AgreeementWindows 2012 with IIS 7.5 and RightLink 5.8.RightScale Windows v13false2012-12-07T00:00:00Zfalsehttp://www.rightscale.com/privacy_policy.phpRightScale with Windows ServerPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;West US30bd507d3a70934695bc2128e3e5a255ba__RightImage-Windows-2008R2-SP1-x64-sqlsvr2012-v5.8.8Windowshttp://support.rightscale.com/12-Guides/RightLink/RightLink_End_User_License_AgreeementWindows 2012 with MS SQL Server 
2012 Standard and RightLink 5.8.RightScale Windows v13false2012-08-28T00:00:00Zfalsehttp://www.rightscale.com/privacy_policy.phpRightScale with Windows ServerPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;West US30bd507d3a70934695bc2128e3e5a255ba__RightImage-Windows-2008R2-SP1-x64-sqlsvr2012-v5.8.8.1Windowshttp://support.rightscale.com/12-Guides/RightLink/RightLink_End_User_License_AgreeementWindows 2012 with MS SQL Server 2012 Standard and RightLink 5.8.RightScale Windows v13false2012-08-28T00:00:00Zfalsehttp://www.rightscale.com/privacy_policy.phpRightScale with Windows ServerPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;West US30bd507d3a70934695bc2128e3e5a255ba__RightImage-Windows-2008R2-SP1-x64-sqlsvr2012-v5.8.8.12Windowshttp://support.rightscale.com/12-Guides/RightLink/RightLink_End_User_License_AgreeementWindows 2012 with MS SQL Server 2012 Standard and RightLink 5.8.RightScale Windows v13false2012-12-12T00:00:00Zfalsehttp://www.rightscale.com/privacy_policy.phpRightScale with Windows ServerPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;West US30bd507d3a70934695bc2128e3e5a255ba__RightImage-Windows-2008R2-SP1-x64-sqlsvr2012-v5.8.8.15Windowshttp://support.rightscale.com/12-Guides/RightLink/RightLink_End_User_License_AgreeementWindows 2012 with MS SQL Server 2012 Standard and RightLink 5.8.RightScale Windows v13false2013-01-05T00:00:00Zfalsehttp://www.rightscale.com/privacy_policy.phpRightScale with Windows ServerPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;West US30bd507d3a70934695bc2128e3e5a255ba__RightImage-Windows-2008R2-SP1-x64-v5.8.8Windowshttp://support.rightscale.com/12-Guides/RightLink/RightLink_End_User_License_AgreeementWindows 2012 with RightLink 5.8.RightScale Windows v13false2012-08-28T00:00:00Zfalsehttp://www.rightscale.com/privacy_policy.phpRightScale with Windows ServerPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;West 
US30bd507d3a70934695bc2128e3e5a255ba__RightImage-Windows-2008R2-SP1-x64-v5.8.8.11Windowshttp://support.rightscale.com/12-Guides/RightLink/RightLink_End_User_License_AgreeementWindows 2012 with RightLink 5.8.RightScale Windows v13false2012-12-07T00:00:00Zfalsehttp://www.rightscale.com/privacy_policy.phpRightScale with Windows ServerPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;West US31bd507d3a70934695bc2128e3e5a255ba__RightImage-Windows-2008R2-x64-iis7.5-v13.4.12.2Windowshttp://support.rightscale.com/12-Guides/RightLink/RightLink_End_User_License_AgreeementWindows 2008R2 with IIS 7.5 and RightLink 5.8RightScale Windows v13false2013-05-30T00:00:00Zfalsehttp://www.rightscale.com/privacy_policy.phpRightScale with Windows ServerPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;West US128bd507d3a70934695bc2128e3e5a255ba__RightImage-Windows-2008R2-x64-iis7.5-v13.5Windowshttp://support.rightscale.com/12-Guides/RightLink/RightLink_End_User_License_AgreeementWindows 2008R2 with IIS 7.5 and RightLink v5.8RightScale Windows v13false2013-08-16T00:00:00Zfalsehttp://www.rightscale.com/privacy_policy.phpRightScale with Windows ServerPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US128bd507d3a70934695bc2128e3e5a255ba__RightImage-Windows-2008R2-x64-iis7.5-v14Windowshttp://support.rightscale.com/12-Guides/RightLink/RightLink_End_User_License_AgreeementWindows 2008R2 with RightLink 5.9RightScale Windows v14false2014-03-24T00:00:00Zfalsehttp://www.rightscale.com/privacy_policy.phpRightScale with Windows ServerPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;West US127bd507d3a70934695bc2128e3e5a255ba__RightImage-Windows-2008R2-x64-sqlsvr2012-v13.4.3.1Windowshttp://support.rightscale.com/12-Guides/RightLink/RightLink_End_User_License_AgreeementWindows 2008R2 with MS SQL Server 2012 Standard and RightLink v5.8RightScale Windows 
v13false2013-06-06T00:00:00Ztruehttp://www.rightscale.com/privacy_policy.phpRightScale with Windows Serverhttp://go.microsoft.com/fwlink/?LinkId=271257PublicEast Asia;Southeast Asia;North Europe;West Europe;East US;West US127bd507d3a70934695bc2128e3e5a255ba__RightImage-Windows-2008R2-x64-sqlsvr2012-v13.5.1Windowshttp://support.rightscale.com/12-Guides/RightLink/RightLink_End_User_License_AgreeementWindows 2008R2 with MS SQL Server 2012 Standard and RightLink v5.8RightScale Windows v13false2013-08-19T00:00:00Ztruehttp://www.rightscale.com/privacy_policy.phpRightScale with Windows Serverhttp://go.microsoft.com/fwlink/?LinkId=271257PublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US127bd507d3a70934695bc2128e3e5a255ba__RightImage-Windows-2008R2-x64-sqlsvr2012-v14Windowshttp://support.rightscale.com/12-Guides/RightLink/RightLink_End_User_License_AgreeementWindows 2008R2 with RightLink 5.9RightScale Windows v14false2014-03-24T00:00:00Ztruehttp://www.rightscale.com/privacy_policy.phpRightScale with Windows Serverhttp://go.microsoft.com/fwlink/?LinkId=271257PublicEast Asia;Southeast Asia;North Europe;West Europe;East US;West US127bd507d3a70934695bc2128e3e5a255ba__RightImage-Windows-2008R2-x64-sqlsvr2012ent-v13.4.3.1Windowshttp://support.rightscale.com/12-Guides/RightLink/RightLink_End_User_License_AgreeementWindows 2008R2 with MS SQL Server 2012 Enterprise and RightLink v5.8RightScale Windows v13false2013-06-06T00:00:00Ztruehttp://www.rightscale.com/privacy_policy.phpRightScale with Windows Serverhttp://go.microsoft.com/fwlink/?LinkId=271259PublicEast Asia;Southeast Asia;North Europe;West Europe;East US;West US127bd507d3a70934695bc2128e3e5a255ba__RightImage-Windows-2008R2-x64-sqlsvr2012ent-v13.5Windowshttp://support.rightscale.com/12-Guides/RightLink/RightLink_End_User_License_AgreeementWindows 2008R2 with MS SQL Server 2012 Enterprise and RightLink v5.8RightScale Windows 
v13false2013-08-16T00:00:00Ztruehttp://www.rightscale.com/privacy_policy.phpRightScale with Windows Serverhttp://go.microsoft.com/fwlink/?LinkId=271259PublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US127bd507d3a70934695bc2128e3e5a255ba__RightImage-Windows-2008R2-x64-sqlsvr2012ent-v14Windowshttp://support.rightscale.com/12-Guides/RightLink/RightLink_End_User_License_AgreeementWindows 2008R2 with RightLink 5.9RightScale Windows v14false2014-03-24T00:00:00Ztruehttp://www.rightscale.com/privacy_policy.phpRightScale with Windows Serverhttp://go.microsoft.com/fwlink/?LinkId=271259PublicEast Asia;Southeast Asia;North Europe;West Europe;East US;West US127bd507d3a70934695bc2128e3e5a255ba__RightImage-Windows-2008R2-x64-sqlsvr2k8r2-v13.5Windowshttp://support.rightscale.com/12-Guides/RightLink/RightLink_End_User_License_AgreeementWindows 2008R2 with MS SQL Server 2008R2 and RightLink v5.8RightScale Windows v13false2013-08-16T00:00:00Ztruehttp://www.rightscale.com/privacy_policy.phpRightScale with Windows Serverhttp://go.microsoft.com/fwlink/?LinkId=271257PublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US127bd507d3a70934695bc2128e3e5a255ba__RightImage-Windows-2008R2-x64-sqlsvr2k8r2-v14Windowshttp://support.rightscale.com/12-Guides/RightLink/RightLink_End_User_License_AgreeementWindows 2008R2 with RightLink 5.9RightScale Windows v14false2014-03-24T00:00:00Ztruehttp://www.rightscale.com/privacy_policy.phpRightScale with Windows Serverhttp://go.microsoft.com/fwlink/?LinkId=271257PublicEast Asia;Southeast Asia;North Europe;West Europe;East US;West US127bd507d3a70934695bc2128e3e5a255ba__RightImage-Windows-2008R2-x64-sqlsvr2k8r2ent-v13.5Windowshttp://support.rightscale.com/12-Guides/RightLink/RightLink_End_User_License_AgreeementWindows 2008R2 with MS SQL Server 2008R2 Enterprise and RightLink v5.8RightScale Windows v13false2013-08-15T00:00:00Ztruehttp://www.rightscale.com/privacy_policy.phpRightScale with 
Windows Serverhttp://go.microsoft.com/fwlink/?LinkId=271259PublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US127bd507d3a70934695bc2128e3e5a255ba__RightImage-Windows-2008R2-x64-sqlsvr2k8r2ent-v14Windowshttp://support.rightscale.com/12-Guides/RightLink/RightLink_End_User_License_AgreeementWindows 2008R2 with RightLink 5.9RightScale Windows v14false2014-03-24T00:00:00Ztruehttp://www.rightscale.com/privacy_policy.phpRightScale with Windows Serverhttp://go.microsoft.com/fwlink/?LinkId=271259PublicEast Asia;Southeast Asia;North Europe;West Europe;East US;West US31bd507d3a70934695bc2128e3e5a255ba__RightImage-Windows-2008R2-x64-v13.4.12.2Windowshttp://support.rightscale.com/12-Guides/RightLink/RightLink_End_User_License_AgreeementWindows 2008R2 with RightLink 5.8RightScale Windows v13false2013-05-30T00:00:00Zfalsehttp://www.rightscale.com/privacy_policy.phpRightScale with Windows ServerPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;West US128bd507d3a70934695bc2128e3e5a255ba__RightImage-Windows-2008R2-x64-v13.5Windowshttp://support.rightscale.com/12-Guides/RightLink/RightLink_End_User_License_AgreeementWindows 2008R2 with RightLink v5.8RightScale Windows v13false2013-08-15T00:00:00Zfalsehttp://www.rightscale.com/privacy_policy.phpRightScale with Windows ServerPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US128bd507d3a70934695bc2128e3e5a255ba__RightImage-Windows-2008R2-x64-v14Windowshttp://support.rightscale.com/12-Guides/RightLink/RightLink_End_User_License_AgreeementWindows 2008R2 with RightLink 5.9RightScale Windows v14false2014-03-24T00:00:00Zfalsehttp://www.rightscale.com/privacy_policy.phpRightScale with Windows ServerPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;West US31bd507d3a70934695bc2128e3e5a255ba__RightImage-Windows-2012-x64-iis8-v13.4.12.2Windowshttp://support.rightscale.com/12-Guides/RightLink/RightLink_End_User_License_AgreeementWindows 2012 with 
IIS 8 and RightLink 5.8RightScale Windows v13false2013-05-30T00:00:00Zfalsehttp://www.rightscale.com/privacy_policy.phpRightScale with Windows ServerPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;West US128bd507d3a70934695bc2128e3e5a255ba__RightImage-Windows-2012-x64-iis8-v13.5Windowshttp://support.rightscale.com/12-Guides/RightLink/RightLink_End_User_License_AgreeementWindows 2012 with IIS 8 and RightLink v5.8RightScale Windows v13false2013-08-15T00:00:00Zfalsehttp://www.rightscale.com/privacy_policy.phpRightScale with Windows ServerPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US128bd507d3a70934695bc2128e3e5a255ba__RightImage-Windows-2012-x64-iis8-v14Windowshttp://support.rightscale.com/12-Guides/RightLink/RightLink_End_User_License_AgreeementWindows 2012 with RightLink 5.9RightScale Windows v14false2014-03-24T00:00:00Zfalsehttp://www.rightscale.com/privacy_policy.phpRightScale with Windows ServerPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;West US30bd507d3a70934695bc2128e3e5a255ba__RightImage-Windows-2012-x64-iis8-v5.8.8.12Windowshttp://support.rightscale.com/12-Guides/RightLink/RightLink_End_User_License_AgreeementWindows 2012 with IIS 8 and RightLink 5.8.RightScale Windows v13false2012-12-12T00:00:00Zfalsehttp://www.rightscale.com/privacy_policy.phpRightScale with Windows ServerPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;West US127bd507d3a70934695bc2128e3e5a255ba__RightImage-Windows-2012-x64-sqlsvr2012-v13.5Windowshttp://support.rightscale.com/12-Guides/RightLink/RightLink_End_User_License_AgreeementWindows 2012 with MS SQL Server 2012 Standard and RightLink v5.8RightScale Windows v13false2013-08-15T00:00:00Ztruehttp://www.rightscale.com/privacy_policy.phpRightScale with Windows Serverhttp://go.microsoft.com/fwlink/?LinkId=271257PublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West 
US127bd507d3a70934695bc2128e3e5a255ba__RightImage-Windows-2012-x64-sqlsvr2012-v14Windowshttp://support.rightscale.com/12-Guides/RightLink/RightLink_End_User_License_AgreeementWindows 2012 with RightLink 5.9RightScale Windows v14false2014-03-24T00:00:00Ztruehttp://www.rightscale.com/privacy_policy.phpRightScale with Windows Serverhttp://go.microsoft.com/fwlink/?LinkId=271257PublicEast Asia;Southeast Asia;North Europe;West Europe;East US;West US127bd507d3a70934695bc2128e3e5a255ba__RightImage-Windows-2012-x64-sqlsvr2012ent-v13.5Windowshttp://support.rightscale.com/12-Guides/RightLink/RightLink_End_User_License_AgreeementWindows 2012 with MS SQL Server 2012 Enterprise and RightLink v5.8RightScale Windows v13false2013-08-15T00:00:00Ztruehttp://www.rightscale.com/privacy_policy.phpRightScale with Windows Serverhttp://go.microsoft.com/fwlink/?LinkId=271259PublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US127bd507d3a70934695bc2128e3e5a255ba__RightImage-Windows-2012-x64-sqlsvr2012ent-v14Windowshttp://support.rightscale.com/12-Guides/RightLink/RightLink_End_User_License_AgreeementWindows 2012 with RightLink 5.9RightScale Windows v14false2014-03-24T00:00:00Ztruehttp://www.rightscale.com/privacy_policy.phpRightScale with Windows Serverhttp://go.microsoft.com/fwlink/?LinkId=271259PublicEast Asia;Southeast Asia;North Europe;West Europe;East US;West US31bd507d3a70934695bc2128e3e5a255ba__RightImage-Windows-2012-x64-v13.4.12.2Windowshttp://support.rightscale.com/12-Guides/RightLink/RightLink_End_User_License_AgreeementWindows 2012 with RightLink 5.8RightScale Windows v13false2013-05-30T00:00:00Zfalsehttp://www.rightscale.com/privacy_policy.phpRightScale with Windows ServerPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;West US128bd507d3a70934695bc2128e3e5a255ba__RightImage-Windows-2012-x64-v13.5Windowshttp://support.rightscale.com/12-Guides/RightLink/RightLink_End_User_License_AgreeementWindows 2012 with RightLink v5.8RightScale 
Windows v13false2013-08-15T00:00:00Zfalsehttp://www.rightscale.com/privacy_policy.phpRightScale with Windows ServerPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US128bd507d3a70934695bc2128e3e5a255ba__RightImage-Windows-2012-x64-v14Windowshttp://support.rightscale.com/12-Guides/RightLink/RightLink_End_User_License_AgreeementWindows 2012 with RightLink 5.9RightScale Windows v14false2014-03-24T00:00:00Zfalsehttp://www.rightscale.com/privacy_policy.phpRightScale with Windows ServerPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;West US30bd507d3a70934695bc2128e3e5a255ba__RightImage-Windows-2012-x64-v5.8.8.12Windowshttp://support.rightscale.com/12-Guides/RightLink/RightLink_End_User_License_AgreeementWindows 2012 with RightLink 5.8.RightScale Windows v13false2012-12-12T00:00:00Zfalsehttp://www.rightscale.com/privacy_policy.phpRightScale with Windows ServerPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US40c290a6b031d841e09f2da759bbabe71f__Oracle-Database-121010.v1-EE-LnxLinuxhttp://www.oracle.com/technetwork/licenses/oracle-license-2016066.htmlOracle Database 12c Enterprise Edition is a next-generation database designed for the cloud, providing a new multitenant architecture on top of a fast, scalable, reliable, and secure database platform. [Learn more|http://www.oracle.com/database|_blank]Oracle DatabasefalseOracleDatabase12_100.pnghttp://www.oracle.com/us/legal/privacy/privacy-policy-078584.htmlOracleOracleDatabase12_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US40c290a6b031d841e09f2da759bbabe71f__Oracle-Database-121010.v3-SE-LnxLinuxhttp://www.oracle.com/technetwork/licenses/oracle-license-2016066.htmlOracle Database 12c Standard Edition is an affordable, full-featured data management solution that is ideal for midsize companies. 
[Learn more|http://www.oracle.com/database|_blank]Oracle Database StandardfalseOracleDatabase12_100.pnghttp://www.oracle.com/us/legal/privacy/privacy-policy-078584.htmlOracleOracleDatabase12_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US10c290a6b031d841e09f2da759bbabe71f__Oracle-Linux-6Linuxhttp://www.oracle.com/technetwork/licenses/oracle-license-2016066.htmlOracle Linux 6 brings the latest Linux innovations to market, delivering extreme performance, advanced scalability, and reliability for enterprise applications and systems. [Learn more|http://www.oracle.com/linux|_blank]Oracle LinuxfalseOracleLinux6_100.pnghttp://www.oracle.com/us/legal/privacy/privacy-policy-078584.htmlOracleOracleLinux6_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US30c290a6b031d841e09f2da759bbabe71f__WebLogic-Server-12c.v1-LnxLinuxhttp://www.oracle.com/technetwork/licenses/oracle-license-2016066.htmlOracle WebLogic Server 12c Enterprise Edition is a leading Java EE application server, delivering next-generation applications on a mission-critical cloud platform, with native cloud management, and integrated tools. [Learn more|http://www.oracle.com/weblogicserver|_blank]Oracle WeblogicfalseOracleWeblogic12_100.pnghttp://www.oracle.com/us/legal/privacy/privacy-policy-078584.htmlOracleOracleWeblogic12_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US128c6e0f177abd8496e934234bd27f46c5d__SharePoint-2013-Trial-1-7-2014Windowshttp://www.microsoft.com/en-us/download/details.aspx?id=38417Microsoft SharePoint Server 2013 Trial on Windows Server 2012 Datacenter. Virtual Machines created with this trial image will expire on July 6, 2014. This image includes a complete installation of SharePoint Server 2013. Some SharePoint Server 2013 components require additional setup and configuration. 
You can set-up Active Directory and SQL Server required for your SharePoint farm by provisioning additional virtual machines. Minimum recommended virtual machine size for this image is Large. To evaluate the advanced capabilities of SharePoint Server 2013, we recommend that you use a virtual machine size of Extra Large.SharePoint Server 2013 Trial2014-01-07T00:00:00ZfalseSharePoint2013_100.pnghttp://www.windowsazure.com/en-us/support/legal/privacy-statement/ExtraLargeMicrosoft SharePoint GroupSharePoint2013_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;West US30de89c2ed05c748f5aded3ddc75fdcce4__PuppetEnterpriseMaster-3_2_1-amd64-server-20140408-en-us-30GBLinuxhttp://puppetlabs.com/solutions/microsoft#EulaThis image includes a pre-configured Puppet Master with the Ubuntu Linux distribution for easy deployment of Puppet Enterprise. The evaluation will end on May 31, 2014 per Puppet Labs' End User License Agreement (EULA). To set up a Puppet Enterprise environment, please refer to the Getting Started Guide for Deploying a Puppet Master with Windows Azure at http://puppetlabs.com/solutions/microsoftPuppet Enterprise 3.22014-04-10T17:33:44.796315ZfalsePuppetLabs_100x100.pnghttp://puppetlabs.com/solutions/microsoftMediumPuppet LabsPuppetLabs_45x45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US30de89c2ed05c748f5aded3ddc75fdcce4__PuppetEnterpriseMaster-3_2_2-amd64-server-20140408-en-us-30GBLinuxhttp://puppetlabs.com/solutions/microsoft#EulaThis image includes a pre-configured Puppet Master with the Ubuntu Linux distribution for easy deployment of Puppet Enterprise. The evaluation will end on May 31, 2014 per Puppet Labs' End User License Agreement (EULA). 
To set up a Puppet Enterprise environment, please refer to the Getting Started Guide for Deploying a Puppet Master with Windows Azure at http://puppetlabs.com/solutions/microsoftPuppet Enterprise 3.22014-04-16T20:41:05.3700261ZfalsePuppetLabs_100x100.pnghttp://puppetlabs.com/solutions/microsoftMediumPuppet LabsPuppetLabs_45x45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US127fb83b3509582419d99629ce476bcb5c8__Microsoft-SQL-Server-2008R2SP2-Enterprise-CY13SU04-SQL2008-SP2-10.50.4021.0Windowshttp://go.microsoft.com/fwlink/?LinkID=285681This image contains the full version of SQL Server. Some SQL Server components require additional setup and configuration before use. We recommend that you use a virtual machine size of large or higher. This image has been pre-configured for Windows Azure, including enabling CEIP which can be disabled, for more info see http://msdn.microsoft.com/en-us/library/windowsazure/dn133151.aspx#database.SQL Server 2008 R2 SP2 Enterprise on Windows Server 2008 R22013-04-16T00:00:00ZtrueSqlServer2008R2_100.pnghttp://msdn.microsoft.com/library/ms143384(v=sql.105).aspxLargeMicrosoft SQL Server Grouphttp://go.microsoft.com/fwlink/?LinkId=271259SqlServer2008R2_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US127fb83b3509582419d99629ce476bcb5c8__Microsoft-SQL-Server-2008R2SP2-Standard-CY13SU04-SQL2008-SP2-10.50.4021.0Windowshttp://go.microsoft.com/fwlink/?LinkID=285685This image contains the full version of SQL Server. Some SQL Server components require additional setup and configuration before use. We recommend that you use a virtual machine size of medium or higher. 
This image has been pre-configured for Windows Azure, including enabling CEIP which can be disabled, for more info see http://msdn.microsoft.com/en-us/library/windowsazure/dn133151.aspx#database.SQL Server 2008 R2 SP2 Standard on Windows Server 2008 R22013-04-16T00:00:00ZtrueSqlServer2008R2_100.pnghttp://msdn.microsoft.com/library/ms143384(v=sql.105).aspxMediumMicrosoft SQL Server Grouphttp://go.microsoft.com/fwlink/?LinkId=271257SqlServer2008R2_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US127fb83b3509582419d99629ce476bcb5c8__Microsoft-SQL-Server-2012SP1-CU4-11.0.3368.0-Enterprise-ENU-Win2012Windowshttp://go.microsoft.com/fwlink/?LinkID=285687This image contains the full version of SQL Server. Some SQL Server components require additional setup and configuration before use. We recommend that you use a virtual machine size of large or higher. This image has been pre-configured for Windows Azure, including enabling CEIP which can be disabled, for more info see http://msdn.microsoft.com/en-us/library/windowsazure/dn133151.aspx#database.SQL Server 2012 SP1 Enterprise on Windows Server 20122013-06-26T00:00:00ZtrueSqlServer2012_100.pnghttp://www.microsoft.com/en-us/download/details.aspx?id=29067LargeMicrosoft SQL Server Grouphttp://go.microsoft.com/fwlink/?LinkId=271259SqlServer2012_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US127fb83b3509582419d99629ce476bcb5c8__Microsoft-SQL-Server-2012SP1-CU4-11.0.3368.0-Enterprise-ENU-Win2K8R2Windowshttp://go.microsoft.com/fwlink/?LinkID=285687This image contains the full version of SQL Server. Some SQL Server components require additional setup and configuration before use. We recommend that you use a virtual machine size of large or higher. 
This image has been pre-configured for Windows Azure, including enabling CEIP which can be disabled, for more info see http://msdn.microsoft.com/en-us/library/windowsazure/dn133151.aspx#database.SQL Server 2012 SP1 Enterprise on Windows Server 2008 R22013-07-10T00:00:00ZtrueSqlServer2012_100.pnghttp://www.microsoft.com/en-us/download/details.aspx?id=29067LargeMicrosoft SQL Server Grouphttp://go.microsoft.com/fwlink/?LinkId=271259SqlServer2012_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US127fb83b3509582419d99629ce476bcb5c8__Microsoft-SQL-Server-2012SP1-CU4-11.0.3368.0-Standard-ENU-Win2012Windowshttp://go.microsoft.com/fwlink/?LinkID=285691Some SQL Server components require additional setup and configuration before use. We recommend that you use a virtual machine size of medium or higher. This image has been pre-configured for Windows Azure, including enabling CEIP which can be disabled, for more info see http://msdn.microsoft.com/en-us/library/windowsazure/dn133151.aspx#database.SQL Server 2012 SP1 Standard on Windows Server 20122013-07-15T00:00:00ZtrueSqlServer2012_100.pnghttp://www.microsoft.com/en-us/download/details.aspx?id=29067MediumMicrosoft SQL Server Grouphttp://go.microsoft.com/fwlink/?LinkId=271257SqlServer2012_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US127fb83b3509582419d99629ce476bcb5c8__Microsoft-SQL-Server-2012SP1-Enterprise-CY13SU04-SQL11-SP1-CU3-11.0.3350.0-BWindowshttp://go.microsoft.com/fwlink/?LinkID=285687This image contains the full version of SQL Server. Some SQL Server 2012 components require additional setup and configuration before use. We recommend that you use a virtual machine size of large or higher. 
This image has been pre-configured for Windows Azure, including enabling CEIP which can be disabled, for more info see http://msdn.microsoft.com/en-us/library/windowsazure/dn133151.aspx#database.SQL Server 2012 SP1 Enterprise on Windows Server 2008 R22013-04-16T00:00:00ZtrueSqlServer2012_100.pnghttp://www.microsoft.com/en-us/download/details.aspx?id=29067LargeMicrosoft SQL Server Grouphttp://go.microsoft.com/fwlink/?LinkId=271259SqlServer2012_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US127fb83b3509582419d99629ce476bcb5c8__Microsoft-SQL-Server-2012SP1-Enterprise-CY13SU04-SQL2012-SP1-11.0.3350.0-Win2012Windowshttp://go.microsoft.com/fwlink/?LinkID=285687This image contains the full version of SQL Server. Some SQL Server components require additional setup and configuration before use. We recommend that you use a virtual machine size of large or higher. This image has been pre-configured for Windows Azure, including enabling CEIP which can be disabled, for more info see http://msdn.microsoft.com/en-us/library/windowsazure/dn133151.aspx#database.SQL Server 2012 SP1 Enterprise on Windows Server 20122013-04-16T00:00:00ZtrueSqlServer2012_100.pnghttp://www.microsoft.com/en-us/download/details.aspx?id=29067LargeMicrosoft SQL Server Grouphttp://go.microsoft.com/fwlink/?LinkId=271259SqlServer2012_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US127fb83b3509582419d99629ce476bcb5c8__Microsoft-SQL-Server-2012SP1-Standard-CY13SU04-SQL11-SP1-CU3-11.0.3350.0-BWindowshttp://go.microsoft.com/fwlink/?LinkID=285691Some SQL Server 2012 components require additional setup and configuration before use. We recommend that you use a virtual machine size of medium or higher. 
This image has been pre-configured for Windows Azure, including enabling CEIP which can be disabled, for more info see http://msdn.microsoft.com/en-us/library/windowsazure/dn133151.aspx#database.SQL Server 2012 SP1 Standard on Windows Server 2008 R22013-04-16T00:00:00ZtrueSqlServer2012_100.pnghttp://www.microsoft.com/en-us/download/details.aspx?id=29067MediumMicrosoft SQL Server Grouphttp://go.microsoft.com/fwlink/?LinkId=271257SqlServer2012_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US127fb83b3509582419d99629ce476bcb5c8__Microsoft-SQL-Server-2012SP1-Standard-CY13SU04-SQL2012-SP1-11.0.3350.0-Win2012Windowshttp://go.microsoft.com/fwlink/?LinkID=285685Some SQL Server 2012 components require additional setup and configuration before use. We recommend that you use a virtual machine size of medium or higher. This image has been pre-configured for Windows Azure, including enabling CEIP which can be disabled, for more info see http://msdn.microsoft.com/en-us/library/windowsazure/dn133151.aspx#database.SQL Server 2012 SP1 Standard on Windows Server 20122013-04-16T00:00:00ZtrueSqlServer2012_100.pnghttp://www.microsoft.com/en-us/download/details.aspx?id=29067MediumMicrosoft SQL Server Grouphttp://go.microsoft.com/fwlink/?LinkId=271257SqlServer2012_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US127fb83b3509582419d99629ce476bcb5c8__Microsoft-SQL-Server-2012SP1-Web-CY13SU04-SQL11-SP1-CU3-11.0.3350.0Windowshttp://go.microsoft.com/fwlink/?LinkID=286424Some SQL Server 2012 components require additional setup and configuration before use. We recommend that you use a virtual machine size of medium or higher. 
This image has been pre-configured for Windows Azure, including enabling CEIP which can be disabled, for more info see http://msdn.microsoft.com/en-us/library/windowsazure/dn133151.aspx#database.SQL Server 2012 SP1 Web on Windows Server 2008 R22013-04-16T00:00:00ZtrueSqlServer2012_100.pnghttp://www.microsoft.com/en-us/download/details.aspx?id=29067MediumMicrosoft SQL Server Grouphttp://go.microsoft.com/fwlink/?LinkId=271258SqlServer2012_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US127fb83b3509582419d99629ce476bcb5c8__SQL-Server-2008R2SP2-GDR-10.50.4021.0-Enterprise-ENU-Win2K8R2-CY13SU10Windowshttp://go.microsoft.com/fwlink/?LinkID=285681This image contains the full version of SQL Server. Some SQL Server components require additional setup and configuration before use. We recommend that you use a virtual machine size of large or higher. This image has been pre-configured for Windows Azure, including enabling CEIP which can be disabled, for more info see http://msdn.microsoft.com/en-us/library/windowsazure/dn133151.aspx#database.SQL Server 2008 R2 SP2 Enterprise on Windows Server 2008 R22013-10-22T00:00:00ZtrueSqlServer2008R2_100.pnghttp://msdn.microsoft.com/library/ms143384(v=sql.105).aspxLargeMicrosoft SQL Server Grouphttp://go.microsoft.com/fwlink/?LinkId=271259SqlServer2008R2_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US127fb83b3509582419d99629ce476bcb5c8__SQL-Server-2008R2SP2-GDR-10.50.4021.0-Enterprise-ENU-Win2K8R2-CY13SU12Windowshttp://go.microsoft.com/fwlink/?LinkID=285681We recommend that you use a virtual machine size of large or higher. 
This image has been pre-configured for Windows Azure, including enabling CEIP which can be disabled, for more info see [here|http://msdn.microsoft.com/en-us/library/windowsazure/dn133151.aspx#database|_blank].SQL Server 2008 R2 SP2 Enterprise on Windows Server 2008 R22013-12-23T00:00:00ZtrueSqlServer2008R2_100.pnghttp://msdn.microsoft.com/library/ms143384(v=sql.105).aspxLargeMicrosoft SQL Server Grouphttp://go.microsoft.com/fwlink/?LinkId=271259SqlServer2008R2_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US127fb83b3509582419d99629ce476bcb5c8__SQL-Server-2008R2SP2-GDR-10.50.4021.0-Enterprise-ENU-Win2K8R2-CY14SU02Windowshttp://go.microsoft.com/fwlink/?LinkID=285681We recommend that you use a virtual machine size of A3 or higher. This image has been pre-configured for Windows Azure, including enabling CEIP which can be disabled, for more info see [here|http://msdn.microsoft.com/en-us/library/windowsazure/dn133151.aspx#database|_blank].SQL Server 2008 R2 SP2 Enterprise on Windows Server 2008 R22014-02-21T00:00:00ZtrueSqlServer2008R2_100.pnghttp://msdn.microsoft.com/library/ms143384(v=sql.105).aspxA3Microsoft SQL Server Grouphttp://go.microsoft.com/fwlink/?LinkId=271259SqlServer2008R2_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US127fb83b3509582419d99629ce476bcb5c8__SQL-Server-2008R2SP2-GDR-10.50.4021.0-Standard-ENU-Win2K8R2-CY13SU10Windowshttp://go.microsoft.com/fwlink/?LinkID=285685Some SQL Server components require additional setup and configuration before use. We recommend that you use a virtual machine size of medium or higher. 
This image has been pre-configured for Windows Azure, including enabling CEIP which can be disabled, for more info see http://msdn.microsoft.com/en-us/library/windowsazure/dn133151.aspx#database.SQL Server 2008 R2 SP2 Standard on Windows Server 2008 R22013-10-22T00:00:00ZtrueSqlServer2008R2_100.pnghttp://msdn.microsoft.com/library/ms143384(v=sql.105).aspxMediumMicrosoft SQL Server Grouphttp://go.microsoft.com/fwlink/?LinkId=271257SqlServer2008R2_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US127fb83b3509582419d99629ce476bcb5c8__SQL-Server-2008R2SP2-GDR-10.50.4021.0-Standard-ENU-Win2K8R2-CY13SU12Windowshttp://go.microsoft.com/fwlink/?LinkID=285685We recommend that you use a virtual machine size of medium or higher. This image has been pre-configured for Windows Azure, including enabling CEIP which can be disabled, for more info see [here|http://msdn.microsoft.com/en-us/library/windowsazure/dn133151.aspx#database|_blank].SQL Server 2008 R2 SP2 Standard on Windows Server 2008 R22013-12-23T00:00:00ZtrueSqlServer2008R2_100.pnghttp://msdn.microsoft.com/library/ms143384(v=sql.105).aspxMediumMicrosoft SQL Server Grouphttp://go.microsoft.com/fwlink/?LinkId=271257SqlServer2008R2_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US127fb83b3509582419d99629ce476bcb5c8__SQL-Server-2008R2SP2-GDR-10.50.4021.0-Standard-ENU-Win2K8R2-CY14SU02Windowshttp://go.microsoft.com/fwlink/?LinkID=285685We recommend that you use a virtual machine size of A2 or higher. 
This image has been pre-configured for Windows Azure, including enabling CEIP which can be disabled, for more info see [here|http://msdn.microsoft.com/en-us/library/windowsazure/dn133151.aspx#database|_blank].SQL Server 2008 R2 SP2 Standard on Windows Server 2008 R22014-02-21T00:00:00ZtrueSqlserver2008R2_100.pnghttp://msdn.microsoft.com/library/ms143384(v=sql.105).aspxA2Microsoft SQL Server Grouphttp://go.microsoft.com/fwlink/?LinkId=271257SqlServer2008R2_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US127fb83b3509582419d99629ce476bcb5c8__SQL-Server-2008R2SP2-GDR-10.50.4021.0-Web-ENU-Win2K8R2Windowshttp://go.microsoft.com/fwlink/?LinkID=285686Some SQL Server components require additional setup and configuration before use. We recommend that you use a virtual machine size of medium or higher. This image has been pre-configured for Windows Azure, including enabling CEIP which can be disabled, for more info see http://msdn.microsoft.com/en-us/library/windowsazure/dn133151.aspx#database.SQL Server 2008 R2 SP2 Web on Windows Server 2008 R22013-07-29T00:00:00ZtrueSqlServer2008R2_100.pnghttp://msdn.microsoft.com/library/ms143384(v=sql.105).aspxMediumMicrosoft SQL Server Grouphttp://go.microsoft.com/fwlink/?LinkId=271258SqlServer2008R2_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US127fb83b3509582419d99629ce476bcb5c8__SQL-Server-2008R2SP2-GDR-10.50.4021.0-Web-ENU-Win2K8R2-CY13SU10Windowshttp://go.microsoft.com/fwlink/?LinkID=285686Some SQL Server components require additional setup and configuration before use. We recommend that you use a virtual machine size of medium or higher. 
This image has been pre-configured for Windows Azure, including enabling CEIP which can be disabled, for more info see http://msdn.microsoft.com/en-us/library/windowsazure/dn133151.aspx#database.SQL Server 2008 R2 SP2 Web on Windows Server 2008 R22013-10-22T00:00:00ZtrueSqlServer2008R2_100.pnghttp://msdn.microsoft.com/library/ms143384(v=sql.105).aspxMediumMicrosoft SQL Server Grouphttp://go.microsoft.com/fwlink/?LinkId=271258SqlServer2008R2_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US127fb83b3509582419d99629ce476bcb5c8__SQL-Server-2008R2SP2-GDR-10.50.4021.0-Web-ENU-Win2K8R2-CY13SU12Windowshttp://go.microsoft.com/fwlink/?LinkID=285686We recommend that you use a virtual machine size of medium or higher. This image has been pre-configured for Windows Azure, including enabling CEIP which can be disabled, for more info see [here|http://msdn.microsoft.com/en-us/library/windowsazure/dn133151.aspx#database|_blank].SQL Server 2008 R2 SP2 Web on Windows Server 2008 R22013-12-23T00:00:00ZtrueSqlServer2008R2_100.pnghttp://msdn.microsoft.com/library/ms143384(v=sql.105).aspxMediumMicrosoft SQL Server Grouphttp://go.microsoft.com/fwlink/?LinkId=271258SqlServer2008R2_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US127fb83b3509582419d99629ce476bcb5c8__SQL-Server-2008R2SP2-GDR-10.50.4021.0-Web-ENU-Win2K8R2-CY14SU02Windowshttp://go.microsoft.com/fwlink/?LinkID=285686We recommend that you use a virtual machine size of A2 or higher. 
This image has been pre-configured for Windows Azure, including enabling CEIP which can be disabled, for more info see [here|http://msdn.microsoft.com/en-us/library/windowsazure/dn133151.aspx#database|_blank].SQL Server 2008 R2 SP2 Web on Windows Server 2008 R22014-02-21T00:00:00ZtrueSqlServer2008R2_100.pnghttp://msdn.microsoft.com/library/ms143384(v=sql.105).aspxA2Microsoft SQL Server Grouphttp://go.microsoft.com/fwlink/?LinkId=271258SqlServer2008R2_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US127fb83b3509582419d99629ce476bcb5c8__SQL-Server-2012SP1-CU5-11.0.3373.0-DataWarehousing-ENU-WS2012Windowshttp://go.microsoft.com/fwlink/?LinkID=285687This Enterprise Edition image uses Fast Track optimization rated for 400 GB data warehousing workloads. After the portal deployment completes, you need to attach the disks. To do this, connect to the VM and then follow the on-screen instructions. Before using the VM, review the recommendations at http://go.microsoft.com/fwlink/p/?LinkId=320441. For best performance, we recommend the A6 VM size. This image has been pre-configured for Windows Azure, including enabling CEIP which can be disabled, for more info see http://msdn.microsoft.com/en-us/library/windowsazure/dn133151.aspx#database.SQL Server 2012 SP1 for Data Warehousing on Windows Server 20122013-09-30T00:00:00ZtrueSqlServer2012_100.pnghttp://www.microsoft.com/en-us/download/details.aspx?id=29067A6Microsoft SQL Server Grouphttp://go.microsoft.com/fwlink/?LinkId=271259SqlServer2012_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US127fb83b3509582419d99629ce476bcb5c8__SQL-Server-2012SP1-CU5-11.0.3373.0-DataWarehousing-ENU-WS2012-CY13SU12Windowshttp://go.microsoft.com/fwlink/?LinkID=285687This Enterprise Edition image uses Fast Track optimization rated for 400 GB data warehousing workloads. After the portal deployment completes, you need to attach the disks. 
To do this, connect to the VM and then follow the on-screen instructions. Before using the VM, review the recommendations [here|http://go.microsoft.com/fwlink/p/?LinkId=320441|_blank]. For best performance, we recommend the A6 VM size. This image has been pre-configured for Windows Azure, including enabling CEIP which can be disabled, for more info see [here|http://msdn.microsoft.com/en-us/library/windowsazure/dn133151.aspx#database|_blank].SQL Server 2012 SP1 for Data Warehousing on Windows Server 20122013-12-23T00:00:00ZtrueSqlServer2012_100.pnghttp://www.microsoft.com/en-us/download/details.aspx?id=29067A6Microsoft SQL Server Grouphttp://go.microsoft.com/fwlink/?LinkId=271259SqlServer2012_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US127fb83b3509582419d99629ce476bcb5c8__SQL-Server-2012SP1-CU5-11.0.3373.0-DataWarehousing-ENU-WS2012-CY14SU02Windowshttp://go.microsoft.com/fwlink/?LinkID=285687This Enterprise Edition image uses Fast Track optimization rated for 400 GB data warehousing workloads. After the portal deployment completes, you need to attach the disks. To do this, connect to the VM and then follow the on-screen instructions. Before using the VM, review the recommendations [here|http://go.microsoft.com/fwlink/p/?LinkId=320441|_blank]. For best performance, we recommend the A6 VM size. 
This image has been pre-configured for Windows Azure, including enabling CEIP which can be disabled, for more info see [here|http://msdn.microsoft.com/en-us/library/windowsazure/dn133151.aspx#database|_blank].SQL Server 2012 SP1 for Data Warehousing on Windows Server 20122014-02-21T00:00:00ZtrueSqlServer2012_100.pnghttp://www.microsoft.com/en-us/download/details.aspx?id=29067A6Microsoft SQL Server Grouphttp://go.microsoft.com/fwlink/?LinkId=271259SqlServer2012_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US127fb83b3509582419d99629ce476bcb5c8__SQL-Server-2012SP1-CU5-11.0.3373.0-Enterprise-ENU-WS2008R2-CY13SU10Windowshttp://go.microsoft.com/fwlink/?LinkID=285687This image contains the full version of SQL Server. Some SQL Server components require additional setup and configuration before use. We recommend that you use a virtual machine size of large or higher. This image has been pre-configured for Windows Azure, including enabling CEIP which can be disabled, for more info see http://msdn.microsoft.com/en-us/library/windowsazure/dn133151.aspx#database.SQL Server 2012 SP1 Enterprise on Windows Server 2008 R22013-10-21T00:00:00ZtrueSqlServer2012_100.pnghttp://www.microsoft.com/en-us/download/details.aspx?id=29067LargeMicrosoft SQL Server Grouphttp://go.microsoft.com/fwlink/?LinkId=271259SqlServer2012_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US127fb83b3509582419d99629ce476bcb5c8__SQL-Server-2012SP1-CU5-11.0.3373.0-Enterprise-ENU-Win2012Windowshttp://go.microsoft.com/fwlink/?LinkID=285687This image contains the full version of SQL Server. Some SQL Server components require additional setup and configuration before use. We recommend that you use a virtual machine size of large or higher. 
This image has been pre-configured for Windows Azure, including enabling CEIP which can be disabled, for more info see http://msdn.microsoft.com/en-us/library/windowsazure/dn133151.aspx#database.SQL Server 2012 SP1 Enterprise on Windows Server 20122013-08-06T00:00:00ZtrueSqlServer2012_100.pnghttp://www.microsoft.com/en-us/download/details.aspx?id=29067LargeMicrosoft SQL Server Grouphttp://go.microsoft.com/fwlink/?LinkId=271259SqlServer2012_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US127fb83b3509582419d99629ce476bcb5c8__SQL-Server-2012SP1-CU5-11.0.3373.0-Enterprise-ENU-Win2012-CY13SU10Windowshttp://go.microsoft.com/fwlink/?LinkID=285687This image contains the full version of SQL Server. Some SQL Server components require additional setup and configuration before use. We recommend that you use a virtual machine size of large or higher. This image has been pre-configured for Windows Azure, including enabling CEIP which can be disabled, for more info see http://msdn.microsoft.com/en-us/library/windowsazure/dn133151.aspx#database.SQL Server 2012 SP1 Enterprise on Windows Server 20122013-10-21T00:00:00ZtrueSqlServer2012_100.pnghttp://www.microsoft.com/en-us/download/details.aspx?id=29067LargeMicrosoft SQL Server Grouphttp://go.microsoft.com/fwlink/?LinkId=271259SqlServer2012_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US127fb83b3509582419d99629ce476bcb5c8__SQL-Server-2012SP1-CU5-11.0.3373.0-Enterprise-ENU-Win2012-CY13SU12Windowshttp://go.microsoft.com/fwlink/?LinkID=285687This image contains the full version of SQL Server. Some SQL Server components require additional setup and configuration before use. We recommend that you use a virtual machine size of A3 or higher. 
This image has been pre-configured for Windows Azure, including enabling CEIP which can be disabled, for more info see [here|http://msdn.microsoft.com/en-us/library/windowsazure/dn133151.aspx#database|_blank].SQL Server 2012 SP1 Enterprise on Windows Server 20122013-12-23T00:00:00ZtrueSqlServer2012_100.pnghttp://www.microsoft.com/en-us/download/details.aspx?id=29067A3Microsoft SQL Server Grouphttp://go.microsoft.com/fwlink/?LinkId=271259SqlServer2012_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US127fb83b3509582419d99629ce476bcb5c8__SQL-Server-2012SP1-CU5-11.0.3373.0-Enterprise-ENU-Win2K8R2-CY13SU12Windowshttp://go.microsoft.com/fwlink/?LinkID=285687This image will be removed on 5/30/2014. We recommend that you use a virtual machine size of A3 or higher. This image has been pre-configured for Windows Azure, including enabling CEIP which can be disabled, for more info see [here|http://msdn.microsoft.com/en-us/library/windowsazure/dn133151.aspx#database|_blank].SQL Server 2012 SP1 Enterprise on Windows Server 2008 R22013-12-23T00:00:00ZtrueSqlServer2012_100.pnghttp://www.microsoft.com/en-us/download/details.aspx?id=29067A3Microsoft SQL Server Grouphttp://go.microsoft.com/fwlink/?LinkId=271259SqlServer2012_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US127fb83b3509582419d99629ce476bcb5c8__SQL-Server-2012SP1-CU5-11.0.3373.0-Standard-ENU-WS2008R2Windowshttp://go.microsoft.com/fwlink/?LinkID=285687Some SQL Server components require additional setup and configuration before use. We recommend that you use a virtual machine size of medium or higher. 
This image has been pre-configured for Windows Azure, including enabling CEIP which can be disabled, for more info see http://msdn.microsoft.com/en-us/library/windowsazure/dn133151.aspx#database.SQL Server 2012 SP1 Standard on Windows Server 2008 R22013-09-04T00:00:00ZtrueSqlServer2012_100.pnghttp://www.microsoft.com/en-us/download/details.aspx?id=29067MediumMicrosoft SQL Server Grouphttp://go.microsoft.com/fwlink/?LinkId=271257SqlServer2012_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US127fb83b3509582419d99629ce476bcb5c8__SQL-Server-2012SP1-CU5-11.0.3373.0-Standard-ENU-Win2012-CY13SU10Windowshttp://go.microsoft.com/fwlink/?LinkID=285691Some SQL Server components require additional setup and configuration before use. We recommend that you use a virtual machine size of medium or higher. This image has been pre-configured for Windows Azure, including enabling CEIP which can be disabled, for more info see http://msdn.microsoft.com/en-us/library/windowsazure/dn133151.aspx#database.SQL Server 2012 SP1 Standard on Windows Server 20122013-10-21T00:00:00ZtrueSqlServer2012_100.pnghttp://www.microsoft.com/en-us/download/details.aspx?id=29067MediumMicrosoft SQL Server Grouphttp://go.microsoft.com/fwlink/?LinkId=271257SqlServer2012_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US127fb83b3509582419d99629ce476bcb5c8__SQL-Server-2012SP1-CU5-11.0.3373.0-Standard-ENU-Win2012-CY13SU12Windowshttp://go.microsoft.com/fwlink/?LinkID=285691Some SQL Server components require additional setup and configuration before use. We recommend that you use a virtual machine size of A2 or higher. 
This image has been pre-configured for Windows Azure, including enabling CEIP which can be disabled, for more info see [here|http://msdn.microsoft.com/en-us/library/windowsazure/dn133151.aspx#database|_blank].SQL Server 2012 SP1 Standard on Windows Server 20122013-12-23T00:00:00ZtrueSqlServer2012_100.pnghttp://www.microsoft.com/en-us/download/details.aspx?id=29067A2Microsoft SQL Server Grouphttp://go.microsoft.com/fwlink/?LinkId=271257SqlServer2012_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US127fb83b3509582419d99629ce476bcb5c8__SQL-Server-2012SP1-CU5-11.0.3373.0-Standard-ENU-Win2K8R2-CY13SU10Windowshttp://go.microsoft.com/fwlink/?LinkID=285691Some SQL Server components require additional setup and configuration before use. We recommend that you use a virtual machine size of medium or higher. This image has been pre-configured for Windows Azure, including enabling CEIP which can be disabled, for more info see http://msdn.microsoft.com/en-us/library/windowsazure/dn133151.aspx#database.SQL Server 2012 SP1 Standard on Windows Server 2008 R22013-10-21T00:00:00ZtrueSqlServer2012_100.pnghttp://www.microsoft.com/en-us/download/details.aspx?id=29067MediumMicrosoft SQL Server Grouphttp://go.microsoft.com/fwlink/?LinkId=271257SqlServer2012_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US127fb83b3509582419d99629ce476bcb5c8__SQL-Server-2012SP1-CU5-11.0.3373.0-Standard-ENU-Win2K8R2-CY13SU12Windowshttp://go.microsoft.com/fwlink/?LinkID=285691We recommend that you use a virtual machine size of medium or higher. 
This image has been pre-configured for Windows Azure, including enabling CEIP which can be disabled, for more info see [here|http://msdn.microsoft.com/en-us/library/windowsazure/dn133151.aspx#database|_blank].SQL Server 2012 SP1 Standard on Windows Server 2008 R22013-12-23T00:00:00ZtrueSqlServer2012_100.pnghttp://www.microsoft.com/en-us/download/details.aspx?id=29067MediumMicrosoft SQL Server Grouphttp://go.microsoft.com/fwlink/?LinkId=271257SqlServer2012_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US127fb83b3509582419d99629ce476bcb5c8__SQL-Server-2012SP1-CU5-11.0.3373.0-Standard-ENU-Win2K8R2-CY14SU02Windowshttp://go.microsoft.com/fwlink/?LinkID=285691This image will be removed on 5/30/2014. We recommend that you use a virtual machine size of A2 or higher. This image has been pre-configured for Windows Azure, including enabling CEIP which can be disabled, for more info see [here|http://msdn.microsoft.com/en-us/library/windowsazure/dn133151.aspx#database|_blank].SQL Server 2012 SP1 Standard on Windows Server 2008 R22014-02-21T00:00:00ZtrueSqlServer2012_100.pnghttp://www.microsoft.com/en-us/download/details.aspx?id=29067A2Microsoft SQL Server Grouphttp://go.microsoft.com/fwlink/?LinkId=271257SqlServer2012_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US127fb83b3509582419d99629ce476bcb5c8__SQL-Server-2012SP1-CU5-11.0.3373.0-Web-ENU-WS2008R2Windowshttp://go.microsoft.com/fwlink/?LinkID=286424Some SQL Server components require additional setup and configuration before use. We recommend that you use a virtual machine size of medium or higher. 
This image has been pre-configured for Windows Azure, including enabling CEIP which can be disabled, for more info see http://msdn.microsoft.com/en-us/library/windowsazure/dn133151.aspx#database.SQL Server 2012 SP1 Web on Windows Server 2008 R22013-09-04T00:00:00ZtrueSqlServer2012_100.pnghttp://www.microsoft.com/en-us/download/details.aspx?id=29067MediumMicrosoft SQL Server Grouphttp://go.microsoft.com/fwlink/?LinkId=271258SqlServer2012_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US127fb83b3509582419d99629ce476bcb5c8__SQL-Server-2012SP1-CU5-11.0.3373.0-Web-ENU-WS2008R2-CY13SU10Windowshttp://go.microsoft.com/fwlink/?LinkID=286424Some SQL Server components require additional setup and configuration before use. We recommend that you use a virtual machine size of medium or higher. This image has been pre-configured for Windows Azure, including enabling CEIP which can be disabled, for more info see http://msdn.microsoft.com/en-us/library/windowsazure/dn133151.aspx#database.SQL Server 2012 SP1 Web on Windows Server 2008 R22013-10-24T00:00:00ZtrueSqlServer2012_100.pnghttp://www.microsoft.com/en-us/download/details.aspx?id=29067MediumMicrosoft SQL Server Grouphttp://go.microsoft.com/fwlink/?LinkId=271258SqlServer2012_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US127fb83b3509582419d99629ce476bcb5c8__SQL-Server-2012SP1-CU5-11.0.3373.0-Web-ENU-Win2K8R2-CY13SU12Windowshttp://go.microsoft.com/fwlink/?LinkID=286424Some SQL Server components require additional setup and configuration before use. We recommend that you use a virtual machine size of medium or higher. 
This image has been pre-configured for Windows Azure, including enabling CEIP which can be disabled, for more info see [here|http://msdn.microsoft.com/en-us/library/windowsazure/dn133151.aspx#database|_blank].SQL Server 2012 SP1 Web on Windows Server 2008 R22013-12-23T00:00:00ZtrueSqlServer2012_100.pnghttp://www.microsoft.com/en-us/download/details.aspx?id=29067MediumMicrosoft SQL Server Grouphttp://go.microsoft.com/fwlink/?LinkId=271258SqlServer2012_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US127fb83b3509582419d99629ce476bcb5c8__SQL-Server-2012SP1-CU5-11.0.3373.0-Web-ENU-Win2K8R2-CY14SU02Windowshttp://go.microsoft.com/fwlink/?LinkID=286424This image will be removed on 5/30/2014. Some SQL Server components require additional setup and configuration before use. We recommend that you use a virtual machine size of A2 or higher. This image has been pre-configured for Windows Azure, including enabling CEIP which can be disabled, for more info see [here|http://msdn.microsoft.com/en-us/library/windowsazure/dn133151.aspx#database|_blank].SQL Server 2012 SP1 Web on Windows Server 2008 R22014-02-21T00:00:00ZtrueSqlServer2012_100.pnghttp://www.microsoft.com/en-us/download/details.aspx?id=29067A2Microsoft SQL Server Grouphttp://go.microsoft.com/fwlink/?LinkId=271258SqlServer2012_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US127fb83b3509582419d99629ce476bcb5c8__SQL-Server-2014RTM-12.0.2000.8-DataWarehousing-ENU-WS2012R2-AprilGAWindowshttp://go.microsoft.com/fwlink/?LinkID=298186This image is optimized for data warehousing workloads with data sizes up to 1TB using clustered columnstore indexes. After the portal deployment completes, you need to attach disks to the virtual machine. To do this, connect to the VM and follow the on-screen instructions. Before using the VM, review the recommendations [here|http://msdn.microsoft.com/library/dn387396.aspx|_blank]. 
For best performance, we recommend using a VM size of A7. This image has been pre-configured for Windows Azure, including enabling CEIP which can be disabled, for more info see [here|http://msdn.microsoft.com/en-us/library/windowsazure/dn133151.aspx#database|_blank].SQL Server 2014 RTM DataWarehousing on Windows Server 2012 R22014-04-01T07:00:00ZtrueSQLServer2014_100.pnghttp://go.microsoft.com/fwlink/?LinkID=282418A7Microsoft SQL Server Grouphttp://go.microsoft.com/fwlink/?LinkId=271259SqlServer2014_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US127fb83b3509582419d99629ce476bcb5c8__SQL-Server-2014RTM-12.0.2000.8-ENTCORE-ENU-WS2012R2-AprilGAWindowshttp://go.microsoft.com/fwlink/?LinkID=298186This image contains the full version of SQL Server. We recommend that you use a virtual machine size of A3 or higher. This image has been pre-configured for Windows Azure, including enabling CEIP which can be disabled, for more info see [here|http://msdn.microsoft.com/en-us/library/windowsazure/dn133151.aspx#database|_blank].SQL Server 2014 RTM Enterprise on Windows Server 2012 R22014-04-01T07:00:00ZtrueSqlServer2014_100.pnghttp://go.microsoft.com/fwlink/?LinkID=282418A3Microsoft SQL Server Grouphttp://go.microsoft.com/fwlink/?LinkId=271259SqlServer2014_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US127fb83b3509582419d99629ce476bcb5c8__SQL-Server-2014RTM-12.0.2000.8-Standard-ENU-WS2012R2-AprilGAWindowshttp://go.microsoft.com/fwlink/?LinkID=298186This image contains the full version of SQL Server. We recommend that you use a virtual machine size of A2 or higher. 
This image has been pre-configured for Windows Azure, including enabling CEIP which can be disabled, for more info see [here|http://msdn.microsoft.com/en-us/library/windowsazure/dn133151.aspx#database|_blank].SQL Server 2014 RTM Standard on Windows Server 2012 R22014-04-01T07:00:00ZtrueSqlServer2014_100.pnghttp://go.microsoft.com/fwlink/?LinkID=282418A2Microsoft SQL Server Grouphttp://go.microsoft.com/fwlink/?LinkId=271257SqlServer2014_45.pngPublicEast Asia;Southeast Asia;North Europe;West Europe;East US;North Central US;West US127fb83b3509582419d99629ce476bcb5c8__SQL-Server-2014RTM-12.0.2000.8-Web-ENU-WS2012R2-AprilGAWindowshttp://go.microsoft.com/fwlink/?LinkID=298186This image contains the full version of SQL Server. We recommend that you use a virtual machine size of A2 or higher. This image has been pre-configured for Windows Azure, including enabling CEIP which can be disabled, for more info see [here|http://msdn.microsoft.com/en-us/library/windowsazure/dn133151.aspx#database|_blank].SQL Server 2014 RTM Web on Windows Server 2012 R22014-04-01T07:00:00ZtrueSqlServer2014_100.pnghttp://go.microsoft.com/fwlink/?LinkID=282418A2Microsoft SQL Server Grouphttp://go.microsoft.com/fwlink/?LinkId=271258SqlServer2014_45.pngPublicEast US;West US127ad072bd3082149369c449ba5832401ae__Windows-Server-RDSHwO13P-on-Windows-Server-2012-R2-20140417-1824WindowsThis image can be used by authorized Microsoft Service Providers only.Windows Server RDSHwO13P on Windows Server 2012 R2false2014-04-17T19:21:27.3838648ZfalseWindowsServer2012R2_100.pnghttp://www.windowsazure.com/en-us/support/legal/privacy-statementLargeMicrosoft Windows Server Remote Desktop GroupWindowsServer2012R2_45.pngPublicEast US;West US127ad072bd3082149369c449ba5832401ae__Windows-Server-Remote-Desktop-Session-Host-on-Windows-Server-2012-R2-20140403-2126WindowsThis image contains the Windows Server 2012 R2 operating system with the Remote Desktop Session Host (RD Session Host) role installed. 
This image has been pre-configured for Windows Azure. RD Session Host enables a server to host RemoteApp programs or session-based desktops.Windows Server Remote Desktop Session Host on Windows Server 2012 R2false2014-04-03T22:17:28.0548753ZfalseWindowsServer2012R2_100.pngMediumMicrosoft Windows Server Remote Desktop GroupWindowsServer2012R2_45.pngUserNorth Europe30http://mtlytics.blob.core.windows.net/vhds/eqowwq2s.a51201312281344410973.vhdambaribaseLinuxThe base Ambari InstancefalseUserNorth Europe30http://mtlytics.blob.core.windows.net/communityimages/community-271-bcee18c0-e6f5-4595-a9ae-b64ebb9cb8a9-2.vhdAzure-Data-AnalysisLinuxhttp://spark.elastacloud.com{"description":"Includes R, openmpi and loads of packages. Include Spark 0.8, Shark 0.7 and Storm and Kafka. Also includes iPython notebook + a whole heap of libraries.","publisherUrl":"https://vmdepot.msopentech.com/User/Show?user=271","publisher":"elastacloud","imageUrl":"https://vmdepot.msopentech.com/Vhd/Show?VhdID=8689&version=10769","icons":{"Small":"https://vmdepotwestus.blob.core.windows.net/images/271/6c415744-cfed-4ed5-8493-361f47716623_45x45.png","Large":"https://vmdepotwestus.blob.core.windows.net/images/271/6c415744-cfed-4ed5-8493-361f47716623_100x100.png","ExtraLarge":"https://vmdepotwestus.blob.core.windows.net/images/271/6c415744-cfed-4ed5-8493-361f47716623_215x215.png"}}CommunityfalseUserNorth Europe30https://neelasta5191b16a673d426c.blob.core.windows.net/elastaimage/elastaspark0.vhdelastaspark0Linux2014-04-14T13:40:06ZfalseGeneralizedUserNorth Europe30https://neelasta5191b16a673d426c.blob.core.windows.net/elastaimage/neelastaspark0.vhdneelastaspark0Linux2014-05-27T12:10:18ZfalseGeneralizedUserNorth Europe30https://neelasta5191b16a673d426c.blob.core.windows.net/elastaimage/neelastaspark1.vhdneelastaspark1Linux2014-05-27T11:24:59ZfalseGeneralizedUserWest Europe30https://portalvhdsj5170kkqq0ftq.blob.core.windows.net/elastaimage/sparkius1.vhdsparkius1Linux2014-02-04T11:36:22ZfalseUserNorth 
Europe30http://mtlytics.blob.core.windows.net/vhds/m15pycz2.ync201403171254370011.vhdstorm.elactacloud.baseLinuxfalseUserNorth Europe30http://portalvhds3c3c6cj9bpgwh.blob.core.windows.net/vhds/hqbn1y3e.gya201404221049490886.vhdzk-drill-src-0LinuxfalseGeneralized \ No newline at end of file diff --git a/libcloud/test/compute/test_azure.py b/libcloud/test/compute/test_azure.py index 12b3fcf356..301529b1d0 100644 --- a/libcloud/test/compute/test_azure.py +++ b/libcloud/test/compute/test_azure.py @@ -1,4 +1,5 @@ import libcloud +from libcloud.common.types import LibcloudError __author__ = 'david' @@ -35,62 +36,76 @@ def test_locations_returned_successfully(self): self.assertListEqual(locationNamesResult, locationNamesExpected) matchedLocation = next(location for location in locations if location.name == 'Southeast Asia') - servicesResult = matchedLocation.services + servicesResult = matchedLocation.available_services servicesExpected = ['Compute','Storage','PersistentVMRole','HighMemory'] self.assertListEqual(servicesResult, servicesExpected) - vmRoleSizesResult = matchedLocation.vmRoleSizes + vmRoleSizesResult = matchedLocation.virtual_machine_role_sizes vmRoleSizesExpected = ['A5','A6','A7','Basic_A0','Basic_A1','Basic_A2','Basic_A3','Basic_A4','ExtraLarge','ExtraSmall','Large','Medium','Small'] self.assertListEqual(vmRoleSizesResult, vmRoleSizesExpected) def test_images_returned_successfully(self): images = self.driver.list_images() - self.assertEquals(len(images), 212 ) + self.assertEquals(len(images), 215 ) def test_images_returned_successfully_filter_by_location(self): images = self.driver.list_images("West US") - self.assertEquals(len(images), 206 ) + self.assertEquals(len(images), 207 ) - def test_vmimages_returned_successfully(self): - vmimages = self.driver.list_nodes(cloudServiceName="oddkinz") - self.assertEqual(len(vmimages), 5) + def test_list_nodes_returned_successfully(self): + vmimages = self.driver.list_nodes(ex_cloud_service_name="dcoddkinztest01") + 
self.assertEqual(len(vmimages), 2) img0 = vmimages[0] - self.assertEquals(img0.id,"c3Rvcm0x") - self.assertEquals(img0.image,"Linux") - self.assertEquals(img0.location,"North Europe") - self.assertEquals(img0.name,"cloudredis") - self.assertListEqual(img0.public_ips,["100.86.90.81"]) - self.assertEquals(img0.serviceName,"oddkinz") - self.assertEquals(img0.size,"Medium") - self.assertEquals(img0.state,"ReadyRole") - self.assertEquals(img0.deploymentName,"storm1") + self.assertEquals(img0.id,"dc03") + self.assertEquals(img0.name,u"dc03") + self.assertListEqual(img0.public_ips,["191.235.135.62"]) + self.assertListEqual(img0.private_ips,["100.92.66.69"]) + self.assertEquals(img0.size,None) + self.assertEquals(img0.state,0) self.assertTrue(isinstance(img0.extra,dict)) + extra = img0.extra + self.assertEquals(extra["instance_size"], u'Small') + self.assertEquals(extra["power_state"], u'Started') + self.assertEquals(extra["ssh_port"], u'22') - def test_list_nodes_cloud_service_not_found(self): - with self.assertRaises(ValueError): - self.driver.list_nodes(cloudServiceName="424324") + def test_list_nodes_returned_no_deployments(self): + vmimages = self.driver.list_nodes(ex_cloud_service_name="dcoddkinztest03") + self.assertIsNone(vmimages) - def test_vmimages_restart_node_success(self): - node = dict() - node["name"]="cloudredis" - node["serviceName"]="oddkinz" - node["deploymentName"]="storm1" + def test_list_nodes_returned_no_cloud_service(self): + with self.assertRaises(LibcloudError): + self.driver.list_nodes(ex_cloud_service_name="dcoddkinztest04") + + def test_restart_node_success(self): - result = self.driver.reboot_node(node) + node = type('Node', (object,), dict(id="dc03")) + result = self.driver.reboot_node(node, ex_cloud_service_name="dcoddkinztest01", ex_deployment_slot="Production") self.assertTrue(result) #simulating attempting to reboot a node that ifas already rebooting - def test_vmimages_restart_node_fail(self): - node = dict() - node["name"]="cloudredis" 
- node["serviceName"]="oddkinz" - node["deploymentName"]="oddkinz1" + def test_restart_node_fail_no_deployment(self): + + node = type('Node', (object,), dict(id="dc03")) + + with self.assertRaises(LibcloudError): + self.driver.reboot_node(node, ex_cloud_service_name="dcoddkinztest02", ex_deployment_slot="Production") + + def test_restart_node_fail_no_cloud_service(self): + + node = type('Node', (object,), dict(id="dc03")) - result = self.driver.reboot_node(node) + with self.assertRaises(LibcloudError): + self.driver.reboot_node(node, ex_cloud_service_name="dcoddkinztest03", ex_deployment_slot="Production") + def test_restart_node_fail_node_not_found(self): + + node = type('Node', (object,), dict(id="dc13")) + + + result = self.driver.reboot_node(node, ex_cloud_service_name="dcoddkinztest01", ex_deployment_slot="Production") self.assertFalse(result) def test_destroy_node_success_single_node_in_cloud_service(self): @@ -119,9 +134,9 @@ def test_destroy_node_success_cloud_service_not_found(self): node = dict() node["name"]="cloudredis" - result = self.driver.destroy_node(node, ex_cloud_service_name="oddkinz2", ex_deployment_slot="Production" ) + result = self.driver.destroy_node(node, ex_cloud_service_name="oddkinz5", ex_deployment_slot="Production" ) + self.assertFalse(result) - print result class AzureMockHttp(MockHttp): @@ -148,5 +163,68 @@ def _5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_oddkinz2_deplo def _5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_oddkinz2_deployments_dc03_roles_oddkinz2(self, method, url, body, headers): return (httplib.NOT_FOUND, body, headers, httplib.responses[httplib.NOT_FOUND]) + def _5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_oddkinz5_deploymentslots_Production(self, method, url, body, headers): + if method == "GET": + body = self.fixtures.load('_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_oddkinz5_deploymentslots_Production.xml') + + return (httplib.NOT_FOUND, body, 
headers, httplib.responses[httplib.NOT_FOUND]) + + def _5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest01_deploymentslots_Production(self, method, url, body, headers): + if method == "GET": + body = self.fixtures.load('_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest01_deploymentslots_Production.xml') + + return (httplib.OK, body, headers, httplib.responses[httplib.OK]) + + def _5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest01_deployments_dc03_roleinstances_dc03(self, method, url, body, headers): + headers["x-ms-request-id"]="acc33f6756cda6fd96826394fce4c9f3" + return (httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED]) + + def _5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest02_deploymentslots_Production(self, method, url, body, headers): + if method == "GET": + body = self.fixtures.load('_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest02_deploymentslots_Production.xml') + + return (httplib.NOT_FOUND, body, headers, httplib.responses[httplib.NOT_FOUND]) + + def _5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest03_deploymentslots_Production(self, method, url, body, headers): + if method == "GET": + body = self.fixtures.load('_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest03_deploymentslots_Production.xml') + + return (httplib.NOT_FOUND, body, headers, httplib.responses[httplib.NOT_FOUND]) + + def _5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest01_deployments_dc03_roleinstances_dc13(self, method, url, body, headers): + if method == "GET": + body = self.fixtures.load('_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest01_deployments_dc03_roleinstances_dc13.xml') + + return (httplib.NOT_FOUND, body, headers, httplib.responses[httplib.NOT_FOUND]) + + def 
_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest01(self, method, url, body, headers): + if method == "GET": + body = self.fixtures.load('_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest01.xml') + + return (httplib.OK, body, headers, httplib.responses[httplib.OK]) + + def _5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest03(self, method, url, body, headers): + if method == "GET": + body = self.fixtures.load('_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest03.xml') + + return (httplib.OK, body, headers, httplib.responses[httplib.OK]) + + def _5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest04(self, method, url, body, headers): + if method == "GET": + body = self.fixtures.load('_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest04.xml') + + return (httplib.NOT_FOUND, body, headers, httplib.responses[httplib.NOT_FOUND]) + + def _5191b16a_673d_426c_8c55_fdd912858e4e_services_images(self, method, url, body, headers): + if method == "GET": + body = self.fixtures.load('_5191b16a_673d_426c_8c55_fdd912858e4e_services_images.xml') + + return (httplib.OK, body, headers, httplib.responses[httplib.OK]) + + def _5191b16a_673d_426c_8c55_fdd912858e4e_locations(self, method, url, body, headers): + if method == "GET": + body = self.fixtures.load('_5191b16a_673d_426c_8c55_fdd912858e4e_locations.xml') + + return (httplib.OK, body, headers, httplib.responses[httplib.OK]) if __name__ == '__main__': sys.exit(unittest.main()) From d5ad0c834fecb79eb19ef0950229e7d9546a6232 Mon Sep 17 00:00:00 2001 From: davidcrossland Date: Wed, 28 May 2014 15:22:18 +0100 Subject: [PATCH 289/315] modified destroy node logic so exception is thrown if deployment or node do not exist --- libcloud/compute/drivers/azure.py | 34 ++++++++++++++--------------- libcloud/test/compute/test_azure.py | 9 ++++---- 2 files changed, 21 insertions(+), 22 deletions(-) diff 
--git a/libcloud/compute/drivers/azure.py b/libcloud/compute/drivers/azure.py index 2c0a767307..655f7e0cd0 100644 --- a/libcloud/compute/drivers/azure.py +++ b/libcloud/compute/drivers/azure.py @@ -583,31 +583,29 @@ def destroy_node(self, node, ex_cloud_service_name=None, ex_deployment_slot=None if not ex_deployment_slot: ex_deployment_slot = "production" - try: - _deployment = self._get_deployment(service_name=ex_cloud_service_name,deployment_slot=ex_deployment_slot) - _deployment_name = _deployment.name + _deployment = self._get_deployment(service_name=ex_cloud_service_name,deployment_slot=ex_deployment_slot) + _deployment_name = _deployment.name - _server_deployment_count = len(_deployment.role_instance_list) + _server_deployment_count = len(_deployment.role_instance_list) - if _server_deployment_count > 1: - path = self._get_role_path(ex_cloud_service_name, _deployment_name, node.id) - path += '?comp=media' # forces deletion of attached disks + if _server_deployment_count > 1: + path = self._get_role_path(ex_cloud_service_name, _deployment_name, node.id) + path += '?comp=media' # forces deletion of attached disks - data = self._perform_delete(path, async=True) + data = self._perform_delete(path, async=True) - return True - else: - path = self._get_deployment_path_using_name( - ex_cloud_service_name, - _deployment_name) + return True + else: + path = self._get_deployment_path_using_name( + ex_cloud_service_name, + _deployment_name) - path += '?comp=media' + path += '?comp=media' - data = self._perform_delete(path,async=True) + data = self._perform_delete(path,async=True) + + return True - return True - except Exception, e: - return False """ Functions not implemented """ diff --git a/libcloud/test/compute/test_azure.py b/libcloud/test/compute/test_azure.py index 301529b1d0..12a7126b56 100644 --- a/libcloud/test/compute/test_azure.py +++ b/libcloud/test/compute/test_azure.py @@ -126,16 +126,17 @@ def test_destroy_node_fail_node_does_not_exist(self): node = 
type('Node', (object,), dict(id="oddkinz2")) - result = self.driver.destroy_node(node, ex_cloud_service_name="oddkinz2", ex_deployment_slot="Production") - self.assertFalse(result) + with self.assertRaises(LibcloudError): + self.driver.destroy_node(node, ex_cloud_service_name="oddkinz2", ex_deployment_slot="Production") def test_destroy_node_success_cloud_service_not_found(self): node = dict() node["name"]="cloudredis" - result = self.driver.destroy_node(node, ex_cloud_service_name="oddkinz5", ex_deployment_slot="Production" ) - self.assertFalse(result) + with self.assertRaises(LibcloudError): + self.driver.destroy_node(node, ex_cloud_service_name="oddkinz5", ex_deployment_slot="Production" ) + class AzureMockHttp(MockHttp): From 599bc8464ff3d183b03b69d31a3c9f181ba00013 Mon Sep 17 00:00:00 2001 From: davidcrossland Date: Thu, 29 May 2014 13:53:28 +0100 Subject: [PATCH 290/315] added create/destroy cloud service. added unit tests for create node --- libcloud/compute/drivers/azure.py | 80 ++++++++++--- ...e4e_services_hostedservices_testdc1234.xml | 1 + ...8e4e_services_hostedservices_testdcabc.xml | 1 + ...e4e_services_hostedservices_testdcabc2.xml | 1 + ..._hostedservices_testdcabc2_deployments.xml | 1 + ..._testdcabc2_deploymentslots_Production.xml | 1 + ...s_hostedservices_testdcabc_deployments.xml | 1 + libcloud/test/compute/test_azure.py | 105 ++++++++++++++++++ 8 files changed, 177 insertions(+), 14 deletions(-) create mode 100644 libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdc1234.xml create mode 100644 libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc.xml create mode 100644 libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc2.xml create mode 100644 libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc2_deployments.xml create mode 
100644 libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc2_deploymentslots_Production.xml create mode 100644 libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc_deployments.xml diff --git a/libcloud/compute/drivers/azure.py b/libcloud/compute/drivers/azure.py index 655f7e0cd0..d7609eb631 100644 --- a/libcloud/compute/drivers/azure.py +++ b/libcloud/compute/drivers/azure.py @@ -606,6 +606,58 @@ def destroy_node(self, node, ex_cloud_service_name=None, ex_deployment_slot=None return True + def create_cloud_service(self, ex_cloud_service_name, location, description=None, extended_properties=None): + + if not ex_cloud_service_name: + raise ValueError("ex_cloud_service_name is required.") + + if not location: + raise ValueError("location is required.") + + response = self._perform_cloud_service_create( + self._get_hosted_service_path(), + AzureXmlSerializer.create_hosted_service_to_xml(ex_cloud_service_name, self._encode_base64(ex_cloud_service_name), description, + location, None, + extended_properties)) + + if response.status != 201: + raise LibcloudError('Message: %s, Body: %s, Status code: %d' % (response.error, response.body, response.status), driver=self) + + return True + + def _perform_cloud_service_create(self, path, data): + request = AzureHTTPRequest() + request.method = 'POST' + request.host = azure_service_management_host + request.path = path + request.body = data + request.path, request.query = self._update_request_uri_query(request) + request.headers = self._update_management_header(request) + response = self._perform_request(request) + + return response + + def destroy_cloud_service(self, ex_cloud_service_name): + if not ex_cloud_service_name: + raise ValueError("ex_cloud_service_name is required.") + #add check to ensure all nodes have been deleted + response = 
self._perform_cloud_service_delete(self._get_hosted_service_path(ex_cloud_service_name)) + + if response.status != 200: + raise LibcloudError('Message: %s, Body: %s, Status code: %d' % (response.error, response.body, response.status), driver=self) + + return True + + def _perform_cloud_service_delete(self, path): + request = AzureHTTPRequest() + request.method = 'DELETE' + request.host = azure_service_management_host + request.path = path + request.path, request.query = self._update_request_uri_query(request) + request.headers = self._update_management_header(request) + response = self._perform_request(request) + + return response """ Functions not implemented """ @@ -869,7 +921,7 @@ def _create_storage_account(self, **kwargs): def _get_operation_status(self, request_id): return self._perform_get( - '/' + subscription_id + '/operations/' + _str(request_id), + '/' + self.subscription_id + '/operations/' + _str(request_id), Operation) def _perform_get(self, path, response_type): @@ -991,7 +1043,7 @@ def send_request_body(self, connection, request_body): assert isinstance(request_body, bytes) connection.send(request_body) elif (not isinstance(connection, HTTPSConnection) and - not isinstance(connection, HTTPConnection)): + not isinstance(connection, httplib.HTTPConnection)): connection.send(None) def _parse_response(self, response, return_type): @@ -1085,7 +1137,7 @@ def _fill_scalar_list_of(self, xmldoc, element_type, parent_xml_element_name, def _get_node_value(self, xmlelement, data_type): value = xmlelement.firstChild.nodeValue if data_type is datetime: - return _to_datetime(value) + return self._to_datetime(value) elif data_type is bool: return value.lower() != 'false' else: @@ -1188,7 +1240,7 @@ def _get_request_body(self, request_body): return b'' if isinstance(request_body, WindowsAzureData): - request_body = _convert_class_to_xml(request_body) + request_body = self._convert_class_to_xml(request_body) if isinstance(request_body, bytes): return request_body @@ 
-1212,7 +1264,7 @@ def _convert_class_to_xml(self, source, xml_prefix=True): if isinstance(source, list): for value in source: - xmlstr += _convert_class_to_xml(value, False) + xmlstr += self._convert_class_to_xml(value, False) elif isinstance(source, WindowsAzureData): class_name = source.__class__.__name__ xmlstr += '<' + class_name + '>' @@ -1220,7 +1272,7 @@ def _convert_class_to_xml(self, source, xml_prefix=True): if value is not None: if isinstance(value, list) or \ isinstance(value, WindowsAzureData): - xmlstr += _convert_class_to_xml(value, False) + xmlstr += self._convert_class_to_xml(value, False) else: xmlstr += ('<' + self._get_serialization_name(name) + '>' + xml_escape(str(value)) + 'ResourceNotFoundThe hosted service does not exist. \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc.xml b/libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc.xml new file mode 100644 index 0000000000..d4bf9e8761 --- /dev/null +++ b/libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc.xml @@ -0,0 +1 @@ +https://management.core.windows.net/5191b16a-673d-426c-8c55-fdd912858e4e/services/hostedservices/testdc123testdc123North EuropeCreated2014-05-29T12:04:22Z2014-05-29T12:04:23Z \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc2.xml b/libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc2.xml new file mode 100644 index 0000000000..a5c66a63d9 --- /dev/null +++ b/libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc2.xml @@ -0,0 +1 @@ +https://management.core.windows.net/5191b16a-673d-426c-8c55-fdd912858e4e/services/hostedservices/testdc123testdc123North 
EuropeCreated2014-05-29T12:04:22Z2014-05-29T12:05:06Zdcoddkinztest02Productionf18bf4439e6848138d9692cdcfa08769Runninghttp://testdc123.cloudapp.net/PFNlcnZpY2VDb25maWd1cmF0aW9uIHhtbG5zOnhzZD0iaHR0cDovL3d3dy53My5vcmcvMjAwMS9YTUxTY2hlbWEiIHhtbG5zOnhzaT0iaHR0cDovL3d3dy53My5vcmcvMjAwMS9YTUxTY2hlbWEtaW5zdGFuY2UiIHhtbG5zPSJodHRwOi8vc2NoZW1hcy5taWNyb3NvZnQuY29tL1NlcnZpY2VIb3N0aW5nLzIwMDgvMTAvU2VydmljZUNvbmZpZ3VyYXRpb24iPg0KICA8Um9sZSBuYW1lPSJkYzE0Ij4NCiAgICA8SW5zdGFuY2VzIGNvdW50PSIxIiAvPg0KICA8L1JvbGU+DQogIDxSb2xlIG5hbWU9ImRjb2Rka2luenRlc3QwMyI+DQogICAgPEluc3RhbmNlcyBjb3VudD0iMSIgLz4NCiAgPC9Sb2xlPg0KPC9TZXJ2aWNlQ29uZmlndXJhdGlvbj4=dc14dc14ReadyRole00ExtraSmall100.86.126.44SSH137.116.228.1094581022tcpStarteddc148bd92d2a2bd27182427a02ba38dcd16ddcoddkinztest03dcoddkinztest03ReadyRole00ExtraSmall100.86.132.14SSH137.116.228.1092222tcpStarteddcoddkinztest031332d4b5da308f258b37facbe92457bd1dc14PersistentVMRoleNetworkConfiguration22SSH45810tcp137.116.228.109falseReadWritedcoddkinztest02-dc14-0-201405291222150139http://mtlytics.blob.core.windows.net/vhds/testdc123-dc14-2014-05-29.vhd5112500ae3b842c8b9c604889f8753c3__OpenLogic-CentOS-65-20140415LinuxExtraSmalldcoddkinztest03PersistentVMRoleNetworkConfiguration22SSH22tcp137.116.228.109falseReadWritedcoddkinztest02-dcoddkinztest03-0-201405291204550075http://mtlytics.blob.core.windows.net/vhds/testdc123-dcoddkinztest03-2014-05-29.vhd5112500ae3b842c8b9c604889f8753c3__OpenLogic-CentOS-65-20140415LinuxExtraSmallfalsefalse2014-05-29T12:04:51Z2014-05-29T12:29:34Z2014-04-11T18:28:14Z2014-04-13T18:28:14ZPersistentVMUpdateCompleted
137.116.228.109
truedcoddkinztest02ContractContract
testdc123.f2.internal.cloudapp.net
\ No newline at end of file diff --git a/libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc2_deployments.xml b/libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc2_deployments.xml new file mode 100644 index 0000000000..50d5a6f12a --- /dev/null +++ b/libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc2_deployments.xml @@ -0,0 +1 @@ +https://management.core.windows.net/5191b16a-673d-426c-8c55-fdd912858e4e/services/hostedservices/testdc123testdc123North EuropeCreated2014-05-29T12:04:22Z2014-05-29T12:05:06Z \ No newline at end of file diff --git a/libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc2_deploymentslots_Production.xml b/libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc2_deploymentslots_Production.xml new file mode 100644 index 0000000000..92be7bd858 --- /dev/null +++ b/libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc2_deploymentslots_Production.xml @@ -0,0 +1 @@ 
+dcoddkinztest02Productionf18bf4439e6848138d9692cdcfa08769Runninghttp://testdc123.cloudapp.net/PFNlcnZpY2VDb25maWd1cmF0aW9uIHhtbG5zOnhzZD0iaHR0cDovL3d3dy53My5vcmcvMjAwMS9YTUxTY2hlbWEiIHhtbG5zOnhzaT0iaHR0cDovL3d3dy53My5vcmcvMjAwMS9YTUxTY2hlbWEtaW5zdGFuY2UiIHhtbG5zPSJodHRwOi8vc2NoZW1hcy5taWNyb3NvZnQuY29tL1NlcnZpY2VIb3N0aW5nLzIwMDgvMTAvU2VydmljZUNvbmZpZ3VyYXRpb24iPg0KICA8Um9sZSBuYW1lPSJkYzE0Ij4NCiAgICA8SW5zdGFuY2VzIGNvdW50PSIxIiAvPg0KICA8L1JvbGU+DQogIDxSb2xlIG5hbWU9ImRjb2Rka2luenRlc3QwMyI+DQogICAgPEluc3RhbmNlcyBjb3VudD0iMSIgLz4NCiAgPC9Sb2xlPg0KPC9TZXJ2aWNlQ29uZmlndXJhdGlvbj4=dc14dc14ReadyRole00ExtraSmall100.86.126.44SSH137.116.228.1094581022tcpStarteddc148bd92d2a2bd27182427a02ba38dcd16ddcoddkinztest03dcoddkinztest03ReadyRole00ExtraSmall100.86.132.14SSH137.116.228.1092222tcpStarteddcoddkinztest031332d4b5da308f258b37facbe92457bd1dc14PersistentVMRoleNetworkConfiguration22SSH45810tcp137.116.228.109falseReadWritedcoddkinztest02-dc14-0-201405291222150139http://mtlytics.blob.core.windows.net/vhds/testdc123-dc14-2014-05-29.vhd5112500ae3b842c8b9c604889f8753c3__OpenLogic-CentOS-65-20140415LinuxExtraSmalldcoddkinztest03PersistentVMRoleNetworkConfiguration22SSH22tcp137.116.228.109falseReadWritedcoddkinztest02-dcoddkinztest03-0-201405291204550075http://mtlytics.blob.core.windows.net/vhds/testdc123-dcoddkinztest03-2014-05-29.vhd5112500ae3b842c8b9c604889f8753c3__OpenLogic-CentOS-65-20140415LinuxExtraSmallfalsefalse2014-05-29T12:04:51Z2014-05-29T12:29:34Z2014-04-11T18:28:14Z2014-04-13T18:28:14ZPersistentVMUpdateCompleted
137.116.228.109
truedcoddkinztest02ContractContract
testdc123.f2.internal.cloudapp.net
\ No newline at end of file diff --git a/libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc_deployments.xml b/libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc_deployments.xml new file mode 100644 index 0000000000..a9e980dcb6 --- /dev/null +++ b/libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc_deployments.xml @@ -0,0 +1 @@ +https://management.core.windows.net/5191b16a-673d-426c-8c55-fdd912858e4e/services/hostedservices/testdc123testdc123North EuropeCreated2014-05-29T12:04:22Z2014-05-29T12:04:23Z \ No newline at end of file diff --git a/libcloud/test/compute/test_azure.py b/libcloud/test/compute/test_azure.py index 12a7126b56..9f37b3816e 100644 --- a/libcloud/test/compute/test_azure.py +++ b/libcloud/test/compute/test_azure.py @@ -1,5 +1,6 @@ import libcloud from libcloud.common.types import LibcloudError +from libcloud.compute.base import NodeAuthPassword __author__ = 'david' @@ -137,7 +138,60 @@ def test_destroy_node_success_cloud_service_not_found(self): with self.assertRaises(LibcloudError): self.driver.destroy_node(node, ex_cloud_service_name="oddkinz5", ex_deployment_slot="Production" ) + def test_create_cloud_service(self): + result = self.driver.create_cloud_service("testdc123", "North Europe") + self.assertTrue(result) + + def test_create_cloud_service_service_exists(self): + + with self.assertRaises(LibcloudError): + self.driver.create_cloud_service("testdc1234", "North Europe") + + def test_destroy_cloud_service(self): + + result = self.driver.destroy_cloud_service("testdc123") + self.assertTrue(result) + def test_destroy_cloud_service_service_does_not_exist(self): + + with self.assertRaises(LibcloudError): + self.driver.destroy_cloud_service("testdc1234") + + def test_create_node_and_deployment_one_node(self): + kwargs = {} + #kwargs["ex_cloud_service_name"]="dcoddkinztest02" + 
kwargs["ex_storage_service_name"]="mtlytics" + kwargs["ex_deployment_name"]="dcoddkinztest02" + kwargs["ex_deployment_slot"]="Production" + kwargs["ex_admin_user_id"]="azurecoder" + auth = NodeAuthPassword("Pa55w0rd", False) + + kwargs["auth"]= auth + + kwargs["size"]= "ExtraSmall" + kwargs["image"] = "5112500ae3b842c8b9c604889f8753c3__OpenLogic-CentOS-65-20140415" + kwargs["name"] = "dcoddkinztest03" + + node = type('Node', (object,), dict(id="dc13")) + result = self.driver.create_node(ex_cloud_service_name="testdcabc", **kwargs) + + def test_create_node_and_deployment_second_node(self): + kwargs = {} + #kwargs["ex_cloud_service_name"]="dcoddkinztest02" + kwargs["ex_storage_service_name"]="mtlytics" + kwargs["ex_deployment_name"]="dcoddkinztest02" + kwargs["ex_deployment_slot"]="Production" + kwargs["ex_admin_user_id"]="azurecoder" + auth = NodeAuthPassword("Pa55w0rd", False) + + kwargs["auth"]= auth + + kwargs["size"]= "ExtraSmall" + kwargs["image"] = "5112500ae3b842c8b9c604889f8753c3__OpenLogic-CentOS-65-20140415" + kwargs["name"] = "dcoddkinztest03" + + node = type('Node', (object,), dict(id="dc14")) + result = self.driver.create_node(ex_cloud_service_name="testdcabc2", **kwargs) class AzureMockHttp(MockHttp): @@ -227,5 +281,56 @@ def _5191b16a_673d_426c_8c55_fdd912858e4e_locations(self, method, url, body, hea body = self.fixtures.load('_5191b16a_673d_426c_8c55_fdd912858e4e_locations.xml') return (httplib.OK, body, headers, httplib.responses[httplib.OK]) + + def _5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices(self, method, url, body, headers): + # request url is the same irrespective of serviceName, only way to differentiate + if "testdc123" in body: + return (httplib.CREATED, body, headers, httplib.responses[httplib.CREATED]) + elif "testdc1234" in body: + return (httplib.CONFLICT, body, headers, httplib.responses[httplib.CONFLICT]) + + def _5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdc123(self, method, url, body, headers): 
+ return (httplib.OK, body, headers, httplib.responses[httplib.OK]) + + def _5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdc1234(self, method, url, body, headers): + if method == "GET": + body = self.fixtures.load('_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdc1234.xml') + + return (httplib.NOT_FOUND, body, headers, httplib.responses[httplib.NOT_FOUND]) + + def _5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc(self, method, url, body, headers): + if method == "GET": + body = self.fixtures.load('_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc.xml') + + return (httplib.OK, body, headers, httplib.responses[httplib.OK]) + + def _5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc_deployments(self, method, url, body, headers): + if method == "GET": + body = self.fixtures.load('_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc_deployments.xml') + + return (httplib.OK, body, headers, httplib.responses[httplib.OK]) + + def _5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc2(self, method, url, body, headers): + if method == "GET": + body = self.fixtures.load('_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc2.xml') + + return (httplib.OK, body, headers, httplib.responses[httplib.OK]) + + def _5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc2_deploymentslots_Production(self, method, url, body, headers): + if method == "GET": + body = self.fixtures.load('_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc2_deploymentslots_Production.xml') + + return (httplib.OK, body, headers, httplib.responses[httplib.OK]) + + def _5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc2_deployments(self, method, url, body, headers): + if method == "GET": + body = 
self.fixtures.load('_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc2_deployments.xml') + + return (httplib.OK, body, headers, httplib.responses[httplib.OK]) + + def _5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc2_deployments_dcoddkinztest02_roles(self, method, url, body, headers): + + return (httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED]) + if __name__ == '__main__': sys.exit(unittest.main()) From 34f160d62b7ed78799749e7056735c51ff83b759 Mon Sep 17 00:00:00 2001 From: davidcrossland Date: Thu, 29 May 2014 15:01:23 +0100 Subject: [PATCH 291/315] modified asyn call so that an explicit check for status codes can be achieved. Added 307 to the list of possible errors, sometimes these are sent by azure --- libcloud/common/azure.py | 2 +- libcloud/compute/drivers/azure.py | 62 +++++++++++++++++++---------- libcloud/test/compute/test_azure.py | 40 +++++++++++++++++++ 3 files changed, 81 insertions(+), 23 deletions(-) diff --git a/libcloud/common/azure.py b/libcloud/common/azure.py index 46ba62dfcd..48caa42f93 100644 --- a/libcloud/common/azure.py +++ b/libcloud/common/azure.py @@ -45,7 +45,7 @@ class AzureResponse(XmlResponse): valid_response_codes = [httplib.NOT_FOUND, httplib.CONFLICT, - httplib.BAD_REQUEST] + httplib.BAD_REQUEST, httplib.TEMPORARY_REDIRECT] # added TEMPORARY_REDIRECT as this can sometimes be sent by azure instead of a success or fail response def success(self): i = int(self.status) diff --git a/libcloud/compute/drivers/azure.py b/libcloud/compute/drivers/azure.py index d7609eb631..7b6a37f4f4 100644 --- a/libcloud/compute/drivers/azure.py +++ b/libcloud/compute/drivers/azure.py @@ -318,11 +318,13 @@ def reboot_node(self, node, ex_cloud_service_name=None, ex_deployment_slot=None) _deployment_name = self._get_deployment(service_name=ex_cloud_service_name,deployment_slot=ex_deployment_slot).name try: - result = 
self._perform_post(self._get_deployment_path_using_name(ex_cloud_service_name, _deployment_name) + '/roleinstances/' + _str(node.id) + '?comp=reboot' - , '', async=True) + response = self._perform_post(self._get_deployment_path_using_name(ex_cloud_service_name, _deployment_name) + '/roleinstances/' + _str(node.id) + '?comp=reboot' + , '') + if response.status != 202: + raise LibcloudError('Message: %s, Body: %s, Status code: %d' % (response.error, response.body, response.status), driver=self) - if result.request_id: + if self._parse_response_for_async_op(response): return True else: return False @@ -500,7 +502,7 @@ def create_node(self, ex_cloud_service_name=None, **kwargs): media_link = blob_url + "/vhds/" + disk_name disk_config = OSVirtualHardDisk(image, media_link) - result = self._perform_post( + response = self._perform_post( self._get_deployment_path_using_name(ex_cloud_service_name), AzureXmlSerializer.virtual_machine_deployment_to_xml( ex_deployment_name, @@ -514,8 +516,12 @@ def create_node(self, ex_cloud_service_name=None, **kwargs): None, None, size, - None), - async=True) + None)) + + if response.status != 200: + raise LibcloudError('Message: %s, Body: %s, Status code: %d' % (response.error, response.body, response.status), driver=self) + + result = self._parse_response_for_async_op(response) else: _deployment_name = self._get_deployment(service_name=ex_cloud_service_name,deployment_slot=ex_deployment_slot).name @@ -537,7 +543,7 @@ def create_node(self, ex_cloud_service_name=None, **kwargs): media_link = blob_url + "/vhds/" + disk_name disk_config = OSVirtualHardDisk(image, media_link) - result = self._perform_post( + response = self._perform_post( self._get_role_path(ex_cloud_service_name, _deployment_name), AzureXmlSerializer.add_role_to_xml( @@ -548,8 +554,12 @@ def create_node(self, ex_cloud_service_name=None, **kwargs): network_config, # network_config None, # availability_set_name None, # data_virtual_hard_disks - size), # role_size - async=True) 
+ size)) # role_size) + + if response.status != 202: + raise LibcloudError('Message: %s, Body: %s, Status code: %d' % (response.error, response.body, response.status), driver=self) + + result = self._parse_response_for_async_op(response) return Node( id=name, @@ -592,7 +602,7 @@ def destroy_node(self, node, ex_cloud_service_name=None, ex_deployment_slot=None path = self._get_role_path(ex_cloud_service_name, _deployment_name, node.id) path += '?comp=media' # forces deletion of attached disks - data = self._perform_delete(path, async=True) + data = self._perform_delete(path) return True else: @@ -602,7 +612,7 @@ def destroy_node(self, node, ex_cloud_service_name=None, ex_deployment_slot=None path += '?comp=media' - data = self._perform_delete(path,async=True) + data = self._perform_delete(path) return True @@ -877,7 +887,7 @@ def _is_storage_service_unique(self, service_name=None): def _create_storage_account(self, **kwargs): if kwargs['is_affinity_group'] is True: - result = self._perform_post( + response = self._perform_post( self._get_storage_service_path(), AzureXmlSerializer.create_storage_service_input_to_xml( kwargs['service_name'], @@ -886,10 +896,14 @@ def _create_storage_account(self, **kwargs): kwargs['location'], None, # Location True, # geo_replication_enabled - None), # extended_properties - async=True) + None)) # extended_properties + + if response.status != 200: + raise LibcloudError('Message: %s, Body: %s, Status code: %d' % (response.error, response.body, response.status), driver=self) + + result = self._parse_response_for_async_op(response) else: - result = self._perform_post( + response = self._perform_post( self._get_storage_service_path(), AzureXmlSerializer.create_storage_service_input_to_xml( kwargs['service_name'], @@ -898,8 +912,12 @@ def _create_storage_account(self, **kwargs): None, # Affinity Group kwargs['location'], # Location True, # geo_replication_enabled - None), # extended_properties - async=True) + None)) # extended_properties + + 
if response.status != 200: + raise LibcloudError('Message: %s, Body: %s, Status code: %d' % (response.error, response.body, response.status), driver=self) + + result = self._parse_response_for_async_op(response) # We need to wait for this to be created before we can # create the storage container and the instance. @@ -948,13 +966,13 @@ def _perform_post(self, path, body, response_type=None, async=False): request.headers = self._update_management_header(request) response = self._perform_request(request) - if response_type is not None: - return self._parse_response(response, response_type) + #if response_type is not None: + # return self._parse_response(response, response_type) - if async: - return self._parse_response_for_async_op(response) + #if async: + # return self._parse_response_for_async_op(response) - return None + return response def _perform_delete(self, path, async=False): request = AzureHTTPRequest() diff --git a/libcloud/test/compute/test_azure.py b/libcloud/test/compute/test_azure.py index 9f37b3816e..a239ba21d4 100644 --- a/libcloud/test/compute/test_azure.py +++ b/libcloud/test/compute/test_azure.py @@ -193,6 +193,24 @@ def test_create_node_and_deployment_second_node(self): node = type('Node', (object,), dict(id="dc14")) result = self.driver.create_node(ex_cloud_service_name="testdcabc2", **kwargs) + def test_create_node_and_deployment_second_node_307_response(self): + kwargs = {} + #kwargs["ex_cloud_service_name"]="dcoddkinztest02" + kwargs["ex_storage_service_name"]="mtlytics" + kwargs["ex_deployment_name"]="dcoddkinztest04" + kwargs["ex_deployment_slot"]="Production" + kwargs["ex_admin_user_id"]="azurecoder" + auth = NodeAuthPassword("Pa55w0rd", False) + + kwargs["auth"]= auth + + kwargs["size"]= "ExtraSmall" + kwargs["image"] = "5112500ae3b842c8b9c604889f8753c3__OpenLogic-CentOS-65-20140415" + kwargs["name"] = "dcoddkinztest04" + + with self.assertRaises(LibcloudError): + self.driver.create_node(ex_cloud_service_name="testdcabc3", **kwargs) + 
class AzureMockHttp(MockHttp): fixtures = ComputeFileFixtures('azure') @@ -332,5 +350,27 @@ def _5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc2_dep return (httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED]) + def _5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc3(self, method, url, body, headers): + if method == "GET": + body = self.fixtures.load('_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc2.xml') + + return (httplib.OK, body, headers, httplib.responses[httplib.OK]) + + def _5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc3_deploymentslots_Production(self, method, url, body, headers): + if method == "GET": + body = self.fixtures.load('_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc2_deploymentslots_Production.xml') + + return (httplib.OK, body, headers, httplib.responses[httplib.OK]) + + def _5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc3_deployments(self, method, url, body, headers): + if method == "GET": + body = self.fixtures.load('_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc2_deployments.xml') + + return (httplib.OK, body, headers, httplib.responses[httplib.OK]) + + def _5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc3_deployments_dcoddkinztest02_roles(self, method, url, body, headers): + + return (httplib.TEMPORARY_REDIRECT, None, headers, httplib.responses[httplib.TEMPORARY_REDIRECT]) + if __name__ == '__main__': sys.exit(unittest.main()) From 82421ab30c9c67a7c5701ff0f005d60cbaae14b4 Mon Sep 17 00:00:00 2001 From: davidcrossland Date: Fri, 30 May 2014 10:57:39 +0100 Subject: [PATCH 292/315] added dict of standard images, fixed some minor issues --- libcloud/compute/drivers/azure.py | 67 ++++++++++++++++++++++++----- libcloud/test/compute/test_azure.py | 46 ++++++++++---------- 2 files changed, 81 insertions(+), 32 deletions(-) diff --git 
a/libcloud/compute/drivers/azure.py b/libcloud/compute/drivers/azure.py index 7b6a37f4f4..529e57c195 100644 --- a/libcloud/compute/drivers/azure.py +++ b/libcloud/compute/drivers/azure.py @@ -71,6 +71,53 @@ def _str(value): _USER_AGENT_STRING = 'libcloudazurecompute/' + __version__ X_MS_VERSION = '2013-08-01' +AZURE_DEFAULT_IMAGE_NAME = { + 'WinServer2013_JDK6':'0c0083a6d9a24f2d91800e52cad83950__JDK-1.6.0_71-0514-Win-GA', + 'WinServer2013_JDK7':'0c0083a6d9a24f2d91800e52cad83950__JDK-1.7.0_51-0514-Win-GA', + 'WinServer2013_JDK8':'0c0083a6d9a24f2d91800e52cad83950__JDK-1.8.0-0514-Win-GA', + 'OracleDb11gR2Ent_WinServer2008Rs':'0c0083a6d9a24f2d91800e52cad83950__Oracle-Database-11.2.0.4.0-EE-0514-Win-GA', + 'OracleDb11gR2Ent_WL_WinServer2008Rs':'0c0083a6d9a24f2d91800e52cad83950__Oracle-Database-11.2.0.4.0-EE-WebLogic-10.3.6-EE-JDK-1.7.0_51-0514-Win-GA', + 'OracleDb11gR2Std_WinServer2008Rs':'0c0083a6d9a24f2d91800e52cad83950__Oracle-Database-11.2.0.4.0-SE-0514-Win-GA', + 'OracleDb11gR2Std_WL_WinServer2008Rs':'0c0083a6d9a24f2d91800e52cad83950__Oracle-Database-11.2.0.4.0-SE-WebLogic-10.3.6-SE-JDK-1.7.0_51-0514-Win-GA', + 'OracleDb12cEnt_WinServer2012':'0c0083a6d9a24f2d91800e52cad83950__Oracle-Database-12.1.0.1.0-0514-EE-Win-GA', + 'OracleDb12cStd_WinServer2012':'0c0083a6d9a24f2d91800e52cad83950__Oracle-Database-12.1.0.1.0-0514-SE-Win-GA', + 'OracleDb12cEnt_WL_WinServer2012':'0c0083a6d9a24f2d91800e52cad83950__Oracle-Database-12.1.0.1.0-EE-WebLogic-12.1.2.0-EE-JDK-1.7.0_51-0514-Win-GA', + 'OracleDb12cStd_WL_WinServer2012':'0c0083a6d9a24f2d91800e52cad83950__Oracle-Database-12.1.0.1.0-SE-WebLogic-12.1.2.0-SE-JDK-1.7.0_51-0514-Win-GA', + 'OracleWL12cEnt_WinServer2012':'0c0083a6d9a24f2d91800e52cad83950__WebLogic-12.1.2.0-EE-JDK-1.7.0_51-0514-Win-GA', + 'OracleWL12cStd_WinServer2012':'0c0083a6d9a24f2d91800e52cad83950__WebLogic-12.1.2.0-SE-JDK-1.7.0_51-0514-Win-GA', + 'OracleWL11gEnt_WinServer2008R2':'0c0083a6d9a24f2d91800e52cad83950__Weblogic-10.3.6-EE-JDK-1.7.0_51-0514-Win-GA', + 
'OracleWL11gStd_WinServer2008R2':'0c0083a6d9a24f2d91800e52cad83950__Weblogic-10.3.6-SE-JDK-1.7.0_51-0514-Win-GA', + 'BizTalkServer2013Ent_WinServer2012':'2cdc6229df6344129ee553dd3499f0d3__BizTalk-Server-2013-Enterprise', + 'BizTalkServer2013Std_WinServer2012':'2cdc6229df6344129ee553dd3499f0d3__BizTalk-Server-2013-Standard', + 'WinServerEssentials_WinServer2012R2':'3a50f22b388a4ff7ab41029918570fa6__Windows-Server-2012-Essentials-20140327-enus', + 'OpenLogic':'5112500ae3b842c8b9c604889f8753c3__OpenLogic-CentOS-65-20140415', + 'WinServer2008R2SP1':'a699494373c04fc0bc8f2bb1389d6106__Win2K8R2SP1-Datacenter-201404.01-en.us-127GB.vhd', + 'WinServer2012DataCenter':'a699494373c04fc0bc8f2bb1389d6106__Windows-Server-2012-Datacenter-201404.01-en.us-127GB.vhd', + 'WinServer2012R2DataCenter':'a699494373c04fc0bc8f2bb1389d6106__Windows-Server-2012-R2-201404.01-en.us-127GB.vhd', + 'WinServerRemoteDskTopSessionHost_WinServer2012R2':'ad072bd3082149369c449ba5832401ae__Windows-Server-Remote-Desktop-Session-Host-on-Windows-Server-2012-R2-20140514-1852', + 'UbuntuServer13.10':'b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-13_10-amd64-server-20140507-en-us-30GB', + 'UnuntuServer12.04.4LTS':'b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-12_04_4-LTS-amd64-server-20140514-en-us-30GB', + 'UbuntuServer14.04LTS':'b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-14_04-LTS-amd64-server-20140416.1-en-us-30GB', + 'SUSELinuxEntServer11SP3Prem':'b4590d9e3ed742e4a1d46e5424aa335e__SUSE-Linux-Enterprise-Server-11-SP3-Prio-v202', + 'SUSELinuxEntServer11SP3Sap':'b4590d9e3ed742e4a1d46e5424aa335e__SUSE-Linux-Enterprise-Server-11-SP3-SAP-CAL-v101,', + 'SUSELinuxEntServer11SP3':'b4590d9e3ed742e4a1d46e5424aa335e__SUSE-Linux-Enterprise-Server-11-SP3-v202', + 'OpenSUSE13.1':'b4590d9e3ed742e4a1d46e5424aa335e__openSUSE-13.1-v101', + 'OracleDB12.1.0.1Ent_OracleLinux6.4':'c290a6b031d841e09f2da759bbabe71f__Oracle-Database-121010.v1-EE-Lnx', + 
'OracleDB12.1.0.1Std_OracleLinux6.4':'c290a6b031d841e09f2da759bbabe71f__Oracle-Database-121010.v3-SE-Lnx', + 'OracleLinux6.4':'c290a6b031d841e09f2da759bbabe71f__Oracle-Linux-6', + 'OracleWLServer12.1.2_OracleLinux6.4':'c290a6b031d841e09f2da759bbabe71f__WebLogic-Server-12c.v1-Lnx', + 'PuppetEnt3.2.2':'de89c2ed05c748f5aded3ddc75fdcce4__PuppetEnterpriseMaster-3_2_2-amd64-server-20140408-en-us-30GB', + 'SQLServer2008R2SP2Ent_WinServer2008R2':'fb83b3509582419d99629ce476bcb5c8__Microsoft-SQL-Server-2008R2SP2-Enterprise-CY13SU04-SQL2008-SP2-10.50.4021.0', + 'SQLServer2008R2SP2Std_WinServer2008R2':'fb83b3509582419d99629ce476bcb5c8__Microsoft-SQL-Server-2008R2SP2-Standard-CY13SU04-SQL2008-SP2-10.50.4021.0', + 'SQLServer2012SP1Ent_WinServer2012':'fb83b3509582419d99629ce476bcb5c8__Microsoft-SQL-Server-2012SP1-Enterprise-CY13SU04-SQL2012-SP1-11.0.3350.0-Win2012', + 'SQLServer2012SP1Ent_WinServer2008R2':'fb83b3509582419d99629ce476bcb5c8__Microsoft-SQL-Server-2012SP1-Enterprise-CY13SU04-SQL11-SP1-CU3-11.0.3350.0-B', + 'SQLServer2012SP1Std_WinServer2012':'fb83b3509582419d99629ce476bcb5c8__Microsoft-SQL-Server-2012SP1-Standard-CY13SU04-SQL2012-SP1-11.0.3350.0-Win2012', + 'SQLServer2012SP1Std_WinServer2008R2':'fb83b3509582419d99629ce476bcb5c8__Microsoft-SQL-Server-2012SP1-Standard-CY13SU04-SQL11-SP1-CU3-11.0.3350.0-B', + 'SQLServer2012SP1Web_WinServer2008R2':'fb83b3509582419d99629ce476bcb5c8__Microsoft-SQL-Server-2012SP1-Web-CY13SU04-SQL11-SP1-CU3-11.0.3350.0', + 'SQLServer2008R2SP2Web_WinServer2008R2':'fb83b3509582419d99629ce476bcb5c8__SQL-Server-2008R2SP2-GDR-10.50.4021.0-Web-ENU-Win2K8R2-CY13SU12', + 'SQLServer2012SP1DataWarehousing_WinServer2012':'fb83b3509582419d99629ce476bcb5c8__SQL-Server-2012SP1-CU5-11.0.3373.0-DataWarehousing-ENU-WS2012-CY13SU12', + +} """ Sizes must be hardcoded because Microsoft doesn't provide an API to fetch them. 
From http://msdn.microsoft.com/en-us/library/windowsazure/dn197896.aspx @@ -290,7 +337,7 @@ def list_nodes(self, ex_cloud_service_name=None): except IndexError: return None - def reboot_node(self, node, ex_cloud_service_name=None, ex_deployment_slot=None): + def reboot_node(self, node=None, ex_cloud_service_name=None, ex_deployment_slot=None): """ Reboots a node. @@ -315,6 +362,9 @@ def reboot_node(self, node, ex_cloud_service_name=None, ex_deployment_slot=None) if not ex_deployment_slot: ex_deployment_slot = "production" + if not node: + raise ValueError("node is required.") + _deployment_name = self._get_deployment(service_name=ex_cloud_service_name,deployment_slot=ex_deployment_slot).name try: @@ -570,7 +620,7 @@ def create_node(self, ex_cloud_service_name=None, **kwargs): driver=self.connection.driver ) - def destroy_node(self, node, ex_cloud_service_name=None, ex_deployment_slot=None): + def destroy_node(self, node=None, ex_cloud_service_name=None, ex_deployment_slot=None): """Remove Azure Virtual Machine This removes the instance, but does not @@ -590,6 +640,9 @@ def destroy_node(self, node, ex_cloud_service_name=None, ex_deployment_slot=None if not ex_cloud_service_name: raise ValueError("ex_cloud_service_name is required.") + if not node: + raise ValueError("node is required.") + if not ex_deployment_slot: ex_deployment_slot = "production" @@ -616,7 +669,7 @@ def destroy_node(self, node, ex_cloud_service_name=None, ex_deployment_slot=None return True - def create_cloud_service(self, ex_cloud_service_name, location, description=None, extended_properties=None): + def create_cloud_service(self, ex_cloud_service_name=None, location=None, description=None, extended_properties=None): if not ex_cloud_service_name: raise ValueError("ex_cloud_service_name is required.") @@ -647,7 +700,7 @@ def _perform_cloud_service_create(self, path, data): return response - def destroy_cloud_service(self, ex_cloud_service_name): + def destroy_cloud_service(self, 
ex_cloud_service_name=None): if not ex_cloud_service_name: raise ValueError("ex_cloud_service_name is required.") #add check to ensure all nodes have been deleted @@ -966,12 +1019,6 @@ def _perform_post(self, path, body, response_type=None, async=False): request.headers = self._update_management_header(request) response = self._perform_request(request) - #if response_type is not None: - # return self._parse_response(response, response_type) - - #if async: - # return self._parse_response_for_async_op(response) - return response def _perform_delete(self, path, async=False): diff --git a/libcloud/test/compute/test_azure.py b/libcloud/test/compute/test_azure.py index a239ba21d4..1654976957 100644 --- a/libcloud/test/compute/test_azure.py +++ b/libcloud/test/compute/test_azure.py @@ -1,6 +1,7 @@ import libcloud from libcloud.common.types import LibcloudError from libcloud.compute.base import NodeAuthPassword +from libcloud.compute.drivers.azure import AZURE_DEFAULT_IMAGE_NAME __author__ = 'david' @@ -8,8 +9,8 @@ import httplib import unittest -import urlparse import libcloud.security + from libcloud.test import MockHttp from libcloud.test.file_fixtures import ComputeFileFixtures from libcloud.compute.types import Provider @@ -51,7 +52,7 @@ def test_images_returned_successfully(self): self.assertEquals(len(images), 215 ) def test_images_returned_successfully_filter_by_location(self): - images = self.driver.list_images("West US") + images = self.driver.list_images(location="West US") self.assertEquals(len(images), 207 ) def test_list_nodes_returned_successfully(self): @@ -82,7 +83,7 @@ def test_list_nodes_returned_no_cloud_service(self): def test_restart_node_success(self): node = type('Node', (object,), dict(id="dc03")) - result = self.driver.reboot_node(node, ex_cloud_service_name="dcoddkinztest01", ex_deployment_slot="Production") + result = self.driver.reboot_node(node=node, ex_cloud_service_name="dcoddkinztest01", ex_deployment_slot="Production") 
self.assertTrue(result) @@ -92,35 +93,35 @@ def test_restart_node_fail_no_deployment(self): node = type('Node', (object,), dict(id="dc03")) with self.assertRaises(LibcloudError): - self.driver.reboot_node(node, ex_cloud_service_name="dcoddkinztest02", ex_deployment_slot="Production") + self.driver.reboot_node(node=node, ex_cloud_service_name="dcoddkinztest02", ex_deployment_slot="Production") def test_restart_node_fail_no_cloud_service(self): node = type('Node', (object,), dict(id="dc03")) with self.assertRaises(LibcloudError): - self.driver.reboot_node(node, ex_cloud_service_name="dcoddkinztest03", ex_deployment_slot="Production") + self.driver.reboot_node(node=node, ex_cloud_service_name="dcoddkinztest03", ex_deployment_slot="Production") def test_restart_node_fail_node_not_found(self): node = type('Node', (object,), dict(id="dc13")) - result = self.driver.reboot_node(node, ex_cloud_service_name="dcoddkinztest01", ex_deployment_slot="Production") + result = self.driver.reboot_node(node=node, ex_cloud_service_name="dcoddkinztest01", ex_deployment_slot="Production") self.assertFalse(result) def test_destroy_node_success_single_node_in_cloud_service(self): node = type('Node', (object,), dict(id="oddkinz1")) - result = self.driver.destroy_node(node, ex_cloud_service_name="oddkinz1", ex_deployment_slot="Production") + result = self.driver.destroy_node(node=node, ex_cloud_service_name="oddkinz1", ex_deployment_slot="Production") self.assertTrue(result) def test_destroy_node_success_multiple_nodes_in_cloud_service(self): node = type('Node', (object,), dict(id="oddkinz1")) - result = self.driver.destroy_node(node, ex_cloud_service_name="oddkinz2", ex_deployment_slot="Production") + result = self.driver.destroy_node(node=node, ex_cloud_service_name="oddkinz2", ex_deployment_slot="Production") self.assertTrue(result) def test_destroy_node_fail_node_does_not_exist(self): @@ -128,7 +129,7 @@ def test_destroy_node_fail_node_does_not_exist(self): node = type('Node', (object,), 
dict(id="oddkinz2")) with self.assertRaises(LibcloudError): - self.driver.destroy_node(node, ex_cloud_service_name="oddkinz2", ex_deployment_slot="Production") + self.driver.destroy_node(node=node, ex_cloud_service_name="oddkinz2", ex_deployment_slot="Production") def test_destroy_node_success_cloud_service_not_found(self): @@ -136,7 +137,7 @@ def test_destroy_node_success_cloud_service_not_found(self): node["name"]="cloudredis" with self.assertRaises(LibcloudError): - self.driver.destroy_node(node, ex_cloud_service_name="oddkinz5", ex_deployment_slot="Production" ) + self.driver.destroy_node(node=node, ex_cloud_service_name="oddkinz5", ex_deployment_slot="Production" ) def test_create_cloud_service(self): result = self.driver.create_cloud_service("testdc123", "North Europe") @@ -145,67 +146,68 @@ def test_create_cloud_service(self): def test_create_cloud_service_service_exists(self): with self.assertRaises(LibcloudError): - self.driver.create_cloud_service("testdc1234", "North Europe") + self.driver.create_cloud_service(ex_cloud_service_name="testdc1234", location="North Europe") def test_destroy_cloud_service(self): - result = self.driver.destroy_cloud_service("testdc123") + result = self.driver.destroy_cloud_service(ex_cloud_service_name="testdc123") self.assertTrue(result) def test_destroy_cloud_service_service_does_not_exist(self): with self.assertRaises(LibcloudError): - self.driver.destroy_cloud_service("testdc1234") + self.driver.destroy_cloud_service(ex_cloud_service_name="testdc1234") def test_create_node_and_deployment_one_node(self): kwargs = {} - #kwargs["ex_cloud_service_name"]="dcoddkinztest02" + kwargs["ex_storage_service_name"]="mtlytics" kwargs["ex_deployment_name"]="dcoddkinztest02" kwargs["ex_deployment_slot"]="Production" kwargs["ex_admin_user_id"]="azurecoder" - auth = NodeAuthPassword("Pa55w0rd", False) + auth = NodeAuthPassword("Pa55w0rd", False) kwargs["auth"]= auth kwargs["size"]= "ExtraSmall" kwargs["image"] = 
"5112500ae3b842c8b9c604889f8753c3__OpenLogic-CentOS-65-20140415" kwargs["name"] = "dcoddkinztest03" - node = type('Node', (object,), dict(id="dc13")) result = self.driver.create_node(ex_cloud_service_name="testdcabc", **kwargs) + self.assertIsNotNone(result) def test_create_node_and_deployment_second_node(self): kwargs = {} - #kwargs["ex_cloud_service_name"]="dcoddkinztest02" + kwargs["ex_storage_service_name"]="mtlytics" kwargs["ex_deployment_name"]="dcoddkinztest02" kwargs["ex_deployment_slot"]="Production" kwargs["ex_admin_user_id"]="azurecoder" - auth = NodeAuthPassword("Pa55w0rd", False) + auth = NodeAuthPassword("Pa55w0rd", False) kwargs["auth"]= auth kwargs["size"]= "ExtraSmall" - kwargs["image"] = "5112500ae3b842c8b9c604889f8753c3__OpenLogic-CentOS-65-20140415" + kwargs["image"] = AZURE_DEFAULT_IMAGE_NAME["OpenLogic"] kwargs["name"] = "dcoddkinztest03" node = type('Node', (object,), dict(id="dc14")) result = self.driver.create_node(ex_cloud_service_name="testdcabc2", **kwargs) + self.assertIsNotNone(result) def test_create_node_and_deployment_second_node_307_response(self): kwargs = {} - #kwargs["ex_cloud_service_name"]="dcoddkinztest02" + kwargs["ex_storage_service_name"]="mtlytics" kwargs["ex_deployment_name"]="dcoddkinztest04" kwargs["ex_deployment_slot"]="Production" kwargs["ex_admin_user_id"]="azurecoder" - auth = NodeAuthPassword("Pa55w0rd", False) + auth = NodeAuthPassword("Pa55w0rd", False) kwargs["auth"]= auth kwargs["size"]= "ExtraSmall" - kwargs["image"] = "5112500ae3b842c8b9c604889f8753c3__OpenLogic-CentOS-65-20140415" + kwargs["image"] = AZURE_DEFAULT_IMAGE_NAME["OpenLogic"] kwargs["name"] = "dcoddkinztest04" with self.assertRaises(LibcloudError): From 235a4807bb9f461c17fb77f9f69c6db1433c0e78 Mon Sep 17 00:00:00 2001 From: davidcrossland Date: Fri, 30 May 2014 11:45:00 +0100 Subject: [PATCH 293/315] fixed small issue where unit tests did not run --- libcloud/common/base.py | 4 ++-- libcloud/httplib_ssl.py | 1 - 2 files changed, 2 insertions(+), 
3 deletions(-) diff --git a/libcloud/common/base.py b/libcloud/common/base.py index 212430ab43..92239568a1 100644 --- a/libcloud/common/base.py +++ b/libcloud/common/base.py @@ -571,10 +571,10 @@ def connect(self, host=None, port=None, base_url=None, **kwargs): if not hasattr(kwargs, 'port'): kwargs.update({'port': port}) - if not hasattr(kwargs, 'key_file'): + if not hasattr(kwargs, 'key_file') and hasattr(self, 'key_file' ): kwargs.update({'key_file': self.key_file}) - if not hasattr(kwargs, 'cert_file'): + if not hasattr(kwargs, 'cert_file') and hasattr(self, 'cert_file' ): kwargs.update({'cert_file': self.cert_file}) #kwargs = {'host': host, 'port': int(port)} diff --git a/libcloud/httplib_ssl.py b/libcloud/httplib_ssl.py index 7787e7b3d5..898ecc530f 100644 --- a/libcloud/httplib_ssl.py +++ b/libcloud/httplib_ssl.py @@ -201,7 +201,6 @@ def __init__(self, *args, **kwargs): Constructor """ self._setup_verify() - # Support for HTTP proxy proxy_url_env = os.environ.get(HTTP_PROXY_ENV_VARIABLE_NAME, None) proxy_url = kwargs.pop('proxy_url', proxy_url_env) From a3206dbd835c4031152acaba8305c2311d00ec47 Mon Sep 17 00:00:00 2001 From: davidcrossland Date: Fri, 30 May 2014 15:02:28 +0100 Subject: [PATCH 294/315] cleanup and re-ordering of code to meet submission requirements --- libcloud/common/azure.py | 5 +- libcloud/compute/drivers/azure.py | 90 +++++++++++++++++++------------ 2 files changed, 56 insertions(+), 39 deletions(-) diff --git a/libcloud/common/azure.py b/libcloud/common/azure.py index 48caa42f93..8fbb1bd338 100644 --- a/libcloud/common/azure.py +++ b/libcloud/common/azure.py @@ -20,7 +20,6 @@ import hmac from hashlib import sha256 - from libcloud.utils.py3 import httplib from libcloud.utils.py3 import b from libcloud.utils.xml import fixxpath @@ -223,9 +222,7 @@ def __init__(self, subscription_id, key_file, *args, **kwargs): if not is_file_path: raise InvalidCredsError('pem file needed to authenticate to Microsoft Azure') self.key_file = key_file - #if 
is_file_path: - # with open(keypath, 'r') as f: - # key = f.read() + super(AzureServiceManagementConnection, self).__init__( subscription_id, key_file, *args, **kwargs) diff --git a/libcloud/compute/drivers/azure.py b/libcloud/compute/drivers/azure.py index 529e57c195..e617dad787 100644 --- a/libcloud/compute/drivers/azure.py +++ b/libcloud/compute/drivers/azure.py @@ -16,32 +16,25 @@ """ import httplib -import uuid import re import time import collections import random import sys -import os import copy import base64 -from libcloud.common.azure import AzureServiceManagementConnection +from libcloud.common.azure import AzureServiceManagementConnection from libcloud.compute.providers import Provider from libcloud.compute.base import Node, NodeDriver, NodeLocation, NodeSize -from libcloud.compute.base import NodeImage, StorageVolume, VolumeSnapshot -from libcloud.compute.base import KeyPair, NodeAuthPassword -from libcloud.compute.types import NodeState, KeyPairDoesNotExistError +from libcloud.compute.base import NodeImage, StorageVolume +from libcloud.compute.base import KeyPair +from libcloud.compute.types import NodeState from libcloud.common.types import LibcloudError -from libcloud.common.base import ConnectionUserAndKey - from datetime import datetime from xml.dom import minidom from xml.sax.saxutils import escape as xml_escape -from httplib import ( - HTTPSConnection, - HTTPS_PORT, - ) +from httplib import (HTTPSConnection) if sys.version_info < (3,): from urllib2 import quote as url_quote @@ -118,6 +111,7 @@ def _str(value): 'SQLServer2012SP1DataWarehousing_WinServer2012':'fb83b3509582419d99629ce476bcb5c8__SQL-Server-2012SP1-CU5-11.0.3373.0-DataWarehousing-ENU-WS2012-CY13SU12', } + """ Sizes must be hardcoded because Microsoft doesn't provide an API to fetch them. From http://msdn.microsoft.com/en-us/library/windowsazure/dn197896.aspx @@ -353,7 +347,7 @@ def reboot_node(self, node=None, ex_cloud_service_name=None, ex_deployment_slot= or "Staging". 
(Optional) :type ex_deployment_name: ``str`` - :rtype: ``list`` of :class:`Node` + :rtype: ``bool`` """ if not ex_cloud_service_name: @@ -670,7 +664,23 @@ def destroy_node(self, node=None, ex_cloud_service_name=None, ex_deployment_slot return True def create_cloud_service(self, ex_cloud_service_name=None, location=None, description=None, extended_properties=None): + """ + creates an azure cloud service. + + :param ex_cloud_service_name: Cloud Service name + :type ex_cloud_service_name: ``str`` + :param location: standard azure location string + :type location: ``str`` + + :param description: optional description + :type description: ``str`` + + :param extended_properties: optional extended_properties + :type extended_properties: ``dict`` + + :rtype: ``bool`` + """ if not ex_cloud_service_name: raise ValueError("ex_cloud_service_name is required.") @@ -688,19 +698,17 @@ def create_cloud_service(self, ex_cloud_service_name=None, location=None, descri return True - def _perform_cloud_service_create(self, path, data): - request = AzureHTTPRequest() - request.method = 'POST' - request.host = azure_service_management_host - request.path = path - request.body = data - request.path, request.query = self._update_request_uri_query(request) - request.headers = self._update_management_header(request) - response = self._perform_request(request) + def destroy_cloud_service(self, ex_cloud_service_name=None): - return response + """ + deletes an azure cloud service. 
+ + :param ex_cloud_service_name: Cloud Service name + :type ex_cloud_service_name: ``str`` + + :rtype: ``bool`` + """ - def destroy_cloud_service(self, ex_cloud_service_name=None): if not ex_cloud_service_name: raise ValueError("ex_cloud_service_name is required.") #add check to ensure all nodes have been deleted @@ -711,17 +719,6 @@ def destroy_cloud_service(self, ex_cloud_service_name=None): return True - def _perform_cloud_service_delete(self, path): - request = AzureHTTPRequest() - request.method = 'DELETE' - request.host = azure_service_management_host - request.path = path - request.path, request.query = self._update_request_uri_query(request) - request.headers = self._update_management_header(request) - response = self._perform_request(request) - - return response - """ Functions not implemented """ def create_volume_snapshot(self): @@ -752,6 +749,29 @@ def destroy_volume(self): """Private Functions """ + def _perform_cloud_service_create(self, path, data): + request = AzureHTTPRequest() + request.method = 'POST' + request.host = azure_service_management_host + request.path = path + request.body = data + request.path, request.query = self._update_request_uri_query(request) + request.headers = self._update_management_header(request) + response = self._perform_request(request) + + return response + + def _perform_cloud_service_delete(self, path): + request = AzureHTTPRequest() + request.method = 'DELETE' + request.host = azure_service_management_host + request.path = path + request.path, request.query = self._update_request_uri_query(request) + request.headers = self._update_management_header(request) + response = self._perform_request(request) + + return response + def _to_node(self, data): """ Convert the data from a Azure response object into a Node From 6d090069ccd5373bdda05896504de8abcac21db0 Mon Sep 17 00:00:00 2001 From: davidcrossland Date: Mon, 2 Jun 2014 09:50:13 +0100 Subject: [PATCH 295/315] modified test subscriptionId and associated files so it 
no longer uses elastacloud sub-id, ensured tests no longer rely on elastacloud pem file. Empty file in fixtures serves as an adequate replacement --- ...673d_526c_8d55_fee918758e6e_locations.xml} | 0 ...rvices_hostedservices_dcoddkinztest01.xml} | 0 ...1_deployments_dc03_roleinstances_dc13.xml} | 0 ...kinztest01_deploymentslots_Production.xml} | 0 ...kinztest02_deploymentslots_Production.xml} | 0 ...rvices_hostedservices_dcoddkinztest03.xml} | 0 ...kinztest03_deploymentslots_Production.xml} | 0 ...rvices_hostedservices_dcoddkinztest04.xml} | 0 ...s_oddkinz1_deploymentslots_Production.xml} | 0 ...s_oddkinz2_deploymentslots_Production.xml} | 0 ...s_oddkinz5_deploymentslots_Production.xml} | 0 ...6e_services_hostedservices_testdc1234.xml} | 0 ...e6e_services_hostedservices_testdcabc.xml} | 0 ...6e_services_hostedservices_testdcabc2.xml} | 0 ...hostedservices_testdcabc2_deployments.xml} | 0 ...testdcabc2_deploymentslots_Production.xml} | 0 ..._hostedservices_testdcabc_deployments.xml} | 0 ...26c_8d55_fee918758e6e_services_images.xml} | 0 .../test/compute/fixtures/azure/libcloud.pem | 0 libcloud/test/compute/test_azure.py | 106 +++++++++--------- 20 files changed, 53 insertions(+), 53 deletions(-) rename libcloud/test/compute/fixtures/azure/{_5191b16a_673d_426c_8c55_fdd912858e4e_locations.xml => _3761b98b_673d_526c_8d55_fee918758e6e_locations.xml} (100%) rename libcloud/test/compute/fixtures/azure/{_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest01.xml => _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest01.xml} (100%) rename libcloud/test/compute/fixtures/azure/{_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest01_deployments_dc03_roleinstances_dc13.xml => _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest01_deployments_dc03_roleinstances_dc13.xml} (100%) rename 
libcloud/test/compute/fixtures/azure/{_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest01_deploymentslots_Production.xml => _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest01_deploymentslots_Production.xml} (100%) rename libcloud/test/compute/fixtures/azure/{_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest02_deploymentslots_Production.xml => _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest02_deploymentslots_Production.xml} (100%) rename libcloud/test/compute/fixtures/azure/{_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest03.xml => _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest03.xml} (100%) rename libcloud/test/compute/fixtures/azure/{_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest03_deploymentslots_Production.xml => _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest03_deploymentslots_Production.xml} (100%) rename libcloud/test/compute/fixtures/azure/{_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest04.xml => _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest04.xml} (100%) rename libcloud/test/compute/fixtures/azure/{_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_oddkinz1_deploymentslots_Production.xml => _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_oddkinz1_deploymentslots_Production.xml} (100%) rename libcloud/test/compute/fixtures/azure/{_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_oddkinz2_deploymentslots_Production.xml => _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_oddkinz2_deploymentslots_Production.xml} (100%) rename libcloud/test/compute/fixtures/azure/{_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_oddkinz5_deploymentslots_Production.xml => 
_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_oddkinz5_deploymentslots_Production.xml} (100%) rename libcloud/test/compute/fixtures/azure/{_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdc1234.xml => _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdc1234.xml} (100%) rename libcloud/test/compute/fixtures/azure/{_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc.xml => _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc.xml} (100%) rename libcloud/test/compute/fixtures/azure/{_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc2.xml => _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc2.xml} (100%) rename libcloud/test/compute/fixtures/azure/{_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc2_deployments.xml => _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc2_deployments.xml} (100%) rename libcloud/test/compute/fixtures/azure/{_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc2_deploymentslots_Production.xml => _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc2_deploymentslots_Production.xml} (100%) rename libcloud/test/compute/fixtures/azure/{_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc_deployments.xml => _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc_deployments.xml} (100%) rename libcloud/test/compute/fixtures/azure/{_5191b16a_673d_426c_8c55_fdd912858e4e_services_images.xml => _3761b98b_673d_526c_8d55_fee918758e6e_services_images.xml} (100%) create mode 100644 libcloud/test/compute/fixtures/azure/libcloud.pem diff --git a/libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_locations.xml b/libcloud/test/compute/fixtures/azure/_3761b98b_673d_526c_8d55_fee918758e6e_locations.xml similarity index 100% rename from 
libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_locations.xml rename to libcloud/test/compute/fixtures/azure/_3761b98b_673d_526c_8d55_fee918758e6e_locations.xml diff --git a/libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest01.xml b/libcloud/test/compute/fixtures/azure/_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest01.xml similarity index 100% rename from libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest01.xml rename to libcloud/test/compute/fixtures/azure/_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest01.xml diff --git a/libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest01_deployments_dc03_roleinstances_dc13.xml b/libcloud/test/compute/fixtures/azure/_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest01_deployments_dc03_roleinstances_dc13.xml similarity index 100% rename from libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest01_deployments_dc03_roleinstances_dc13.xml rename to libcloud/test/compute/fixtures/azure/_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest01_deployments_dc03_roleinstances_dc13.xml diff --git a/libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest01_deploymentslots_Production.xml b/libcloud/test/compute/fixtures/azure/_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest01_deploymentslots_Production.xml similarity index 100% rename from libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest01_deploymentslots_Production.xml rename to 
libcloud/test/compute/fixtures/azure/_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest01_deploymentslots_Production.xml diff --git a/libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest02_deploymentslots_Production.xml b/libcloud/test/compute/fixtures/azure/_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest02_deploymentslots_Production.xml similarity index 100% rename from libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest02_deploymentslots_Production.xml rename to libcloud/test/compute/fixtures/azure/_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest02_deploymentslots_Production.xml diff --git a/libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest03.xml b/libcloud/test/compute/fixtures/azure/_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest03.xml similarity index 100% rename from libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest03.xml rename to libcloud/test/compute/fixtures/azure/_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest03.xml diff --git a/libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest03_deploymentslots_Production.xml b/libcloud/test/compute/fixtures/azure/_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest03_deploymentslots_Production.xml similarity index 100% rename from libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest03_deploymentslots_Production.xml rename to libcloud/test/compute/fixtures/azure/_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest03_deploymentslots_Production.xml diff --git 
a/libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest04.xml b/libcloud/test/compute/fixtures/azure/_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest04.xml similarity index 100% rename from libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest04.xml rename to libcloud/test/compute/fixtures/azure/_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest04.xml diff --git a/libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_oddkinz1_deploymentslots_Production.xml b/libcloud/test/compute/fixtures/azure/_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_oddkinz1_deploymentslots_Production.xml similarity index 100% rename from libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_oddkinz1_deploymentslots_Production.xml rename to libcloud/test/compute/fixtures/azure/_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_oddkinz1_deploymentslots_Production.xml diff --git a/libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_oddkinz2_deploymentslots_Production.xml b/libcloud/test/compute/fixtures/azure/_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_oddkinz2_deploymentslots_Production.xml similarity index 100% rename from libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_oddkinz2_deploymentslots_Production.xml rename to libcloud/test/compute/fixtures/azure/_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_oddkinz2_deploymentslots_Production.xml diff --git a/libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_oddkinz5_deploymentslots_Production.xml 
b/libcloud/test/compute/fixtures/azure/_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_oddkinz5_deploymentslots_Production.xml similarity index 100% rename from libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_oddkinz5_deploymentslots_Production.xml rename to libcloud/test/compute/fixtures/azure/_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_oddkinz5_deploymentslots_Production.xml diff --git a/libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdc1234.xml b/libcloud/test/compute/fixtures/azure/_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdc1234.xml similarity index 100% rename from libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdc1234.xml rename to libcloud/test/compute/fixtures/azure/_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdc1234.xml diff --git a/libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc.xml b/libcloud/test/compute/fixtures/azure/_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc.xml similarity index 100% rename from libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc.xml rename to libcloud/test/compute/fixtures/azure/_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc.xml diff --git a/libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc2.xml b/libcloud/test/compute/fixtures/azure/_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc2.xml similarity index 100% rename from libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc2.xml rename to 
libcloud/test/compute/fixtures/azure/_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc2.xml diff --git a/libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc2_deployments.xml b/libcloud/test/compute/fixtures/azure/_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc2_deployments.xml similarity index 100% rename from libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc2_deployments.xml rename to libcloud/test/compute/fixtures/azure/_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc2_deployments.xml diff --git a/libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc2_deploymentslots_Production.xml b/libcloud/test/compute/fixtures/azure/_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc2_deploymentslots_Production.xml similarity index 100% rename from libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc2_deploymentslots_Production.xml rename to libcloud/test/compute/fixtures/azure/_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc2_deploymentslots_Production.xml diff --git a/libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc_deployments.xml b/libcloud/test/compute/fixtures/azure/_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc_deployments.xml similarity index 100% rename from libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc_deployments.xml rename to libcloud/test/compute/fixtures/azure/_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc_deployments.xml diff --git a/libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_images.xml 
b/libcloud/test/compute/fixtures/azure/_3761b98b_673d_526c_8d55_fee918758e6e_services_images.xml similarity index 100% rename from libcloud/test/compute/fixtures/azure/_5191b16a_673d_426c_8c55_fdd912858e4e_services_images.xml rename to libcloud/test/compute/fixtures/azure/_3761b98b_673d_526c_8d55_fee918758e6e_services_images.xml diff --git a/libcloud/test/compute/fixtures/azure/libcloud.pem b/libcloud/test/compute/fixtures/azure/libcloud.pem new file mode 100644 index 0000000000..e69de29bb2 diff --git a/libcloud/test/compute/test_azure.py b/libcloud/test/compute/test_azure.py index 1654976957..ac833bd60d 100644 --- a/libcloud/test/compute/test_azure.py +++ b/libcloud/test/compute/test_azure.py @@ -20,13 +20,13 @@ class AzureNodeDriverTests(unittest.TestCase) : libcloud.security.VERIFY_SSL_CERT = False - SUBSCRIPTION_ID = '5191b16a-673d-426c-8c55-fdd912858e4e' - KEY_FILE = 'C:\\Users\\david\\Desktop\\libcloud.pem' + SUBSCRIPTION_ID = '3761b98b-673d-526c-8d55-fee918758e6e' + KEY_FILE = 'fixtures\\azure\\libcloud.pem' #empty file is fine def setUp(self): Azure = get_driver(Provider.AZURE) Azure.connectionCls.conn_classes = (None, AzureMockHttp) - self.driver = Azure(self.SUBSCRIPTION_ID, self.KEY_FILE, pem_key_file = self.KEY_FILE ) + self.driver = Azure(self.SUBSCRIPTION_ID, self.KEY_FILE ) def test_locations_returned_successfully(self): locations = self.driver.list_locations() @@ -217,160 +217,160 @@ class AzureMockHttp(MockHttp): fixtures = ComputeFileFixtures('azure') - def _5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_oddkinz1_deploymentslots_Production(self, method, url, body, headers): + def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_oddkinz1_deploymentslots_Production(self, method, url, body, headers): if method == "GET": - body = self.fixtures.load('_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_oddkinz1_deploymentslots_Production.xml') + body = 
self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_oddkinz1_deploymentslots_Production.xml') return (httplib.OK, body, headers, httplib.responses[httplib.OK]) - def _5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_oddkinz1_deployments_dc01(self, method, url, body, headers): + def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_oddkinz1_deployments_dc01(self, method, url, body, headers): return (httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED]) - def _5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_oddkinz2_deploymentslots_Production(self, method, url, body, headers): + def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_oddkinz2_deploymentslots_Production(self, method, url, body, headers): if method == "GET": - body = self.fixtures.load('_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_oddkinz2_deploymentslots_Production.xml') + body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_oddkinz2_deploymentslots_Production.xml') return (httplib.OK, body, headers, httplib.responses[httplib.OK]) - def _5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_oddkinz2_deployments_dc03_roles_oddkinz1(self, method, url, body, headers): + def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_oddkinz2_deployments_dc03_roles_oddkinz1(self, method, url, body, headers): return (httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED]) - def _5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_oddkinz2_deployments_dc03_roles_oddkinz2(self, method, url, body, headers): + def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_oddkinz2_deployments_dc03_roles_oddkinz2(self, method, url, body, headers): return (httplib.NOT_FOUND, body, headers, httplib.responses[httplib.NOT_FOUND]) - def _5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_oddkinz5_deploymentslots_Production(self, 
method, url, body, headers): + def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_oddkinz5_deploymentslots_Production(self, method, url, body, headers): if method == "GET": - body = self.fixtures.load('_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_oddkinz5_deploymentslots_Production.xml') + body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_oddkinz5_deploymentslots_Production.xml') return (httplib.NOT_FOUND, body, headers, httplib.responses[httplib.NOT_FOUND]) - def _5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest01_deploymentslots_Production(self, method, url, body, headers): + def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest01_deploymentslots_Production(self, method, url, body, headers): if method == "GET": - body = self.fixtures.load('_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest01_deploymentslots_Production.xml') + body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest01_deploymentslots_Production.xml') return (httplib.OK, body, headers, httplib.responses[httplib.OK]) - def _5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest01_deployments_dc03_roleinstances_dc03(self, method, url, body, headers): + def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest01_deployments_dc03_roleinstances_dc03(self, method, url, body, headers): headers["x-ms-request-id"]="acc33f6756cda6fd96826394fce4c9f3" return (httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED]) - def _5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest02_deploymentslots_Production(self, method, url, body, headers): + def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest02_deploymentslots_Production(self, method, url, body, headers): if method == "GET": - body = 
self.fixtures.load('_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest02_deploymentslots_Production.xml') + body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest02_deploymentslots_Production.xml') return (httplib.NOT_FOUND, body, headers, httplib.responses[httplib.NOT_FOUND]) - def _5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest03_deploymentslots_Production(self, method, url, body, headers): + def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest03_deploymentslots_Production(self, method, url, body, headers): if method == "GET": - body = self.fixtures.load('_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest03_deploymentslots_Production.xml') + body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest03_deploymentslots_Production.xml') return (httplib.NOT_FOUND, body, headers, httplib.responses[httplib.NOT_FOUND]) - def _5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest01_deployments_dc03_roleinstances_dc13(self, method, url, body, headers): + def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest01_deployments_dc03_roleinstances_dc13(self, method, url, body, headers): if method == "GET": - body = self.fixtures.load('_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest01_deployments_dc03_roleinstances_dc13.xml') + body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest01_deployments_dc03_roleinstances_dc13.xml') return (httplib.NOT_FOUND, body, headers, httplib.responses[httplib.NOT_FOUND]) - def _5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest01(self, method, url, body, headers): + def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest01(self, method, url, body, headers): if method == "GET": - body = 
self.fixtures.load('_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest01.xml') + body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest01.xml') return (httplib.OK, body, headers, httplib.responses[httplib.OK]) - def _5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest03(self, method, url, body, headers): + def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest03(self, method, url, body, headers): if method == "GET": - body = self.fixtures.load('_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest03.xml') + body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest03.xml') return (httplib.OK, body, headers, httplib.responses[httplib.OK]) - def _5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest04(self, method, url, body, headers): + def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest04(self, method, url, body, headers): if method == "GET": - body = self.fixtures.load('_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_dcoddkinztest04.xml') + body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest04.xml') return (httplib.NOT_FOUND, body, headers, httplib.responses[httplib.NOT_FOUND]) - def _5191b16a_673d_426c_8c55_fdd912858e4e_services_images(self, method, url, body, headers): + def _3761b98b_673d_526c_8d55_fee918758e6e_services_images(self, method, url, body, headers): if method == "GET": - body = self.fixtures.load('_5191b16a_673d_426c_8c55_fdd912858e4e_services_images.xml') + body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_images.xml') return (httplib.OK, body, headers, httplib.responses[httplib.OK]) - def _5191b16a_673d_426c_8c55_fdd912858e4e_locations(self, method, url, body, headers): + def _3761b98b_673d_526c_8d55_fee918758e6e_locations(self, 
method, url, body, headers): if method == "GET": - body = self.fixtures.load('_5191b16a_673d_426c_8c55_fdd912858e4e_locations.xml') + body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_locations.xml') return (httplib.OK, body, headers, httplib.responses[httplib.OK]) - def _5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices(self, method, url, body, headers): + def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices(self, method, url, body, headers): # request url is the same irrespective of serviceName, only way to differentiate if "testdc123" in body: return (httplib.CREATED, body, headers, httplib.responses[httplib.CREATED]) elif "testdc1234" in body: return (httplib.CONFLICT, body, headers, httplib.responses[httplib.CONFLICT]) - def _5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdc123(self, method, url, body, headers): + def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdc123(self, method, url, body, headers): return (httplib.OK, body, headers, httplib.responses[httplib.OK]) - def _5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdc1234(self, method, url, body, headers): + def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdc1234(self, method, url, body, headers): if method == "GET": - body = self.fixtures.load('_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdc1234.xml') + body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdc1234.xml') return (httplib.NOT_FOUND, body, headers, httplib.responses[httplib.NOT_FOUND]) - def _5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc(self, method, url, body, headers): + def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc(self, method, url, body, headers): if method == "GET": - body = self.fixtures.load('_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc.xml') + body = 
self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc.xml') return (httplib.OK, body, headers, httplib.responses[httplib.OK]) - def _5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc_deployments(self, method, url, body, headers): + def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc_deployments(self, method, url, body, headers): if method == "GET": - body = self.fixtures.load('_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc_deployments.xml') + body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc_deployments.xml') return (httplib.OK, body, headers, httplib.responses[httplib.OK]) - def _5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc2(self, method, url, body, headers): + def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc2(self, method, url, body, headers): if method == "GET": - body = self.fixtures.load('_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc2.xml') + body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc2.xml') return (httplib.OK, body, headers, httplib.responses[httplib.OK]) - def _5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc2_deploymentslots_Production(self, method, url, body, headers): + def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc2_deploymentslots_Production(self, method, url, body, headers): if method == "GET": - body = self.fixtures.load('_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc2_deploymentslots_Production.xml') + body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc2_deploymentslots_Production.xml') return (httplib.OK, body, headers, httplib.responses[httplib.OK]) - def 
_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc2_deployments(self, method, url, body, headers): + def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc2_deployments(self, method, url, body, headers): if method == "GET": - body = self.fixtures.load('_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc2_deployments.xml') + body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc2_deployments.xml') return (httplib.OK, body, headers, httplib.responses[httplib.OK]) - def _5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc2_deployments_dcoddkinztest02_roles(self, method, url, body, headers): + def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc2_deployments_dcoddkinztest02_roles(self, method, url, body, headers): return (httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED]) - def _5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc3(self, method, url, body, headers): + def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc3(self, method, url, body, headers): if method == "GET": - body = self.fixtures.load('_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc2.xml') + body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc2.xml') return (httplib.OK, body, headers, httplib.responses[httplib.OK]) - def _5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc3_deploymentslots_Production(self, method, url, body, headers): + def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc3_deploymentslots_Production(self, method, url, body, headers): if method == "GET": - body = self.fixtures.load('_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc2_deploymentslots_Production.xml') + body = 
self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc2_deploymentslots_Production.xml') return (httplib.OK, body, headers, httplib.responses[httplib.OK]) - def _5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc3_deployments(self, method, url, body, headers): + def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc3_deployments(self, method, url, body, headers): if method == "GET": - body = self.fixtures.load('_5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc2_deployments.xml') + body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc2_deployments.xml') return (httplib.OK, body, headers, httplib.responses[httplib.OK]) - def _5191b16a_673d_426c_8c55_fdd912858e4e_services_hostedservices_testdcabc3_deployments_dcoddkinztest02_roles(self, method, url, body, headers): + def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc3_deployments_dcoddkinztest02_roles(self, method, url, body, headers): return (httplib.TEMPORARY_REDIRECT, None, headers, httplib.responses[httplib.TEMPORARY_REDIRECT]) From 34cdb3f16846b15a43f0589d918747b03123c6df Mon Sep 17 00:00:00 2001 From: Richard Conway Date: Mon, 2 Jun 2014 12:15:48 +0100 Subject: [PATCH 296/315] updated python file to include proper async polling --- libcloud/compute/drivers/azure.py | 28 ++++++++++++++++++++++++++-- 1 file changed, 26 insertions(+), 2 deletions(-) diff --git a/libcloud/compute/drivers/azure.py b/libcloud/compute/drivers/azure.py index e617dad787..8de8e066e1 100644 --- a/libcloud/compute/drivers/azure.py +++ b/libcloud/compute/drivers/azure.py @@ -565,7 +565,7 @@ def create_node(self, ex_cloud_service_name=None, **kwargs): if response.status != 200: raise LibcloudError('Message: %s, Body: %s, Status code: %d' % (response.error, response.body, response.status), driver=self) - result = self._parse_response_for_async_op(response) + 
self._ex_complete_async_azure_operation(response) else: _deployment_name = self._get_deployment(service_name=ex_cloud_service_name,deployment_slot=ex_deployment_slot).name @@ -600,10 +600,12 @@ def create_node(self, ex_cloud_service_name=None, **kwargs): None, # data_virtual_hard_disks size)) # role_size) + self._ex_complete_async_azure_operation(response) + if response.status != 202: raise LibcloudError('Message: %s, Body: %s, Status code: %d' % (response.error, response.body, response.status), driver=self) - result = self._parse_response_for_async_op(response) + return Node( id=name, @@ -1409,6 +1411,26 @@ def _get_role_path(self, service_name, deployment_name, role_name=None): def _get_storage_service_path(self, service_name=None): return self._get_path('services/storageservices', service_name) + def _ex_complete_async_azure_operation(self, response=None, operation_type='create_node'): + + request_id = self._parse_response_for_async_op(response) + operation_status = self._get_operation_status(request_id) + + timeout = 60 * 5 + waittime = 0 + interval = 5 + + while operation_status.status == "InProgress" and waittime < timeout: + operation_status = self._get_operation_status(request_id) + if operation_status.status == "Succeeded": + break + + waittime += interval + time.sleep(interval) + + if operation_status.status == 'Failed': + raise LibcloudError('Message: Async request for operation %s has failed'% operation_type, driver=self) + #def get_connection(self): # certificate_path = "/Users/baldwin/.azure/managementCertificate.pem" # port = HTTPS_PORT @@ -1946,6 +1968,8 @@ def extended_properties_dict_to_xml_fragment(extended_properties): xml += '' return xml + + """Data Classes Borrowed from the Azure SDK for Python. From 5c663200240f2c650908e1f0d05f1c4866de85c5 Mon Sep 17 00:00:00 2001 From: davidcrossland Date: Mon, 2 Jun 2014 14:11:13 +0100 Subject: [PATCH 297/315] Modified async code to poll for status. 
Modified unit tests accordingly --- libcloud/compute/drivers/azure.py | 36 ++++++------------- ...tions_acc33f6756cda6fd96826394fce4c9f3.xml | 1 + libcloud/test/compute/test_azure.py | 12 ++++++- 3 files changed, 22 insertions(+), 27 deletions(-) create mode 100644 libcloud/test/compute/fixtures/azure/_3761b98b_673d_526c_8d55_fee918758e6e_operations_acc33f6756cda6fd96826394fce4c9f3.xml diff --git a/libcloud/compute/drivers/azure.py b/libcloud/compute/drivers/azure.py index 8de8e066e1..c33d07704d 100644 --- a/libcloud/compute/drivers/azure.py +++ b/libcloud/compute/drivers/azure.py @@ -600,12 +600,10 @@ def create_node(self, ex_cloud_service_name=None, **kwargs): None, # data_virtual_hard_disks size)) # role_size) - self._ex_complete_async_azure_operation(response) - if response.status != 202: raise LibcloudError('Message: %s, Body: %s, Status code: %d' % (response.error, response.body, response.status), driver=self) - + self._ex_complete_async_azure_operation(response) return Node( id=name, @@ -941,7 +939,7 @@ def _get_cloud_service_location(self, service_name=None): _affinity_group = res.hosted_service_properties.affinity_group _cloud_service_location = res.hosted_service_properties.location - if _affinity_group is not None: + if _affinity_group is not None and _affinity_group is not u'': return self.service_location(True, _affinity_group) elif _cloud_service_location is not None: return self.service_location(False, _cloud_service_location) @@ -967,49 +965,35 @@ def _create_storage_account(self, **kwargs): AzureXmlSerializer.create_storage_service_input_to_xml( kwargs['service_name'], kwargs['service_name'], - kwargs['service_name'], + self._encode_base64(kwargs['service_name']), kwargs['location'], None, # Location True, # geo_replication_enabled None)) # extended_properties - if response.status != 200: + if response.status != 202: raise LibcloudError('Message: %s, Body: %s, Status code: %d' % (response.error, response.body, response.status), driver=self) - 
result = self._parse_response_for_async_op(response) else: response = self._perform_post( self._get_storage_service_path(), AzureXmlSerializer.create_storage_service_input_to_xml( kwargs['service_name'], kwargs['service_name'], - kwargs['service_name'], + self._encode_base64(kwargs['service_name']), None, # Affinity Group kwargs['location'], # Location True, # geo_replication_enabled None)) # extended_properties - if response.status != 200: + if response.status != 202: raise LibcloudError('Message: %s, Body: %s, Status code: %d' % (response.error, response.body, response.status), driver=self) - result = self._parse_response_for_async_op(response) # We need to wait for this to be created before we can - # create the storage container and the instance. - - operation_status = self._get_operation_status(result.request_id) + # create the storage container and the instance. + self._ex_complete_async_azure_operation(response, "create_storage_account") - timeout = 60 * 5 - waittime = 0 - interval = 5 - - while operation_status.status == "InProgress" and waittime < timeout: - operation_status = self._get_operation_status(result.request_id) - if operation_status.status == "Succeeded": - break - - waittime += interval - time.sleep(interval) return def _get_operation_status(self, request_id): @@ -1414,7 +1398,7 @@ def _get_storage_service_path(self, service_name=None): def _ex_complete_async_azure_operation(self, response=None, operation_type='create_node'): request_id = self._parse_response_for_async_op(response) - operation_status = self._get_operation_status(request_id) + operation_status = self._get_operation_status(request_id.request_id) timeout = 60 * 5 waittime = 0 @@ -1460,7 +1444,7 @@ def create_storage_service_input_to_xml(service_name, description, label, 'CreateStorageServiceInput', [('ServiceName', service_name), ('Description', description), - ('Label', label, AzureNodeDriver._encode_base64), + ('Label', label), ('AffinityGroup', affinity_group), ('Location', 
location), ('GeoReplicationEnabled', geo_replication_enabled, _lower)], diff --git a/libcloud/test/compute/fixtures/azure/_3761b98b_673d_526c_8d55_fee918758e6e_operations_acc33f6756cda6fd96826394fce4c9f3.xml b/libcloud/test/compute/fixtures/azure/_3761b98b_673d_526c_8d55_fee918758e6e_operations_acc33f6756cda6fd96826394fce4c9f3.xml new file mode 100644 index 0000000000..2f89d16289 --- /dev/null +++ b/libcloud/test/compute/fixtures/azure/_3761b98b_673d_526c_8d55_fee918758e6e_operations_acc33f6756cda6fd96826394fce4c9f3.xml @@ -0,0 +1 @@ +fbd6fd8f-9e3e-acad-8524-92f64e1050b6Succeeded \ No newline at end of file diff --git a/libcloud/test/compute/test_azure.py b/libcloud/test/compute/test_azure.py index ac833bd60d..c52a133c77 100644 --- a/libcloud/test/compute/test_azure.py +++ b/libcloud/test/compute/test_azure.py @@ -18,6 +18,7 @@ class AzureNodeDriverTests(unittest.TestCase) : + #required otherwise we get client side SSL verification libcloud.security.VERIFY_SSL_CERT = False SUBSCRIPTION_ID = '3761b98b-673d-526c-8d55-fee918758e6e' @@ -325,6 +326,7 @@ def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc(self return (httplib.OK, body, headers, httplib.responses[httplib.OK]) def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc_deployments(self, method, url, body, headers): + headers["x-ms-request-id"]="acc33f6756cda6fd96826394fce4c9f3" if method == "GET": body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc_deployments.xml') @@ -337,19 +339,21 @@ def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc2(sel return (httplib.OK, body, headers, httplib.responses[httplib.OK]) def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc2_deploymentslots_Production(self, method, url, body, headers): + if method == "GET": body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc2_deploymentslots_Production.xml') 
return (httplib.OK, body, headers, httplib.responses[httplib.OK]) def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc2_deployments(self, method, url, body, headers): + if method == "GET": body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc2_deployments.xml') return (httplib.OK, body, headers, httplib.responses[httplib.OK]) def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc2_deployments_dcoddkinztest02_roles(self, method, url, body, headers): - + headers["x-ms-request-id"]="acc33f6756cda6fd96826394fce4c9f3" return (httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED]) def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc3(self, method, url, body, headers): @@ -374,5 +378,11 @@ def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc3_dep return (httplib.TEMPORARY_REDIRECT, None, headers, httplib.responses[httplib.TEMPORARY_REDIRECT]) + def _3761b98b_673d_526c_8d55_fee918758e6e_operations_acc33f6756cda6fd96826394fce4c9f3(self, method, url, body, headers): + + if method == "GET": + body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_operations_acc33f6756cda6fd96826394fce4c9f3.xml') + + return (httplib.OK, body, headers, httplib.responses[httplib.OK]) if __name__ == '__main__': sys.exit(unittest.main()) From 3a1d40ba997b530912088d898696ce4607b2a45c Mon Sep 17 00:00:00 2001 From: davidcrossland Date: Mon, 30 Jun 2014 10:15:20 +0100 Subject: [PATCH 298/315] modified test of create node response to 202 Accepted rather than 200 OK. Removed default VM image name map as its already out of date. Ensure line lengths are no greater the 80 characters where appropriate. 
Fixed unit tests --- libcloud/common/azure.py | 17 +- libcloud/compute/drivers/azure.py | 280 ++++++++++++++++------------ libcloud/test/compute/test_azure.py | 79 +++++--- 3 files changed, 227 insertions(+), 149 deletions(-) diff --git a/libcloud/common/azure.py b/libcloud/common/azure.py index 8fbb1bd338..b7429336a5 100644 --- a/libcloud/common/azure.py +++ b/libcloud/common/azure.py @@ -31,7 +31,8 @@ from libcloud.common.types import InvalidCredsError from libcloud.common.types import LibcloudError, MalformedResponseError -from libcloud.common.base import ConnectionUserAndKey, RawResponse, CertificateConnection +from libcloud.common.base import ConnectionUserAndKey, RawResponse, \ + CertificateConnection from libcloud.common.base import XmlResponse # Azure API version @@ -43,8 +44,11 @@ class AzureResponse(XmlResponse): + valid_response_codes = [httplib.NOT_FOUND, httplib.CONFLICT, - httplib.BAD_REQUEST, httplib.TEMPORARY_REDIRECT] # added TEMPORARY_REDIRECT as this can sometimes be sent by azure instead of a success or fail response + # added TEMPORARY_REDIRECT as this can sometimes be + # sent by azure instead of a success or fail response + httplib.BAD_REQUEST, httplib.TEMPORARY_REDIRECT] def success(self): i = int(self.status) @@ -193,8 +197,10 @@ class AzureBaseDriver(object): class AzureServiceManagementConnection(CertificateConnection): # This needs the following approach - - # 1. Make request using LibcloudHTTPSConnection which is a overloaded class which takes in a client certificate - # 2. Depending on the type of operation use a PollingConnection when the response id is returned + # 1. Make request using LibcloudHTTPSConnection which is a overloaded + # class which takes in a client certificate + # 2. Depending on the type of operation use a PollingConnection + # when the response id is returned # 3. 
The Response can be used in an AzureServiceManagementResponse """Authentication class for "Service Account" authentication.""" driver = AzureBaseDriver @@ -220,7 +226,8 @@ def __init__(self, subscription_id, key_file, *args, **kwargs): self.keyfile = keypath; is_file_path = os.path.exists(keypath) and os.path.isfile(keypath) if not is_file_path: - raise InvalidCredsError('pem file needed to authenticate to Microsoft Azure') + raise InvalidCredsError( + 'pem file needed to authenticate to Microsoft Azure') self.key_file = key_file super(AzureServiceManagementConnection, self).__init__( diff --git a/libcloud/compute/drivers/azure.py b/libcloud/compute/drivers/azure.py index c33d07704d..44d0b33158 100644 --- a/libcloud/compute/drivers/azure.py +++ b/libcloud/compute/drivers/azure.py @@ -64,54 +64,6 @@ def _str(value): _USER_AGENT_STRING = 'libcloudazurecompute/' + __version__ X_MS_VERSION = '2013-08-01' -AZURE_DEFAULT_IMAGE_NAME = { - 'WinServer2013_JDK6':'0c0083a6d9a24f2d91800e52cad83950__JDK-1.6.0_71-0514-Win-GA', - 'WinServer2013_JDK7':'0c0083a6d9a24f2d91800e52cad83950__JDK-1.7.0_51-0514-Win-GA', - 'WinServer2013_JDK8':'0c0083a6d9a24f2d91800e52cad83950__JDK-1.8.0-0514-Win-GA', - 'OracleDb11gR2Ent_WinServer2008Rs':'0c0083a6d9a24f2d91800e52cad83950__Oracle-Database-11.2.0.4.0-EE-0514-Win-GA', - 'OracleDb11gR2Ent_WL_WinServer2008Rs':'0c0083a6d9a24f2d91800e52cad83950__Oracle-Database-11.2.0.4.0-EE-WebLogic-10.3.6-EE-JDK-1.7.0_51-0514-Win-GA', - 'OracleDb11gR2Std_WinServer2008Rs':'0c0083a6d9a24f2d91800e52cad83950__Oracle-Database-11.2.0.4.0-SE-0514-Win-GA', - 'OracleDb11gR2Std_WL_WinServer2008Rs':'0c0083a6d9a24f2d91800e52cad83950__Oracle-Database-11.2.0.4.0-SE-WebLogic-10.3.6-SE-JDK-1.7.0_51-0514-Win-GA', - 'OracleDb12cEnt_WinServer2012':'0c0083a6d9a24f2d91800e52cad83950__Oracle-Database-12.1.0.1.0-0514-EE-Win-GA', - 'OracleDb12cStd_WinServer2012':'0c0083a6d9a24f2d91800e52cad83950__Oracle-Database-12.1.0.1.0-0514-SE-Win-GA', - 
'OracleDb12cEnt_WL_WinServer2012':'0c0083a6d9a24f2d91800e52cad83950__Oracle-Database-12.1.0.1.0-EE-WebLogic-12.1.2.0-EE-JDK-1.7.0_51-0514-Win-GA', - 'OracleDb12cStd_WL_WinServer2012':'0c0083a6d9a24f2d91800e52cad83950__Oracle-Database-12.1.0.1.0-SE-WebLogic-12.1.2.0-SE-JDK-1.7.0_51-0514-Win-GA', - 'OracleWL12cEnt_WinServer2012':'0c0083a6d9a24f2d91800e52cad83950__WebLogic-12.1.2.0-EE-JDK-1.7.0_51-0514-Win-GA', - 'OracleWL12cStd_WinServer2012':'0c0083a6d9a24f2d91800e52cad83950__WebLogic-12.1.2.0-SE-JDK-1.7.0_51-0514-Win-GA', - 'OracleWL11gEnt_WinServer2008R2':'0c0083a6d9a24f2d91800e52cad83950__Weblogic-10.3.6-EE-JDK-1.7.0_51-0514-Win-GA', - 'OracleWL11gStd_WinServer2008R2':'0c0083a6d9a24f2d91800e52cad83950__Weblogic-10.3.6-SE-JDK-1.7.0_51-0514-Win-GA', - 'BizTalkServer2013Ent_WinServer2012':'2cdc6229df6344129ee553dd3499f0d3__BizTalk-Server-2013-Enterprise', - 'BizTalkServer2013Std_WinServer2012':'2cdc6229df6344129ee553dd3499f0d3__BizTalk-Server-2013-Standard', - 'WinServerEssentials_WinServer2012R2':'3a50f22b388a4ff7ab41029918570fa6__Windows-Server-2012-Essentials-20140327-enus', - 'OpenLogic':'5112500ae3b842c8b9c604889f8753c3__OpenLogic-CentOS-65-20140415', - 'WinServer2008R2SP1':'a699494373c04fc0bc8f2bb1389d6106__Win2K8R2SP1-Datacenter-201404.01-en.us-127GB.vhd', - 'WinServer2012DataCenter':'a699494373c04fc0bc8f2bb1389d6106__Windows-Server-2012-Datacenter-201404.01-en.us-127GB.vhd', - 'WinServer2012R2DataCenter':'a699494373c04fc0bc8f2bb1389d6106__Windows-Server-2012-R2-201404.01-en.us-127GB.vhd', - 'WinServerRemoteDskTopSessionHost_WinServer2012R2':'ad072bd3082149369c449ba5832401ae__Windows-Server-Remote-Desktop-Session-Host-on-Windows-Server-2012-R2-20140514-1852', - 'UbuntuServer13.10':'b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-13_10-amd64-server-20140507-en-us-30GB', - 'UnuntuServer12.04.4LTS':'b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-12_04_4-LTS-amd64-server-20140514-en-us-30GB', - 
'UbuntuServer14.04LTS':'b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu-14_04-LTS-amd64-server-20140416.1-en-us-30GB', - 'SUSELinuxEntServer11SP3Prem':'b4590d9e3ed742e4a1d46e5424aa335e__SUSE-Linux-Enterprise-Server-11-SP3-Prio-v202', - 'SUSELinuxEntServer11SP3Sap':'b4590d9e3ed742e4a1d46e5424aa335e__SUSE-Linux-Enterprise-Server-11-SP3-SAP-CAL-v101,', - 'SUSELinuxEntServer11SP3':'b4590d9e3ed742e4a1d46e5424aa335e__SUSE-Linux-Enterprise-Server-11-SP3-v202', - 'OpenSUSE13.1':'b4590d9e3ed742e4a1d46e5424aa335e__openSUSE-13.1-v101', - 'OracleDB12.1.0.1Ent_OracleLinux6.4':'c290a6b031d841e09f2da759bbabe71f__Oracle-Database-121010.v1-EE-Lnx', - 'OracleDB12.1.0.1Std_OracleLinux6.4':'c290a6b031d841e09f2da759bbabe71f__Oracle-Database-121010.v3-SE-Lnx', - 'OracleLinux6.4':'c290a6b031d841e09f2da759bbabe71f__Oracle-Linux-6', - 'OracleWLServer12.1.2_OracleLinux6.4':'c290a6b031d841e09f2da759bbabe71f__WebLogic-Server-12c.v1-Lnx', - 'PuppetEnt3.2.2':'de89c2ed05c748f5aded3ddc75fdcce4__PuppetEnterpriseMaster-3_2_2-amd64-server-20140408-en-us-30GB', - 'SQLServer2008R2SP2Ent_WinServer2008R2':'fb83b3509582419d99629ce476bcb5c8__Microsoft-SQL-Server-2008R2SP2-Enterprise-CY13SU04-SQL2008-SP2-10.50.4021.0', - 'SQLServer2008R2SP2Std_WinServer2008R2':'fb83b3509582419d99629ce476bcb5c8__Microsoft-SQL-Server-2008R2SP2-Standard-CY13SU04-SQL2008-SP2-10.50.4021.0', - 'SQLServer2012SP1Ent_WinServer2012':'fb83b3509582419d99629ce476bcb5c8__Microsoft-SQL-Server-2012SP1-Enterprise-CY13SU04-SQL2012-SP1-11.0.3350.0-Win2012', - 'SQLServer2012SP1Ent_WinServer2008R2':'fb83b3509582419d99629ce476bcb5c8__Microsoft-SQL-Server-2012SP1-Enterprise-CY13SU04-SQL11-SP1-CU3-11.0.3350.0-B', - 'SQLServer2012SP1Std_WinServer2012':'fb83b3509582419d99629ce476bcb5c8__Microsoft-SQL-Server-2012SP1-Standard-CY13SU04-SQL2012-SP1-11.0.3350.0-Win2012', - 'SQLServer2012SP1Std_WinServer2008R2':'fb83b3509582419d99629ce476bcb5c8__Microsoft-SQL-Server-2012SP1-Standard-CY13SU04-SQL11-SP1-CU3-11.0.3350.0-B', - 
'SQLServer2012SP1Web_WinServer2008R2':'fb83b3509582419d99629ce476bcb5c8__Microsoft-SQL-Server-2012SP1-Web-CY13SU04-SQL11-SP1-CU3-11.0.3350.0', - 'SQLServer2008R2SP2Web_WinServer2008R2':'fb83b3509582419d99629ce476bcb5c8__SQL-Server-2008R2SP2-GDR-10.50.4021.0-Web-ENU-Win2K8R2-CY13SU12', - 'SQLServer2012SP1DataWarehousing_WinServer2012':'fb83b3509582419d99629ce476bcb5c8__SQL-Server-2012SP1-CU5-11.0.3373.0-DataWarehousing-ENU-WS2012-CY13SU12', - -} - """ Sizes must be hardcoded because Microsoft doesn't provide an API to fetch them. From http://msdn.microsoft.com/en-us/library/windowsazure/dn197896.aspx @@ -322,16 +274,20 @@ def list_nodes(self, ex_cloud_service_name=None): '?embed-detail=True', None) if response.status != 200 : - raise LibcloudError('Message: %s, Body: %s, Status code: %d' % (response.error, response.body, response.status), driver=self) + raise LibcloudError('Message: %s, Body: %s, Status code: %d' % + (response.error, response.body, response.status) + , driver=self) data = self._parse_response(response, HostedService) try: - return [self._to_node(n) for n in data.deployments[0].role_instance_list] + return [self._to_node(n) for n in + data.deployments[0].role_instance_list] except IndexError: return None - def reboot_node(self, node=None, ex_cloud_service_name=None, ex_deployment_slot=None): + def reboot_node(self, node=None, ex_cloud_service_name=None, + ex_deployment_slot=None): """ Reboots a node. 
@@ -359,14 +315,20 @@ def reboot_node(self, node=None, ex_cloud_service_name=None, ex_deployment_slot= if not node: raise ValueError("node is required.") - _deployment_name = self._get_deployment(service_name=ex_cloud_service_name,deployment_slot=ex_deployment_slot).name + _deployment_name = self._get_deployment( + service_name=ex_cloud_service_name, + deployment_slot=ex_deployment_slot).name try: - response = self._perform_post(self._get_deployment_path_using_name(ex_cloud_service_name, _deployment_name) + '/roleinstances/' + _str(node.id) + '?comp=reboot' - , '') + response = self._perform_post( + self._get_deployment_path_using_name( + ex_cloud_service_name, _deployment_name) + '/roleinstances/' + + _str(node.id) + '?comp=reboot', '') if response.status != 202: - raise LibcloudError('Message: %s, Body: %s, Status code: %d' % (response.error, response.body, response.status), driver=self) + raise LibcloudError('Message: %s, Body: %s, Status code: %d' % + (response.error, response.body, + response.status), driver=self) if self._parse_response_for_async_op(response): return True @@ -395,7 +357,8 @@ def list_volumes(self, node=None): def create_node(self, ex_cloud_service_name=None, **kwargs): """Create Azure Virtual Machine - Reference: http://bit.ly/1fIsCb7 [www.windowsazure.com/en-us/documentation/] + Reference: http://bit.ly/1fIsCb7 + [www.windowsazure.com/en-us/documentation/] We default to: @@ -414,18 +377,21 @@ def create_node(self, ex_cloud_service_name=None, **kwargs): @inherits: :class:`NodeDriver.create_node` - :keyword ex_cloud_service_name: Required. Name of the Azure Cloud Service. + :keyword ex_cloud_service_name: Required. + Name of the Azure Cloud Service. :type ex_cloud_service_name: ``str`` - :keyword ex_storage_service_name: Optional: Name of the Azure Storage Service. + :keyword ex_storage_service_name: Optional: + Name of the Azure Storage Service. :type ex_storage_service_name: ``str`` - :keyword ex_deployment_name: Optional. 
The name of the deployment. - If this is not passed in we default to - using the Cloud Service name. + :keyword ex_deployment_name: Optional. The name of the deployment + If this is not passed in we default + to using the Cloud Service name. :type ex_deployment_name: ``str`` - :keyword ex_deployment_slot: Optional: Valid values: production|staging. + :keyword ex_deployment_slot: Optional: Valid values: production| + staging. Defaults to production. :type ex_deployment_slot: ``str`` @@ -447,7 +413,8 @@ def create_node(self, ex_cloud_service_name=None, **kwargs): if "ex_deployment_slot" in kwargs: ex_deployment_slot = kwargs['ex_deployment_slot'] else: - ex_deployment_slot = "production" # We assume production if this is not provided. + # We assume production if this is not provided. + ex_deployment_slot = "production" if "ex_admin_user_id" in kwargs: ex_admin_user_id = kwargs['ex_admin_user_id'] @@ -461,15 +428,21 @@ def create_node(self, ex_cloud_service_name=None, **kwargs): # We do this because we need to pass a Configuration to the # method. This will be either Linux or Windows. 
- if re.search("Win|SQL|SharePoint|Visual|Dynamics|DynGP|BizTalk", image, re.I): - machine_config = WindowsConfigurationSet(computer_name=name, admin_password=password, admin_user_name=ex_admin_user_id) + if re.search("Win|SQL|SharePoint|Visual|Dynamics|DynGP|BizTalk", + image, re.I): + machine_config = WindowsConfigurationSet( + computer_name=name, admin_password=password, + admin_user_name=ex_admin_user_id) + machine_config.domain_join = None if node_list is None: port = "3389" else: port = random.randint(41952,65535) - endpoints = self._get_deployment(service_name=ex_cloud_service_name,deployment_slot=ex_deployment_slot) + endpoints = self._get_deployment( + service_name=ex_cloud_service_name, + deployment_slot=ex_deployment_slot) for instances in endpoints.role_instance_list: ports = [] @@ -492,7 +465,9 @@ def create_node(self, ex_cloud_service_name=None, **kwargs): port = "22" else: port = random.randint(41952,65535) - endpoints = self._get_deployment(service_name=ex_cloud_service_name,deployment_slot=ex_deployment_slot) + endpoints = self._get_deployment( + service_name=ex_cloud_service_name, + deployment_slot=ex_deployment_slot) for instances in endpoints.role_instance_list: ports = [] @@ -510,11 +485,13 @@ def create_node(self, ex_cloud_service_name=None, **kwargs): load_balanced_endpoint_set_name=None, enable_direct_server_return=False ) - machine_config = LinuxConfigurationSet(name, ex_admin_user_id, password, False) + machine_config = LinuxConfigurationSet( + name, ex_admin_user_id, password, False) network_config.input_endpoints.input_endpoints.append(endpoint) - _storage_location = self._get_cloud_service_location(service_name=ex_cloud_service_name) + _storage_location = self._get_cloud_service_location( + service_name=ex_cloud_service_name) # OK, bit annoying here. 
You must create a deployment before # you can create an instance; however, the deployment function @@ -528,8 +505,11 @@ def create_node(self, ex_cloud_service_name=None, **kwargs): ex_storage_service_name = kwargs['ex_storage_service_name'] else: ex_storage_service_name = ex_cloud_service_name - ex_storage_service_name = re.sub(ur'[\W_]+', u'', ex_storage_service_name.lower(), flags=re.UNICODE) - if self._is_storage_service_unique(service_name=ex_storage_service_name): + ex_storage_service_name = re.sub( + ur'[\W_]+', u'', ex_storage_service_name.lower(), + flags=re.UNICODE) + if self._is_storage_service_unique( + service_name=ex_storage_service_name): self._create_storage_account( service_name=ex_storage_service_name, location=_storage_location.service_location, @@ -541,8 +521,12 @@ def create_node(self, ex_cloud_service_name=None, **kwargs): else: ex_deployment_name = ex_cloud_service_name - blob_url = "http://" + ex_storage_service_name + ".blob.core.windows.net" - disk_name = "{0}-{1}-{2}.vhd".format(ex_cloud_service_name,name,time.strftime("%Y-%m-%d")) # Azure's pattern in the UI. + blob_url = "http://" + ex_storage_service_name \ + + ".blob.core.windows.net" + + # Azure's pattern in the UI. 
+ disk_name = "{0}-{1}-{2}.vhd".format( + ex_cloud_service_name,name,time.strftime("%Y-%m-%d")) media_link = blob_url + "/vhds/" + disk_name disk_config = OSVirtualHardDisk(image, media_link) @@ -562,28 +546,36 @@ def create_node(self, ex_cloud_service_name=None, **kwargs): size, None)) - if response.status != 200: - raise LibcloudError('Message: %s, Body: %s, Status code: %d' % (response.error, response.body, response.status), driver=self) + if response.status != 202: + raise LibcloudError('Message: %s, Body: %s, Status code: %d' % + (response.error, response.body, response.status), driver=self) self._ex_complete_async_azure_operation(response) else: - _deployment_name = self._get_deployment(service_name=ex_cloud_service_name,deployment_slot=ex_deployment_slot).name + _deployment_name = self._get_deployment( + service_name=ex_cloud_service_name, + deployment_slot=ex_deployment_slot).name if "ex_storage_service_name" in kwargs: ex_storage_service_name = kwargs['ex_storage_service_name'] else: ex_storage_service_name = ex_cloud_service_name - ex_storage_service_name = re.sub(ur'[\W_]+', u'', ex_storage_service_name.lower(), flags=re.UNICODE) + ex_storage_service_name = re.sub( + ur'[\W_]+', u'', ex_storage_service_name.lower(), + flags=re.UNICODE) - if self._is_storage_service_unique(service_name=ex_storage_service_name): + if self._is_storage_service_unique( + service_name=ex_storage_service_name): self._create_storage_account( service_name=ex_storage_service_name, location=_storage_location.service_location, is_affinity_group=_storage_location.is_affinity_group ) - blob_url = "http://" + ex_storage_service_name + ".blob.core.windows.net" - disk_name = "{0}-{1}-{2}.vhd".format(ex_cloud_service_name,name,time.strftime("%Y-%m-%d")) + blob_url = "http://" + ex_storage_service_name + \ + ".blob.core.windows.net" + disk_name = "{0}-{1}-{2}.vhd".format( + ex_cloud_service_name,name,time.strftime("%Y-%m-%d")) media_link = blob_url + "/vhds/" + disk_name disk_config = 
OSVirtualHardDisk(image, media_link) @@ -601,7 +593,9 @@ def create_node(self, ex_cloud_service_name=None, **kwargs): size)) # role_size) if response.status != 202: - raise LibcloudError('Message: %s, Body: %s, Status code: %d' % (response.error, response.body, response.status), driver=self) + raise LibcloudError('Message: %s, Body: %s, Status code: %d' % + (response.error, response.body, + response.status), driver=self) self._ex_complete_async_azure_operation(response) @@ -614,7 +608,8 @@ def create_node(self, ex_cloud_service_name=None, **kwargs): driver=self.connection.driver ) - def destroy_node(self, node=None, ex_cloud_service_name=None, ex_deployment_slot=None): + def destroy_node(self, node=None, ex_cloud_service_name=None, + ex_deployment_slot=None): """Remove Azure Virtual Machine This removes the instance, but does not @@ -622,7 +617,8 @@ def destroy_node(self, node=None, ex_cloud_service_name=None, ex_deployment_slot Azure sometimes has an issue where it will hold onto a blob lease for an extended amount of time. - :keyword ex_cloud_service_name: Required. Name of the Azure Cloud Service. + :keyword ex_cloud_service_name: Required. + Name of the Azure Cloud Service. 
:type ex_cloud_service_name: ``str`` :keyword ex_deployment_slot: Optional: The name of the deployment @@ -640,13 +636,17 @@ def destroy_node(self, node=None, ex_cloud_service_name=None, ex_deployment_slot if not ex_deployment_slot: ex_deployment_slot = "production" - _deployment = self._get_deployment(service_name=ex_cloud_service_name,deployment_slot=ex_deployment_slot) + _deployment = self._get_deployment( + service_name=ex_cloud_service_name, + deployment_slot=ex_deployment_slot) + _deployment_name = _deployment.name _server_deployment_count = len(_deployment.role_instance_list) if _server_deployment_count > 1: - path = self._get_role_path(ex_cloud_service_name, _deployment_name, node.id) + path = self._get_role_path(ex_cloud_service_name, + _deployment_name, node.id) path += '?comp=media' # forces deletion of attached disks data = self._perform_delete(path) @@ -663,7 +663,8 @@ def destroy_node(self, node=None, ex_cloud_service_name=None, ex_deployment_slot return True - def create_cloud_service(self, ex_cloud_service_name=None, location=None, description=None, extended_properties=None): + def create_cloud_service(self, ex_cloud_service_name=None, location=None, + description=None, extended_properties=None): """ creates an azure cloud service. 
@@ -689,12 +690,15 @@ def create_cloud_service(self, ex_cloud_service_name=None, location=None, descri response = self._perform_cloud_service_create( self._get_hosted_service_path(), - AzureXmlSerializer.create_hosted_service_to_xml(ex_cloud_service_name, self._encode_base64(ex_cloud_service_name), description, - location, None, - extended_properties)) + AzureXmlSerializer.create_hosted_service_to_xml( + ex_cloud_service_name, + self._encode_base64(ex_cloud_service_name), + description, location, None, extended_properties)) if response.status != 201: - raise LibcloudError('Message: %s, Body: %s, Status code: %d' % (response.error, response.body, response.status), driver=self) + raise LibcloudError('Message: %s, Body: %s, Status code: %d' + % (response.error, response.body, + response.status), driver=self) return True @@ -712,10 +716,13 @@ def destroy_cloud_service(self, ex_cloud_service_name=None): if not ex_cloud_service_name: raise ValueError("ex_cloud_service_name is required.") #add check to ensure all nodes have been deleted - response = self._perform_cloud_service_delete(self._get_hosted_service_path(ex_cloud_service_name)) + response = self._perform_cloud_service_delete( + self._get_hosted_service_path(ex_cloud_service_name)) if response.status != 200: - raise LibcloudError('Message: %s, Body: %s, Status code: %d' % (response.error, response.body, response.status), driver=self) + raise LibcloudError('Message: %s, Body: %s, Status code: %d' % + (response.error, response.body, response.status) + , driver=self) return True @@ -834,7 +841,8 @@ def _to_location(self, data): country=country, driver=self.connection.driver, available_services =data.available_services, - virtual_machine_role_sizes = (data.compute_capabilities).virtual_machines_role_sizes) + virtual_machine_role_sizes = + (data.compute_capabilities).virtual_machines_role_sizes) def _to_node_size(self, data): """ @@ -877,11 +885,13 @@ def _to_volume(self, volume, node): extra = {} 
extra['affinity_group'] = volume.affinity_group if hasattr(volume.attached_to, 'hosted_service_name'): - extra['hosted_service_name'] = volume.attached_to.hosted_service_name + extra['hosted_service_name'] = \ + volume.attached_to.hosted_service_name if hasattr(volume.attached_to, 'role_name'): extra['role_name'] = volume.attached_to.role_name if hasattr(volume.attached_to, 'deployment_name'): - extra['deployment_name'] = volume.attached_to.deployment_name + extra['deployment_name'] = \ + volume.attached_to.deployment_name extra['os'] = volume.os extra['location'] = volume.location extra['media_link'] = volume.media_link @@ -896,7 +906,8 @@ def _to_volume(self, volume, node): extra = {} extra['affinity_group'] = volume.affinity_group if hasattr(volume.attached_to, 'hosted_service_name'): - extra['hosted_service_name'] = volume.attached_to.hosted_service_name + extra['hosted_service_name'] = \ + volume.attached_to.hosted_service_name if hasattr(volume.attached_to, 'role_name'): extra['role_name'] = volume.attached_to.role_name if hasattr(volume.attached_to, 'deployment_name'): @@ -921,7 +932,9 @@ def _get_deployment(self, **kwargs): _service_name, _deployment_slot), None) if response.status != 200: - raise LibcloudError('Message: %s, Body: %s, Status code: %d' % (response.error, response.body, response.status), driver=self) + raise LibcloudError('Message: %s, Body: %s, Status code: %d' % + (response.error, response.body, response.status) + , driver=self) return self._parse_response(response, Deployment) @@ -972,7 +985,9 @@ def _create_storage_account(self, **kwargs): None)) # extended_properties if response.status != 202: - raise LibcloudError('Message: %s, Body: %s, Status code: %d' % (response.error, response.body, response.status), driver=self) + raise LibcloudError('Message: %s, Body: %s, Status code: %d' % + (response.error, response.body, + response.status), driver=self) else: response = self._perform_post( @@ -987,12 +1002,15 @@ def 
_create_storage_account(self, **kwargs): None)) # extended_properties if response.status != 202: - raise LibcloudError('Message: %s, Body: %s, Status code: %d' % (response.error, response.body, response.status), driver=self) + raise LibcloudError('Message: %s, Body: %s, Status code: %d' % + (response.error, response.body, + response.status), driver=self) # We need to wait for this to be created before we can # create the storage container and the instance. - self._ex_complete_async_azure_operation(response, "create_storage_account") + self._ex_complete_async_azure_operation(response, + "create_storage_account") return @@ -1021,7 +1039,8 @@ def _perform_post(self, path, body, response_type=None, async=False): request.host = azure_service_management_host request.path = path request.body = self._get_request_body(body) - request.path, request.query = self._update_request_uri_query(request) + request.path, request.query = \ + self._update_request_uri_query(request) request.headers = self._update_management_header(request) response = self._perform_request(request) @@ -1037,7 +1056,9 @@ def _perform_delete(self, path, async=False): response = self._perform_request(request) if response.status != 202: - raise LibcloudError('Message: %s, Body: %s, Status code: %d' % (response.error, response.body, response.status), driver=self) + raise LibcloudError('Message: %s, Body: %s, Status code: %d' % + (response.error, response.body, response.status) + , driver=self) if async: return self._parse_response_for_async_op(response) @@ -1047,7 +1068,10 @@ def _perform_delete(self, path, async=False): def _perform_request(self, request): try: - return self.connection.request(action="https://%s%s" % (request.host, request.path), data=request.body, headers=request.headers, method=request.method) + return self.connection.request( + action="https://%s%s" % (request.host, request.path), + data=request.body, headers=request.headers, + method=request.method) except Exception, e: print e.message @@ 
-1077,7 +1101,8 @@ def _update_request_uri_query(self, request): request.path += '?' for name, value in request.query: if value is not None: - request.path += name + '=' + url_quote(value, '/()$=\',') + '&' + request.path += name + '=' + \ + url_quote(value, '/()$=\',') + '&' request.path = request.path[:-1] return request.path, request.query @@ -1122,7 +1147,8 @@ def _parse_response(self, response, return_type): Parse the HTTPResponse's body and fill all the data into a class of return_type. ''' - return self._parse_response_body_from_xml_text(response.body, return_type) + return self._parse_response_body_from_xml_text( + response.body, return_type) def _parse_response_body_from_xml_text(self, respbody, return_type): ''' @@ -1170,12 +1196,14 @@ def _fill_data_to_return_object(self, node, return_obj): elif isinstance(value, dict): setattr(return_obj, name, - self._fill_dict(node, self._get_serialization_name(name))) + self._fill_dict(node, + self._get_serialization_name(name))) elif isinstance(value, _Base64String): value = self._fill_data_minidom(node, name, '') if value is not None: value = self._decode_base64_to_text(value) - # always set the attribute, so we don't end up returning an object + # always set the attribute, + # so we don't end up returning an object # with type _Base64String setattr(return_obj, name, value) else: @@ -1185,7 +1213,8 @@ def _fill_data_to_return_object(self, node, return_obj): def _fill_list_of(self, xmldoc, element_type, xml_element_name): xmlelements = self._get_child_nodes(xmldoc, xml_element_name) - return [self._parse_response_body_from_xml_node(xmlelement, element_type) \ + return [self._parse_response_body_from_xml_node( + xmlelement, element_type) \ for xmlelement in xmlelements] def _parse_response_body_from_xml_node(self, node, return_type): @@ -1201,7 +1230,8 @@ def _fill_scalar_list_of(self, xmldoc, element_type, parent_xml_element_name, xml_element_name): xmlelements = self._get_child_nodes(xmldoc, 
parent_xml_element_name) if xmlelements: - xmlelements = self._get_child_nodes(xmlelements[0], xml_element_name) + xmlelements = \ + self._get_child_nodes(xmlelements[0], xml_element_name) return [self._get_node_value(xmlelement, element_type) \ for xmlelement in xmlelements] @@ -1230,13 +1260,15 @@ def _get_serialization_name(self,element_name): return ''.join(name.capitalize() for name in element_name.split('_')) - def _fill_dict_of(self, xmldoc, parent_xml_element_name, pair_xml_element_name, + def _fill_dict_of( + self, xmldoc, parent_xml_element_name, pair_xml_element_name, key_xml_element_name, value_xml_element_name): return_obj = {} xmlelements = self._get_child_nodes(xmldoc, parent_xml_element_name) if xmlelements: - xmlelements = self._get_child_nodes(xmlelements[0], pair_xml_element_name) + xmlelements = \ + self._get_child_nodes(xmlelements[0], pair_xml_element_name) for pair in xmlelements: keys = self._get_child_nodes(pair, key_xml_element_name) values = self._get_child_nodes(pair, value_xml_element_name) @@ -1345,8 +1377,8 @@ def _convert_class_to_xml(self, source, xml_prefix=True): isinstance(value, WindowsAzureData): xmlstr += self._convert_class_to_xml(value, False) else: - xmlstr += ('<' + self._get_serialization_name(name) + '>' + - xml_escape(str(value)) + '' + xml_escape(str(value)) + '') xmlstr += '' return xmlstr @@ -1365,7 +1397,8 @@ def _parse_response_for_async_op(self, response): def _get_deployment_path_using_name(self, service_name, deployment_name=None): - return self._get_path('services/hostedservices/' + _str(service_name) + + return self._get_path('services/hostedservices/' + + _str(service_name) + '/deployments', deployment_name) def _get_path(self, resource, name): @@ -1395,7 +1428,8 @@ def _get_role_path(self, service_name, deployment_name, role_name=None): def _get_storage_service_path(self, service_name=None): return self._get_path('services/storageservices', service_name) - def _ex_complete_async_azure_operation(self, 
response=None, operation_type='create_node'): + def _ex_complete_async_azure_operation(self, response=None, + operation_type='create_node'): request_id = self._parse_response_for_async_op(response) operation_status = self._get_operation_status(request_id.request_id) @@ -1413,7 +1447,9 @@ def _ex_complete_async_azure_operation(self, response=None, operation_type='crea time.sleep(interval) if operation_status.status == 'Failed': - raise LibcloudError('Message: Async request for operation %s has failed'% operation_type, driver=self) + raise LibcloudError( + 'Message: Async request for operation %s has failed'% + operation_type, driver=self) #def get_connection(self): # certificate_path = "/Users/baldwin/.azure/managementCertificate.pem" @@ -1469,7 +1505,8 @@ def regenerate_keys_to_xml(key_type): @staticmethod def update_hosted_service_to_xml(label, description, extended_properties): return AzureXmlSerializer.doc_from_data('UpdateHostedService', - [('Label', label, AzureNodeDriver._encode_base64), + [('Label', label, + AzureNodeDriver._encode_base64), ('Description', description)], extended_properties) @@ -1992,7 +2029,8 @@ class WindowsConfigurationSet(WindowsAzureData): def __init__(self, computer_name=None, admin_password=None, reset_password_on_first_logon=None, - enable_automatic_updates=None, time_zone=None, admin_user_name=None): + enable_automatic_updates=None, + time_zone=None, admin_user_name=None): self.configuration_set_type = u'WindowsProvisioningConfiguration' self.computer_name = computer_name self.admin_password = admin_password @@ -2606,7 +2644,8 @@ def __init__(self, pair_xml_element_name, key_xml_element_name, class AzureNodeLocation(NodeLocation): # we can also have something in here for available services which is an extra to the API with Azure - def __init__(self, id, name, country, driver, available_services, virtual_machine_role_sizes): + def __init__(self, id, name, country, driver, available_services, + virtual_machine_role_sizes): 
super(AzureNodeLocation, self).__init__(id, name, country, driver) self.available_services = available_services self.virtual_machine_role_sizes = virtual_machine_role_sizes @@ -2615,4 +2654,5 @@ def __repr__(self): return (('') % (self.id, self.name, self.country, - self.driver.name, ','.join(self.available_service), ','.join(self.virtual_machine_role_sizes))) \ No newline at end of file + self.driver.name, ','.join(self.available_service), + ','.join(self.virtual_machine_role_sizes))) \ No newline at end of file diff --git a/libcloud/test/compute/test_azure.py b/libcloud/test/compute/test_azure.py index c52a133c77..d211bab973 100644 --- a/libcloud/test/compute/test_azure.py +++ b/libcloud/test/compute/test_azure.py @@ -1,7 +1,6 @@ import libcloud from libcloud.common.types import LibcloudError from libcloud.compute.base import NodeAuthPassword -from libcloud.compute.drivers.azure import AZURE_DEFAULT_IMAGE_NAME __author__ = 'david' @@ -34,18 +33,23 @@ def test_locations_returned_successfully(self): self.assertEqual(len(locations), 7) locationNamesResult = list(a.name for a in locations) - locationNamesExpected = ['East Asia','Southeast Asia','North Europe','West Europe','East US','North Central US','West US'] + locationNamesExpected = ['East Asia','Southeast Asia','North Europe', + 'West Europe','East US','North Central US', + 'West US'] self.assertListEqual(locationNamesResult, locationNamesExpected) - matchedLocation = next(location for location in locations if location.name == 'Southeast Asia') + matchedLocation = next(location for location in locations + if location.name == 'Southeast Asia') servicesResult = matchedLocation.available_services servicesExpected = ['Compute','Storage','PersistentVMRole','HighMemory'] self.assertListEqual(servicesResult, servicesExpected) vmRoleSizesResult = matchedLocation.virtual_machine_role_sizes - vmRoleSizesExpected = 
['A5','A6','A7','Basic_A0','Basic_A1','Basic_A2','Basic_A3','Basic_A4','ExtraLarge','ExtraSmall','Large','Medium','Small'] + vmRoleSizesExpected = ['A5','A6','A7','Basic_A0','Basic_A1','Basic_A2', + 'Basic_A3','Basic_A4','ExtraLarge','ExtraSmall', + 'Large','Medium','Small'] self.assertListEqual(vmRoleSizesResult, vmRoleSizesExpected) def test_images_returned_successfully(self): @@ -57,7 +61,8 @@ def test_images_returned_successfully_filter_by_location(self): self.assertEquals(len(images), 207 ) def test_list_nodes_returned_successfully(self): - vmimages = self.driver.list_nodes(ex_cloud_service_name="dcoddkinztest01") + vmimages = self.driver.list_nodes( + ex_cloud_service_name="dcoddkinztest01") self.assertEqual(len(vmimages), 2) img0 = vmimages[0] @@ -74,7 +79,8 @@ def test_list_nodes_returned_successfully(self): self.assertEquals(extra["ssh_port"], u'22') def test_list_nodes_returned_no_deployments(self): - vmimages = self.driver.list_nodes(ex_cloud_service_name="dcoddkinztest03") + vmimages = self.driver.list_nodes( + ex_cloud_service_name="dcoddkinztest03") self.assertIsNone(vmimages) def test_list_nodes_returned_no_cloud_service(self): @@ -84,7 +90,9 @@ def test_list_nodes_returned_no_cloud_service(self): def test_restart_node_success(self): node = type('Node', (object,), dict(id="dc03")) - result = self.driver.reboot_node(node=node, ex_cloud_service_name="dcoddkinztest01", ex_deployment_slot="Production") + result = self.driver.reboot_node( + node=node, ex_cloud_service_name="dcoddkinztest01", + ex_deployment_slot="Production") self.assertTrue(result) @@ -94,35 +102,45 @@ def test_restart_node_fail_no_deployment(self): node = type('Node', (object,), dict(id="dc03")) with self.assertRaises(LibcloudError): - self.driver.reboot_node(node=node, ex_cloud_service_name="dcoddkinztest02", ex_deployment_slot="Production") + self.driver.reboot_node(node=node, + ex_cloud_service_name="dcoddkinztest02", + ex_deployment_slot="Production") def 
test_restart_node_fail_no_cloud_service(self): node = type('Node', (object,), dict(id="dc03")) with self.assertRaises(LibcloudError): - self.driver.reboot_node(node=node, ex_cloud_service_name="dcoddkinztest03", ex_deployment_slot="Production") + self.driver.reboot_node(node=node, + ex_cloud_service_name="dcoddkinztest03", + ex_deployment_slot="Production") def test_restart_node_fail_node_not_found(self): node = type('Node', (object,), dict(id="dc13")) - result = self.driver.reboot_node(node=node, ex_cloud_service_name="dcoddkinztest01", ex_deployment_slot="Production") + result = self.driver.reboot_node( + node=node, ex_cloud_service_name="dcoddkinztest01", + ex_deployment_slot="Production") self.assertFalse(result) def test_destroy_node_success_single_node_in_cloud_service(self): node = type('Node', (object,), dict(id="oddkinz1")) - result = self.driver.destroy_node(node=node, ex_cloud_service_name="oddkinz1", ex_deployment_slot="Production") + result = self.driver.destroy_node(node=node, + ex_cloud_service_name="oddkinz1", + ex_deployment_slot="Production") self.assertTrue(result) def test_destroy_node_success_multiple_nodes_in_cloud_service(self): node = type('Node', (object,), dict(id="oddkinz1")) - result = self.driver.destroy_node(node=node, ex_cloud_service_name="oddkinz2", ex_deployment_slot="Production") + result = self.driver.destroy_node(node=node, + ex_cloud_service_name="oddkinz2", + ex_deployment_slot="Production") self.assertTrue(result) def test_destroy_node_fail_node_does_not_exist(self): @@ -130,7 +148,9 @@ def test_destroy_node_fail_node_does_not_exist(self): node = type('Node', (object,), dict(id="oddkinz2")) with self.assertRaises(LibcloudError): - self.driver.destroy_node(node=node, ex_cloud_service_name="oddkinz2", ex_deployment_slot="Production") + self.driver.destroy_node(node=node, + ex_cloud_service_name="oddkinz2", + ex_deployment_slot="Production") def test_destroy_node_success_cloud_service_not_found(self): @@ -138,7 +158,9 @@ def 
test_destroy_node_success_cloud_service_not_found(self): node["name"]="cloudredis" with self.assertRaises(LibcloudError): - self.driver.destroy_node(node=node, ex_cloud_service_name="oddkinz5", ex_deployment_slot="Production" ) + self.driver.destroy_node(node=node, + ex_cloud_service_name="oddkinz5", + ex_deployment_slot="Production" ) def test_create_cloud_service(self): result = self.driver.create_cloud_service("testdc123", "North Europe") @@ -147,17 +169,20 @@ def test_create_cloud_service(self): def test_create_cloud_service_service_exists(self): with self.assertRaises(LibcloudError): - self.driver.create_cloud_service(ex_cloud_service_name="testdc1234", location="North Europe") + self.driver.create_cloud_service(ex_cloud_service_name="testdc1234", + location="North Europe") def test_destroy_cloud_service(self): - result = self.driver.destroy_cloud_service(ex_cloud_service_name="testdc123") + result = self.driver.destroy_cloud_service( + ex_cloud_service_name="testdc123") self.assertTrue(result) def test_destroy_cloud_service_service_does_not_exist(self): with self.assertRaises(LibcloudError): - self.driver.destroy_cloud_service(ex_cloud_service_name="testdc1234") + self.driver.destroy_cloud_service( + ex_cloud_service_name="testdc1234") def test_create_node_and_deployment_one_node(self): kwargs = {} @@ -171,10 +196,12 @@ def test_create_node_and_deployment_one_node(self): kwargs["auth"]= auth kwargs["size"]= "ExtraSmall" - kwargs["image"] = "5112500ae3b842c8b9c604889f8753c3__OpenLogic-CentOS-65-20140415" + kwargs["image"] = \ + "5112500ae3b842c8b9c604889f8753c3__OpenLogic-CentOS-65-20140415" kwargs["name"] = "dcoddkinztest03" - result = self.driver.create_node(ex_cloud_service_name="testdcabc", **kwargs) + result = self.driver.create_node( + ex_cloud_service_name="testdcabc", **kwargs) self.assertIsNotNone(result) def test_create_node_and_deployment_second_node(self): @@ -189,11 +216,13 @@ def test_create_node_and_deployment_second_node(self): kwargs["auth"]= 
auth kwargs["size"]= "ExtraSmall" - kwargs["image"] = AZURE_DEFAULT_IMAGE_NAME["OpenLogic"] + kwargs["image"] = \ + "5112500ae3b842c8b9c604889f8753c3__OpenLogic-CentOS-65-20140415" kwargs["name"] = "dcoddkinztest03" node = type('Node', (object,), dict(id="dc14")) - result = self.driver.create_node(ex_cloud_service_name="testdcabc2", **kwargs) + result = self.driver.create_node( + ex_cloud_service_name="testdcabc2", **kwargs) self.assertIsNotNone(result) def test_create_node_and_deployment_second_node_307_response(self): @@ -208,11 +237,13 @@ def test_create_node_and_deployment_second_node_307_response(self): kwargs["auth"]= auth kwargs["size"]= "ExtraSmall" - kwargs["image"] = AZURE_DEFAULT_IMAGE_NAME["OpenLogic"] + kwargs["image"] = \ + "5112500ae3b842c8b9c604889f8753c3__OpenLogic-CentOS-65-20140415" kwargs["name"] = "dcoddkinztest04" with self.assertRaises(LibcloudError): - self.driver.create_node(ex_cloud_service_name="testdcabc3", **kwargs) + self.driver.create_node(ex_cloud_service_name="testdcabc3", + **kwargs) class AzureMockHttp(MockHttp): @@ -330,7 +361,7 @@ def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc_depl if method == "GET": body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc_deployments.xml') - return (httplib.OK, body, headers, httplib.responses[httplib.OK]) + return (httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED]) def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc2(self, method, url, body, headers): if method == "GET": From 96f03c7cdd6d960431efc03d789ae7a8d503ee27 Mon Sep 17 00:00:00 2001 From: davidcrossland Date: Mon, 30 Jun 2014 10:47:15 +0100 Subject: [PATCH 299/315] Added tests for kwarg required parameters when calling create_node --- libcloud/compute/drivers/azure.py | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/libcloud/compute/drivers/azure.py b/libcloud/compute/drivers/azure.py 
index 44d0b33158..6154fb34be 100644 --- a/libcloud/compute/drivers/azure.py +++ b/libcloud/compute/drivers/azure.py @@ -414,7 +414,7 @@ def create_node(self, ex_cloud_service_name=None, **kwargs): ex_deployment_slot = kwargs['ex_deployment_slot'] else: # We assume production if this is not provided. - ex_deployment_slot = "production" + ex_deployment_slot = "Production" if "ex_admin_user_id" in kwargs: ex_admin_user_id = kwargs['ex_admin_user_id'] @@ -422,6 +422,24 @@ def create_node(self, ex_cloud_service_name=None, **kwargs): # This mimics the Azure UI behavior. ex_admin_user_id = "azureuser" + if "ex_storage_service_name" not in kwargs: + raise ValueError("ex_storage_service_name is required.") + + if "size" not in kwargs: + raise ValueError("size is required. ") + + if "image" not in kwargs: + raise ValueError("image is required.") + + if "name" not in kwargs: + raise ValueError("name is required.") + + if "ex_admin_user_id" not in kwargs: + raise ValueError("ex_cloud_service_name is required.") + + if "ex_admin_user_id" not in kwargs: + raise ValueError("ex_cloud_service_name is required.") + node_list = self.list_nodes(ex_cloud_service_name=ex_cloud_service_name) network_config = ConfigurationSet() network_config.configuration_set_type = 'NetworkConfiguration' From ff3000f51f966398c464bab636f82a237d722d0b Mon Sep 17 00:00:00 2001 From: Matt Baldwin Date: Tue, 1 Jul 2014 11:12:09 -0700 Subject: [PATCH 300/315] updated unit test to support linux pathing --- libcloud/test/compute/test_azure.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libcloud/test/compute/test_azure.py b/libcloud/test/compute/test_azure.py index d211bab973..f3d6aaf0e7 100644 --- a/libcloud/test/compute/test_azure.py +++ b/libcloud/test/compute/test_azure.py @@ -21,7 +21,7 @@ class AzureNodeDriverTests(unittest.TestCase) : libcloud.security.VERIFY_SSL_CERT = False SUBSCRIPTION_ID = '3761b98b-673d-526c-8d55-fee918758e6e' - KEY_FILE = 'fixtures\\azure\\libcloud.pem' #empty 
file is fine + KEY_FILE = 'fixtures/azure/libcloud.pem' #empty file is fine def setUp(self): Azure = get_driver(Provider.AZURE) From 1f75d757195f3ab05048b3a3cb811e9518ed6270 Mon Sep 17 00:00:00 2001 From: Matt Baldwin Date: Tue, 1 Jul 2014 11:35:49 -0700 Subject: [PATCH 301/315] converted to using libcloud.utils.urlquote and libcloud.utils.urlunquote --- libcloud/compute/drivers/azure.py | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/libcloud/compute/drivers/azure.py b/libcloud/compute/drivers/azure.py index 6154fb34be..8b61c8e366 100644 --- a/libcloud/compute/drivers/azure.py +++ b/libcloud/compute/drivers/azure.py @@ -15,7 +15,6 @@ """Azure Compute driver """ -import httplib import re import time import collections @@ -24,6 +23,7 @@ import copy import base64 +from libcloud.utils.py3 import httplib from libcloud.common.azure import AzureServiceManagementConnection from libcloud.compute.providers import Provider from libcloud.compute.base import Node, NodeDriver, NodeLocation, NodeSize @@ -36,14 +36,8 @@ from xml.sax.saxutils import escape as xml_escape from httplib import (HTTPSConnection) -if sys.version_info < (3,): - from urllib2 import quote as url_quote - from urllib2 import unquote as url_unquote - _strtype = basestring -else: - from urllib.parse import quote as url_quote - from urllib.parse import unquote as url_unquote - _strtype = str +from libcloud.utils.py3 import urlquote +from libcloud.utils.py3 import urlunquote if sys.version_info < (3,): _unicode_type = unicode From 579cfe277939a642bd2e3d687b4a9db20c5b7630 Mon Sep 17 00:00:00 2001 From: Matt Baldwin Date: Tue, 1 Jul 2014 12:46:31 -0700 Subject: [PATCH 302/315] backing out fix --- libcloud/compute/drivers/azure.py | 12 +++++++++--- libcloud/test/compute/test_azure.py | 1 + 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/libcloud/compute/drivers/azure.py b/libcloud/compute/drivers/azure.py index 8b61c8e366..6154fb34be 100644 --- 
a/libcloud/compute/drivers/azure.py +++ b/libcloud/compute/drivers/azure.py @@ -15,6 +15,7 @@ """Azure Compute driver """ +import httplib import re import time import collections @@ -23,7 +24,6 @@ import copy import base64 -from libcloud.utils.py3 import httplib from libcloud.common.azure import AzureServiceManagementConnection from libcloud.compute.providers import Provider from libcloud.compute.base import Node, NodeDriver, NodeLocation, NodeSize @@ -36,8 +36,14 @@ from xml.sax.saxutils import escape as xml_escape from httplib import (HTTPSConnection) -from libcloud.utils.py3 import urlquote -from libcloud.utils.py3 import urlunquote +if sys.version_info < (3,): + from urllib2 import quote as url_quote + from urllib2 import unquote as url_unquote + _strtype = basestring +else: + from urllib.parse import quote as url_quote + from urllib.parse import unquote as url_unquote + _strtype = str if sys.version_info < (3,): _unicode_type = unicode diff --git a/libcloud/test/compute/test_azure.py b/libcloud/test/compute/test_azure.py index f3d6aaf0e7..4f76e8508f 100644 --- a/libcloud/test/compute/test_azure.py +++ b/libcloud/test/compute/test_azure.py @@ -1,6 +1,7 @@ import libcloud from libcloud.common.types import LibcloudError from libcloud.compute.base import NodeAuthPassword +from libcloud.common.azure import AzureServiceManagementConnection __author__ = 'david' From ae39549560001dd67bc4f2447b5da471d37bfce1 Mon Sep 17 00:00:00 2001 From: Matt Baldwin Date: Tue, 1 Jul 2014 12:53:56 -0700 Subject: [PATCH 303/315] re-applying the fix --- libcloud/compute/drivers/azure.py | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/libcloud/compute/drivers/azure.py b/libcloud/compute/drivers/azure.py index 6154fb34be..fe95f85c77 100644 --- a/libcloud/compute/drivers/azure.py +++ b/libcloud/compute/drivers/azure.py @@ -24,6 +24,8 @@ import copy import base64 +from libcloud.utils.py3 import urlquote as url_quote +from libcloud.utils.py3 import urlunquote as 
url_unquote from libcloud.common.azure import AzureServiceManagementConnection from libcloud.compute.providers import Provider from libcloud.compute.base import Node, NodeDriver, NodeLocation, NodeSize @@ -36,15 +38,6 @@ from xml.sax.saxutils import escape as xml_escape from httplib import (HTTPSConnection) -if sys.version_info < (3,): - from urllib2 import quote as url_quote - from urllib2 import unquote as url_unquote - _strtype = basestring -else: - from urllib.parse import quote as url_quote - from urllib.parse import unquote as url_unquote - _strtype = str - if sys.version_info < (3,): _unicode_type = unicode From 4e1f8f79c8349e0cf099cfdfe4b67d8756d0c094 Mon Sep 17 00:00:00 2001 From: Matt Baldwin Date: Tue, 1 Jul 2014 13:59:50 -0700 Subject: [PATCH 304/315] moved subscription_id from common/base.py to common/azure.py --- libcloud/common/azure.py | 20 +++++++++++--------- libcloud/common/base.py | 7 +++---- 2 files changed, 14 insertions(+), 13 deletions(-) diff --git a/libcloud/common/azure.py b/libcloud/common/azure.py index b7429336a5..931da74b98 100644 --- a/libcloud/common/azure.py +++ b/libcloud/common/azure.py @@ -214,25 +214,27 @@ def __init__(self, subscription_id, key_file, *args, **kwargs): Check to see if PyCrypto is available, and convert key file path into a key string if the key is in a file. - :param user_id: Email address to be used for Service Account - authentication. - :type user_id: ``str`` + :param subscription_id: Azure subscription ID. + :type subscription_id: ``str`` - :param key: The RSA Key or path to file containing the key. - :type key: ``str`` + :param key_file: The PEM file used to authenticate with the service. 
+ :type key_file: ``str`` """ + super(AzureServiceManagementConnection, self).__init__( + key_file, *args, **kwargs) + + self.subscription_id = subscription_id + keypath = os.path.expanduser(key_file) self.keyfile = keypath; is_file_path = os.path.exists(keypath) and os.path.isfile(keypath) if not is_file_path: raise InvalidCredsError( - 'pem file needed to authenticate to Microsoft Azure') + 'You need an certificate PEM file to authenticate with ' + 'Microsoft Azure. This can be found in the portal.') self.key_file = key_file - super(AzureServiceManagementConnection, self).__init__( - subscription_id, key_file, *args, **kwargs) - def add_default_headers(self, headers): """ @inherits: :class:`Connection.add_default_headers` diff --git a/libcloud/common/base.py b/libcloud/common/base.py index 92239568a1..9bbb1a6b72 100644 --- a/libcloud/common/base.py +++ b/libcloud/common/base.py @@ -950,19 +950,18 @@ def __init__(self, key, secure=True, host=None, port=None, url=None, class CertificateConnection(Connection): """ - Base connection class which accepts a single ``key`` argument. + Base connection class which accepts a single ``cert_file`` argument. """ - def __init__(self, subscription_id, cert_file, secure=True, host=None, port=None, url=None, + def __init__(self, cert_file, secure=True, host=None, port=None, url=None, timeout=None): """ - Initialize `user_id` and `key`; set `secure` to an ``int`` based on + Initialize `cert_file`; set `secure` to an ``int`` based on passed value. """ super(CertificateConnection, self).__init__(secure=secure, host=host, port=port, url=url, timeout=timeout) self.cert_file = cert_file - self.subscription_id = subscription_id class ConnectionUserAndKey(ConnectionKey): """ From c116bf9d310e25a658c259e1b9111ff13b9e5e7f Mon Sep 17 00:00:00 2001 From: Michael Bennett Date: Wed, 24 Sep 2014 16:55:21 -0400 Subject: [PATCH 305/315] Removed some checks for fields that aren't actually required in create_node. 
Fixed a typo in AzureNodeLocation that incorrectly referenced an object attribute --- libcloud/compute/drivers/azure.py | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/libcloud/compute/drivers/azure.py b/libcloud/compute/drivers/azure.py index fe95f85c77..4f9d20ef51 100644 --- a/libcloud/compute/drivers/azure.py +++ b/libcloud/compute/drivers/azure.py @@ -415,9 +415,6 @@ def create_node(self, ex_cloud_service_name=None, **kwargs): # This mimics the Azure UI behavior. ex_admin_user_id = "azureuser" - if "ex_storage_service_name" not in kwargs: - raise ValueError("ex_storage_service_name is required.") - if "size" not in kwargs: raise ValueError("size is required. ") @@ -427,12 +424,6 @@ def create_node(self, ex_cloud_service_name=None, **kwargs): if "name" not in kwargs: raise ValueError("name is required.") - if "ex_admin_user_id" not in kwargs: - raise ValueError("ex_cloud_service_name is required.") - - if "ex_admin_user_id" not in kwargs: - raise ValueError("ex_cloud_service_name is required.") - node_list = self.list_nodes(ex_cloud_service_name=ex_cloud_service_name) network_config = ConfigurationSet() network_config.configuration_set_type = 'NetworkConfiguration' @@ -2665,5 +2656,5 @@ def __repr__(self): return (('') % (self.id, self.name, self.country, - self.driver.name, ','.join(self.available_service), + self.driver.name, ','.join(self.available_services), ','.join(self.virtual_machine_role_sizes))) \ No newline at end of file From c9a8587d24082cfd9dbaee8f000d5b636d7323e2 Mon Sep 17 00:00:00 2001 From: Michael Bennett Date: Thu, 9 Oct 2014 17:57:46 -0400 Subject: [PATCH 306/315] list images now lists custom images (added supporting data types and serializations). Creating a node now requires a NodeSize and NodeImage created by calls to list_sizes and list_images (respectively) rather than strings to be more consistent. 
Creating a node supports creating a vm image if the NodeImage passed in represents a VM image (which is indicated in the extra variable and is filled out when data is deserialized in the list_images call). Updated tests to be compatible with the changes to create_node. Fixed some styling --- libcloud/compute/drivers/azure.py | 258 ++++++++++++++++++++-------- libcloud/test/compute/test_azure.py | 23 +-- 2 files changed, 197 insertions(+), 84 deletions(-) diff --git a/libcloud/compute/drivers/azure.py b/libcloud/compute/drivers/azure.py index 4f9d20ef51..5534eb192d 100644 --- a/libcloud/compute/drivers/azure.py +++ b/libcloud/compute/drivers/azure.py @@ -16,6 +16,7 @@ """ import httplib +import itertools import re import time import collections @@ -63,7 +64,7 @@ def _str(value): """ AZURE_COMPUTE_INSTANCE_TYPES = { 'A0': { - 'id': 'A0', + 'id': 'ExtraSmall', 'name': 'ExtraSmall Instance', 'ram': 768, 'disk': 127, @@ -73,7 +74,7 @@ def _str(value): 'cores': 'Shared' }, 'A1': { - 'id': 'A1', + 'id': 'Small', 'name': 'Small Instance', 'ram': 1792, 'disk': 127, @@ -83,7 +84,7 @@ def _str(value): 'cores': 1 }, 'A2': { - 'id': 'A2', + 'id': 'Medium', 'name': 'Medium Instance', 'ram': 3584, 'disk': 127, @@ -93,7 +94,7 @@ def _str(value): 'cores': 2 }, 'A3': { - 'id': 'A3', + 'id': 'Large', 'name': 'Large Instance', 'ram': 7168, 'disk': 127, @@ -103,7 +104,7 @@ def _str(value): 'cores': 4 }, 'A4': { - 'id': 'A4', + 'id': 'ExtraLarge', 'name': 'ExtraLarge Instance', 'ram': 14336, 'disk': 127, @@ -161,8 +162,8 @@ def _str(value): 'os': 'OS', 'persistent_vm_downtime_info': 'PersistentVMDowntimeInfo', 'copy_id': 'CopyId', - } - + 'os_disk_configuration': 'OSDiskConfiguration' +} class AzureNodeDriver(NodeDriver): @@ -174,7 +175,7 @@ class AzureNodeDriver(NodeDriver): _instance_types = AZURE_COMPUTE_INSTANCE_TYPES _blob_url = ".blob.core.windows.net" features = {'create_node': ['password']} - service_location = collections.namedtuple('service_location',['is_affinity_group', 
'service_location']) + service_location = collections.namedtuple('service_location', ['is_affinity_group', 'service_location']) NODE_STATE_MAP = { 'RoleStateUnknown': NodeState.UNKNOWN, @@ -203,8 +204,12 @@ def __init__(self, subscription_id=None, key_file=None, **kwargs): """ self.subscription_id = subscription_id self.key_file = key_file - super(AzureNodeDriver, self).__init__(self.subscription_id, self.key_file, - secure=True, **kwargs) + super(AzureNodeDriver, self).__init__( + self.subscription_id, + self.key_file, + secure=True, + **kwargs + ) def list_sizes(self): """ @@ -228,9 +233,12 @@ def list_images(self, location=None): """ data = self._perform_get(self._get_image_path(), Images) + custom_image_data = self._perform_get(self._get_vmimage_path(), VMImages) + images = [self._to_image(i) for i in data] + images.extend(self._vm_to_image(j) for j in custom_image_data) - if location != None: + if location is not None: images = [image for image in images if location in image.extra["location"]] return images @@ -265,13 +273,17 @@ def list_nodes(self, ex_cloud_service_name=None): response = self._perform_get( self._get_hosted_service_path(ex_cloud_service_name) + '?embed-detail=True', - None) - if response.status != 200 : - raise LibcloudError('Message: %s, Body: %s, Status code: %d' % - (response.error, response.body, response.status) - , driver=self) + None + ) + + if response.status != 200: + raise LibcloudError( + 'Message: %s, Body: %s, Status code: %d' % + (response.error, response.body, response.status), + driver=self + ) - data = self._parse_response(response, HostedService) + data = self._parse_response(response, HostedService) try: return [self._to_node(n) for n in @@ -392,10 +404,6 @@ def create_node(self, ex_cloud_service_name=None, **kwargs): :type ex_admin_user_id: ``str`` """ - name = kwargs['name'] - size = kwargs['size'] - image = kwargs['image'] - password = None auth = self._get_and_check_auth(kwargs["auth"]) password = auth.password @@ 
-418,12 +426,22 @@ def create_node(self, ex_cloud_service_name=None, **kwargs): if "size" not in kwargs: raise ValueError("size is required. ") + if not isinstance(kwargs['size'], NodeSize): + raise ValueError('Size must be an instance of NodeSize') + if "image" not in kwargs: raise ValueError("image is required.") if "name" not in kwargs: raise ValueError("name is required.") + name = kwargs['name'] + size = kwargs['size'] + image = kwargs['image'] + + if not isinstance(image, NodeImage): + raise ValueError("Image must be an instance of NodeImage, produced by list_images()") + node_list = self.list_nodes(ex_cloud_service_name=ex_cloud_service_name) network_config = ConfigurationSet() network_config.configuration_set_type = 'NetworkConfiguration' @@ -431,7 +449,7 @@ def create_node(self, ex_cloud_service_name=None, **kwargs): # We do this because we need to pass a Configuration to the # method. This will be either Linux or Windows. if re.search("Win|SQL|SharePoint|Visual|Dynamics|DynGP|BizTalk", - image, re.I): + image.name, re.I): machine_config = WindowsConfigurationSet( computer_name=name, admin_password=password, admin_user_name=ex_admin_user_id) @@ -494,13 +512,14 @@ def create_node(self, ex_cloud_service_name=None, **kwargs): _storage_location = self._get_cloud_service_location( service_name=ex_cloud_service_name) - + # OK, bit annoying here. You must create a deployment before # you can create an instance; however, the deployment function # creates the first instance, but all subsequent instances - # must be created using the add_role function. + # must be created using the add_role function. # # So, yeah, annoying. + if node_list is None: # This is the first node in this cloud service. 
if "ex_storage_service_name" in kwargs: @@ -523,30 +542,50 @@ def create_node(self, ex_cloud_service_name=None, **kwargs): else: ex_deployment_name = ex_cloud_service_name - blob_url = "http://" + ex_storage_service_name \ - + ".blob.core.windows.net" + if image.extra['vm_image']: + response = self._perform_post( + self._get_deployment_path_using_name(ex_cloud_service_name), + AzureXmlSerializer.virtual_machine_deployment_to_xml( + ex_deployment_name, + ex_deployment_slot, + name, + name, + machine_config, + None, + 'PersistentVMRole', + None, + None, + None, + size.id, + None, + image.id)) + else: + blob_url = "http://" + ex_storage_service_name \ + + ".blob.core.windows.net" - # Azure's pattern in the UI. - disk_name = "{0}-{1}-{2}.vhd".format( - ex_cloud_service_name,name,time.strftime("%Y-%m-%d")) - media_link = blob_url + "/vhds/" + disk_name - disk_config = OSVirtualHardDisk(image, media_link) + # Azure's pattern in the UI. + disk_name = "{0}-{1}-{2}.vhd".format( + ex_cloud_service_name,name,time.strftime("%Y-%m-%d")) + media_link = blob_url + "/vhds/" + disk_name - response = self._perform_post( - self._get_deployment_path_using_name(ex_cloud_service_name), - AzureXmlSerializer.virtual_machine_deployment_to_xml( - ex_deployment_name, - ex_deployment_slot, - name, - name, - machine_config, - disk_config, - 'PersistentVMRole', - network_config, - None, - None, - size, - None)) + disk_config = OSVirtualHardDisk(image.id, media_link) + + response = self._perform_post( + self._get_deployment_path_using_name(ex_cloud_service_name), + AzureXmlSerializer.virtual_machine_deployment_to_xml( + ex_deployment_name, + ex_deployment_slot, + name, + name, + machine_config, + disk_config, + 'PersistentVMRole', + network_config, + None, + None, + size.id, + None, + None)) if response.status != 202: raise LibcloudError('Message: %s, Body: %s, Status code: %d' % @@ -574,25 +613,41 @@ def create_node(self, ex_cloud_service_name=None, **kwargs): 
is_affinity_group=_storage_location.is_affinity_group ) - blob_url = "http://" + ex_storage_service_name + \ - ".blob.core.windows.net" - disk_name = "{0}-{1}-{2}.vhd".format( - ex_cloud_service_name,name,time.strftime("%Y-%m-%d")) - media_link = blob_url + "/vhds/" + disk_name - disk_config = OSVirtualHardDisk(image, media_link) - - response = self._perform_post( - self._get_role_path(ex_cloud_service_name, - _deployment_name), - AzureXmlSerializer.add_role_to_xml( - name, # role_name - machine_config, # system_config - disk_config, # os_virtual_hard_disk - 'PersistentVMRole', # role_type - network_config, # network_config - None, # availability_set_name - None, # data_virtual_hard_disks - size)) # role_size) + if image.extra['vm_image']: + response = self._perform_post( + self._get_role_path(ex_cloud_service_name, + image.extra['deployment_name']), + AzureXmlSerializer.add_role_to_xml( + name, # role_name + machine_config, # system_config + None, # os_virtual_hard_disk + 'PersistentVMRole', # role_type + None, # network_config + None, # availability_set_name + None, # data_virtual_hard_disks + image.id, #vm_image + size.id)) # role_size + else: + blob_url = "http://" + ex_storage_service_name + \ + ".blob.core.windows.net" + disk_name = "{0}-{1}-{2}.vhd".format( + ex_cloud_service_name,name,time.strftime("%Y-%m-%d")) + media_link = blob_url + "/vhds/" + disk_name + disk_config = OSVirtualHardDisk(image.id, media_link) + + response = self._perform_post( + self._get_role_path(ex_cloud_service_name, + _deployment_name), + AzureXmlSerializer.add_role_to_xml( + name, # role_name + machine_config, # system_config + disk_config, # os_virtual_hard_disk + 'PersistentVMRole', # role_type + network_config, # network_config + None, # availability_set_name + None, # data_virtual_hard_disks + size.id, # role_size + None)) #vm_image) if response.status != 202: raise LibcloudError('Message: %s, Body: %s, Status code: %d' % @@ -871,12 +926,29 @@ def _to_image(self, data): 
name=data.label, driver=self.connection.driver, extra={ - 'os' : data.os, - 'category' : data.category, - 'description' : data.description, - 'location' : data.location, - 'affinity_group' : data.affinity_group, - 'media_link' : data.media_link + 'os': data.os, + 'category': data.category, + 'description': data.description, + 'location': data.location, + 'affinity_group': data.affinity_group, + 'media_link': data.media_link, + 'vm_image': False + }) + + def _vm_to_image(self, data): + + return NodeImage( + id=data.name, + name=data.label, + driver=self.connection.driver, + extra={ + 'os': data.os_disk_configuration.os, + 'category': data.category, + 'location': data.location, + 'media_link': data.os_disk_configuration.media_link, + 'affinity_group': data.affinity_group, + 'deployment_name': data.deployment_name, + 'vm_image': True }) def _to_volume(self, volume, node): @@ -1412,6 +1484,9 @@ def _get_path(self, resource, name): def _get_image_path(self, image_name=None): return self._get_path('services/images', image_name) + def _get_vmimage_path(self, image_name=None): + return self._get_path('services/vmimages', image_name) + def _get_hosted_service_path(self, service_name=None): return self._get_path('services/hostedservices', service_name) @@ -1786,7 +1861,7 @@ def network_configuration_to_xml(configuration): @staticmethod def role_to_xml(availability_set_name, data_virtual_hard_disks, - network_configuration_set, os_virtual_hard_disk, role_name, + network_configuration_set, os_virtual_hard_disk, vm_image_name, role_name, role_size, role_type, system_configuration_set): xml = AzureXmlSerializer.data_to_xml([('RoleName', role_name), ('RoleType', role_type)]) @@ -1839,6 +1914,9 @@ def role_to_xml(availability_set_name, data_virtual_hard_disks, ('SourceImageName', os_virtual_hard_disk.source_image_name)]) xml += '' + if vm_image_name is not None: + xml += AzureXmlSerializer.data_to_xml([('VMImageName', vm_image_name)]) + if role_size is not None: xml += 
AzureXmlSerializer.data_to_xml([('RoleSize', role_size)]) @@ -1848,12 +1926,13 @@ def role_to_xml(availability_set_name, data_virtual_hard_disks, def add_role_to_xml(role_name, system_configuration_set, os_virtual_hard_disk, role_type, network_configuration_set, availability_set_name, - data_virtual_hard_disks, role_size): + data_virtual_hard_disks, vm_image_name, role_size): xml = AzureXmlSerializer.role_to_xml( availability_set_name, data_virtual_hard_disks, network_configuration_set, os_virtual_hard_disk, + vm_image_name, role_name, role_size, role_type, @@ -1906,7 +1985,7 @@ def virtual_machine_deployment_to_xml(deployment_name, deployment_slot, network_configuration_set, availability_set_name, data_virtual_hard_disks, role_size, - virtual_network_name): + virtual_network_name, vm_image_name): xml = AzureXmlSerializer.data_to_xml([('Name', deployment_name), ('DeploymentSlot', deployment_slot), ('Label', label)]) @@ -1917,6 +1996,7 @@ def virtual_machine_deployment_to_xml(deployment_name, deployment_slot, data_virtual_hard_disks, network_configuration_set, os_virtual_hard_disk, + vm_image_name, role_name, role_size, role_type, @@ -2243,6 +2323,13 @@ def __len__(self): def __getitem__(self, index): return self.images[index] + +class VMImages(Images): + + def __init__(self): + self.images = _list_of(VMImage) + + class OSImage(WindowsAzureData): def __init__(self): @@ -2257,6 +2344,19 @@ def __init__(self): self.eula = u'' self.description = u'' +class VMImage(WindowsAzureData): + + def __init__(self): + self.name = u'' + self.label = u'' + self.category = u'' + self.os_disk_configuration = OSDiskConfiguration() + self.service_name = u'' + self.deployment_name = u'' + self.role_name = u'' + self.location = u'' + self.affinity_group = u'' + class HostedServices(WindowsAzureData): def __init__(self): @@ -2504,6 +2604,16 @@ def __init__(self): self.family = 0 self.family_label = _Base64String() +class OSDiskConfiguration(WindowsAzureData): + + def __init__(self): + 
self.name = u'' + self.host_caching = u'' + self.os_state = u'' + self.os = u'' + self.media_link = u'' + self.logical_disk_size_in_gb = 0 + class OperatingSystems(WindowsAzureData): def __init__(self): diff --git a/libcloud/test/compute/test_azure.py b/libcloud/test/compute/test_azure.py index 4f76e8508f..7ac96222ec 100644 --- a/libcloud/test/compute/test_azure.py +++ b/libcloud/test/compute/test_azure.py @@ -1,6 +1,6 @@ import libcloud from libcloud.common.types import LibcloudError -from libcloud.compute.base import NodeAuthPassword +from libcloud.compute.base import NodeAuthPassword, NodeImage, NodeSize from libcloud.common.azure import AzureServiceManagementConnection __author__ = 'david' @@ -196,9 +196,10 @@ def test_create_node_and_deployment_one_node(self): auth = NodeAuthPassword("Pa55w0rd", False) kwargs["auth"]= auth - kwargs["size"]= "ExtraSmall" - kwargs["image"] = \ - "5112500ae3b842c8b9c604889f8753c3__OpenLogic-CentOS-65-20140415" + kwargs["size"] = NodeSize(id="ExtraSmall") + kwargs["image"] = NodeImage( + id="5112500ae3b842c8b9c604889f8753c3__OpenLogic-CentOS-65-20140415" + ) kwargs["name"] = "dcoddkinztest03" result = self.driver.create_node( @@ -216,9 +217,10 @@ def test_create_node_and_deployment_second_node(self): auth = NodeAuthPassword("Pa55w0rd", False) kwargs["auth"]= auth - kwargs["size"]= "ExtraSmall" - kwargs["image"] = \ - "5112500ae3b842c8b9c604889f8753c3__OpenLogic-CentOS-65-20140415" + kwargs["size"] = NodeSize(id="ExtraSmall") + kwargs["image"] = NodeImage( + id="5112500ae3b842c8b9c604889f8753c3__OpenLogic-CentOS-65-20140415" + ) kwargs["name"] = "dcoddkinztest03" node = type('Node', (object,), dict(id="dc14")) @@ -237,9 +239,10 @@ def test_create_node_and_deployment_second_node_307_response(self): auth = NodeAuthPassword("Pa55w0rd", False) kwargs["auth"]= auth - kwargs["size"]= "ExtraSmall" - kwargs["image"] = \ - "5112500ae3b842c8b9c604889f8753c3__OpenLogic-CentOS-65-20140415" + kwargs["size"] = NodeSize(id="ExtraSmall") + 
kwargs["image"] = NodeImage( + id="5112500ae3b842c8b9c604889f8753c3__OpenLogic-CentOS-65-20140415" + ) kwargs["name"] = "dcoddkinztest04" with self.assertRaises(LibcloudError): From 76d3e2e822391510a2481a23f8b36f1a53226e38 Mon Sep 17 00:00:00 2001 From: Michael Bennett Date: Fri, 10 Oct 2014 18:02:50 -0400 Subject: [PATCH 307/315] Cleaned up and formatted the code for readability and consistency. Updated tests for the new changes. Refactored some data structures to make things more DRY. Added a new function to list all cloud services. Modified the parsing and creation of Nodes to get the public ip from the deployment data object, since Azure appears to only return it there in some cases. Also modified nodes to include the service name they belong to in the extra dict and modified reboot and destroy node to search for that, rather than requiring it to be passed it --- libcloud/common/azure.py | 81 +- libcloud/compute/drivers/azure.py | 1817 ++++++++++++++++----------- libcloud/test/compute/test_azure.py | 327 +++-- 3 files changed, 1317 insertions(+), 908 deletions(-) diff --git a/libcloud/common/azure.py b/libcloud/common/azure.py index 931da74b98..1075c9589f 100644 --- a/libcloud/common/azure.py +++ b/libcloud/common/azure.py @@ -44,15 +44,18 @@ class AzureResponse(XmlResponse): - - valid_response_codes = [httplib.NOT_FOUND, httplib.CONFLICT, - # added TEMPORARY_REDIRECT as this can sometimes be - # sent by azure instead of a success or fail response - httplib.BAD_REQUEST, httplib.TEMPORARY_REDIRECT] + valid_response_codes = [ + httplib.NOT_FOUND, + httplib.CONFLICT, + httplib.BAD_REQUEST, + httplib.TEMPORARY_REDIRECT + # added TEMPORARY_REDIRECT as this can sometimes be + # sent by azure instead of a success or fail response + ] def success(self): i = int(self.status) - return i >= 200 and i <= 299 or i in self.valid_response_codes + return 200 <= i <= 299 or i in self.valid_response_codes def parse_error(self, msg=None): error_msg = 'Unknown error' @@ -77,8 
+80,10 @@ def parse_error(self, msg=None): if self.status in [httplib.UNAUTHORIZED, httplib.FORBIDDEN]: raise InvalidCredsError(error_msg) - raise LibcloudError('%s Status code: %d.' % (error_msg, self.status), - driver=self) + raise LibcloudError( + '%s Status code: %d.' % (error_msg, self.status), + driver=self + ) class AzureRawResponse(RawResponse): @@ -105,16 +110,26 @@ def pre_connect_hook(self, params, headers): # Add the authorization header headers['Authorization'] = self._get_azure_auth_signature( - method=self.method, headers=headers, params=params, - account=self.user_id, secret_key=self.key, path=self.action) + method=self.method, + headers=headers, + params=params, + account=self.user_id, + secret_key=self.key, + path=self.action + ) # Azure cribs about this in 'raw' connections headers.pop('Host', None) return params, headers - def _get_azure_auth_signature(self, method, headers, params, - account, secret_key, path='/'): + def _get_azure_auth_signature(self, + method, + headers, + params, + account, + secret_key, + path='/'): """ Signature = Base64( HMAC-SHA1( YourSecretAccessKeyID, UTF-8-Encoding-Of( StringToSign ) ) ) ); @@ -137,11 +152,19 @@ def _get_azure_auth_signature(self, method, headers, params, special_header_values = [] xms_header_values = [] param_list = [] - special_header_keys = ['content-encoding', 'content-language', - 'content-length', 'content-md5', - 'content-type', 'date', 'if-modified-since', - 'if-match', 'if-none-match', - 'if-unmodified-since', 'range'] + special_header_keys = [ + 'content-encoding', + 'content-language', + 'content-length', + 'content-md5', + 'content-type', + 'date', + 'if-modified-since', + 'if-match', + 'if-none-match', + 'if-unmodified-since', + 'range' + ] # Split the x-ms headers and normal headers and make everything # lower case @@ -192,9 +215,11 @@ def _get_azure_auth_signature(self, method, headers, params, return 'SharedKey %s:%s' % (self.user_id, b64_hmac.decode('utf-8')) + class 
AzureBaseDriver(object): name = "Microsoft Azure Service Management API" + class AzureServiceManagementConnection(CertificateConnection): # This needs the following approach - # 1. Make request using LibcloudHTTPSConnection which is a overloaded @@ -202,13 +227,18 @@ class AzureServiceManagementConnection(CertificateConnection): # 2. Depending on the type of operation use a PollingConnection # when the response id is returned # 3. The Response can be used in an AzureServiceManagementResponse - """Authentication class for "Service Account" authentication.""" + + """ + Authentication class for "Service Account" authentication. + """ + driver = AzureBaseDriver responseCls = AzureResponse rawResponseCls = AzureRawResponse name = 'Azure Service Management API Connection' host = 'management.core.windows.net' keyfile = "" + def __init__(self, subscription_id, key_file, *args, **kwargs): """ Check to see if PyCrypto is available, and convert key file path into a @@ -222,17 +252,21 @@ def __init__(self, subscription_id, key_file, *args, **kwargs): """ super(AzureServiceManagementConnection, self).__init__( - key_file, *args, **kwargs) + key_file, + *args, + **kwargs + ) self.subscription_id = subscription_id keypath = os.path.expanduser(key_file) - self.keyfile = keypath; + self.keyfile = keypath is_file_path = os.path.exists(keypath) and os.path.isfile(keypath) if not is_file_path: raise InvalidCredsError( 'You need an certificate PEM file to authenticate with ' - 'Microsoft Azure. This can be found in the portal.') + 'Microsoft Azure. This can be found in the portal.' 
+ ) self.key_file = key_file def add_default_headers(self, headers): @@ -244,8 +278,3 @@ def add_default_headers(self, headers): headers['x-ms-date'] = time.strftime(AZURE_TIME_FORMAT, time.gmtime()) #headers['host'] = self.host return headers - - - - - diff --git a/libcloud/compute/drivers/azure.py b/libcloud/compute/drivers/azure.py index 5534eb192d..501b083c24 100644 --- a/libcloud/compute/drivers/azure.py +++ b/libcloud/compute/drivers/azure.py @@ -162,7 +162,8 @@ def _str(value): 'os': 'OS', 'persistent_vm_downtime_info': 'PersistentVMDowntimeInfo', 'copy_id': 'CopyId', - 'os_disk_configuration': 'OSDiskConfiguration' + 'os_disk_configuration': 'OSDiskConfiguration', + 'is_dns_programmed': 'IsDnsProgrammed' } @@ -285,13 +286,19 @@ def list_nodes(self, ex_cloud_service_name=None): data = self._parse_response(response, HostedService) + vips = [vip.address for vip in data.deployments[0].virtual_ips] + try: - return [self._to_node(n) for n in - data.deployments[0].role_instance_list] + return [ + self._to_node(n, ex_cloud_service_name, vips) + for n in data.deployments[0].role_instance_list + ] except IndexError: return None - def reboot_node(self, node=None, ex_cloud_service_name=None, + def reboot_node(self, + node=None, + ex_cloud_service_name=None, ex_deployment_slot=None): """ Reboots a node. @@ -304,36 +311,44 @@ def reboot_node(self, node=None, ex_cloud_service_name=None, :param ex_cloud_service_name: Cloud Service name :type ex_cloud_service_name: ``str`` - :param ex_deployment_name: Options are "production" (default) + :param ex_deployment_slot: Options are "production" (default) or "Staging". 
(Optional) - :type ex_deployment_name: ``str`` + :type ex_deployment_slot: ``str`` :rtype: ``bool`` """ + if not node: + raise ValueError("node is required.") + + ex_cloud_service_name = ex_cloud_service_name or (node.extra and node.extra.get('ex_cloud_service_name')) if not ex_cloud_service_name: raise ValueError("ex_cloud_service_name is required.") if not ex_deployment_slot: - ex_deployment_slot = "production" + ex_deployment_slot = "Production" - if not node: - raise ValueError("node is required.") _deployment_name = self._get_deployment( service_name=ex_cloud_service_name, - deployment_slot=ex_deployment_slot).name + deployment_slot=ex_deployment_slot + ).name try: response = self._perform_post( self._get_deployment_path_using_name( - ex_cloud_service_name, _deployment_name) + '/roleinstances/' - + _str(node.id) + '?comp=reboot', '') + ex_cloud_service_name, + _deployment_name + ) + '/roleinstances/' + _str(node.id) + '?comp=reboot', + '' + ) if response.status != 202: - raise LibcloudError('Message: %s, Body: %s, Status code: %d' % - (response.error, response.body, - response.status), driver=self) + raise LibcloudError( + 'Message: %s, Body: %s, Status code: %d' % + (response.error, response.body, response.status), + driver=self + ) if self._parse_response_for_async_op(response): return True @@ -354,9 +369,7 @@ def list_volumes(self, node=None): """ data = self._perform_get(self._get_disk_path(), Disks) - - volumes = [self._to_volume(volume=v,node=node) for v in data] - + volumes = [self._to_volume(volume=v, node=node) for v in data] return volumes def create_node(self, ex_cloud_service_name=None, **kwargs): @@ -395,6 +408,11 @@ def create_node(self, ex_cloud_service_name=None, **kwargs): to using the Cloud Service name. :type ex_deployment_name: ``str`` + :keyword ex_new_deployment: Optional. Tells azure to create a new + deployment rather than add to an existing + one. 
+ :type ex_deployment_name: ``boolean`` + :keyword ex_deployment_slot: Optional: Valid values: production| staging. Defaults to production. @@ -403,14 +421,22 @@ def create_node(self, ex_cloud_service_name=None, **kwargs): :keyword ex_admin_user_id: Optional. Defaults to 'azureuser'. :type ex_admin_user_id: ``str`` + :keyword image: The image to use when creating this node + :type image: `NodeImage` + + :keyword size: The size of the instance to create + :type size: `NodeSize` + """ - password = None + auth = self._get_and_check_auth(kwargs["auth"]) password = auth.password if not ex_cloud_service_name: raise ValueError("ex_cloud_service_name is required.") + ex_new_deployment = kwargs.get("ex_new_deployment", False) + if "ex_deployment_slot" in kwargs: ex_deployment_slot = kwargs['ex_deployment_slot'] else: @@ -447,30 +473,31 @@ def create_node(self, ex_cloud_service_name=None, **kwargs): network_config.configuration_set_type = 'NetworkConfiguration' # We do this because we need to pass a Configuration to the - # method. This will be either Linux or Windows. - if re.search("Win|SQL|SharePoint|Visual|Dynamics|DynGP|BizTalk", - image.name, re.I): + # method. This will be either Linux or Windows. 
+ windows_server_regex = re.compile(r'Win|SQL|SharePoint|Visual|Dynamics|DynGP|BizTalk') + if windows_server_regex.search(image.id, re.I): machine_config = WindowsConfigurationSet( - computer_name=name, admin_password=password, - admin_user_name=ex_admin_user_id) + computer_name=name, + admin_password=password, + admin_user_name=ex_admin_user_id + ) machine_config.domain_join = None - if node_list is None: + if node_list is None or ex_new_deployment: port = "3389" else: - port = random.randint(41952,65535) + port = random.randint(41952, 65535) endpoints = self._get_deployment( service_name=ex_cloud_service_name, - deployment_slot=ex_deployment_slot) + deployment_slot=ex_deployment_slot + ) for instances in endpoints.role_instance_list: - ports = [] - for ep in instances.instance_endpoints: - ports += [ep.public_port] + ports = [ep.public_port for ep in instances.instance_endpoints] while port in ports: - port = random.randint(41952,65535) + port = random.randint(41952, 65535) endpoint = ConfigurationSetInputEndpoint( name='Remote Desktop', @@ -481,18 +508,20 @@ def create_node(self, ex_cloud_service_name=None, **kwargs): enable_direct_server_return=False ) else: - if node_list is None: + if node_list is None or ex_new_deployment: port = "22" else: - port = random.randint(41952,65535) + port = random.randint(41952, 65535) endpoints = self._get_deployment( service_name=ex_cloud_service_name, - deployment_slot=ex_deployment_slot) + deployment_slot=ex_deployment_slot + ) for instances in endpoints.role_instance_list: ports = [] - for ep in instances.instance_endpoints: - ports += [ep.public_port] + if instances.instance_endpoints is not None: + for ep in instances.instance_endpoints: + ports += [ep.public_port] while port in ports: port = random.randint(41952,65535) @@ -506,153 +535,144 @@ def create_node(self, ex_cloud_service_name=None, **kwargs): enable_direct_server_return=False ) machine_config = LinuxConfigurationSet( - name, ex_admin_user_id, password, False) + 
name, + ex_admin_user_id, + password, + False + ) - network_config.input_endpoints.input_endpoints.append(endpoint) + network_config.input_endpoints.items.append(endpoint) _storage_location = self._get_cloud_service_location( service_name=ex_cloud_service_name) + if "ex_storage_service_name" in kwargs: + ex_storage_service_name = kwargs['ex_storage_service_name'] + else: + ex_storage_service_name = ex_cloud_service_name + ex_storage_service_name = re.sub( + ur'[\W_]+', + u'', + ex_storage_service_name.lower(), + flags=re.UNICODE + ) + + if self._is_storage_service_unique( + service_name=ex_storage_service_name): + + self._create_storage_account( + service_name=ex_storage_service_name, + location=_storage_location.service_location, + is_affinity_group=_storage_location.is_affinity_group + ) + # OK, bit annoying here. You must create a deployment before # you can create an instance; however, the deployment function # creates the first instance, but all subsequent instances # must be created using the add_role function. # # So, yeah, annoying. - - if node_list is None: + if node_list is None or ex_new_deployment: # This is the first node in this cloud service. 
- if "ex_storage_service_name" in kwargs: - ex_storage_service_name = kwargs['ex_storage_service_name'] - else: - ex_storage_service_name = ex_cloud_service_name - ex_storage_service_name = re.sub( - ur'[\W_]+', u'', ex_storage_service_name.lower(), - flags=re.UNICODE) - if self._is_storage_service_unique( - service_name=ex_storage_service_name): - self._create_storage_account( - service_name=ex_storage_service_name, - location=_storage_location.service_location, - is_affinity_group=_storage_location.is_affinity_group - ) if "ex_deployment_name" in kwargs: ex_deployment_name = kwargs['ex_deployment_name'] else: ex_deployment_name = ex_cloud_service_name + vm_image_id = None + disk_config = None + if image.extra['vm_image']: - response = self._perform_post( - self._get_deployment_path_using_name(ex_cloud_service_name), - AzureXmlSerializer.virtual_machine_deployment_to_xml( - ex_deployment_name, - ex_deployment_slot, - name, - name, - machine_config, - None, - 'PersistentVMRole', - None, - None, - None, - size.id, - None, - image.id)) + vm_image_id = image.id + #network_config = None else: - blob_url = "http://" + ex_storage_service_name \ - + ".blob.core.windows.net" + blob_url = "http://" + ex_storage_service_name + ".blob.core.windows.net" # Azure's pattern in the UI. 
disk_name = "{0}-{1}-{2}.vhd".format( - ex_cloud_service_name,name,time.strftime("%Y-%m-%d")) + ex_cloud_service_name, + name, + time.strftime("%Y-%m-%d") + ) + media_link = blob_url + "/vhds/" + disk_name disk_config = OSVirtualHardDisk(image.id, media_link) - response = self._perform_post( - self._get_deployment_path_using_name(ex_cloud_service_name), - AzureXmlSerializer.virtual_machine_deployment_to_xml( - ex_deployment_name, - ex_deployment_slot, - name, - name, - machine_config, - disk_config, - 'PersistentVMRole', - network_config, - None, - None, - size.id, - None, - None)) + response = self._perform_post( + self._get_deployment_path_using_name(ex_cloud_service_name), + AzureXmlSerializer.virtual_machine_deployment_to_xml( + ex_deployment_name, + ex_deployment_slot, + name, + name, + machine_config, + disk_config, + 'PersistentVMRole', + network_config, + None, + None, + size.id, + None, + vm_image_id + ) + ) if response.status != 202: - raise LibcloudError('Message: %s, Body: %s, Status code: %d' % - (response.error, response.body, response.status), driver=self) + raise LibcloudError( + 'Message: %s, Body: %s, Status code: %d' % + (response.error, response.body, response.status), + driver=self + ) self._ex_complete_async_azure_operation(response) else: _deployment_name = self._get_deployment( service_name=ex_cloud_service_name, - deployment_slot=ex_deployment_slot).name + deployment_slot=ex_deployment_slot + ).name - if "ex_storage_service_name" in kwargs: - ex_storage_service_name = kwargs['ex_storage_service_name'] - else: - ex_storage_service_name = ex_cloud_service_name - ex_storage_service_name = re.sub( - ur'[\W_]+', u'', ex_storage_service_name.lower(), - flags=re.UNICODE) - - if self._is_storage_service_unique( - service_name=ex_storage_service_name): - self._create_storage_account( - service_name=ex_storage_service_name, - location=_storage_location.service_location, - is_affinity_group=_storage_location.is_affinity_group - ) + vm_image_id = None 
+ disk_config = None if image.extra['vm_image']: - response = self._perform_post( - self._get_role_path(ex_cloud_service_name, - image.extra['deployment_name']), - AzureXmlSerializer.add_role_to_xml( - name, # role_name - machine_config, # system_config - None, # os_virtual_hard_disk - 'PersistentVMRole', # role_type - None, # network_config - None, # availability_set_name - None, # data_virtual_hard_disks - image.id, #vm_image - size.id)) # role_size + vm_image_id = image.id + #network_config = None else: - blob_url = "http://" + ex_storage_service_name + \ - ".blob.core.windows.net" + blob_url = "http://" + ex_storage_service_name + ".blob.core.windows.net" disk_name = "{0}-{1}-{2}.vhd".format( - ex_cloud_service_name,name,time.strftime("%Y-%m-%d")) + ex_cloud_service_name, + name, + time.strftime("%Y-%m-%d") + ) media_link = blob_url + "/vhds/" + disk_name disk_config = OSVirtualHardDisk(image.id, media_link) - response = self._perform_post( - self._get_role_path(ex_cloud_service_name, - _deployment_name), - AzureXmlSerializer.add_role_to_xml( - name, # role_name - machine_config, # system_config - disk_config, # os_virtual_hard_disk - 'PersistentVMRole', # role_type - network_config, # network_config - None, # availability_set_name - None, # data_virtual_hard_disks - size.id, # role_size - None)) #vm_image) + response = self._perform_post( + self._get_role_path( + ex_cloud_service_name, + _deployment_name + ), + AzureXmlSerializer.add_role_to_xml( + name, # role_name + machine_config, # system_config + disk_config, # os_virtual_hard_disk + 'PersistentVMRole', # role_type + network_config, # network_config + None, # availability_set_name + None, # data_virtual_hard_disks + vm_image_id, # vm_image + size.id # role_size + ) + ) if response.status != 202: - raise LibcloudError('Message: %s, Body: %s, Status code: %d' % - (response.error, response.body, - response.status), driver=self) + raise LibcloudError( + 'Message: %s, Body: %s, Status code: %d' % + 
(response.error, response.body, response.status), + driver=self.connection.driver + ) self._ex_complete_async_azure_operation(response) @@ -662,10 +682,15 @@ def create_node(self, ex_cloud_service_name=None, **kwargs): state=NodeState.PENDING, public_ips=[], private_ips=[], - driver=self.connection.driver + driver=self.connection.driver, + extra={ + 'ex_cloud_service_name': ex_cloud_service_name + } ) - def destroy_node(self, node=None, ex_cloud_service_name=None, + def destroy_node(self, + node=None, + ex_cloud_service_name=None, ex_deployment_slot=None): """Remove Azure Virtual Machine @@ -684,44 +709,54 @@ def destroy_node(self, node=None, ex_cloud_service_name=None, :type ex_deployment_slot: ``str`` """ - if not ex_cloud_service_name: - raise ValueError("ex_cloud_service_name is required.") - if not node: raise ValueError("node is required.") + ex_cloud_service_name = ex_cloud_service_name or (node.extra and node.extra.get('ex_cloud_service_name')) + + if not ex_cloud_service_name: + raise ValueError("ex_cloud_service_name is required.") + if not ex_deployment_slot: ex_deployment_slot = "production" _deployment = self._get_deployment( service_name=ex_cloud_service_name, - deployment_slot=ex_deployment_slot) + deployment_slot=ex_deployment_slot + ) _deployment_name = _deployment.name _server_deployment_count = len(_deployment.role_instance_list) if _server_deployment_count > 1: - path = self._get_role_path(ex_cloud_service_name, - _deployment_name, node.id) - path += '?comp=media' # forces deletion of attached disks + path = self._get_role_path( + ex_cloud_service_name, + _deployment_name, + node.id + ) + path += '?comp=media' # forces deletion of attached disks - data = self._perform_delete(path) + self._perform_delete(path) return True else: path = self._get_deployment_path_using_name( ex_cloud_service_name, - _deployment_name) + _deployment_name + ) path += '?comp=media' - data = self._perform_delete(path) + self._perform_delete(path) return True - def 
create_cloud_service(self, ex_cloud_service_name=None, location=None, - description=None, extended_properties=None): + def create_cloud_service(self, + ex_cloud_service_name=None, + location=None, + description=None, + extended_properties=None): """ creates an azure cloud service. @@ -745,17 +780,24 @@ def create_cloud_service(self, ex_cloud_service_name=None, location=None, if not location: raise ValueError("location is required.") - response = self._perform_cloud_service_create( + response = self._perform_cloud_service_create( self._get_hosted_service_path(), AzureXmlSerializer.create_hosted_service_to_xml( ex_cloud_service_name, self._encode_base64(ex_cloud_service_name), - description, location, None, extended_properties)) + description, + location, + None, + extended_properties + ) + ) if response.status != 201: - raise LibcloudError('Message: %s, Body: %s, Status code: %d' - % (response.error, response.body, - response.status), driver=self) + raise LibcloudError( + 'Message: %s, Body: %s, Status code: %d' + % (response.error, response.body, response.status), + driver=self + ) return True @@ -772,45 +814,63 @@ def destroy_cloud_service(self, ex_cloud_service_name=None): if not ex_cloud_service_name: raise ValueError("ex_cloud_service_name is required.") + #add check to ensure all nodes have been deleted response = self._perform_cloud_service_delete( - self._get_hosted_service_path(ex_cloud_service_name)) + self._get_hosted_service_path(ex_cloud_service_name) + ) if response.status != 200: - raise LibcloudError('Message: %s, Body: %s, Status code: %d' % - (response.error, response.body, response.status) - , driver=self) + raise LibcloudError( + 'Message: %s, Body: %s, Status code: %d' % + (response.error, response.body, response.status), + driver=self + ) return True - """ Functions not implemented + def list_cloud_services(self): + + return self._perform_get( + self._get_hosted_service_path(), + HostedServices + ) + + """ + Functions not implemented """ def 
create_volume_snapshot(self): raise NotImplementedError( 'You cannot create snapshots of ' - 'Azure VMs at this time.') + 'Azure VMs at this time.' + ) def attach_volume(self): raise NotImplementedError( 'attach_volume is not supported ' - 'at this time.') + 'at this time.' + ) def create_volume(self): raise NotImplementedError( 'create_volume is not supported ' - 'at this time.') + 'at this time.' + ) def detach_volume(self): raise NotImplementedError( 'detach_volume is not supported ' - 'at this time.') + 'at this time.' + ) def destroy_volume(self): raise NotImplementedError( 'destroy_volume is not supported ' - 'at this time.') + 'at this time.' + ) - """Private Functions + """ + Private Functions """ def _perform_cloud_service_create(self, path, data): @@ -836,40 +896,44 @@ def _perform_cloud_service_delete(self, path): return response - def _to_node(self, data): + def _to_node(self, data, ex_cloud_service_name=None, virtual_ips=None): """ Convert the data from a Azure response object into a Node """ - if len(data.instance_endpoints) >= 1: - public_ip = data.instance_endpoints[0].vip - else: - public_ip = [] + remote_desktop_port = u'' + ssh_port = u'' + public_ips = virtual_ips or [] - for port in data.instance_endpoints: - if port.name == 'Remote Desktop': - remote_desktop_port = port.public_port - else: - remote_desktop_port = [] + if data.instance_endpoints is not None: + if len(data.instance_endpoints) >= 1: + public_ips = [data.instance_endpoints[0].vip] - if port.name == "SSH": - ssh_port = port.public_port - else: - ssh_port = [] + for port in data.instance_endpoints: + if port.name == 'Remote Desktop': + remote_desktop_port = port.public_port + + if port.name == "SSH": + ssh_port = port.public_port return Node( id=data.role_name, name=data.role_name, state=self.NODE_STATE_MAP.get( - data.instance_status, NodeState.UNKNOWN), - public_ips=[public_ip], + data.instance_status, + NodeState.UNKNOWN + ), + public_ips=public_ips, 
private_ips=[data.ip_address], - driver=AzureNodeDriver, + driver=self.connection.driver, extra={ 'remote_desktop_port': remote_desktop_port, 'ssh_port': ssh_port, 'power_state': data.power_state, - 'instance_size': data.instance_size}) + 'instance_size': data.instance_size, + 'ex_cloud_service_name': ex_cloud_service_name + } + ) def _to_location(self, data): """ @@ -897,9 +961,9 @@ def _to_location(self, data): name=data.display_name, country=country, driver=self.connection.driver, - available_services =data.available_services, - virtual_machine_role_sizes = - (data.compute_capabilities).virtual_machines_role_sizes) + available_services=data.available_services, + virtual_machine_role_sizes=data.compute_capabilities.virtual_machines_role_sizes + ) def _to_node_size(self, data): """ @@ -915,9 +979,10 @@ def _to_node_size(self, data): price=data["price"], driver=self.connection.driver, extra={ - 'max_data_disks' : data["max_data_disks"], - 'cores' : data["cores"] - }) + 'max_data_disks': data["max_data_disks"], + 'cores': data["cores"] + } + ) def _to_image(self, data): @@ -933,7 +998,8 @@ def _to_image(self, data): 'affinity_group': data.affinity_group, 'media_link': data.media_link, 'vm_image': False - }) + } + ) def _vm_to_image(self, data): @@ -949,41 +1015,46 @@ def _vm_to_image(self, data): 'affinity_group': data.affinity_group, 'deployment_name': data.deployment_name, 'vm_image': True - }) + } + ) def _to_volume(self, volume, node): if node: if hasattr(volume.attached_to, 'role_name'): if volume.attached_to.role_name == node.id: - extra = {} - extra['affinity_group'] = volume.affinity_group + extra = {'affinity_group': volume.affinity_group} + if hasattr(volume.attached_to, 'hosted_service_name'): - extra['hosted_service_name'] = \ - volume.attached_to.hosted_service_name + extra['hosted_service_name'] = volume.attached_to.hosted_service_name + if hasattr(volume.attached_to, 'role_name'): extra['role_name'] = volume.attached_to.role_name + if 
hasattr(volume.attached_to, 'deployment_name'): - extra['deployment_name'] = \ - volume.attached_to.deployment_name + extra['deployment_name'] = volume.attached_to.deployment_name + extra['os'] = volume.os extra['location'] = volume.location extra['media_link'] = volume.media_link extra['source_image_name'] = volume.source_image_name - return StorageVolume(id=volume.name, + return StorageVolume( + id=volume.name, name=volume.name, size=int(volume.logical_disk_size_in_gb), driver=self.connection.driver, - extra=extra) + extra=extra + ) else: - extra = {} - extra['affinity_group'] = volume.affinity_group + extra = {'affinity_group': volume.affinity_group} + if hasattr(volume.attached_to, 'hosted_service_name'): - extra['hosted_service_name'] = \ - volume.attached_to.hosted_service_name + extra['hosted_service_name'] = volume.attached_to.hosted_service_name + if hasattr(volume.attached_to, 'role_name'): extra['role_name'] = volume.attached_to.role_name + if hasattr(volume.attached_to, 'deployment_name'): extra['deployment_name'] = volume.attached_to.deployment_name extra['os'] = volume.os @@ -991,11 +1062,13 @@ def _to_volume(self, volume, node): extra['media_link'] = volume.media_link extra['source_image_name'] = volume.source_image_name - return StorageVolume(id=volume.name, + return StorageVolume( + id=volume.name, name=volume.name, size=int(volume.logical_disk_size_in_gb), driver=self.connection.driver, - extra=extra) + extra=extra + ) def _get_deployment(self, **kwargs): _service_name = kwargs['service_name'] @@ -1003,25 +1076,30 @@ def _get_deployment(self, **kwargs): response = self._perform_get( self._get_deployment_path_using_slot( - _service_name, _deployment_slot), None) + _service_name, + _deployment_slot + ), + None + ) if response.status != 200: - raise LibcloudError('Message: %s, Body: %s, Status code: %d' % - (response.error, response.body, response.status) - , driver=self) + raise LibcloudError( + 'Message: %s, Body: %s, Status code: %d' % + 
(response.error, response.body, response.status), + driver=self.connection.driver + ) return self._parse_response(response, Deployment) - def _get_cloud_service_location(self, service_name=None): if not service_name: raise ValueError("service_name is required.") res = self._perform_get( - self._get_hosted_service_path(service_name) + - '?embed-detail=False', - HostedService) + self._get_hosted_service_path(service_name) + '?embed-detail=False', + HostedService + ) _affinity_group = res.hosted_service_properties.affinity_group _cloud_service_location = res.hosted_service_properties.location @@ -1041,7 +1119,8 @@ def _is_storage_service_unique(self, service_name=None): self._get_storage_service_path() + '/operations/isavailable/' + _str(service_name) + '', - AvailabilityResponse) + AvailabilityResponse + ) return _check_availability.result @@ -1056,12 +1135,16 @@ def _create_storage_account(self, **kwargs): kwargs['location'], None, # Location True, # geo_replication_enabled - None)) # extended_properties + None # extended_properties + ) + ) if response.status != 202: - raise LibcloudError('Message: %s, Body: %s, Status code: %d' % - (response.error, response.body, - response.status), driver=self) + raise LibcloudError( + 'Message: %s, Body: %s, Status code: %d' % + (response.error, response.body, response.status), + driver=self.connection.driver + ) else: response = self._perform_post( @@ -1073,25 +1156,29 @@ def _create_storage_account(self, **kwargs): None, # Affinity Group kwargs['location'], # Location True, # geo_replication_enabled - None)) # extended_properties + None # extended_properties + ) + ) if response.status != 202: - raise LibcloudError('Message: %s, Body: %s, Status code: %d' % - (response.error, response.body, - response.status), driver=self) - + raise LibcloudError( + 'Message: %s, Body: %s, Status code: %d' % + (response.error, response.body, response.status), + driver=self.connection.driver + ) # We need to wait for this to be created before we 
can # create the storage container and the instance. - self._ex_complete_async_azure_operation(response, - "create_storage_account") - - return + self._ex_complete_async_azure_operation( + response, + "create_storage_account" + ) def _get_operation_status(self, request_id): return self._perform_get( '/' + self.subscription_id + '/operations/' + _str(request_id), - Operation) + Operation + ) def _perform_get(self, path, response_type): request = AzureHTTPRequest() @@ -1113,8 +1200,7 @@ def _perform_post(self, path, body, response_type=None, async=False): request.host = azure_service_management_host request.path = path request.body = self._get_request_body(body) - request.path, request.query = \ - self._update_request_uri_query(request) + request.path, request.query = self._update_request_uri_query(request) request.headers = self._update_management_header(request) response = self._perform_request(request) @@ -1130,34 +1216,33 @@ def _perform_delete(self, path, async=False): response = self._perform_request(request) if response.status != 202: - raise LibcloudError('Message: %s, Body: %s, Status code: %d' % - (response.error, response.body, response.status) - , driver=self) + raise LibcloudError( + 'Message: %s, Body: %s, Status code: %d' % + (response.error, response.body, response.status), + driver=self.connection.driver + ) if async: return self._parse_response_for_async_op(response) - return None - def _perform_request(self, request): try: return self.connection.request( action="https://%s%s" % (request.host, request.path), data=request.body, headers=request.headers, - method=request.method) + method=request.method + ) except Exception, e: print e.message - - - - def _update_request_uri_query(self, request): - '''pulls the query string out of the URI and moves it into + """ + pulls the query string out of the URI and moves it into the query portion of the request object. 
If there are already query parameters on the request the parameters in the URI will - appear after the existing parameters''' + appear after the existing parameters + """ if '?' in request.path: request.path, _, query_string = request.path.partition('?') @@ -1175,14 +1260,13 @@ def _update_request_uri_query(self, request): request.path += '?' for name, value in request.query: if value is not None: - request.path += name + '=' + \ - url_quote(value, '/()$=\',') + '&' + request.path += name + '=' + url_quote(value, '/()$=\',') + '&' request.path = request.path[:-1] return request.path, request.query def _update_management_header(self, request): - ''' Add additional headers for management. ''' + """ Add additional headers for management. """ if request.method in ['PUT', 'POST', 'MERGE', 'DELETE']: request.headers['Content-Length'] = str(len(request.body)) @@ -1196,38 +1280,42 @@ def _update_management_header(self, request): if 'content-type' == key.lower(): break else: - request.headers['Content-Type']='application/xml' + request.headers['Content-Type'] = 'application/xml' return request.headers def send_request_headers(self, connection, request_headers): - for name, value in request_headers: - if value: - connection.putheader(name, value) + for name, value in request_headers: + if value: + connection.putheader(name, value) - connection.putheader('User-Agent', _USER_AGENT_STRING) - connection.endheaders() + connection.putheader('User-Agent', _USER_AGENT_STRING) + connection.endheaders() def send_request_body(self, connection, request_body): - if request_body: - assert isinstance(request_body, bytes) - connection.send(request_body) - elif (not isinstance(connection, HTTPSConnection) and - not isinstance(connection, httplib.HTTPConnection)): - connection.send(None) + if request_body: + assert isinstance(request_body, bytes) + connection.send(request_body) + elif (not isinstance(connection, HTTPSConnection) and + not isinstance(connection, httplib.HTTPConnection)): + 
connection.send(None) def _parse_response(self, response, return_type): - ''' + """ Parse the HTTPResponse's body and fill all the data into a class of return_type. - ''' + """ + return self._parse_response_body_from_xml_text( - response.body, return_type) + response.body, + return_type + ) def _parse_response_body_from_xml_text(self, respbody, return_type): - ''' + """ parse the xml and fill all the data into a class of return_type - ''' + """ + doc = minidom.parseString(respbody) return_obj = return_type() for node in self._get_child_nodes(doc, return_type.__name__): @@ -1235,43 +1323,61 @@ def _parse_response_body_from_xml_text(self, respbody, return_type): return return_obj - def _get_child_nodes(self, node, tagName): - return [childNode for childNode in node.getElementsByTagName(tagName) + def _get_child_nodes(self, node, tag_name): + return [childNode for childNode in node.getElementsByTagName(tag_name) if childNode.parentNode == node] def _fill_data_to_return_object(self, node, return_obj): members = dict(vars(return_obj)) for name, value in members.items(): if isinstance(value, _list_of): - setattr(return_obj, - name, - self._fill_list_of(node, - value.list_type, - value.xml_element_name)) + setattr( + return_obj, + name, + self._fill_list_of( + node, + value.list_type, + value.xml_element_name + ) + ) elif isinstance(value, _scalar_list_of): - setattr(return_obj, - name, - self._fill_scalar_list_of(node, - value.list_type, - self._get_serialization_name(name), - value.xml_element_name)) + setattr( + return_obj, + name, + self._fill_scalar_list_of( + node, + value.list_type, + self._get_serialization_name(name), + value.xml_element_name + ) + ) elif isinstance(value, _dict_of): - setattr(return_obj, - name, - self._fill_dict_of(node, - self._get_serialization_name(name), - value.pair_xml_element_name, - value.key_xml_element_name, - value.value_xml_element_name)) + setattr( + return_obj, + name, + self._fill_dict_of( + node, + 
self._get_serialization_name(name), + value.pair_xml_element_name, + value.key_xml_element_name, + value.value_xml_element_name + ) + ) elif isinstance(value, WindowsAzureData): - setattr(return_obj, - name, - self._fill_instance_child(node, name, value.__class__)) + setattr( + return_obj, + name, + self._fill_instance_child(node, name, value.__class__) + ) elif isinstance(value, dict): - setattr(return_obj, - name, - self._fill_dict(node, - self._get_serialization_name(name))) + setattr( + return_obj, + name, + self._fill_dict( + node, + self._get_serialization_name(name) + ) + ) elif isinstance(value, _Base64String): value = self._fill_data_minidom(node, name, '') if value is not None: @@ -1287,27 +1393,33 @@ def _fill_data_to_return_object(self, node, return_obj): def _fill_list_of(self, xmldoc, element_type, xml_element_name): xmlelements = self._get_child_nodes(xmldoc, xml_element_name) - return [self._parse_response_body_from_xml_node( - xmlelement, element_type) \ - for xmlelement in xmlelements] + return [ + self._parse_response_body_from_xml_node(xmlelement, element_type) + for xmlelement in xmlelements + ] def _parse_response_body_from_xml_node(self, node, return_type): - ''' + """ parse the xml and fill all the data into a class of return_type - ''' + """ return_obj = return_type() self._fill_data_to_return_object(node, return_obj) return return_obj - def _fill_scalar_list_of(self, xmldoc, element_type, parent_xml_element_name, + def _fill_scalar_list_of(self, + xmldoc, + element_type, + parent_xml_element_name, xml_element_name): xmlelements = self._get_child_nodes(xmldoc, parent_xml_element_name) + if xmlelements: - xmlelements = \ - self._get_child_nodes(xmlelements[0], xml_element_name) - return [self._get_node_value(xmlelement, element_type) \ - for xmlelement in xmlelements] + xmlelements = self._get_child_nodes(xmlelements[0], xml_element_name) + return [ + self._get_node_value(xmlelement, element_type) + for xmlelement in xmlelements + ] def 
_get_node_value(self, xmlelement, data_type): value = xmlelement.firstChild.nodeValue @@ -1318,31 +1430,38 @@ def _get_node_value(self, xmlelement, data_type): else: return data_type(value) - def _get_serialization_name(self,element_name): + def _get_serialization_name(self, element_name): """converts a Python name into a serializable name""" + known = _KNOWN_SERIALIZATION_XFORMS.get(element_name) if known is not None: return known if element_name.startswith('x_ms_'): return element_name.replace('_', '-') + if element_name.endswith('_id'): element_name = element_name.replace('_id', 'ID') + for name in ['content_', 'last_modified', 'if_', 'cache_control']: if element_name.startswith(name): element_name = element_name.replace('_', '-_') return ''.join(name.capitalize() for name in element_name.split('_')) - def _fill_dict_of( - self, xmldoc, parent_xml_element_name, pair_xml_element_name, - key_xml_element_name, value_xml_element_name): + def _fill_dict_of(self, + xmldoc, + parent_xml_element_name, + pair_xml_element_name, + key_xml_element_name, + value_xml_element_name): + return_obj = {} xmlelements = self._get_child_nodes(xmldoc, parent_xml_element_name) + if xmlelements: - xmlelements = \ - self._get_child_nodes(xmlelements[0], pair_xml_element_name) + xmlelements = self._get_child_nodes(xmlelements[0], pair_xml_element_name) for pair in xmlelements: keys = self._get_child_nodes(pair, key_xml_element_name) values = self._get_child_nodes(pair, value_xml_element_name) @@ -1354,10 +1473,13 @@ def _fill_dict_of( return return_obj def _fill_instance_child(self, xmldoc, element_name, return_type): - '''Converts a child of the current dom element to the specified type. - ''' + """ + Converts a child of the current dom element to the specified type. 
+ """ xmlelements = self._get_child_nodes( - xmldoc, self._get_serialization_name(element_name)) + xmldoc, + self._get_serialization_name(element_name) + ) if not xmlelements: return None @@ -1369,6 +1491,7 @@ def _fill_instance_child(self, xmldoc, element_name, return_type): def _fill_dict(self, xmldoc, element_name): xmlelements = self._get_child_nodes(xmldoc, element_name) + if xmlelements: return_obj = {} for child in xmlelements[0].childNodes: @@ -1393,7 +1516,9 @@ def _decode_base64_to_text(self, data): def _fill_data_minidom(self, xmldoc, element_name, data_member): xmlelements = self._get_child_nodes( - xmldoc, self._get_serialization_name(element_name)) + xmldoc, + self._get_serialization_name(element_name) + ) if not xmlelements or not xmlelements[0].childNodes: return None @@ -1440,20 +1565,20 @@ def _convert_class_to_xml(self, source, xml_prefix=True): xmlstr = '' if isinstance(source, list): - for value in source: - xmlstr += self._convert_class_to_xml(value, False) + xmlstr += ''.join(self._convert_class_to_xml(value, False) for value in source) elif isinstance(source, WindowsAzureData): class_name = source.__class__.__name__ xmlstr += '<' + class_name + '>' for name, value in vars(source).items(): if value is not None: - if isinstance(value, list) or \ - isinstance(value, WindowsAzureData): + if isinstance(value, list) or isinstance(value, WindowsAzureData): xmlstr += self._convert_class_to_xml(value, False) else: - xmlstr += ('<' + self._get_serialization_name(name) + - '>' + xml_escape(str(value)) + '') + xmlstr += ( + '<' + self._get_serialization_name(name) + '>' + + xml_escape(str(value)) + + '' + ) xmlstr += '' return xmlstr @@ -1469,17 +1594,21 @@ def _parse_response_for_async_op(self, response): return result - def _get_deployment_path_using_name(self, service_name, - deployment_name=None): - return self._get_path('services/hostedservices/' - + _str(service_name) + - '/deployments', deployment_name) + def _get_deployment_path_using_name(self, 
+ service_name, + deployment_name=None): + return self._get_path( + 'services/hostedservices/' + + _str(service_name) + + '/deployments', + deployment_name + ) def _get_path(self, resource, name): - path = '/' + self.subscription_id + '/' + resource - if name is not None: - path += '/' + _str(name) - return path + path = '/' + self.subscription_id + '/' + resource + if name is not None: + path += '/' + _str(name) + return path def _get_image_path(self, image_name=None): return self._get_path('services/images', image_name) @@ -1498,14 +1627,20 @@ def _get_disk_path(self, disk_name=None): return self._get_path('services/disks', disk_name) def _get_role_path(self, service_name, deployment_name, role_name=None): - return self._get_path('services/hostedservices/' + _str(service_name) + - '/deployments/' + deployment_name + - '/roles', role_name) + return self._get_path( + 'services/hostedservices/' + + _str(service_name) + + '/deployments/' + + deployment_name + + '/roles', + role_name + ) def _get_storage_service_path(self, service_name=None): return self._get_path('services/storageservices', service_name) - def _ex_complete_async_azure_operation(self, response=None, + def _ex_complete_async_azure_operation(self, + response=None, operation_type='create_node'): request_id = self._parse_response_for_async_op(response) @@ -1525,8 +1660,10 @@ def _ex_complete_async_azure_operation(self, response=None, if operation_status.status == 'Failed': raise LibcloudError( - 'Message: Async request for operation %s has failed'% - operation_type, driver=self) + 'Message: Async request for operation %s has failed' % + operation_type, + driver=self.connection.driver + ) #def get_connection(self): # certificate_path = "/Users/baldwin/.azure/managementCertificate.pem" @@ -1539,284 +1676,396 @@ def _ex_complete_async_azure_operation(self, response=None, # return connection -"""XML Serializer +""" +XML Serializer Borrowed from the Azure SDK for Python. 
""" + + def _lower(text): return text.lower() + class AzureXmlSerializer(): @staticmethod - def create_storage_service_input_to_xml(service_name, description, label, - affinity_group, location, + def create_storage_service_input_to_xml(service_name, + description, + label, + affinity_group, + location, geo_replication_enabled, extended_properties): return AzureXmlSerializer.doc_from_data( 'CreateStorageServiceInput', - [('ServiceName', service_name), - ('Description', description), - ('Label', label), - ('AffinityGroup', affinity_group), - ('Location', location), - ('GeoReplicationEnabled', geo_replication_enabled, _lower)], - extended_properties) + [ + ('ServiceName', service_name), + ('Description', description), + ('Label', label), + ('AffinityGroup', affinity_group), + ('Location', location), + ('GeoReplicationEnabled', geo_replication_enabled, _lower) + ], + extended_properties + ) @staticmethod - def update_storage_service_input_to_xml(description, label, + def update_storage_service_input_to_xml(description, + label, geo_replication_enabled, extended_properties): return AzureXmlSerializer.doc_from_data( 'UpdateStorageServiceInput', - [('Description', description), - ('Label', label, AzureNodeDriver._encode_base64), - ('GeoReplicationEnabled', geo_replication_enabled, _lower)], - extended_properties) + [ + ('Description', description), + ('Label', label, AzureNodeDriver._encode_base64), + ('GeoReplicationEnabled', geo_replication_enabled, _lower) + ], + extended_properties + ) @staticmethod def regenerate_keys_to_xml(key_type): - return AzureXmlSerializer.doc_from_data('RegenerateKeys', - [('KeyType', key_type)]) + return AzureXmlSerializer.doc_from_data( + 'RegenerateKeys', + [('KeyType', key_type)] + ) @staticmethod def update_hosted_service_to_xml(label, description, extended_properties): - return AzureXmlSerializer.doc_from_data('UpdateHostedService', - [('Label', label, - AzureNodeDriver._encode_base64), - ('Description', description)], - 
extended_properties) + return AzureXmlSerializer.doc_from_data( + 'UpdateHostedService', + [ + ('Label', label, AzureNodeDriver._encode_base64), + ('Description', description) + ], + extended_properties + ) @staticmethod - def create_hosted_service_to_xml(service_name, label, description, - location, affinity_group, + def create_hosted_service_to_xml(service_name, + label, + description, + location, + affinity_group, extended_properties): return AzureXmlSerializer.doc_from_data( 'CreateHostedService', - [('ServiceName', service_name), - ('Label', label), - ('Description', description), - ('Location', location), - ('AffinityGroup', affinity_group)], - extended_properties) + [ + ('ServiceName', service_name), + ('Label', label), + ('Description', description), + ('Location', location), + ('AffinityGroup', affinity_group) + ], + extended_properties + ) @staticmethod - def create_deployment_to_xml(name, package_url, label, configuration, - start_deployment, treat_warnings_as_error, + def create_deployment_to_xml(name, + package_url, + label, + configuration, + start_deployment, + treat_warnings_as_error, extended_properties): return AzureXmlSerializer.doc_from_data( 'CreateDeployment', - [('Name', name), - ('PackageUrl', package_url), - ('Label', label, AzureNodeDriver._encode_base64), - ('Configuration', configuration), - ('StartDeployment', - start_deployment, _lower), - ('TreatWarningsAsError', treat_warnings_as_error, _lower)], - extended_properties) + [ + ('Name', name), + ('PackageUrl', package_url), + ('Label', label, AzureNodeDriver._encode_base64), + ('Configuration', configuration), + ('StartDeployment', start_deployment, _lower), + ('TreatWarningsAsError', treat_warnings_as_error, _lower) + ], + extended_properties + ) @staticmethod def swap_deployment_to_xml(production, source_deployment): return AzureXmlSerializer.doc_from_data( 'Swap', - [('Production', production), - ('SourceDeployment', source_deployment)]) + [ + ('Production', production), + 
('SourceDeployment', source_deployment) + ] + ) @staticmethod def update_deployment_status_to_xml(status): return AzureXmlSerializer.doc_from_data( 'UpdateDeploymentStatus', - [('Status', status)]) + [('Status', status)] + ) @staticmethod - def change_deployment_to_xml(configuration, treat_warnings_as_error, mode, + def change_deployment_to_xml(configuration, + treat_warnings_as_error, + mode, extended_properties): return AzureXmlSerializer.doc_from_data( 'ChangeConfiguration', - [('Configuration', configuration), - ('TreatWarningsAsError', treat_warnings_as_error, _lower), - ('Mode', mode)], - extended_properties) + [ + ('Configuration', configuration), + ('TreatWarningsAsError', treat_warnings_as_error, _lower), + ('Mode', mode) + ], + extended_properties + ) @staticmethod - def upgrade_deployment_to_xml(mode, package_url, configuration, label, - role_to_upgrade, force, extended_properties): + def upgrade_deployment_to_xml(mode, + package_url, + configuration, + label, + role_to_upgrade, + force, + extended_properties): return AzureXmlSerializer.doc_from_data( 'UpgradeDeployment', - [('Mode', mode), - ('PackageUrl', package_url), - ('Configuration', configuration), - ('Label', label, AzureNodeDriver._encode_base64), - ('RoleToUpgrade', role_to_upgrade), - ('Force', force, _lower)], - extended_properties) + [ + ('Mode', mode), + ('PackageUrl', package_url), + ('Configuration', configuration), + ('Label', label, AzureNodeDriver._encode_base64), + ('RoleToUpgrade', role_to_upgrade), + ('Force', force, _lower) + ], + extended_properties + ) @staticmethod def rollback_upgrade_to_xml(mode, force): return AzureXmlSerializer.doc_from_data( 'RollbackUpdateOrUpgrade', - [('Mode', mode), - ('Force', force, _lower)]) + [ + ('Mode', mode), + ('Force', force, _lower) + ] + ) @staticmethod def walk_upgrade_domain_to_xml(upgrade_domain): return AzureXmlSerializer.doc_from_data( 'WalkUpgradeDomain', - [('UpgradeDomain', upgrade_domain)]) + [('UpgradeDomain', upgrade_domain)] + ) 
@staticmethod def certificate_file_to_xml(data, certificate_format, password): return AzureXmlSerializer.doc_from_data( 'CertificateFile', - [('Data', data), - ('CertificateFormat', certificate_format), - ('Password', password)]) + [ + ('Data', data), + ('CertificateFormat', certificate_format), + ('Password', password) + ] + ) @staticmethod def create_affinity_group_to_xml(name, label, description, location): return AzureXmlSerializer.doc_from_data( 'CreateAffinityGroup', - [('Name', name), - ('Label', label, AzureNodeDriver._encode_base64), - ('Description', description), - ('Location', location)]) + [ + ('Name', name), + ('Label', label, AzureNodeDriver._encode_base64), + ('Description', description), + ('Location', location) + ] + ) @staticmethod def update_affinity_group_to_xml(label, description): return AzureXmlSerializer.doc_from_data( 'UpdateAffinityGroup', - [('Label', label, AzureNodeDriver._encode_base64), - ('Description', description)]) + [ + ('Label', label, AzureNodeDriver._encode_base64), + ('Description', description) + ] + ) @staticmethod def subscription_certificate_to_xml(public_key, thumbprint, data): return AzureXmlSerializer.doc_from_data( 'SubscriptionCertificate', - [('SubscriptionCertificatePublicKey', public_key), - ('SubscriptionCertificateThumbprint', thumbprint), - ('SubscriptionCertificateData', data)]) + [ + ('SubscriptionCertificatePublicKey', public_key), + ('SubscriptionCertificateThumbprint', thumbprint), + ('SubscriptionCertificateData', data) + ] + ) @staticmethod def os_image_to_xml(label, media_link, name, os): return AzureXmlSerializer.doc_from_data( 'OSImage', - [('Label', label), - ('MediaLink', media_link), - ('Name', name), - ('OS', os)]) + [ + ('Label', label), + ('MediaLink', media_link), + ('Name', name), + ('OS', os) + ] + ) @staticmethod - def data_virtual_hard_disk_to_xml(host_caching, disk_label, disk_name, lun, - logical_disk_size_in_gb, media_link, + def data_virtual_hard_disk_to_xml(host_caching, + disk_label, 
+ disk_name, + lun, + logical_disk_size_in_gb, + media_link, source_media_link): return AzureXmlSerializer.doc_from_data( 'DataVirtualHardDisk', - [('HostCaching', host_caching), - ('DiskLabel', disk_label), - ('DiskName', disk_name), - ('Lun', lun), - ('LogicalDiskSizeInGB', logical_disk_size_in_gb), - ('MediaLink', media_link), - ('SourceMediaLink', source_media_link)]) + [ + ('HostCaching', host_caching), + ('DiskLabel', disk_label), + ('DiskName', disk_name), + ('Lun', lun), + ('LogicalDiskSizeInGB', logical_disk_size_in_gb), + ('MediaLink', media_link), + ('SourceMediaLink', source_media_link) + ] + ) @staticmethod def disk_to_xml(has_operating_system, label, media_link, name, os): return AzureXmlSerializer.doc_from_data( 'Disk', - [('HasOperatingSystem', has_operating_system, _lower), - ('Label', label), - ('MediaLink', media_link), - ('Name', name), - ('OS', os)]) + [ + ('HasOperatingSystem', has_operating_system, _lower), + ('Label', label), + ('MediaLink', media_link), + ('Name', name), + ('OS', os) + ] + ) @staticmethod def restart_role_operation_to_xml(): return AzureXmlSerializer.doc_from_xml( 'RestartRoleOperation', - 'RestartRoleOperation') + 'RestartRoleOperation' + ) @staticmethod def shutdown_role_operation_to_xml(): return AzureXmlSerializer.doc_from_xml( 'ShutdownRoleOperation', - 'ShutdownRoleOperation') + 'ShutdownRoleOperation' + ) @staticmethod def start_role_operation_to_xml(): return AzureXmlSerializer.doc_from_xml( 'StartRoleOperation', - 'StartRoleOperation') + 'StartRoleOperation' + ) @staticmethod def windows_configuration_to_xml(configuration): xml = AzureXmlSerializer.data_to_xml( - [('ConfigurationSetType', configuration.configuration_set_type), - ('ComputerName', configuration.computer_name), - ('AdminPassword', configuration.admin_password), - ('ResetPasswordOnFirstLogon', - configuration.reset_password_on_first_logon, - _lower), - ('EnableAutomaticUpdates', - configuration.enable_automatic_updates, - _lower), - ('TimeZone', 
configuration.time_zone)]) + [ + ('ConfigurationSetType', configuration.configuration_set_type), + ('ComputerName', configuration.computer_name), + ('AdminPassword', configuration.admin_password), + ( + 'ResetPasswordOnFirstLogon', + configuration.reset_password_on_first_logon, + _lower + ), + ( + 'EnableAutomaticUpdates', + configuration.enable_automatic_updates, + _lower + ), + ('TimeZone', configuration.time_zone) + ] + ) if configuration.domain_join is not None: xml += '' xml += '' xml += AzureXmlSerializer.data_to_xml( - [('Domain', configuration.domain_join.credentials.domain), - ('Username', configuration.domain_join.credentials.username), - ('Password', configuration.domain_join.credentials.password)]) + [ + ('Domain', configuration.domain_join.credentials.domain), + ('Username', configuration.domain_join.credentials.username), + ('Password', configuration.domain_join.credentials.password) + ] + ) xml += '' xml += AzureXmlSerializer.data_to_xml( - [('JoinDomain', configuration.domain_join.join_domain), - ('MachineObjectOU', - configuration.domain_join.machine_object_ou)]) + [ + ('JoinDomain', configuration.domain_join.join_domain), + ( + 'MachineObjectOU', + configuration.domain_join.machine_object_ou + ) + ] + ) xml += '' if configuration.stored_certificate_settings is not None: xml += '' + for cert in configuration.stored_certificate_settings: xml += '' xml += AzureXmlSerializer.data_to_xml( - [('StoreLocation', cert.store_location), - ('StoreName', cert.store_name), - ('Thumbprint', cert.thumbprint)]) + [ + ('StoreLocation', cert.store_location), + ('StoreName', cert.store_name), + ('Thumbprint', cert.thumbprint) + ] + ) xml += '' xml += '' xml += AzureXmlSerializer.data_to_xml( - [('AdminUsername', configuration.admin_user_name)]) + [('AdminUsername', configuration.admin_user_name)] + ) return xml @staticmethod def linux_configuration_to_xml(configuration): xml = AzureXmlSerializer.data_to_xml( - [('ConfigurationSetType', 
configuration.configuration_set_type), - ('HostName', configuration.host_name), - ('UserName', configuration.user_name), - ('UserPassword', configuration.user_password), - ('DisableSshPasswordAuthentication', - configuration.disable_ssh_password_authentication, - _lower)]) + [ + ('ConfigurationSetType', configuration.configuration_set_type), + ('HostName', configuration.host_name), + ('UserName', configuration.user_name), + ('UserPassword', configuration.user_password), + ( + 'DisableSshPasswordAuthentication', + configuration.disable_ssh_password_authentication, + _lower + ) + ] + ) if configuration.ssh is not None: xml += '' xml += '' + for key in configuration.ssh.public_keys: xml += '' xml += AzureXmlSerializer.data_to_xml( - [('Fingerprint', key.fingerprint), - ('Path', key.path)]) + [ + ('Fingerprint', key.fingerprint), + ('Path', key.path) + ] + ) xml += '' + xml += '' xml += '' + for key in configuration.ssh.key_pairs: xml += '' xml += AzureXmlSerializer.data_to_xml( [('Fingerprint', key.fingerprint), ('Path', key.path)]) xml += '' + xml += '' xml += '' return xml @@ -1824,25 +2073,35 @@ def linux_configuration_to_xml(configuration): @staticmethod def network_configuration_to_xml(configuration): xml = AzureXmlSerializer.data_to_xml( - [('ConfigurationSetType', configuration.configuration_set_type)]) + [('ConfigurationSetType', configuration.configuration_set_type)] + ) xml += '' + for endpoint in configuration.input_endpoints: xml += '' xml += AzureXmlSerializer.data_to_xml( - [('LoadBalancedEndpointSetName', - endpoint.load_balanced_endpoint_set_name), - ('LocalPort', endpoint.local_port), - ('Name', endpoint.name), - ('Port', endpoint.port)]) - - if endpoint.load_balancer_probe.path or\ - endpoint.load_balancer_probe.port or\ - endpoint.load_balancer_probe.protocol: + [ + ( + 'LoadBalancedEndpointSetName', + endpoint.load_balanced_endpoint_set_name + ), + ('LocalPort', endpoint.local_port), + ('Name', endpoint.name), + ('Port', endpoint.port) + ] + ) + + 
if (endpoint.load_balancer_probe.path or + endpoint.load_balancer_probe.port or + endpoint.load_balancer_probe.protocol): xml += '' xml += AzureXmlSerializer.data_to_xml( - [('Path', endpoint.load_balancer_probe.path), - ('Port', endpoint.load_balancer_probe.port), - ('Protocol', endpoint.load_balancer_probe.protocol)]) + [ + ('Path', endpoint.load_balancer_probe.path), + ('Port', endpoint.load_balancer_probe.port), + ('Protocol', endpoint.load_balancer_probe.protocol) + ] + ) xml += '' xml += AzureXmlSerializer.data_to_xml( @@ -1854,17 +2113,29 @@ def network_configuration_to_xml(configuration): xml += '' xml += '' xml += '' - for name in configuration.subnet_names: - xml += AzureXmlSerializer.data_to_xml([('SubnetName', name)]) + xml += ''.join( + AzureXmlSerializer.data_to_xml([('SubnetName', name)]) + for name in configuration.subnet_names + ) xml += '' return xml @staticmethod - def role_to_xml(availability_set_name, data_virtual_hard_disks, - network_configuration_set, os_virtual_hard_disk, vm_image_name, role_name, - role_size, role_type, system_configuration_set): - xml = AzureXmlSerializer.data_to_xml([('RoleName', role_name), - ('RoleType', role_type)]) + def role_to_xml(availability_set_name, + data_virtual_hard_disks, + network_configuration_set, + os_virtual_hard_disk, + vm_image_name, + role_name, + role_size, + role_type, + system_configuration_set): + xml = AzureXmlSerializer.data_to_xml( + [ + ('RoleName', role_name), + ('RoleType', role_type) + ] + ) xml += '' @@ -1872,46 +2143,56 @@ def role_to_xml(availability_set_name, data_virtual_hard_disks, xml += '' if isinstance(system_configuration_set, WindowsConfigurationSet): xml += AzureXmlSerializer.windows_configuration_to_xml( - system_configuration_set) + system_configuration_set + ) elif isinstance(system_configuration_set, LinuxConfigurationSet): xml += AzureXmlSerializer.linux_configuration_to_xml( - system_configuration_set) + system_configuration_set + ) xml += '' if network_configuration_set 
is not None: xml += '' xml += AzureXmlSerializer.network_configuration_to_xml( - network_configuration_set) + network_configuration_set + ) xml += '' xml += '' if availability_set_name is not None: xml += AzureXmlSerializer.data_to_xml( - [('AvailabilitySetName', availability_set_name)]) + [('AvailabilitySetName', availability_set_name)] + ) if data_virtual_hard_disks is not None: xml += '' for hd in data_virtual_hard_disks: xml += '' xml += AzureXmlSerializer.data_to_xml( - [('HostCaching', hd.host_caching), - ('DiskLabel', hd.disk_label), - ('DiskName', hd.disk_name), - ('Lun', hd.lun), - ('LogicalDiskSizeInGB', hd.logical_disk_size_in_gb), - ('MediaLink', hd.media_link)]) + [ + ('HostCaching', hd.host_caching), + ('DiskLabel', hd.disk_label), + ('DiskName', hd.disk_name), + ('Lun', hd.lun), + ('LogicalDiskSizeInGB', hd.logical_disk_size_in_gb), + ('MediaLink', hd.media_link) + ] + ) xml += '' xml += '' if os_virtual_hard_disk is not None: xml += '' xml += AzureXmlSerializer.data_to_xml( - [('HostCaching', os_virtual_hard_disk.host_caching), - ('DiskLabel', os_virtual_hard_disk.disk_label), - ('DiskName', os_virtual_hard_disk.disk_name), - ('MediaLink', os_virtual_hard_disk.media_link), - ('SourceImageName', os_virtual_hard_disk.source_image_name)]) + [ + ('HostCaching', os_virtual_hard_disk.host_caching), + ('DiskLabel', os_virtual_hard_disk.disk_label), + ('DiskName', os_virtual_hard_disk.disk_name), + ('MediaLink', os_virtual_hard_disk.media_link), + ('SourceImageName', os_virtual_hard_disk.source_image_name) + ] + ) xml += '' if vm_image_name is not None: @@ -1923,10 +2204,15 @@ def role_to_xml(availability_set_name, data_virtual_hard_disks, return xml @staticmethod - def add_role_to_xml(role_name, system_configuration_set, - os_virtual_hard_disk, role_type, - network_configuration_set, availability_set_name, - data_virtual_hard_disks, vm_image_name, role_size): + def add_role_to_xml(role_name, + system_configuration_set, + os_virtual_hard_disk, + role_type, 
+ network_configuration_set, + availability_set_name, + data_virtual_hard_disks, + vm_image_name, + role_size): xml = AzureXmlSerializer.role_to_xml( availability_set_name, data_virtual_hard_disks, @@ -1936,13 +2222,17 @@ def add_role_to_xml(role_name, system_configuration_set, role_name, role_size, role_type, - system_configuration_set) + system_configuration_set + ) return AzureXmlSerializer.doc_from_xml('PersistentVMRole', xml) @staticmethod - def update_role_to_xml(role_name, os_virtual_hard_disk, role_type, - network_configuration_set, availability_set_name, - data_virtual_hard_disks, role_size): + def update_role_to_xml(role_name, + os_virtual_hard_disk, + role_type, + network_configuration_set, + availability_set_name, + data_virtual_hard_disks, role_size): xml = AzureXmlSerializer.role_to_xml( availability_set_name, data_virtual_hard_disks, @@ -1951,44 +2241,64 @@ def update_role_to_xml(role_name, os_virtual_hard_disk, role_type, role_name, role_size, role_type, - None) + None + ) return AzureXmlSerializer.doc_from_xml('PersistentVMRole', xml) @staticmethod - def capture_role_to_xml(post_capture_action, target_image_name, - target_image_label, provisioning_configuration): + def capture_role_to_xml(post_capture_action, + target_image_name, + target_image_label, + provisioning_configuration): xml = AzureXmlSerializer.data_to_xml( - [('OperationType', 'CaptureRoleOperation'), - ('PostCaptureAction', post_capture_action)]) + [ + ('OperationType', 'CaptureRoleOperation'), + ('PostCaptureAction', post_capture_action) + ] + ) if provisioning_configuration is not None: xml += '' if isinstance(provisioning_configuration, WindowsConfigurationSet): xml += AzureXmlSerializer.windows_configuration_to_xml( - provisioning_configuration) + provisioning_configuration + ) elif isinstance(provisioning_configuration, LinuxConfigurationSet): xml += AzureXmlSerializer.linux_configuration_to_xml( - provisioning_configuration) + provisioning_configuration + ) xml += '' xml += 
AzureXmlSerializer.data_to_xml( - [('TargetImageLabel', target_image_label), - ('TargetImageName', target_image_name)]) + [ + ('TargetImageLabel', target_image_label), + ('TargetImageName', target_image_name) + ] + ) return AzureXmlSerializer.doc_from_xml('CaptureRoleOperation', xml) @staticmethod - def virtual_machine_deployment_to_xml(deployment_name, deployment_slot, - label, role_name, + def virtual_machine_deployment_to_xml(deployment_name, + deployment_slot, + label, + role_name, system_configuration_set, - os_virtual_hard_disk, role_type, + os_virtual_hard_disk, + role_type, network_configuration_set, availability_set_name, - data_virtual_hard_disks, role_size, - virtual_network_name, vm_image_name): - xml = AzureXmlSerializer.data_to_xml([('Name', deployment_name), - ('DeploymentSlot', deployment_slot), - ('Label', label)]) + data_virtual_hard_disks, + role_size, + virtual_network_name, + vm_image_name): + xml = AzureXmlSerializer.data_to_xml( + [ + ('Name', deployment_name), + ('DeploymentSlot', deployment_slot), + ('Label', label) + ] + ) xml += '' xml += '' xml += AzureXmlSerializer.role_to_xml( @@ -2000,23 +2310,26 @@ def virtual_machine_deployment_to_xml(deployment_name, deployment_slot, role_name, role_size, role_type, - system_configuration_set) + system_configuration_set + ) xml += '' xml += '' if virtual_network_name is not None: xml += AzureXmlSerializer.data_to_xml( - [('VirtualNetworkName', virtual_network_name)]) + [('VirtualNetworkName', virtual_network_name)] + ) return AzureXmlSerializer.doc_from_xml('Deployment', xml) @staticmethod def data_to_xml(data): - '''Creates an xml fragment from the specified data. + """ + Creates an xml fragment from the specified data. 
data: Array of tuples, where first: xml element name second: xml element text third: conversion function - ''' + """ xml = '' for element in data: name = element[0] @@ -2037,11 +2350,18 @@ def data_to_xml(data): @staticmethod def doc_from_xml(document_element_name, inner_xml): - '''Wraps the specified xml in an xml root element with default azure - namespaces''' - xml = ''.join(['<', document_element_name, - ' xmlns:i="http://www.w3.org/2001/XMLSchema-instance"', - ' xmlns="http://schemas.microsoft.com/windowsazure">']) + """ + Wraps the specified xml in an xml root element with default azure + namespaces + """ + xml = ''.join( + [ + '<', + document_element_name, + ' xmlns:i="http://www.w3.org/2001/XMLSchema-instance"', + ' xmlns="http://schemas.microsoft.com/windowsazure">' + ] + ) xml += inner_xml xml += ''.join(['']) return xml @@ -2051,7 +2371,8 @@ def doc_from_data(document_element_name, data, extended_properties=None): xml = AzureXmlSerializer.data_to_xml(data) if extended_properties is not None: xml += AzureXmlSerializer.extended_properties_dict_to_xml_fragment( - extended_properties) + extended_properties + ) return AzureXmlSerializer.doc_from_xml(document_element_name, xml) @staticmethod @@ -2060,34 +2381,63 @@ def extended_properties_dict_to_xml_fragment(extended_properties): if extended_properties is not None and len(extended_properties) > 0: xml += '' for key, val in extended_properties.items(): - xml += ''.join(['', - '', - _str(key), - '', - '', - _str(val), - '', - '']) + xml += ''.join( + [ + '', + '', + _str(key), + '', + '', + _str(val), + '', + '' + ] + ) xml += '' return xml - - -"""Data Classes +""" +Data Classes Borrowed from the Azure SDK for Python. """ + class WindowsAzureData(object): - ''' This is the base of data class. - It is only used to check whether it is instance or not. ''' + """ + This is the base of data class. + It is only used to check whether it is instance or not. 
+ """ pass + +class WindowsAzureDataTypedList(WindowsAzureData): + + list_type = None + xml_element_name = None + + def __init__(self): + self.items = _list_of(self.list_type, self.xml_element_name) + + def __iter__(self): + return iter(self.items) + + def __len__(self): + return len(self.items) + + def __getitem__(self, index): + return self.items[index] + + class OSVirtualHardDisk(WindowsAzureData): - def __init__(self, source_image_name=None, media_link=None, - host_caching=None, disk_label=None, disk_name=None): + def __init__(self, + source_image_name=None, + media_link=None, + host_caching=None, + disk_label=None, + disk_name=None): self.source_image_name = source_image_name self.media_link = media_link self.host_caching = host_caching @@ -2095,24 +2445,31 @@ def __init__(self, source_image_name=None, media_link=None, self.disk_name = disk_name self.os = u'' # undocumented, not used when adding a role + class LinuxConfigurationSet(WindowsAzureData): - def __init__(self, host_name=None, user_name=None, user_password=None, + def __init__(self, + host_name=None, + user_name=None, + user_password=None, disable_ssh_password_authentication=None): self.configuration_set_type = u'LinuxProvisioningConfiguration' self.host_name = host_name self.user_name = user_name self.user_password = user_password - self.disable_ssh_password_authentication =\ - disable_ssh_password_authentication + self.disable_ssh_password_authentication = disable_ssh_password_authentication self.ssh = SSH() + class WindowsConfigurationSet(WindowsAzureData): - def __init__(self, computer_name=None, admin_password=None, + def __init__(self, + computer_name=None, + admin_password=None, reset_password_on_first_logon=None, enable_automatic_updates=None, - time_zone=None, admin_user_name=None): + time_zone=None, + admin_user_name=None): self.configuration_set_type = u'WindowsProvisioningConfiguration' self.computer_name = computer_name self.admin_password = admin_password @@ -2123,6 +2480,7 @@ def 
__init__(self, computer_name=None, admin_password=None, self.domain_join = DomainJoin() self.stored_certificate_settings = StoredCertificateSettings() + class DomainJoin(WindowsAzureData): def __init__(self): @@ -2130,6 +2488,7 @@ def __init__(self): self.join_domain = u'' self.machine_object_ou = u'' + class Credentials(WindowsAzureData): def __init__(self): @@ -2137,23 +2496,10 @@ def __init__(self): self.username = u'' self.password = u'' -class StoredCertificateSettings(WindowsAzureData): - - def __init__(self): - self.stored_certificate_settings = _list_of(CertificateSetting) - - def __iter__(self): - return iter(self.stored_certificate_settings) - - def __len__(self): - return len(self.stored_certificate_settings) - - def __getitem__(self, index): - return self.stored_certificate_settings[index] class CertificateSetting(WindowsAzureData): - ''' + """ Initializes a certificate setting. thumbprint: @@ -2165,32 +2511,25 @@ class CertificateSetting(WindowsAzureData): store_location: Specifies the target certificate store location on the virtual machine. The only supported value is LocalMachine. 
- ''' + """ def __init__(self, thumbprint=u'', store_name=u'', store_location=u''): self.thumbprint = thumbprint self.store_name = store_name self.store_location = store_location -class SSH(WindowsAzureData): - def __init__(self): - self.public_keys = PublicKeys() - self.key_pairs = KeyPairs() +class StoredCertificateSettings(WindowsAzureDataTypedList): -class PublicKeys(WindowsAzureData): + list_type = CertificateSetting - def __init__(self): - self.public_keys = _list_of(PublicKey) - def __iter__(self): - return iter(self.public_keys) +class SSH(WindowsAzureData): - def __len__(self): - return len(self.public_keys) + def __init__(self): + self.public_keys = PublicKeys() + self.key_pairs = KeyPairs() - def __getitem__(self, index): - return self.public_keys[index] class PublicKey(WindowsAzureData): @@ -2198,26 +2537,24 @@ def __init__(self, fingerprint=u'', path=u''): self.fingerprint = fingerprint self.path = path -class KeyPairs(WindowsAzureData): - def __init__(self): - self.key_pairs = _list_of(KeyPair) - - def __iter__(self): - return iter(self.key_pairs) +class PublicKeys(WindowsAzureDataTypedList): - def __len__(self): - return len(self.key_pairs) + list_type = PublicKey - def __getitem__(self, index): - return self.key_pairs[index] -class KeyPair(WindowsAzureData): +class AzureKeyPair(WindowsAzureData): def __init__(self, fingerprint=u'', path=u''): self.fingerprint = fingerprint self.path = path + +class KeyPairs(WindowsAzureDataTypedList): + + list_type = AzureKeyPair + + class LoadBalancerProbe(WindowsAzureData): def __init__(self): @@ -2225,19 +2562,6 @@ def __init__(self): self.port = u'' self.protocol = u'' -class ConfigurationSets(WindowsAzureData): - - def __init__(self): - self.configuration_sets = _list_of(ConfigurationSet) - - def __iter__(self): - return iter(self.configuration_sets) - - def __len__(self): - return len(self.configuration_sets) - - def __getitem__(self, index): - return self.configuration_sets[index] class 
ConfigurationSet(WindowsAzureData): @@ -2247,24 +2571,19 @@ def __init__(self): self.input_endpoints = ConfigurationSetInputEndpoints() self.subnet_names = _scalar_list_of(str, 'SubnetName') -class ConfigurationSetInputEndpoints(WindowsAzureData): - def __init__(self): - self.input_endpoints = _list_of( - ConfigurationSetInputEndpoint, 'InputEndpoint') +class ConfigurationSets(WindowsAzureDataTypedList): - def __iter__(self): - return iter(self.input_endpoints) + list_type = ConfigurationSet - def __len__(self): - return len(self.input_endpoints) - - def __getitem__(self, index): - return self.input_endpoints[index] class ConfigurationSetInputEndpoint(WindowsAzureData): - def __init__(self, name=u'', protocol=u'', port=u'', local_port=u'', + def __init__(self, + name=u'', + protocol=u'', + port=u'', + local_port=u'', load_balanced_endpoint_set_name=u'', enable_direct_server_return=False): self.enable_direct_server_return = enable_direct_server_return @@ -2275,19 +2594,12 @@ def __init__(self, name=u'', protocol=u'', port=u'', local_port=u'', self.load_balancer_probe = LoadBalancerProbe() self.protocol = protocol -class Locations(WindowsAzureData): - def __init__(self): - self.locations = _list_of(Location) +class ConfigurationSetInputEndpoints(WindowsAzureDataTypedList): - def __iter__(self): - return iter(self.locations) - - def __len__(self): - return len(self.locations) + list_type = ConfigurationSetInputEndpoint + xml_element_name = 'InputEndpoint' - def __getitem__(self, index): - return self.locations[index] class Location(WindowsAzureData): @@ -2298,37 +2610,22 @@ def __init__(self): self.compute_capabilities = ComputeCapability() +class Locations(WindowsAzureDataTypedList): + + list_type = Location + class ComputeCapability(WindowsAzureData): def __init__(self): self.virtual_machines_role_sizes = _scalar_list_of(str, 'RoleSize') + class VirtualMachinesRoleSizes(WindowsAzureData): def __init__(self): self.role_size = _scalar_list_of(str, 'RoleSize') -class 
Images(WindowsAzureData): - - def __init__(self): - self.images = _list_of(OSImage) - - def __iter__(self): - return iter(self.images) - - def __len__(self): - return len(self.images) - - def __getitem__(self, index): - return self.images[index] - - -class VMImages(Images): - - def __init__(self): - self.images = _list_of(VMImage) - class OSImage(WindowsAzureData): @@ -2344,6 +2641,12 @@ def __init__(self): self.eula = u'' self.description = u'' + +class Images(WindowsAzureDataTypedList): + + list_type = OSImage + + class VMImage(WindowsAzureData): def __init__(self): @@ -2357,19 +2660,24 @@ def __init__(self): self.location = u'' self.affinity_group = u'' -class HostedServices(WindowsAzureData): + +class VMImages(WindowsAzureDataTypedList): + + list_type = VMImage + + +class VirtualIP(WindowsAzureData): def __init__(self): - self.hosted_services = _list_of(HostedService) + self.address = u'' + self.is_dns_programmed = u'' + self.name = u'' - def __iter__(self): - return iter(self.hosted_services) - def __len__(self): - return len(self.hosted_services) +class VirtualIPs(WindowsAzureDataTypedList): + + list_type = VirtualIP - def __getitem__(self, index): - return self.hosted_services[index] class HostedService(WindowsAzureData): @@ -2379,6 +2687,12 @@ def __init__(self): self.hosted_service_properties = HostedServiceProperties() self.deployments = Deployments() + +class HostedServices(WindowsAzureDataTypedList): + + list_type = HostedService + + class HostedServiceProperties(WindowsAzureData): def __init__(self): @@ -2390,21 +2704,9 @@ def __init__(self): self.date_created = u'' self.date_last_modified = u'' self.extended_properties = _dict_of( - 'ExtendedProperty', 'Name', 'Value') - -class Deployments(WindowsAzureData): - - def __init__(self): - self.deployments = _list_of(Deployment) - - def __iter__(self): - return iter(self.deployments) - - def __len__(self): - return len(self.deployments) + 'ExtendedProperty', 'Name', 'Value' + ) - def __getitem__(self, 
index): - return self.deployments[index] class Deployment(WindowsAzureData): @@ -2428,7 +2730,15 @@ def __init__(self): self.created_time = u'' self.last_modified_time = u'' self.extended_properties = _dict_of( - 'ExtendedProperty', 'Name', 'Value') + 'ExtendedProperty', 'Name', 'Value' + ) + self.virtual_ips = VirtualIPs() + + +class Deployments(WindowsAzureDataTypedList): + + list_type = Deployment + class UpgradeStatus(WindowsAzureData): @@ -2437,19 +2747,6 @@ def __init__(self): self.current_upgrade_domain_state = u'' self.current_upgrade_domain = u'' -class RoleInstanceList(WindowsAzureData): - - def __init__(self): - self.role_instances = _list_of(RoleInstance) - - def __iter__(self): - return iter(self.role_instances) - - def __len__(self): - return len(self.role_instances) - - def __getitem__(self, index): - return self.role_instances[index] class RoleInstance(WindowsAzureData): @@ -2468,19 +2765,11 @@ def __init__(self): self.fqdn = u'' self.host_name = u'' -class InstanceEndpoints(WindowsAzureData): - def __init__(self): - self.instance_endpoints = _list_of(InstanceEndpoint) +class RoleInstanceList(WindowsAzureDataTypedList): - def __iter__(self): - return iter(self.instance_endpoints) + list_type = RoleInstance - def __len__(self): - return len(self.instance_endpoints) - - def __getitem__(self, index): - return self.instance_endpoints[index] class InstanceEndpoint(WindowsAzureData): @@ -2491,19 +2780,11 @@ def __init__(self): self.local_port = u'' self.protocol = u'' -class InputEndpoints(WindowsAzureData): - def __init__(self): - self.input_endpoints = _list_of(InputEndpoint) - - def __iter__(self): - return iter(self.input_endpoints) +class InstanceEndpoints(WindowsAzureDataTypedList): - def __len__(self): - return len(self.input_endpoints) + list_type = InstanceEndpoint - def __getitem__(self, index): - return self.input_endpoints[index] class InputEndpoint(WindowsAzureData): @@ -2512,19 +2793,11 @@ def __init__(self): self.vip = u'' self.port = u'' 
-class RoleList(WindowsAzureData): - def __init__(self): - self.roles = _list_of(Role) +class InputEndpoints(WindowsAzureDataTypedList): - def __iter__(self): - return iter(self.roles) - - def __len__(self): - return len(self.roles) + list_type = InputEndpoint - def __getitem__(self, index): - return self.roles[index] class Role(WindowsAzureData): @@ -2532,6 +2805,12 @@ def __init__(self): self.role_name = u'' self.os_version = u'' + +class RoleList(WindowsAzureDataTypedList): + + list_type = Role + + class PersistentVMDowntimeInfo(WindowsAzureData): def __init__(self): @@ -2539,24 +2818,12 @@ def __init__(self): self.end_time = u'' self.status = u'' + class AsynchronousOperationResult(WindowsAzureData): def __init__(self, request_id=None): self.request_id = request_id -class Disks(WindowsAzureData): - - def __init__(self): - self.disks = _list_of(Disk) - - def __iter__(self): - return iter(self.disks) - - def __len__(self): - return len(self.disks) - - def __getitem__(self, index): - return self.disks[index] class Disk(WindowsAzureData): @@ -2573,6 +2840,12 @@ def __init__(self): self.os = u'' self.source_image_name = u'' + +class Disks(WindowsAzureDataTypedList): + + list_type = Disk + + class AttachedTo(WindowsAzureData): def __init__(self): @@ -2580,12 +2853,14 @@ def __init__(self): self.deployment_name = u'' self.role_name = u'' + class OperationError(WindowsAzureData): def __init__(self): self.code = u'' self.message = u'' + class Operation(WindowsAzureData): def __init__(self): @@ -2594,6 +2869,7 @@ def __init__(self): self.http_status_code = u'' self.error = OperationError() + class OperatingSystem(WindowsAzureData): def __init__(self): @@ -2604,6 +2880,7 @@ def __init__(self): self.family = 0 self.family_label = _Base64String() + class OSDiskConfiguration(WindowsAzureData): def __init__(self): @@ -2614,19 +2891,11 @@ def __init__(self): self.media_link = u'' self.logical_disk_size_in_gb = 0 -class OperatingSystems(WindowsAzureData): - def __init__(self): 
- self.operating_systems = _list_of(OperatingSystem) +class OperatingSystems(WindowsAzureDataTypedList): - def __iter__(self): - return iter(self.operating_systems) + list_type = OperatingSystem - def __len__(self): - return len(self.operating_systems) - - def __getitem__(self, index): - return self.operating_systems[index] class OperatingSystemFamily(WindowsAzureData): @@ -2635,19 +2904,11 @@ def __init__(self): self.label = _Base64String() self.operating_systems = OperatingSystems() -class OperatingSystemFamilies(WindowsAzureData): - def __init__(self): - self.operating_system_families = _list_of(OperatingSystemFamily) +class OperatingSystemFamilies(WindowsAzureDataTypedList): - def __iter__(self): - return iter(self.operating_system_families) + list_type = OperatingSystemFamily - def __len__(self): - return len(self.operating_system_families) - - def __getitem__(self, index): - return self.operating_system_families[index] class Subscription(WindowsAzureData): @@ -2667,24 +2928,12 @@ def __init__(self): self.max_local_network_sites = 0 self.max_dns_servers = 0 + class AvailabilityResponse(WindowsAzureData): def __init__(self): self.result = False -class SubscriptionCertificates(WindowsAzureData): - - def __init__(self): - self.subscription_certificates = _list_of(SubscriptionCertificate) - - def __iter__(self): - return iter(self.subscription_certificates) - - def __len__(self): - return len(self.subscription_certificates) - - def __getitem__(self, index): - return self.subscription_certificates[index] class SubscriptionCertificate(WindowsAzureData): @@ -2694,6 +2943,12 @@ def __init__(self): self.subscription_certificate_data = u'' self.created = u'' + +class SubscriptionCertificates(WindowsAzureDataTypedList): + + list_type = SubscriptionCertificate + + class AzureHTTPRequest(object): def __init__(self): self.host = '' @@ -2704,6 +2959,7 @@ def __init__(self): self.body = '' self.protocol_override = None + class AzureHTTPResponse(object): def __init__(self, 
status, message, headers, body): self.status = status @@ -2711,16 +2967,21 @@ def __init__(self, status, message, headers, body): self.headers = headers self.body = body -"""Helper Functions """ +Helper Functions +""" + class _Base64String(str): pass + class _list_of(list): - """a list which carries with it the type that's expected to go in it. - Used for deserializaion and construction of the lists""" + """ + A list which carries with it the type that's expected to go in it. + Used for deserializaion and construction of the lists + """ def __init__(self, list_type, xml_element_name=None): self.list_type = list_type @@ -2730,41 +2991,63 @@ def __init__(self, list_type, xml_element_name=None): self.xml_element_name = xml_element_name super(_list_of, self).__init__() + class _scalar_list_of(list): - """a list of scalar types which carries with it the type that's + """ + A list of scalar types which carries with it the type that's expected to go in it along with its xml element name. - Used for deserializaion and construction of the lists""" + Used for deserializaion and construction of the lists + """ def __init__(self, list_type, xml_element_name): self.list_type = list_type self.xml_element_name = xml_element_name super(_scalar_list_of, self).__init__() + class _dict_of(dict): - """a dict which carries with it the xml element names for key,val. - Used for deserializaion and construction of the lists""" + """ + A dict which carries with it the xml element names for key,val. 
+ Used for deserializaion and construction of the lists + """ - def __init__(self, pair_xml_element_name, key_xml_element_name, + def __init__(self, + pair_xml_element_name, + key_xml_element_name, value_xml_element_name): self.pair_xml_element_name = pair_xml_element_name self.key_xml_element_name = key_xml_element_name self.value_xml_element_name = value_xml_element_name super(_dict_of, self).__init__() + class AzureNodeLocation(NodeLocation): # we can also have something in here for available services which is an extra to the API with Azure - def __init__(self, id, name, country, driver, available_services, + def __init__(self, + id, + name, + country, + driver, + available_services, virtual_machine_role_sizes): super(AzureNodeLocation, self).__init__(id, name, country, driver) self.available_services = available_services self.virtual_machine_role_sizes = virtual_machine_role_sizes def __repr__(self): - return (('') - % (self.id, self.name, self.country, - self.driver.name, ','.join(self.available_services), - ','.join(self.virtual_machine_role_sizes))) \ No newline at end of file + return ( + ( + '' + ) % ( + self.id, + self.name, + self.country, + self.driver.name, + ','.join(self.available_services), + ','.join(self.virtual_machine_role_sizes) + ) + ) diff --git a/libcloud/test/compute/test_azure.py b/libcloud/test/compute/test_azure.py index 7ac96222ec..2f33570d83 100644 --- a/libcloud/test/compute/test_azure.py +++ b/libcloud/test/compute/test_azure.py @@ -5,6 +5,7 @@ __author__ = 'david' +import os import sys import httplib @@ -16,64 +17,87 @@ from libcloud.compute.types import Provider from libcloud.compute.providers import get_driver -class AzureNodeDriverTests(unittest.TestCase) : + +class AzureNodeDriverTests(unittest.TestCase): #required otherwise we get client side SSL verification libcloud.security.VERIFY_SSL_CERT = False SUBSCRIPTION_ID = '3761b98b-673d-526c-8d55-fee918758e6e' - KEY_FILE = 'fixtures/azure/libcloud.pem' #empty file is fine + 
KEY_FILE = os.path.join(os.path.dirname(__file__), 'fixtures/azure/libcloud.pem') # empty file is fine def setUp(self): Azure = get_driver(Provider.AZURE) Azure.connectionCls.conn_classes = (None, AzureMockHttp) - self.driver = Azure(self.SUBSCRIPTION_ID, self.KEY_FILE ) + self.driver = Azure(self.SUBSCRIPTION_ID, self.KEY_FILE) def test_locations_returned_successfully(self): locations = self.driver.list_locations() self.assertEqual(len(locations), 7) - locationNamesResult = list(a.name for a in locations) - locationNamesExpected = ['East Asia','Southeast Asia','North Europe', - 'West Europe','East US','North Central US', - 'West US'] - - self.assertListEqual(locationNamesResult, locationNamesExpected) - - matchedLocation = next(location for location in locations - if location.name == 'Southeast Asia') - servicesResult = matchedLocation.available_services - servicesExpected = ['Compute','Storage','PersistentVMRole','HighMemory'] - self.assertListEqual(servicesResult, servicesExpected) - - vmRoleSizesResult = matchedLocation.virtual_machine_role_sizes - - vmRoleSizesExpected = ['A5','A6','A7','Basic_A0','Basic_A1','Basic_A2', - 'Basic_A3','Basic_A4','ExtraLarge','ExtraSmall', - 'Large','Medium','Small'] - self.assertListEqual(vmRoleSizesResult, vmRoleSizesExpected) + location_names_result = list(a.name for a in locations) + location_names_expected = [ + 'East Asia', + 'Southeast Asia', + 'North Europe', + 'West Europe', + 'East US', + 'North Central US', + 'West US' + ] + + self.assertListEqual(location_names_result, location_names_expected) + + matched_location = next( + location for location in locations + if location.name == 'Southeast Asia' + ) + services_result = matched_location.available_services + services_expected = ['Compute','Storage','PersistentVMRole','HighMemory'] + self.assertListEqual(services_result, services_expected) + + vm_role_sizes_result = matched_location.virtual_machine_role_sizes + + vm_role_sizes_expected = [ + 'A5', + 'A6', + 'A7', + 
'Basic_A0', + 'Basic_A1', + 'Basic_A2', + 'Basic_A3', + 'Basic_A4', + 'ExtraLarge', + 'ExtraSmall', + 'Large', + 'Medium', + 'Small' + ] + self.assertListEqual(vm_role_sizes_result, vm_role_sizes_expected) def test_images_returned_successfully(self): images = self.driver.list_images() - self.assertEquals(len(images), 215 ) + # There should be 215 standard OSImages and one VMImage returned + self.assertEquals(len(images), 216) def test_images_returned_successfully_filter_by_location(self): images = self.driver.list_images(location="West US") - self.assertEquals(len(images), 207 ) + self.assertEquals(len(images), 207) def test_list_nodes_returned_successfully(self): vmimages = self.driver.list_nodes( - ex_cloud_service_name="dcoddkinztest01") + ex_cloud_service_name="dcoddkinztest01" + ) self.assertEqual(len(vmimages), 2) img0 = vmimages[0] - self.assertEquals(img0.id,"dc03") - self.assertEquals(img0.name,u"dc03") - self.assertListEqual(img0.public_ips,["191.235.135.62"]) - self.assertListEqual(img0.private_ips,["100.92.66.69"]) - self.assertEquals(img0.size,None) - self.assertEquals(img0.state,0) - self.assertTrue(isinstance(img0.extra,dict)) + self.assertEquals(img0.id, "dc03") + self.assertEquals(img0.name, u"dc03") + self.assertListEqual(img0.public_ips, ["191.235.135.62"]) + self.assertListEqual(img0.private_ips, ["100.92.66.69"]) + self.assertEquals(img0.size, None) + self.assertEquals(img0.state, 0) + self.assertTrue(isinstance(img0.extra, dict)) extra = img0.extra self.assertEquals(extra["instance_size"], u'Small') self.assertEquals(extra["power_state"], u'Started') @@ -81,7 +105,8 @@ def test_list_nodes_returned_successfully(self): def test_list_nodes_returned_no_deployments(self): vmimages = self.driver.list_nodes( - ex_cloud_service_name="dcoddkinztest03") + ex_cloud_service_name="dcoddkinztest03" + ) self.assertIsNone(vmimages) def test_list_nodes_returned_no_cloud_service(self): @@ -92,8 +117,10 @@ def test_restart_node_success(self): node = type('Node', 
(object,), dict(id="dc03")) result = self.driver.reboot_node( - node=node, ex_cloud_service_name="dcoddkinztest01", - ex_deployment_slot="Production") + node=node, + ex_cloud_service_name="dcoddkinztest01", + ex_deployment_slot="Production" + ) self.assertTrue(result) @@ -103,45 +130,54 @@ def test_restart_node_fail_no_deployment(self): node = type('Node', (object,), dict(id="dc03")) with self.assertRaises(LibcloudError): - self.driver.reboot_node(node=node, - ex_cloud_service_name="dcoddkinztest02", - ex_deployment_slot="Production") + self.driver.reboot_node( + node=node, + ex_cloud_service_name="dcoddkinztest02", + ex_deployment_slot="Production" + ) def test_restart_node_fail_no_cloud_service(self): node = type('Node', (object,), dict(id="dc03")) with self.assertRaises(LibcloudError): - self.driver.reboot_node(node=node, - ex_cloud_service_name="dcoddkinztest03", - ex_deployment_slot="Production") + self.driver.reboot_node( + node=node, + ex_cloud_service_name="dcoddkinztest03", + ex_deployment_slot="Production" + ) def test_restart_node_fail_node_not_found(self): node = type('Node', (object,), dict(id="dc13")) - result = self.driver.reboot_node( - node=node, ex_cloud_service_name="dcoddkinztest01", - ex_deployment_slot="Production") + node=node, + ex_cloud_service_name="dcoddkinztest01", + ex_deployment_slot="Production" + ) self.assertFalse(result) def test_destroy_node_success_single_node_in_cloud_service(self): node = type('Node', (object,), dict(id="oddkinz1")) - result = self.driver.destroy_node(node=node, - ex_cloud_service_name="oddkinz1", - ex_deployment_slot="Production") + result = self.driver.destroy_node( + node=node, + ex_cloud_service_name="oddkinz1", + ex_deployment_slot="Production" + ) self.assertTrue(result) def test_destroy_node_success_multiple_nodes_in_cloud_service(self): node = type('Node', (object,), dict(id="oddkinz1")) - result = self.driver.destroy_node(node=node, - ex_cloud_service_name="oddkinz2", - ex_deployment_slot="Production") 
+ result = self.driver.destroy_node( + node=node, + ex_cloud_service_name="oddkinz2", + ex_deployment_slot="Production" + ) self.assertTrue(result) def test_destroy_node_fail_node_does_not_exist(self): @@ -149,9 +185,11 @@ def test_destroy_node_fail_node_does_not_exist(self): node = type('Node', (object,), dict(id="oddkinz2")) with self.assertRaises(LibcloudError): - self.driver.destroy_node(node=node, - ex_cloud_service_name="oddkinz2", - ex_deployment_slot="Production") + self.driver.destroy_node( + node=node, + ex_cloud_service_name="oddkinz2", + ex_deployment_slot="Production" + ) def test_destroy_node_success_cloud_service_not_found(self): @@ -159,9 +197,11 @@ def test_destroy_node_success_cloud_service_not_found(self): node["name"]="cloudredis" with self.assertRaises(LibcloudError): - self.driver.destroy_node(node=node, - ex_cloud_service_name="oddkinz5", - ex_deployment_slot="Production" ) + self.driver.destroy_node( + node=node, + ex_cloud_service_name="oddkinz5", + ex_deployment_slot="Production" + ) def test_create_cloud_service(self): result = self.driver.create_cloud_service("testdc123", "North Europe") @@ -170,62 +210,96 @@ def test_create_cloud_service(self): def test_create_cloud_service_service_exists(self): with self.assertRaises(LibcloudError): - self.driver.create_cloud_service(ex_cloud_service_name="testdc1234", - location="North Europe") + self.driver.create_cloud_service( + ex_cloud_service_name="testdc1234", + location="North Europe" + ) def test_destroy_cloud_service(self): result = self.driver.destroy_cloud_service( - ex_cloud_service_name="testdc123") + ex_cloud_service_name="testdc123" + ) self.assertTrue(result) def test_destroy_cloud_service_service_does_not_exist(self): with self.assertRaises(LibcloudError): self.driver.destroy_cloud_service( - ex_cloud_service_name="testdc1234") + ex_cloud_service_name="testdc1234" + ) def test_create_node_and_deployment_one_node(self): - kwargs = {} - - kwargs["ex_storage_service_name"]="mtlytics" - 
kwargs["ex_deployment_name"]="dcoddkinztest02" - kwargs["ex_deployment_slot"]="Production" - kwargs["ex_admin_user_id"]="azurecoder" + kwargs = { + "ex_storage_service_name": "mtlytics", + "ex_deployment_name": "dcoddkinztest02", + "ex_deployment_slot": "Production", + "ex_admin_user_id": "azurecoder" + } auth = NodeAuthPassword("Pa55w0rd", False) - kwargs["auth"]= auth + kwargs["auth"] = auth + kwargs["name"] = "dcoddkinztest03" - kwargs["size"] = NodeSize(id="ExtraSmall") + kwargs["size"] = NodeSize( + id="ExtraSmall", + name="ExtraSmall", + ram=1024, + disk="30gb", + bandwidth=0, + price=0, + driver=self.driver + ) kwargs["image"] = NodeImage( - id="5112500ae3b842c8b9c604889f8753c3__OpenLogic-CentOS-65-20140415" + id="5112500ae3b842c8b9c604889f8753c3__OpenLogic-CentOS-65-20140415", + name="FakeImage", + driver=self.driver, + extra={ + 'vm_image': False + } ) - kwargs["name"] = "dcoddkinztest03" result = self.driver.create_node( - ex_cloud_service_name="testdcabc", **kwargs) + ex_cloud_service_name="testdcabc", + **kwargs + ) self.assertIsNotNone(result) def test_create_node_and_deployment_second_node(self): - kwargs = {} - - kwargs["ex_storage_service_name"]="mtlytics" - kwargs["ex_deployment_name"]="dcoddkinztest02" - kwargs["ex_deployment_slot"]="Production" - kwargs["ex_admin_user_id"]="azurecoder" + kwargs = { + "ex_storage_service_name": "mtlytics", + "ex_deployment_name": "dcoddkinztest02", + "ex_deployment_slot": "Production", + "ex_admin_user_id": "azurecoder" + } auth = NodeAuthPassword("Pa55w0rd", False) - kwargs["auth"]= auth - - kwargs["size"] = NodeSize(id="ExtraSmall") + kwargs["auth"] = auth + + kwargs["size"] = NodeSize( + id="ExtraSmall", + name="ExtraSmall", + ram=1024, + disk="30gb", + bandwidth=0, + price=0, + driver=self.driver + ) kwargs["image"] = NodeImage( - id="5112500ae3b842c8b9c604889f8753c3__OpenLogic-CentOS-65-20140415" + id="5112500ae3b842c8b9c604889f8753c3__OpenLogic-CentOS-65-20140415", + name="FakeImage", + driver=self.driver, + 
extra={ + 'vm_image': False + } ) kwargs["name"] = "dcoddkinztest03" node = type('Node', (object,), dict(id="dc14")) result = self.driver.create_node( - ex_cloud_service_name="testdcabc2", **kwargs) + ex_cloud_service_name="testdcabc2", + **kwargs + ) self.assertIsNotNone(result) def test_create_node_and_deployment_second_node_307_response(self): @@ -239,50 +313,66 @@ def test_create_node_and_deployment_second_node_307_response(self): auth = NodeAuthPassword("Pa55w0rd", False) kwargs["auth"]= auth - kwargs["size"] = NodeSize(id="ExtraSmall") + kwargs["size"] = NodeSize( + id="ExtraSmall", + name="ExtraSmall", + ram=1024, + disk="30gb", + bandwidth=0, + price=0, + driver=self.driver + ) kwargs["image"] = NodeImage( - id="5112500ae3b842c8b9c604889f8753c3__OpenLogic-CentOS-65-20140415" + id="5112500ae3b842c8b9c604889f8753c3__OpenLogic-CentOS-65-20140415", + name="FakeImage", + driver=self.driver, + extra={ + 'vm_image': False + } ) kwargs["name"] = "dcoddkinztest04" with self.assertRaises(LibcloudError): - self.driver.create_node(ex_cloud_service_name="testdcabc3", - **kwargs) + self.driver.create_node( + ex_cloud_service_name="testdcabc3", + **kwargs + ) + class AzureMockHttp(MockHttp): fixtures = ComputeFileFixtures('azure') def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_oddkinz1_deploymentslots_Production(self, method, url, body, headers): - if method == "GET": - body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_oddkinz1_deploymentslots_Production.xml') + if method == "GET": + body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_oddkinz1_deploymentslots_Production.xml') - return (httplib.OK, body, headers, httplib.responses[httplib.OK]) + return (httplib.OK, body, headers, httplib.responses[httplib.OK]) def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_oddkinz1_deployments_dc01(self, method, url, body, headers): - return (httplib.ACCEPTED, body, headers, 
httplib.responses[httplib.ACCEPTED]) + return (httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED]) def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_oddkinz2_deploymentslots_Production(self, method, url, body, headers): - if method == "GET": - body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_oddkinz2_deploymentslots_Production.xml') + if method == "GET": + body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_oddkinz2_deploymentslots_Production.xml') - return (httplib.OK, body, headers, httplib.responses[httplib.OK]) + return (httplib.OK, body, headers, httplib.responses[httplib.OK]) def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_oddkinz2_deployments_dc03_roles_oddkinz1(self, method, url, body, headers): - return (httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED]) + return (httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED]) def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_oddkinz2_deployments_dc03_roles_oddkinz2(self, method, url, body, headers): - return (httplib.NOT_FOUND, body, headers, httplib.responses[httplib.NOT_FOUND]) + return (httplib.NOT_FOUND, body, headers, httplib.responses[httplib.NOT_FOUND]) def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_oddkinz5_deploymentslots_Production(self, method, url, body, headers): if method == "GET": - body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_oddkinz5_deploymentslots_Production.xml') + body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_oddkinz5_deploymentslots_Production.xml') return (httplib.NOT_FOUND, body, headers, httplib.responses[httplib.NOT_FOUND]) def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest01_deploymentslots_Production(self, method, url, body, headers): if method == "GET": - body = 
self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest01_deploymentslots_Production.xml') + body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest01_deploymentslots_Production.xml') return (httplib.OK, body, headers, httplib.responses[httplib.OK]) @@ -292,49 +382,55 @@ def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest0 def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest02_deploymentslots_Production(self, method, url, body, headers): if method == "GET": - body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest02_deploymentslots_Production.xml') + body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest02_deploymentslots_Production.xml') return (httplib.NOT_FOUND, body, headers, httplib.responses[httplib.NOT_FOUND]) def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest03_deploymentslots_Production(self, method, url, body, headers): if method == "GET": - body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest03_deploymentslots_Production.xml') + body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest03_deploymentslots_Production.xml') return (httplib.NOT_FOUND, body, headers, httplib.responses[httplib.NOT_FOUND]) def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest01_deployments_dc03_roleinstances_dc13(self, method, url, body, headers): if method == "GET": - body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest01_deployments_dc03_roleinstances_dc13.xml') + body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest01_deployments_dc03_roleinstances_dc13.xml') return (httplib.NOT_FOUND, body, 
headers, httplib.responses[httplib.NOT_FOUND]) def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest01(self, method, url, body, headers): if method == "GET": - body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest01.xml') + body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest01.xml') return (httplib.OK, body, headers, httplib.responses[httplib.OK]) def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest03(self, method, url, body, headers): if method == "GET": - body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest03.xml') + body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest03.xml') return (httplib.OK, body, headers, httplib.responses[httplib.OK]) def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest04(self, method, url, body, headers): if method == "GET": - body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest04.xml') + body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_dcoddkinztest04.xml') return (httplib.NOT_FOUND, body, headers, httplib.responses[httplib.NOT_FOUND]) def _3761b98b_673d_526c_8d55_fee918758e6e_services_images(self, method, url, body, headers): if method == "GET": - body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_images.xml') + body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_images.xml') + + return (httplib.OK, body, headers, httplib.responses[httplib.OK]) + + def _3761b98b_673d_526c_8d55_fee918758e6e_services_vmimages(self, method, url, body, headers): + if method == "GET": + body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_vmimages.xml') return (httplib.OK, body, headers, httplib.responses[httplib.OK]) def 
_3761b98b_673d_526c_8d55_fee918758e6e_locations(self, method, url, body, headers): if method == "GET": - body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_locations.xml') + body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_locations.xml') return (httplib.OK, body, headers, httplib.responses[httplib.OK]) @@ -350,40 +446,40 @@ def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdc123(self def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdc1234(self, method, url, body, headers): if method == "GET": - body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdc1234.xml') + body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdc1234.xml') return (httplib.NOT_FOUND, body, headers, httplib.responses[httplib.NOT_FOUND]) def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc(self, method, url, body, headers): if method == "GET": - body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc.xml') + body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc.xml') return (httplib.OK, body, headers, httplib.responses[httplib.OK]) def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc_deployments(self, method, url, body, headers): headers["x-ms-request-id"]="acc33f6756cda6fd96826394fce4c9f3" if method == "GET": - body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc_deployments.xml') + body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc_deployments.xml') return (httplib.ACCEPTED, body, headers, httplib.responses[httplib.ACCEPTED]) def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc2(self, method, url, body, headers): if method == "GET": - body = 
self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc2.xml') + body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc2.xml') return (httplib.OK, body, headers, httplib.responses[httplib.OK]) def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc2_deploymentslots_Production(self, method, url, body, headers): if method == "GET": - body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc2_deploymentslots_Production.xml') + body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc2_deploymentslots_Production.xml') return (httplib.OK, body, headers, httplib.responses[httplib.OK]) def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc2_deployments(self, method, url, body, headers): if method == "GET": - body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc2_deployments.xml') + body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc2_deployments.xml') return (httplib.OK, body, headers, httplib.responses[httplib.OK]) @@ -393,19 +489,19 @@ def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc2_dep def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc3(self, method, url, body, headers): if method == "GET": - body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc2.xml') + body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc2.xml') return (httplib.OK, body, headers, httplib.responses[httplib.OK]) def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc3_deploymentslots_Production(self, method, url, body, headers): if method == "GET": - body = 
self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc2_deploymentslots_Production.xml') + body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc2_deploymentslots_Production.xml') return (httplib.OK, body, headers, httplib.responses[httplib.OK]) def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc3_deployments(self, method, url, body, headers): if method == "GET": - body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc2_deployments.xml') + body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc2_deployments.xml') return (httplib.OK, body, headers, httplib.responses[httplib.OK]) @@ -416,8 +512,9 @@ def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc3_dep def _3761b98b_673d_526c_8d55_fee918758e6e_operations_acc33f6756cda6fd96826394fce4c9f3(self, method, url, body, headers): if method == "GET": - body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_operations_acc33f6756cda6fd96826394fce4c9f3.xml') + body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_operations_acc33f6756cda6fd96826394fce4c9f3.xml') return (httplib.OK, body, headers, httplib.responses[httplib.OK]) + if __name__ == '__main__': sys.exit(unittest.main()) From b7eb06f53f559e6fd987c36eb596579586425a5f Mon Sep 17 00:00:00 2001 From: Michael Bennett Date: Wed, 15 Oct 2014 13:21:48 -0400 Subject: [PATCH 308/315] forgot to add vm image fixture --- ...6c_8d55_fee918758e6e_services_vmimages.xml | 23 +++++++++++++++++++ 1 file changed, 23 insertions(+) create mode 100644 libcloud/test/compute/fixtures/azure/_3761b98b_673d_526c_8d55_fee918758e6e_services_vmimages.xml diff --git a/libcloud/test/compute/fixtures/azure/_3761b98b_673d_526c_8d55_fee918758e6e_services_vmimages.xml b/libcloud/test/compute/fixtures/azure/_3761b98b_673d_526c_8d55_fee918758e6e_services_vmimages.xml new 
file mode 100644 index 0000000000..6062ffe7a4 --- /dev/null +++ b/libcloud/test/compute/fixtures/azure/_3761b98b_673d_526c_8d55_fee918758e6e_services_vmimages.xml @@ -0,0 +1,23 @@ + + + test-vm-20140926-782617 + + User + + test-vm-20140926-782617-os-2014-09-26 + ReadWrite + Generalized + Linux + https://portal.blob.core.windows.net/vhds/test-vm-20140926-782617-os-2014-09-26.vhd + 30 + + + test-service + test-deployment + test-role + Central US + 2014-09-26T17:39:34.5199416Z + 2014-09-26T17:39:34.5199416Z + false + + \ No newline at end of file From c9ce29eded3eb762553d2ea68cef1574a09c717f Mon Sep 17 00:00:00 2001 From: Michael Bennett Date: Wed, 29 Oct 2014 10:50:30 -0400 Subject: [PATCH 309/315] Fixed an issue with trying to parse VirtualIPs for all nodes --- libcloud/compute/drivers/azure.py | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/libcloud/compute/drivers/azure.py b/libcloud/compute/drivers/azure.py index 501b083c24..f5e0524015 100644 --- a/libcloud/compute/drivers/azure.py +++ b/libcloud/compute/drivers/azure.py @@ -286,7 +286,10 @@ def list_nodes(self, ex_cloud_service_name=None): data = self._parse_response(response, HostedService) - vips = [vip.address for vip in data.deployments[0].virtual_ips] + vips = None + + if data.deployments[0].virtual_ips is not None: + vips = [vip.address for vip in data.deployments[0].virtual_ips] try: return [ @@ -735,22 +738,17 @@ def destroy_node(self, _deployment_name, node.id ) - path += '?comp=media' # forces deletion of attached disks - - self._perform_delete(path) - - return True else: path = self._get_deployment_path_using_name( ex_cloud_service_name, _deployment_name ) - path += '?comp=media' + path += '?comp=media' - self._perform_delete(path) + self._perform_delete(path) - return True + return True def create_cloud_service(self, ex_cloud_service_name=None, From 656d43e28d5697f255835029f8ebe3838ea0f5d4 Mon Sep 17 00:00:00 2001 From: Michael Bennett Date: Fri, 21 Nov 2014 
18:23:01 -0500 Subject: [PATCH 310/315] Fixed an error in listing nodes created from user images. Added support for handling 307 Temp Redirects, rather than just failing --- libcloud/common/azure.py | 12 ++++++++++++ libcloud/compute/drivers/azure.py | 18 +++++++++++++----- libcloud/test/compute/test_azure.py | 21 +++++++++++++++++++-- 3 files changed, 44 insertions(+), 7 deletions(-) diff --git a/libcloud/common/azure.py b/libcloud/common/azure.py index e76097c614..727046eca5 100644 --- a/libcloud/common/azure.py +++ b/libcloud/common/azure.py @@ -41,6 +41,12 @@ AZURE_TIME_FORMAT = '%a, %d %b %Y %H:%M:%S GMT' +class AzureRedirectException(Exception): + + def __init__(self, response): + self.location = response.headers['location'] + + class AzureResponse(XmlResponse): valid_response_codes = [ @@ -84,6 +90,12 @@ def parse_error(self, msg=None): driver=self ) + def parse_body(self): + if int(self.status) == httplib.TEMPORARY_REDIRECT and self.connection.driver.follow_redirects: + raise AzureRedirectException(self) + else: + return super(AzureResponse, self).parse_body() + class AzureRawResponse(RawResponse): pass diff --git a/libcloud/compute/drivers/azure.py b/libcloud/compute/drivers/azure.py index a25ee701ef..25e8e54cb1 100644 --- a/libcloud/compute/drivers/azure.py +++ b/libcloud/compute/drivers/azure.py @@ -27,7 +27,7 @@ from libcloud.utils.py3 import urlquote as url_quote from libcloud.utils.py3 import urlunquote as url_unquote -from libcloud.common.azure import AzureServiceManagementConnection +from libcloud.common.azure import AzureServiceManagementConnection, AzureRedirectException from libcloud.compute.providers import Provider from libcloud.compute.base import Node, NodeDriver, NodeLocation, NodeSize from libcloud.compute.base import NodeImage, StorageVolume @@ -210,6 +210,7 @@ def __init__(self, subscription_id=None, key_file=None, **kwargs): """ self.subscription_id = subscription_id self.key_file = key_file + self.follow_redirects = 
kwargs.get('follow_redirects', True) super(AzureNodeDriver, self).__init__( self.subscription_id, self.key_file, @@ -293,7 +294,7 @@ def list_nodes(self, ex_cloud_service_name=None): vips = None - if data.deployments[0].virtual_ips is not None: + if len(data.deployments) > 0 and data.deployments[0].virtual_ips is not None: vips = [vip.address for vip in data.deployments[0].virtual_ips] try: @@ -1233,11 +1234,18 @@ def _perform_request(self, request): try: return self.connection.request( action="https://%s%s" % (request.host, request.path), - data=request.body, headers=request.headers, + data=request.body, + headers=request.headers, method=request.method ) + except AzureRedirectException as e: + from libcloud.utils.py3 import urlparse + parsed_url = urlparse.urlparse(e.location) + request.host = parsed_url.netloc + return self._perform_request(request) except Exception, e: - print e.message + import traceback + print "Exception performing request: {}".format(traceback.format_exc()) def _update_request_uri_query(self, request): """ @@ -2404,13 +2412,13 @@ def extended_properties_dict_to_xml_fragment(extended_properties): return xml - """ Data Classes Borrowed from the Azure SDK for Python. 
""" + class WindowsAzureData(object): """ diff --git a/libcloud/test/compute/test_azure.py b/libcloud/test/compute/test_azure.py index 2f33570d83..0538604aa9 100644 --- a/libcloud/test/compute/test_azure.py +++ b/libcloud/test/compute/test_azure.py @@ -1,6 +1,7 @@ import libcloud from libcloud.common.types import LibcloudError from libcloud.compute.base import NodeAuthPassword, NodeImage, NodeSize +from libcloud.compute.drivers.azure import azure_service_management_host from libcloud.common.azure import AzureServiceManagementConnection __author__ = 'david' @@ -506,8 +507,24 @@ def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc3_dep return (httplib.OK, body, headers, httplib.responses[httplib.OK]) def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc3_deployments_dcoddkinztest02_roles(self, method, url, body, headers): - - return (httplib.TEMPORARY_REDIRECT, None, headers, httplib.responses[httplib.TEMPORARY_REDIRECT]) + redirect_host = "ussouth.management.core.windows.net" + + if not getattr(AzureMockHttp, "in_redirect", False): + setattr(AzureMockHttp, "in_redirect", True) + headers["Location"] = url.replace(azure_service_management_host, redirect_host) + return (httplib.TEMPORARY_REDIRECT, None, headers, httplib.responses[httplib.TEMPORARY_REDIRECT]) + else: + delattr(AzureMockHttp, "in_redirect") + if redirect_host not in url: + if azure_service_management_host in url: + return (httplib.TEMPORARY_REDIRECT, None, headers, httplib.responses[httplib.TEMPORARY_REDIRECT]) + else: + return (httplib.REQUEST_TIMEOUT, None, None, httplib.responses[httplib.REQUEST_TIMEOUT]) + + if method == "GET": + body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc2.xml') + + return (httplib.OK, body, headers, httplib.responses[httplib.OK]) def _3761b98b_673d_526c_8d55_fee918758e6e_operations_acc33f6756cda6fd96826394fce4c9f3(self, method, url, body, headers): From 
086ad185bbb508bd9ce8df60c2b2c66b021ce716 Mon Sep 17 00:00:00 2001 From: Michael Bennett Date: Sun, 23 Nov 2014 10:18:50 -0500 Subject: [PATCH 311/315] Refactored to make some things more DRY. Formatted for PEP8 and libcloud style guides. Updated tests to use libcloud Node objects --- libcloud/compute/drivers/azure.py | 631 +++++++++++++++++----------- libcloud/test/compute/test_azure.py | 100 ++++- 2 files changed, 466 insertions(+), 265 deletions(-) diff --git a/libcloud/compute/drivers/azure.py b/libcloud/compute/drivers/azure.py index 25e8e54cb1..98048148f2 100644 --- a/libcloud/compute/drivers/azure.py +++ b/libcloud/compute/drivers/azure.py @@ -27,7 +27,8 @@ from libcloud.utils.py3 import urlquote as url_quote from libcloud.utils.py3 import urlunquote as url_unquote -from libcloud.common.azure import AzureServiceManagementConnection, AzureRedirectException +from libcloud.common.azure import (AzureServiceManagementConnection, + AzureRedirectException) from libcloud.compute.providers import Provider from libcloud.compute.base import Node, NodeDriver, NodeLocation, NodeSize from libcloud.compute.base import NodeImage, StorageVolume @@ -64,7 +65,7 @@ def _str(value): X_MS_VERSION = '2013-08-01' """ -Sizes must be hardcoded because Microsoft doesn't provide an API to fetch them. 
+Sizes must be hardcoded because Microsoft doesn't provide an API to fetch them From http://msdn.microsoft.com/en-us/library/windowsazure/dn197896.aspx """ AZURE_COMPUTE_INSTANCE_TYPES = { @@ -181,7 +182,10 @@ class AzureNodeDriver(NodeDriver): _instance_types = AZURE_COMPUTE_INSTANCE_TYPES _blob_url = ".blob.core.windows.net" features = {'create_node': ['password']} - service_location = collections.namedtuple('service_location', ['is_affinity_group', 'service_location']) + service_location = collections.namedtuple( + 'service_location', + ['is_affinity_group', 'service_location'] + ) NODE_STATE_MAP = { 'RoleStateUnknown': NodeState.UNKNOWN, @@ -240,13 +244,20 @@ def list_images(self, location=None): """ data = self._perform_get(self._get_image_path(), Images) - custom_image_data = self._perform_get(self._get_vmimage_path(), VMImages) + custom_image_data = self._perform_get( + self._get_vmimage_path(), + VMImages + ) images = [self._to_image(i) for i in data] images.extend(self._vm_to_image(j) for j in custom_image_data) if location is not None: - images = [image for image in images if location in image.extra["location"]] + images = [ + image + for image in images + if location in image.extra["location"] + ] return images @@ -256,7 +267,10 @@ def list_locations(self): :rtype: ``list`` of :class:`NodeLocation` """ - data = self._perform_get('/' + self.subscription_id + '/locations', Locations) + data = self._perform_get( + '/' + self.subscription_id + '/locations', + Locations + ) return [self._to_location(l) for l in data] @@ -283,18 +297,14 @@ def list_nodes(self, ex_cloud_service_name=None): None ) - if response.status != 200: - raise LibcloudError( - 'Message: %s, Body: %s, Status code: %d' % - (response.error, response.body, response.status), - driver=self - ) + self.raise_for_response(response, 200) data = self._parse_response(response, HostedService) vips = None - if len(data.deployments) > 0 and data.deployments[0].virtual_ips is not None: + if 
(len(data.deployments) > 0 and + data.deployments[0].virtual_ips is not None): vips = [vip.address for vip in data.deployments[0].virtual_ips] try: @@ -326,10 +336,14 @@ def reboot_node(self, :rtype: ``bool`` """ - if not node: + if node is None: raise ValueError("node is required.") - ex_cloud_service_name = ex_cloud_service_name or (node.extra and node.extra.get('ex_cloud_service_name')) + if ex_cloud_service_name is None: + if node.extra is not None: + ex_cloud_service_name = node.extra.get( + 'ex_cloud_service_name' + ) if not ex_cloud_service_name: raise ValueError("ex_cloud_service_name is required.") @@ -351,12 +365,7 @@ def reboot_node(self, '' ) - if response.status != 202: - raise LibcloudError( - 'Message: %s, Body: %s, Status code: %d' % - (response.error, response.body, response.status), - driver=self - ) + self.raise_for_response(response, 202) if self._parse_response_for_async_op(response): return True @@ -380,7 +389,17 @@ def list_volumes(self, node=None): volumes = [self._to_volume(volume=v, node=node) for v in data] return volumes - def create_node(self, ex_cloud_service_name=None, **kwargs): + def create_node(self, + name, + image, + size, + ex_cloud_service_name, + ex_storage_service_name=None, + ex_new_deployment=False, + ex_deployment_slot="Production", + ex_admin_user_id="azureuser", + auth=None, + **kwargs): """Create Azure Virtual Machine Reference: http://bit.ly/1fIsCb7 @@ -437,52 +456,31 @@ def create_node(self, ex_cloud_service_name=None, **kwargs): """ - auth = self._get_and_check_auth(kwargs["auth"]) + auth = self._get_and_check_auth(auth) password = auth.password - if not ex_cloud_service_name: - raise ValueError("ex_cloud_service_name is required.") - - ex_new_deployment = kwargs.get("ex_new_deployment", False) - - if "ex_deployment_slot" in kwargs: - ex_deployment_slot = kwargs['ex_deployment_slot'] - else: - # We assume production if this is not provided. 
- ex_deployment_slot = "Production" - - if "ex_admin_user_id" in kwargs: - ex_admin_user_id = kwargs['ex_admin_user_id'] - else: - # This mimics the Azure UI behavior. - ex_admin_user_id = "azureuser" - - if "size" not in kwargs: - raise ValueError("size is required. ") - - if not isinstance(kwargs['size'], NodeSize): + if not isinstance(size, NodeSize): raise ValueError('Size must be an instance of NodeSize') - if "image" not in kwargs: - raise ValueError("image is required.") - - if "name" not in kwargs: - raise ValueError("name is required.") - - name = kwargs['name'] - size = kwargs['size'] - image = kwargs['image'] - if not isinstance(image, NodeImage): - raise ValueError("Image must be an instance of NodeImage, produced by list_images()") + raise ValueError( + "Image must be an instance of NodeImage, " + "produced by list_images()" + ) + + node_list = self.list_nodes( + ex_cloud_service_name=ex_cloud_service_name + ) - node_list = self.list_nodes(ex_cloud_service_name=ex_cloud_service_name) network_config = ConfigurationSet() network_config.configuration_set_type = 'NetworkConfiguration' # We do this because we need to pass a Configuration to the # method. This will be either Linux or Windows. 
- windows_server_regex = re.compile(r'Win|SQL|SharePoint|Visual|Dynamics|DynGP|BizTalk') + windows_server_regex = re.compile( + r'Win|SQL|SharePoint|Visual|Dynamics|DynGP|BizTalk' + ) + if windows_server_regex.search(image.id, re.I): machine_config = WindowsConfigurationSet( computer_name=name, @@ -502,7 +500,10 @@ def create_node(self, ex_cloud_service_name=None, **kwargs): ) for instances in endpoints.role_instance_list: - ports = [ep.public_port for ep in instances.instance_endpoints] + ports = [ + ep.public_port + for ep in instances.instance_endpoints + ] while port in ports: port = random.randint(41952, 65535) @@ -555,9 +556,7 @@ def create_node(self, ex_cloud_service_name=None, **kwargs): service_name=ex_cloud_service_name ) - if "ex_storage_service_name" in kwargs: - ex_storage_service_name = kwargs['ex_storage_service_name'] - else: + if ex_storage_service_name is None: ex_storage_service_name = ex_cloud_service_name ex_storage_service_name = re.sub( ur'[\W_]+', @@ -596,7 +595,9 @@ def create_node(self, ex_cloud_service_name=None, **kwargs): vm_image_id = image.id #network_config = None else: - blob_url = "http://" + ex_storage_service_name + ".blob.core.windows.net" + blob_url = "http://{0}.blob.core.windows.net".format( + ex_storage_service_name + ) # Azure's pattern in the UI. 
disk_name = "{0}-{1}-{2}.vhd".format( @@ -605,7 +606,7 @@ def create_node(self, ex_cloud_service_name=None, **kwargs): time.strftime("%Y-%m-%d") ) - media_link = blob_url + "/vhds/" + disk_name + media_link = "{0}/vhds/{1}".format(blob_url, disk_name) disk_config = OSVirtualHardDisk(image.id, media_link) @@ -627,14 +628,7 @@ def create_node(self, ex_cloud_service_name=None, **kwargs): vm_image_id ) ) - - if response.status != 202: - raise LibcloudError( - 'Message: %s, Body: %s, Status code: %d' % - (response.error, response.body, response.status), - driver=self - ) - + self.raise_for_response(response, 202) self._ex_complete_async_azure_operation(response) else: _deployment_name = self._get_deployment( @@ -649,13 +643,15 @@ def create_node(self, ex_cloud_service_name=None, **kwargs): vm_image_id = image.id #network_config = None else: - blob_url = "http://" + ex_storage_service_name + ".blob.core.windows.net" + blob_url = "http://{0}.blob.core.windows.net".format( + ex_storage_service_name + ) disk_name = "{0}-{1}-{2}.vhd".format( ex_cloud_service_name, name, time.strftime("%Y-%m-%d") ) - media_link = blob_url + "/vhds/" + disk_name + media_link = "{0}/vhds/{1}".format(blob_url, disk_name) disk_config = OSVirtualHardDisk(image.id, media_link) response = self._perform_post( @@ -676,12 +672,7 @@ def create_node(self, ex_cloud_service_name=None, **kwargs): ) ) - if response.status != 202: - raise LibcloudError( - 'Message: %s, Body: %s, Status code: %d' % - (response.error, response.body, response.status), - driver=self.connection.driver - ) + self.raise_for_response(response, 202) self._ex_complete_async_azure_operation(response) @@ -698,9 +689,9 @@ def create_node(self, ex_cloud_service_name=None, **kwargs): ) def destroy_node(self, - node=None, + node, ex_cloud_service_name=None, - ex_deployment_slot=None): + ex_deployment_slot="Production"): """Remove Azure Virtual Machine This removes the instance, but does not @@ -718,16 +709,14 @@ def destroy_node(self, :type 
ex_deployment_slot: ``str`` """ - if not node: - raise ValueError("node is required.") + if not isinstance(node, Node): + raise ValueError("A libcloud Node object is required.") - ex_cloud_service_name = ex_cloud_service_name or (node.extra and node.extra.get('ex_cloud_service_name')) + if ex_cloud_service_name is None and node.extra is not None: + ex_cloud_service_name = node.extra.get('ex_cloud_service_name') if not ex_cloud_service_name: - raise ValueError("ex_cloud_service_name is required.") - - if not ex_deployment_slot: - ex_deployment_slot = "production" + raise ValueError("Unable to get ex_cloud_service_name from Node.") _deployment = self._get_deployment( service_name=ex_cloud_service_name, @@ -757,8 +746,8 @@ def destroy_node(self, return True def create_cloud_service(self, - ex_cloud_service_name=None, - location=None, + ex_cloud_service_name, + location, description=None, extended_properties=None): """ @@ -778,11 +767,6 @@ def create_cloud_service(self, :rtype: ``bool`` """ - if not ex_cloud_service_name: - raise ValueError("ex_cloud_service_name is required.") - - if not location: - raise ValueError("location is required.") response = self._perform_cloud_service_create( self._get_hosted_service_path(), @@ -796,16 +780,11 @@ def create_cloud_service(self, ) ) - if response.status != 201: - raise LibcloudError( - 'Message: %s, Body: %s, Status code: %d' - % (response.error, response.body, response.status), - driver=self - ) + self.raise_for_response(response, 201) return True - def destroy_cloud_service(self, ex_cloud_service_name=None): + def destroy_cloud_service(self, ex_cloud_service_name): """ deletes an azure cloud service. 
@@ -816,20 +795,12 @@ def destroy_cloud_service(self, ex_cloud_service_name=None): :rtype: ``bool`` """ - if not ex_cloud_service_name: - raise ValueError("ex_cloud_service_name is required.") - #add check to ensure all nodes have been deleted response = self._perform_cloud_service_delete( self._get_hosted_service_path(ex_cloud_service_name) ) - if response.status != 200: - raise LibcloudError( - 'Message: %s, Body: %s, Status code: %d' % - (response.error, response.body, response.status), - driver=self - ) + self.raise_for_response(response, 200) return True @@ -941,7 +912,7 @@ def _to_node(self, data, ex_cloud_service_name=None, virtual_ips=None): def _to_location(self, data): """ - Convert the data from a Azure resonse object into a location + Convert the data from a Azure response object into a location """ country = data.display_name @@ -960,13 +931,15 @@ def _to_location(self, data): if "Brazil" in data.display_name: country = "Brazil" + vm_role_sizes = data.compute_capabilities.virtual_machines_role_sizes + return AzureNodeLocation( id=data.name, name=data.display_name, country=country, driver=self.connection.driver, available_services=data.available_services, - virtual_machine_role_sizes=data.compute_capabilities.virtual_machines_role_sizes + virtual_machine_role_sizes=vm_role_sizes ) def _to_node_size(self, data): @@ -1023,49 +996,46 @@ def _vm_to_image(self, data): ) def _to_volume(self, volume, node): + extra = { + 'affinity_group': volume.affinity_group, + 'os': volume.os, + 'location': volume.location, + 'media_link': volume.media_link, + 'source_image_name': volume.source_image_name + } + + role_name = getattr(volume.attached_to, 'role_name', None) + hosted_service_name = getattr( + volume.attached_to, + 'hosted_service_name', + None + ) - if node: - if hasattr(volume.attached_to, 'role_name'): - if volume.attached_to.role_name == node.id: - extra = {'affinity_group': volume.affinity_group} - - if hasattr(volume.attached_to, 'hosted_service_name'): - 
extra['hosted_service_name'] = volume.attached_to.hosted_service_name + deployment_name = getattr( + volume.attached_to, + 'deployment_name', + None + ) - if hasattr(volume.attached_to, 'role_name'): - extra['role_name'] = volume.attached_to.role_name + if role_name is not None: + extra['role_name'] = role_name - if hasattr(volume.attached_to, 'deployment_name'): - extra['deployment_name'] = volume.attached_to.deployment_name + if hosted_service_name is not None: + extra['hosted_service_name'] = hosted_service_name - extra['os'] = volume.os - extra['location'] = volume.location - extra['media_link'] = volume.media_link - extra['source_image_name'] = volume.source_image_name + if deployment_name is not None: + extra['deployment_name'] = deployment_name - return StorageVolume( - id=volume.name, - name=volume.name, - size=int(volume.logical_disk_size_in_gb), - driver=self.connection.driver, - extra=extra - ) + if node: + if role_name is not None and role_name == node.id: + return StorageVolume( + id=volume.name, + name=volume.name, + size=int(volume.logical_disk_size_in_gb), + driver=self.connection.driver, + extra=extra + ) else: - extra = {'affinity_group': volume.affinity_group} - - if hasattr(volume.attached_to, 'hosted_service_name'): - extra['hosted_service_name'] = volume.attached_to.hosted_service_name - - if hasattr(volume.attached_to, 'role_name'): - extra['role_name'] = volume.attached_to.role_name - - if hasattr(volume.attached_to, 'deployment_name'): - extra['deployment_name'] = volume.attached_to.deployment_name - extra['os'] = volume.os - extra['location'] = volume.location - extra['media_link'] = volume.media_link - extra['source_image_name'] = volume.source_image_name - return StorageVolume( id=volume.name, name=volume.name, @@ -1086,12 +1056,7 @@ def _get_deployment(self, **kwargs): None ) - if response.status != 200: - raise LibcloudError( - 'Message: %s, Body: %s, Status code: %d' % - (response.error, response.body, response.status), - 
driver=self.connection.driver - ) + self.raise_for_response(response, 200) return self._parse_response(response, Deployment) @@ -1101,7 +1066,9 @@ def _get_cloud_service_location(self, service_name=None): raise ValueError("service_name is required.") res = self._perform_get( - self._get_hosted_service_path(service_name) + '?embed-detail=False', + '{0}?embed-detail=False'.format( + self._get_hosted_service_path(service_name) + ), HostedService ) @@ -1120,9 +1087,11 @@ def _is_storage_service_unique(self, service_name=None): raise ValueError("service_name is required.") _check_availability = self._perform_get( - self._get_storage_service_path() + - '/operations/isavailable/' + - _str(service_name) + '', + '{0}/operations/isavailable/{1}{2}'.format( + self._get_storage_service_path(), + _str(service_name), + '' + ), AvailabilityResponse ) @@ -1143,12 +1112,7 @@ def _create_storage_account(self, **kwargs): ) ) - if response.status != 202: - raise LibcloudError( - 'Message: %s, Body: %s, Status code: %d' % - (response.error, response.body, response.status), - driver=self.connection.driver - ) + self.raise_for_response(response, 202) else: response = self._perform_post( @@ -1164,12 +1128,7 @@ def _create_storage_account(self, **kwargs): ) ) - if response.status != 202: - raise LibcloudError( - 'Message: %s, Body: %s, Status code: %d' % - (response.error, response.body, response.status), - driver=self.connection.driver - ) + self.raise_for_response(response, 202) # We need to wait for this to be created before we can # create the storage container and the instance. 
@@ -1192,7 +1151,7 @@ def _perform_get(self, path, response_type): request.path, request.query = self._update_request_uri_query(request) request.headers = self._update_management_header(request) response = self._perform_request(request) - +#TODO if response_type is not None: return self._parse_response(response, response_type) @@ -1219,12 +1178,7 @@ def _perform_delete(self, path, async=False): request.headers = self._update_management_header(request) response = self._perform_request(request) - if response.status != 202: - raise LibcloudError( - 'Message: %s, Body: %s, Status code: %d' % - (response.error, response.body, response.status), - driver=self.connection.driver - ) + self.raise_for_response(response, 202) if async: return self._parse_response_for_async_op(response) @@ -1243,9 +1197,10 @@ def _perform_request(self, request): parsed_url = urlparse.urlparse(e.location) request.host = parsed_url.netloc return self._perform_request(request) - except Exception, e: - import traceback - print "Exception performing request: {}".format(traceback.format_exc()) + except Exception: + print "Exception performing request:\n{0}".format( + sys.exc_info()[1] + ) def _update_request_uri_query(self, request): """ @@ -1271,7 +1226,11 @@ def _update_request_uri_query(self, request): request.path += '?' 
for name, value in request.query: if value is not None: - request.path += name + '=' + url_quote(value, '/()$=\',') + '&' + request.path += '{0}={1}{2}'.format( + name, + url_quote(value, '/()$=\','), + '&' + ) request.path = request.path[:-1] return request.path, request.query @@ -1426,7 +1385,10 @@ def _fill_scalar_list_of(self, xmlelements = self._get_child_nodes(xmldoc, parent_xml_element_name) if xmlelements: - xmlelements = self._get_child_nodes(xmlelements[0], xml_element_name) + xmlelements = self._get_child_nodes( + xmlelements[0], + xml_element_name + ) return [ self._get_node_value(xmlelement, element_type) for xmlelement in xmlelements @@ -1472,7 +1434,10 @@ def _fill_dict_of(self, xmlelements = self._get_child_nodes(xmldoc, parent_xml_element_name) if xmlelements: - xmlelements = self._get_child_nodes(xmlelements[0], pair_xml_element_name) + xmlelements = self._get_child_nodes( + xmlelements[0], + pair_xml_element_name + ) for pair in xmlelements: keys = self._get_child_nodes(pair, key_xml_element_name) values = self._get_child_nodes(pair, value_xml_element_name) @@ -1588,8 +1553,11 @@ def _construct_element_tree(self, source, etree): for name, value in vars(source).items(): if value is not None: - if isinstance(value, list) or isinstance(value, WindowsAzureData): - etree.append(self._construct_element_tree(value, etree)) + if (isinstance(value, list) or + isinstance(value, WindowsAzureData)): + etree.append( + self._construct_element_tree(value, etree) + ) else: ele = ET.Element(self._get_serialization_name(name)) ele.text = xml_escape(str(value)) @@ -1636,8 +1604,12 @@ def _get_hosted_service_path(self, service_name=None): return self._get_path('services/hostedservices', service_name) def _get_deployment_path_using_slot(self, service_name, slot=None): - return self._get_path('services/hostedservices/' + _str(service_name) + - '/deploymentslots', slot) + return self._get_path( + 'services/hostedservices/{0}/deploymentslots'.format( + 
_str(service_name) + ), + slot + ) def _get_disk_path(self, disk_name=None): return self._get_path('services/disks', disk_name) @@ -1681,6 +1653,17 @@ def _ex_complete_async_azure_operation(self, driver=self.connection.driver ) + def raise_for_response(self, response, valid_response): + if response.status != valid_response: + raise LibcloudError( + 'Message: {0}, Body: {1}, Status code: {2}'.format( + response.error, + response.body, + response.status + ), + driver=self + ) + #def get_connection(self): # certificate_path = "/Users/baldwin/.azure/managementCertificate.pem" # port = HTTPS_PORT @@ -1991,21 +1974,44 @@ def start_role_operation_to_xml(): @staticmethod def windows_configuration_to_xml(configuration, xml): - AzureXmlSerializer.data_to_xml([('ConfigurationSetType', configuration.configuration_set_type)], xml) - AzureXmlSerializer.data_to_xml([('ComputerName', configuration.computer_name)], xml) - AzureXmlSerializer.data_to_xml([('AdminPassword', configuration.admin_password)], xml) - AzureXmlSerializer.data_to_xml( - [('ResetPasswordOnFirstLogon', configuration.reset_password_on_first_logon, _lower)], + [('ConfigurationSetType', configuration.configuration_set_type)], + xml + ) + AzureXmlSerializer.data_to_xml( + [('ComputerName', configuration.computer_name)], + xml + ) + AzureXmlSerializer.data_to_xml( + [('AdminPassword', configuration.admin_password)], + xml + ) + AzureXmlSerializer.data_to_xml( + [ + ( + 'ResetPasswordOnFirstLogon', + configuration.reset_password_on_first_logon, + _lower + ) + ], xml ) AzureXmlSerializer.data_to_xml( - [('EnableAutomaticUpdates', configuration.enable_automatic_updates, _lower)], + [ + ( + 'EnableAutomaticUpdates', + configuration.enable_automatic_updates, + _lower + ) + ], xml ) - AzureXmlSerializer.data_to_xml([('TimeZone', configuration.time_zone)], xml) + AzureXmlSerializer.data_to_xml( + [('TimeZone', configuration.time_zone)], + xml + ) if configuration.domain_join is not None: domain = ET.xml("DomainJoin") @@ 
-2018,8 +2024,24 @@ def windows_configuration_to_xml(configuration, xml): creds ) - AzureXmlSerializer.data_to_xml([('Username', configuration.domain_join.credentials.username)], creds) - AzureXmlSerializer.data_to_xml([('Password', configuration.domain_join.credentials.password)], creds) + AzureXmlSerializer.data_to_xml( + [ + ( + 'Username', + configuration.domain_join.credentials.username + ) + ], + creds + ) + AzureXmlSerializer.data_to_xml( + [ + ( + 'Password', + configuration.domain_join.credentials.password + ) + ], + creds + ) AzureXmlSerializer.data_to_xml( [('JoinDomain', configuration.domain_join.join_domain)], @@ -2027,22 +2049,33 @@ def windows_configuration_to_xml(configuration, xml): ) AzureXmlSerializer.data_to_xml( - [('MachineObjectOU', configuration.domain_join.machine_object_ou)], + [ + ( + 'MachineObjectOU', + configuration.domain_join.machine_object_ou + ) + ], domain ) if configuration.stored_certificate_settings is not None: - certsettings = ET.Element("StoredCertificateSettings") - xml.append(certsettings) + cert_settings = ET.Element("StoredCertificateSettings") + xml.append(cert_settings) for cert in configuration.stored_certificate_settings: - certsetting = ET.Element("CertificateSetting") - certsettings.append(certsetting) + cert_setting = ET.Element("CertificateSetting") + cert_settings.append(cert_setting) - certsetting.append(AzureXmlSerializer.data_to_xml( + cert_setting.append(AzureXmlSerializer.data_to_xml( [('StoreLocation', cert.store_location)]) ) - AzureXmlSerializer.data_to_xml([('StoreName', cert.store_name)], certsetting) - AzureXmlSerializer.data_to_xml([('Thumbprint', cert.thumbprint)], certsetting) + AzureXmlSerializer.data_to_xml( + [('StoreName', cert.store_name)], + cert_setting + ) + AzureXmlSerializer.data_to_xml( + [('Thumbprint', cert.thumbprint)], + cert_setting + ) AzureXmlSerializer.data_to_xml( [('AdminUsername', configuration.admin_user_name)], @@ -2052,13 +2085,30 @@ def 
windows_configuration_to_xml(configuration, xml): @staticmethod def linux_configuration_to_xml(configuration, xml): - AzureXmlSerializer.data_to_xml([('ConfigurationSetType', configuration.configuration_set_type)], xml) - AzureXmlSerializer.data_to_xml([('HostName', configuration.host_name)], xml) - AzureXmlSerializer.data_to_xml([('UserName', configuration.user_name)], xml) - AzureXmlSerializer.data_to_xml([('UserPassword', configuration.user_password)], xml) - AzureXmlSerializer.data_to_xml( - [('DisableSshPasswordAuthentication', configuration.disable_ssh_password_authentication, _lower)], + [('ConfigurationSetType', configuration.configuration_set_type)], + xml + ) + AzureXmlSerializer.data_to_xml( + [('HostName', configuration.host_name)], + xml + ) + AzureXmlSerializer.data_to_xml( + [('UserName', configuration.user_name)], + xml + ) + AzureXmlSerializer.data_to_xml( + [('UserPassword', configuration.user_password)], + xml + ) + AzureXmlSerializer.data_to_xml( + [ + ( + 'DisableSshPasswordAuthentication', + configuration.disable_ssh_password_authentication, + _lower + ) + ], xml ) @@ -2073,13 +2123,19 @@ def linux_configuration_to_xml(configuration, xml): for key in configuration.ssh.public_keys: pkey = ET.Element("PublicKey") pkeys.append(pkey) - AzureXmlSerializer.data_to_xml([('Fingerprint', key.fingerprint)], pkey) + AzureXmlSerializer.data_to_xml( + [('Fingerprint', key.fingerprint)], + pkey + ) AzureXmlSerializer.data_to_xml([('Path', key.path)], pkey) for key in configuration.ssh.key_pairs: kpair = ET.Element("KeyPair") kpairs.append(kpair) - AzureXmlSerializer.data_to_xml([('Fingerprint', key.fingerprint)], kpair) + AzureXmlSerializer.data_to_xml( + [('Fingerprint', key.fingerprint)], + kpair + ) AzureXmlSerializer.data_to_xml([('Path', key.path)], kpair) return xml @@ -2099,37 +2155,69 @@ def network_configuration_to_xml(configuration, xml): input_endpoints.append(input_endpoint) AzureXmlSerializer.data_to_xml( - [('LoadBalancedEndpointSetName', 
endpoint.load_balanced_endpoint_set_name)], + [ + ( + 'LoadBalancedEndpointSetName', + endpoint.load_balanced_endpoint_set_name + ) + ], input_endpoint ) - AzureXmlSerializer.data_to_xml([('LocalPort', endpoint.local_port)], input_endpoint) - AzureXmlSerializer.data_to_xml([('Name', endpoint.name)], input_endpoint) - AzureXmlSerializer.data_to_xml([('Port', endpoint.port)], input_endpoint) + AzureXmlSerializer.data_to_xml( + [('LocalPort', endpoint.local_port)], + input_endpoint + ) + AzureXmlSerializer.data_to_xml( + [('Name', endpoint.name)], + input_endpoint + ) + AzureXmlSerializer.data_to_xml( + [('Port', endpoint.port)], + input_endpoint + ) if (endpoint.load_balancer_probe.path or - endpoint.load_balancer_probe.port or - endpoint.load_balancer_probe.protocol): + endpoint.load_balancer_probe.port or + endpoint.load_balancer_probe.protocol): load_balancer_probe = ET.Element("LoadBalancerProbe") input_endpoint.append(load_balancer_probe) - AzureXmlSerializer.data_to_xml([('Path', endpoint.load_balancer_probe.path)], load_balancer_probe) - AzureXmlSerializer.data_to_xml([('Port', endpoint.load_balancer_probe.port)], load_balancer_probe) + AzureXmlSerializer.data_to_xml( + [('Path', endpoint.load_balancer_probe.path)], + load_balancer_probe + ) + AzureXmlSerializer.data_to_xml( + [('Port', endpoint.load_balancer_probe.port)], + load_balancer_probe + ) AzureXmlSerializer.data_to_xml( [('Protocol', endpoint.load_balancer_probe.protocol)], load_balancer_probe ) - AzureXmlSerializer.data_to_xml([('Protocol', endpoint.protocol)], input_endpoint) AzureXmlSerializer.data_to_xml( - [('EnableDirectServerReturn', endpoint.enable_direct_server_return, _lower)], + [('Protocol', endpoint.protocol)], + input_endpoint + ) + AzureXmlSerializer.data_to_xml( + [ + ( + 'EnableDirectServerReturn', + endpoint.enable_direct_server_return, + _lower + ) + ], input_endpoint ) subnet_names = ET.Element("SubnetNames") xml.append(subnet_names) for name in configuration.subnet_names: - 
AzureXmlSerializer.data_to_xml([('SubnetName', name)], subnet_names) + AzureXmlSerializer.data_to_xml( + [('SubnetName', name)], + subnet_names + ) return xml @@ -2176,7 +2264,10 @@ def role_to_xml(availability_set_name, ) if availability_set_name is not None: - AzureXmlSerializer.data_to_xml([('AvailabilitySetName', availability_set_name)], xml) + AzureXmlSerializer.data_to_xml( + [('AvailabilitySetName', availability_set_name)], + xml + ) if data_virtual_hard_disks is not None: vhds = ET.Element("DataVirtualHardDisks") @@ -2185,24 +2276,59 @@ def role_to_xml(availability_set_name, for hd in data_virtual_hard_disks: vhd = ET.Element("DataVirtualHardDisk") vhds.append(vhd) - AzureXmlSerializer.data_to_xml([('HostCaching', hd.host_caching)], vhd) - AzureXmlSerializer.data_to_xml([('DiskLabel', hd.disk_label)], vhd) - AzureXmlSerializer.data_to_xml([('DiskName', hd.disk_name)], vhd) - AzureXmlSerializer.data_to_xml([('Lun', hd.lun)], vhd) - AzureXmlSerializer.data_to_xml([('LogicalDiskSizeInGB', hd.logical_disk_size_in_gb)], vhd) - AzureXmlSerializer.data_to_xml([('MediaLink', hd.media_link)], vhd) + AzureXmlSerializer.data_to_xml( + [('HostCaching', hd.host_caching)], + vhd + ) + AzureXmlSerializer.data_to_xml( + [('DiskLabel', hd.disk_label)], + vhd + ) + AzureXmlSerializer.data_to_xml( + [('DiskName', hd.disk_name)], + vhd + ) + AzureXmlSerializer.data_to_xml( + [('Lun', hd.lun)], + vhd + ) + AzureXmlSerializer.data_to_xml( + [('LogicalDiskSizeInGB', hd.logical_disk_size_in_gb)], + vhd + ) + AzureXmlSerializer.data_to_xml( + [('MediaLink', hd.media_link)], + vhd + ) if os_virtual_hard_disk is not None: hd = ET.Element("OSVirtualHardDisk") xml.append(hd) - AzureXmlSerializer.data_to_xml([('HostCaching', os_virtual_hard_disk.host_caching)], hd) - AzureXmlSerializer.data_to_xml([('DiskLabel', os_virtual_hard_disk.disk_label)], hd) - AzureXmlSerializer.data_to_xml([('DiskName', os_virtual_hard_disk.disk_name)], hd) - AzureXmlSerializer.data_to_xml([('MediaLink', 
os_virtual_hard_disk.media_link)], hd) - AzureXmlSerializer.data_to_xml([('SourceImageName', os_virtual_hard_disk.source_image_name)], hd) + AzureXmlSerializer.data_to_xml( + [('HostCaching', os_virtual_hard_disk.host_caching)], + hd + ) + AzureXmlSerializer.data_to_xml( + [('DiskLabel', os_virtual_hard_disk.disk_label)], + hd + ) + AzureXmlSerializer.data_to_xml( + [('DiskName', os_virtual_hard_disk.disk_name)], + hd + ) + AzureXmlSerializer.data_to_xml( + [('MediaLink', os_virtual_hard_disk.media_link)], + hd + ) + AzureXmlSerializer.data_to_xml( + [('SourceImageName', os_virtual_hard_disk.source_image_name)], + hd + ) if vm_image_name is not None: - xml += AzureXmlSerializer.data_to_xml([('VMImageName', vm_image_name)]) + xml += AzureXmlSerializer.data_to_xml( + [('VMImageName', vm_image_name)] + ) if role_size is not None: AzureXmlSerializer.data_to_xml([('RoleSize', role_size)], xml) @@ -2265,8 +2391,13 @@ def capture_role_to_xml(post_capture_action, target_image_name, target_image_label, provisioning_configuration): - xml = AzureXmlSerializer.data_to_xml([('OperationType', 'CaptureRoleOperation')]) - AzureXmlSerializer.data_to_xml([('PostCaptureAction', post_capture_action)], xml) + xml = AzureXmlSerializer.data_to_xml( + [('OperationType', 'CaptureRoleOperation')] + ) + AzureXmlSerializer.data_to_xml( + [('PostCaptureAction', post_capture_action)], + xml + ) if provisioning_configuration is not None: provisioning_config = ET.Element("ProvisioningConfiguration") @@ -2283,8 +2414,14 @@ def capture_role_to_xml(post_capture_action, provisioning_config ) - AzureXmlSerializer.data_to_xml([('TargetImageLabel', target_image_label)], xml) - AzureXmlSerializer.data_to_xml([('TargetImageName', target_image_name)], xml) + AzureXmlSerializer.data_to_xml( + [('TargetImageLabel', target_image_label)], + xml + ) + AzureXmlSerializer.data_to_xml( + [('TargetImageName', target_image_name)], + xml + ) doc = AzureXmlSerializer.doc_from_xml('CaptureRoleOperation', xml) return 
ET.tostring(doc, "UTF-8").replace("\n", "") @@ -2305,7 +2442,10 @@ def virtual_machine_deployment_to_xml(deployment_name, doc = AzureXmlSerializer.doc_from_xml('Deployment') AzureXmlSerializer.data_to_xml([('Name', deployment_name)], doc) - AzureXmlSerializer.data_to_xml([('DeploymentSlot', deployment_slot)], doc) + AzureXmlSerializer.data_to_xml( + [('DeploymentSlot', deployment_slot)], + doc + ) AzureXmlSerializer.data_to_xml([('Label', label)], doc) role_list = ET.Element("RoleList") @@ -2521,7 +2661,7 @@ class CertificateSetting(WindowsAzureData): Specifies the name of the certificate store from which retrieve certificate. store_location: - Specifies the target certificate store location on the virtual machine. + Specifies the target certificate store location on the virtual machine The only supported value is LocalMachine. """ @@ -3041,7 +3181,8 @@ def __init__(self, class AzureNodeLocation(NodeLocation): - # we can also have something in here for available services which is an extra to the API with Azure + # we can also have something in here for available services which is an + # extra to the API with Azure def __init__(self, id, name, diff --git a/libcloud/test/compute/test_azure.py b/libcloud/test/compute/test_azure.py index 0538604aa9..ae16757f06 100644 --- a/libcloud/test/compute/test_azure.py +++ b/libcloud/test/compute/test_azure.py @@ -15,6 +15,7 @@ from libcloud.test import MockHttp from libcloud.test.file_fixtures import ComputeFileFixtures +from libcloud.compute.base import Node, NodeState from libcloud.compute.types import Provider from libcloud.compute.providers import get_driver @@ -54,7 +55,12 @@ def test_locations_returned_successfully(self): if location.name == 'Southeast Asia' ) services_result = matched_location.available_services - services_expected = ['Compute','Storage','PersistentVMRole','HighMemory'] + services_expected = [ + 'Compute', + 'Storage', + 'PersistentVMRole', + 'HighMemory' + ] self.assertListEqual(services_result, 
services_expected) vm_role_sizes_result = matched_location.virtual_machine_role_sizes @@ -112,11 +118,18 @@ def test_list_nodes_returned_no_deployments(self): def test_list_nodes_returned_no_cloud_service(self): with self.assertRaises(LibcloudError): - self.driver.list_nodes(ex_cloud_service_name="dcoddkinztest04") + self.driver.list_nodes(ex_cloud_service_name="dcoddkinztest04") def test_restart_node_success(self): - node = type('Node', (object,), dict(id="dc03")) + node = Node( + id="dc03", + name="dc03", + state=NodeState.RUNNING, + public_ips=[], + private_ips=[], + driver=self.driver + ) result = self.driver.reboot_node( node=node, ex_cloud_service_name="dcoddkinztest01", @@ -128,7 +141,14 @@ def test_restart_node_success(self): #simulating attempting to reboot a node that ifas already rebooting def test_restart_node_fail_no_deployment(self): - node = type('Node', (object,), dict(id="dc03")) + node = Node( + id="dc03", + name="dc03", + state=NodeState.RUNNING, + public_ips=[], + private_ips=[], + driver=self.driver + ) with self.assertRaises(LibcloudError): self.driver.reboot_node( @@ -139,7 +159,14 @@ def test_restart_node_fail_no_deployment(self): def test_restart_node_fail_no_cloud_service(self): - node = type('Node', (object,), dict(id="dc03")) + node = Node( + id="dc03", + name="dc03", + state=NodeState.RUNNING, + public_ips=[], + private_ips=[], + driver=self.driver + ) with self.assertRaises(LibcloudError): self.driver.reboot_node( @@ -150,7 +177,14 @@ def test_restart_node_fail_no_cloud_service(self): def test_restart_node_fail_node_not_found(self): - node = type('Node', (object,), dict(id="dc13")) + node = Node( + id="dc13", + name="dc13", + state=NodeState.RUNNING, + public_ips=[], + private_ips=[], + driver=self.driver + ) result = self.driver.reboot_node( node=node, @@ -161,7 +195,14 @@ def test_restart_node_fail_node_not_found(self): def test_destroy_node_success_single_node_in_cloud_service(self): - node = type('Node', (object,), 
dict(id="oddkinz1")) + node = Node( + id="oddkinz1", + name="oddkinz1", + state=NodeState.RUNNING, + public_ips=[], + private_ips=[], + driver=self.driver + ) result = self.driver.destroy_node( node=node, @@ -172,7 +213,14 @@ def test_destroy_node_success_single_node_in_cloud_service(self): def test_destroy_node_success_multiple_nodes_in_cloud_service(self): - node = type('Node', (object,), dict(id="oddkinz1")) + node = Node( + id="oddkinz1", + name="oddkinz1", + state=NodeState.RUNNING, + public_ips=[], + private_ips=[], + driver=self.driver + ) result = self.driver.destroy_node( node=node, @@ -183,7 +231,14 @@ def test_destroy_node_success_multiple_nodes_in_cloud_service(self): def test_destroy_node_fail_node_does_not_exist(self): - node = type('Node', (object,), dict(id="oddkinz2")) + node = Node( + id="oddkinz2", + name="oddkinz2", + state=NodeState.RUNNING, + public_ips=[], + private_ips=[], + driver=self.driver + ) with self.assertRaises(LibcloudError): self.driver.destroy_node( @@ -194,8 +249,14 @@ def test_destroy_node_fail_node_does_not_exist(self): def test_destroy_node_success_cloud_service_not_found(self): - node = dict() - node["name"]="cloudredis" + node = Node( + id="cloudredis", + name="cloudredis", + state=NodeState.RUNNING, + public_ips=[], + private_ips=[], + driver=self.driver + ) with self.assertRaises(LibcloudError): self.driver.destroy_node( @@ -296,7 +357,6 @@ def test_create_node_and_deployment_second_node(self): ) kwargs["name"] = "dcoddkinztest03" - node = type('Node', (object,), dict(id="dc14")) result = self.driver.create_node( ex_cloud_service_name="testdcabc2", **kwargs @@ -304,15 +364,15 @@ def test_create_node_and_deployment_second_node(self): self.assertIsNotNone(result) def test_create_node_and_deployment_second_node_307_response(self): - kwargs = {} - - kwargs["ex_storage_service_name"]="mtlytics" - kwargs["ex_deployment_name"]="dcoddkinztest04" - kwargs["ex_deployment_slot"]="Production" - kwargs["ex_admin_user_id"]="azurecoder" 
+ kwargs = { + "ex_storage_service_name": "mtlytics", + "ex_deployment_name": "dcoddkinztest04", + "ex_deployment_slot": "Production", + "ex_admin_user_id": "azurecoder" + } auth = NodeAuthPassword("Pa55w0rd", False) - kwargs["auth"]= auth + kwargs["auth"] = auth kwargs["size"] = NodeSize( id="ExtraSmall", @@ -458,7 +518,7 @@ def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc(self return (httplib.OK, body, headers, httplib.responses[httplib.OK]) def _3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc_deployments(self, method, url, body, headers): - headers["x-ms-request-id"]="acc33f6756cda6fd96826394fce4c9f3" + headers["x-ms-request-id"] = "acc33f6756cda6fd96826394fce4c9f3" if method == "GET": body = self.fixtures.load('_3761b98b_673d_526c_8d55_fee918758e6e_services_hostedservices_testdcabc_deployments.xml') From ef5b24f28d1890c744a79b448ca71330f03dfe53 Mon Sep 17 00:00:00 2001 From: Michael Bennett Date: Mon, 24 Nov 2014 09:42:33 -0500 Subject: [PATCH 312/315] Updated XML generation for parts added for handling VMImages --- libcloud/compute/drivers/azure.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/libcloud/compute/drivers/azure.py b/libcloud/compute/drivers/azure.py index 98048148f2..65cd0aa94c 100644 --- a/libcloud/compute/drivers/azure.py +++ b/libcloud/compute/drivers/azure.py @@ -70,7 +70,7 @@ def _str(value): """ AZURE_COMPUTE_INSTANCE_TYPES = { 'A0': { - 'id': 'extra small', + 'id': 'ExtraSmall', 'name': 'Extra Small Instance', 'ram': 768, 'disk': 127, @@ -80,7 +80,7 @@ def _str(value): 'cores': 'Shared' }, 'A1': { - 'id': 'small', + 'id': 'Small', 'name': 'Small Instance', 'ram': 1792, 'disk': 127, @@ -90,7 +90,7 @@ def _str(value): 'cores': 1 }, 'A2': { - 'id': 'medium', + 'id': 'Medium', 'name': 'Medium Instance', 'ram': 3584, 'disk': 127, @@ -100,7 +100,7 @@ def _str(value): 'cores': 2 }, 'A3': { - 'id': 'large', + 'id': 'Large', 'name': 'Large Instance', 'ram': 7168, 
'disk': 127, @@ -110,7 +110,7 @@ def _str(value): 'cores': 4 }, 'A4': { - 'id': 'extra large', + 'id': 'ExtraLarge', 'name': 'Extra Large Instance', 'ram': 14336, 'disk': 127, @@ -2326,8 +2326,9 @@ def role_to_xml(availability_set_name, ) if vm_image_name is not None: - xml += AzureXmlSerializer.data_to_xml( - [('VMImageName', vm_image_name)] + AzureXmlSerializer.data_to_xml( + [('VMImageName', vm_image_name)], + xml ) if role_size is not None: From 123dda8d2202b2ebd84f23e7ec3c43d0710a585f Mon Sep 17 00:00:00 2001 From: Michael Bennett Date: Mon, 24 Nov 2014 11:03:58 -0500 Subject: [PATCH 313/315] Fixed a bug where a cloud service name wasn't converted to a storage account name properly and fixed a bug in checking the cloud storage name uniqueness where an invalid service name would cause a bad request and would return None causing it to look like the storage account already existed, even if it did not --- libcloud/compute/drivers/azure.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/libcloud/compute/drivers/azure.py b/libcloud/compute/drivers/azure.py index 65cd0aa94c..b43ba82740 100644 --- a/libcloud/compute/drivers/azure.py +++ b/libcloud/compute/drivers/azure.py @@ -559,7 +559,7 @@ def create_node(self, if ex_storage_service_name is None: ex_storage_service_name = ex_cloud_service_name ex_storage_service_name = re.sub( - ur'[\W_]+', + ur'[\W_-]+', u'', ex_storage_service_name.lower(), flags=re.UNICODE @@ -1094,6 +1094,8 @@ def _is_storage_service_unique(self, service_name=None): ), AvailabilityResponse ) + + self.raise_for_response(_check_availability, 200) return _check_availability.result From 126073cc81e7125a2a309700c1524a9529bb6bcb Mon Sep 17 00:00:00 2001 From: Michael Bennett Date: Wed, 26 Nov 2014 16:16:19 -0500 Subject: [PATCH 314/315] Removed unused/redundant azure_compute Provider --- libcloud/compute/providers.py | 2 -- libcloud/compute/types.py | 1 - 2 files changed, 3 deletions(-) diff --git a/libcloud/compute/providers.py 
b/libcloud/compute/providers.py index f17a5352af..c1b212f972 100644 --- a/libcloud/compute/providers.py +++ b/libcloud/compute/providers.py @@ -155,8 +155,6 @@ ('libcloud.compute.drivers.profitbricks', 'ProfitBricksNodeDriver'), Provider.VULTR: ('libcloud.compute.drivers.vultr', 'VultrNodeDriver'), - Provider.AZURE_COMPUTE: - ('libcloud.compute.drivers.azure_compute', 'AzureNodeDriver'), # Deprecated Provider.CLOUDSIGMA_US: diff --git a/libcloud/compute/types.py b/libcloud/compute/types.py index f617df0218..84732b34e8 100644 --- a/libcloud/compute/types.py +++ b/libcloud/compute/types.py @@ -131,7 +131,6 @@ class Provider(object): VSPHERE = 'vsphere' PROFIT_BRICKS = 'profitbricks' VULTR = 'vultr' - AZURE_COMPUTE = 'azure_compute' # OpenStack based providers HPCLOUD = 'hpcloud' From 65f4a7ac2a91d87c1e3e12f763e6a07ae8109501 Mon Sep 17 00:00:00 2001 From: Michael Bennett Date: Mon, 12 Jan 2015 14:24:55 -0500 Subject: [PATCH 315/315] Added methods for adding, and setting the instance endpoints of a VM. 
Also added existing instance endpoints for a node to the extra dict --- libcloud/compute/drivers/azure.py | 92 ++++++++++++++++++++++++++++++- 1 file changed, 90 insertions(+), 2 deletions(-) diff --git a/libcloud/compute/drivers/azure.py b/libcloud/compute/drivers/azure.py index b43ba82740..2b48c4f5a9 100644 --- a/libcloud/compute/drivers/azure.py +++ b/libcloud/compute/drivers/azure.py @@ -811,6 +811,83 @@ def list_cloud_services(self): HostedServices ) + def ex_add_instance_endpoints(self, + node, + endpoints, + ex_deployment_slot="Production"): + + all_endpoints = [ + { + "name": endpoint.name, + "protocol": endpoint.protocol, + "port": endpoint.public_port, + "local_port": endpoint.local_port, + + } + for endpoint in node.extra['instance_endpoints'] + ] + + all_endpoints.extend(endpoints) + + return self.ex_set_instance_endpoints(node, all_endpoints, ex_deployment_slot) + + def ex_set_instance_endpoints(self, + node, + endpoints, + ex_deployment_slot="Production"): + """ + endpoint = ConfigurationSetInputEndpoint( + name=u'SSH', + protocol=u'tcp', + port=port, + local_port=u'22', + load_balanced_endpoint_set_name=None, + enable_direct_server_return=False + ) + { + 'name': u'SSH', + 'protocol': u'tcp', + 'port': port, + 'local_port': u'22' + } + """ + + ex_cloud_service_name = node.extra['ex_cloud_service_name'] + vm_role_name = node.name + + network_config = ConfigurationSet() + network_config.configuration_set_type = 'NetworkConfiguration' + + for endpoint in endpoints: + new_endpoint = ConfigurationSetInputEndpoint(**endpoint) + network_config.input_endpoints.items.append(new_endpoint) + + _deployment_name = self._get_deployment( + service_name=ex_cloud_service_name, + deployment_slot=ex_deployment_slot + ).name + + response = self._perform_put( + self._get_role_path( + ex_cloud_service_name, + _deployment_name, + vm_role_name + ), + AzureXmlSerializer.add_role_to_xml( + None, # role_name + None, # system_config + None, # os_virtual_hard_disk + 
'PersistentVMRole', # role_type + network_config, # network_config + None, # availability_set_name + None, # data_virtual_hard_disks + None, # vm_image + None # role_size + ) + ) + + self.raise_for_response(response, 202) + """ Functions not implemented """ @@ -902,6 +979,7 @@ def _to_node(self, data, ex_cloud_service_name=None, virtual_ips=None): private_ips=[data.ip_address], driver=self.connection.driver, extra={ + 'instance_endpoints': data.instance_endpoints, 'remote_desktop_port': remote_desktop_port, 'ssh_port': ssh_port, 'power_state': data.power_state, @@ -1171,6 +1249,18 @@ def _perform_post(self, path, body, response_type=None, async=False): return response + def _perform_put(self, path, body, response_type=None, async=False): + request = AzureHTTPRequest() + request.method = 'PUT' + request.host = azure_service_management_host + request.path = path + request.body = self._get_request_body(body) + request.path, request.query = self._update_request_uri_query(request) + request.headers = self._update_management_header(request) + response = self._perform_request(request) + + return response + def _perform_delete(self, path, async=False): request = AzureHTTPRequest() request.method = 'DELETE' @@ -1669,12 +1759,10 @@ def raise_for_response(self, response, valid_response): #def get_connection(self): # certificate_path = "/Users/baldwin/.azure/managementCertificate.pem" # port = HTTPS_PORT - # connection = HTTPSConnection( # azure_service_management_host, # int(port), # cert_file=certificate_path) - # return connection """