diff --git a/.gitignore b/.gitignore index 7346003bc84..c003f036c14 100644 --- a/.gitignore +++ b/.gitignore @@ -60,6 +60,7 @@ src/build /.vs/config/applicationhost.config .vscode/settings.json .vscode/.ropeproject/ +.vscode/cSpell.json .project .pydevproject diff --git a/src/command_modules/azure-cli-sf/HISTORY.rst b/src/command_modules/azure-cli-sf/HISTORY.rst new file mode 100644 index 00000000000..21ef530546f --- /dev/null +++ b/src/command_modules/azure-cli-sf/HISTORY.rst @@ -0,0 +1,10 @@ +.. :changelog: + +Release History +=============== + +1.0.0 (2017-05-04) +++++++++++++++++++ + +* Initial release of Service Fabric module. This corresponds to 5.6 Service +Fabric product release. diff --git a/src/command_modules/azure-cli-sf/MANIFEST.in b/src/command_modules/azure-cli-sf/MANIFEST.in new file mode 100644 index 00000000000..bb37a2723da --- /dev/null +++ b/src/command_modules/azure-cli-sf/MANIFEST.in @@ -0,0 +1 @@ +include *.rst diff --git a/src/command_modules/azure-cli-sf/README.rst b/src/command_modules/azure-cli-sf/README.rst new file mode 100644 index 00000000000..0d3ce7bc4a8 --- /dev/null +++ b/src/command_modules/azure-cli-sf/README.rst @@ -0,0 +1,6 @@ +Microsoft Azure CLI Service Fabric Module +========================================= + +This package is for the `sf` module. It contains commands that can be used +to manage and administer Service Fabric clusters. + diff --git a/src/command_modules/azure-cli-sf/azure/__init__.py b/src/command_modules/azure-cli-sf/azure/__init__.py new file mode 100644 index 00000000000..f299979eee7 --- /dev/null +++ b/src/command_modules/azure-cli-sf/azure/__init__.py @@ -0,0 +1,8 @@ +# ------------------------------------------------------------------------------ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# ------------------------------------------------------------------------------ + +import pkg_resources +pkg_resources.declare_namespace(__name__) diff --git a/src/command_modules/azure-cli-sf/azure/cli/__init__.py b/src/command_modules/azure-cli-sf/azure/cli/__init__.py new file mode 100644 index 00000000000..f299979eee7 --- /dev/null +++ b/src/command_modules/azure-cli-sf/azure/cli/__init__.py @@ -0,0 +1,8 @@ +# ------------------------------------------------------------------------------ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# ------------------------------------------------------------------------------ + +import pkg_resources +pkg_resources.declare_namespace(__name__) diff --git a/src/command_modules/azure-cli-sf/azure/cli/command_modules/__init__.py b/src/command_modules/azure-cli-sf/azure/cli/command_modules/__init__.py new file mode 100644 index 00000000000..f299979eee7 --- /dev/null +++ b/src/command_modules/azure-cli-sf/azure/cli/command_modules/__init__.py @@ -0,0 +1,8 @@ +# ------------------------------------------------------------------------------ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# ------------------------------------------------------------------------------ + +import pkg_resources +pkg_resources.declare_namespace(__name__) diff --git a/src/command_modules/azure-cli-sf/azure/cli/command_modules/sf/__init__.py b/src/command_modules/azure-cli-sf/azure/cli/command_modules/sf/__init__.py new file mode 100644 index 00000000000..ec62a0091c3 --- /dev/null +++ b/src/command_modules/azure-cli-sf/azure/cli/command_modules/sf/__init__.py @@ -0,0 +1,17 @@ +# ----------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# Licensed under the MIT License. See License.txt in the project root for +# license information. +# ----------------------------------------------------------------------------- + +import azure.cli.command_modules.sf._help # pylint: disable=unused-import + + +def load_params(_): + # pylint: disable=redefined-outer-name + import azure.cli.command_modules.sf._params + + +def load_commands(): + # pylint: disable=redefined-outer-name + import azure.cli.command_modules.sf.commands diff --git a/src/command_modules/azure-cli-sf/azure/cli/command_modules/sf/_factory.py b/src/command_modules/azure-cli-sf/azure/cli/command_modules/sf/_factory.py new file mode 100644 index 00000000000..fc3e4d06d17 --- /dev/null +++ b/src/command_modules/azure-cli-sf/azure/cli/command_modules/sf/_factory.py @@ -0,0 +1,40 @@ +# ----------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# ----------------------------------------------------------------------------- + + +def cf_sf_client(_): + from azure.cli.core.util import CLIError + from azure.servicefabric import ServiceFabricClientAPIs + from azure.cli.command_modules.sf.custom import ( + sf_get_cert_info, sf_get_connection_endpoint, + sf_get_ca_cert_info, sf_get_verify_setting + ) + from azure.cli.command_modules.sf.cluster_auth import ( + ClientCertAuthentication + ) + from azure.cli.core.commands.client_factory import ( + configure_common_settings + ) + + endpoint = sf_get_connection_endpoint() + if endpoint is None: + raise CLIError( + "Connection endpoint not specified, run 'az sf cluster " + "select' first." 
+ ) + + cert = sf_get_cert_info() + if cert is not None: + ca_cert = sf_get_ca_cert_info() + else: + ca_cert = None + + no_verify = sf_get_verify_setting() + + auth = ClientCertAuthentication(cert, ca_cert, no_verify) + client = ServiceFabricClientAPIs(auth, base_url=endpoint) + configure_common_settings(client) + return client diff --git a/src/command_modules/azure-cli-sf/azure/cli/command_modules/sf/_help.py b/src/command_modules/azure-cli-sf/azure/cli/command_modules/sf/_help.py new file mode 100644 index 00000000000..875f5a30f61 --- /dev/null +++ b/src/command_modules/azure-cli-sf/azure/cli/command_modules/sf/_help.py @@ -0,0 +1,47 @@ +# ----------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# ----------------------------------------------------------------------------- + +from azure.cli.core.help_files import helps + +# pylint: disable=line-too-long + +helps["sf"] = """ + type: group + short-summary: Manage and administer a Service Fabric cluster +""" +helps["sf application"] = """ + type: group + short-summary: Manage the applications running on a Service Fabric cluster +""" +helps["sf chaos"] = """ + type: group + short-summary: Manage the Service Fabric Chaos service, designed to + simulate real failures +""" +helps["sf cluster"] = """ + type: group + short-summary: Select and manage a Service Fabric cluster +""" +helps["sf compose"] = """ + type: group + short-summary: Manage and deploy applications created from Docker Compose +""" +helps["sf node"] = """ + type: group + short-summary: Manage the nodes that create a Service Fabric cluster +""" +helps["sf partition"] = """ + type: group + short-summary: Manage the partitions of a Service Fabric service +""" +helps["sf replica"] = """ + type: group + short-summary: Manage the replicas of a Service Fabric service partition +""" 
+helps["sf service"] = """ + type: group + short-summary: Manage the services of a Service Fabric application +""" diff --git a/src/command_modules/azure-cli-sf/azure/cli/command_modules/sf/_params.py b/src/command_modules/azure-cli-sf/azure/cli/command_modules/sf/_params.py new file mode 100644 index 00000000000..f140b17f7a0 --- /dev/null +++ b/src/command_modules/azure-cli-sf/azure/cli/command_modules/sf/_params.py @@ -0,0 +1,87 @@ +# ----------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# ----------------------------------------------------------------------------- + +from azure.cli.core.sdk.util import ParametersContext +from azure.cli.core.util import get_json_object + +# For some commands we take JSON strings as possible +with ParametersContext(command="sf application create") as c: + c.register("parameters", ("--parameters",), type=get_json_object, + help="JSON encoded list of application parameters.") + +with ParametersContext(command="sf application create") as c: + c.register("metrics", ("--metrics",), type=get_json_object, + help="JSON encoded list of application metrics and their \ + descriptions.") + +with ParametersContext(command="sf application upgrade") as c: + c.register("parameters", ("--parameters",), type=get_json_object, + help="JSON encoded list of application parameter overrides to \ + be applied when upgrading an application. 
Note, when starting \ + an upgrade, be sure to include the existing application \ + parameters, if any.") + +with ParametersContext(command="sf application upgrade") as c: + c.register("default_service_health_policy", + ("--default_service_health_policy",), + type=get_json_object, + help="JSON encoded specification of the health policy used by \ + default to evaluate the health of a service type.") + +with ParametersContext(command="sf application upgrade") as c: + c.register("service_health_policy", ("--service_health_policy",), + type=get_json_object, + help="JSON encoded map with service type health policy per \ + service type name. The map is empty by default.") + +with ParametersContext(command="sf service create") as c: + c.register("load_metrics", ("--load_metrics",), + type=get_json_object, + help="JSON encoded list of metrics used when load balancing \ + services across nodes.") + +with ParametersContext(command="sf service create") as c: + c.register("placement_policy_list", ("--placement_policy_list",), + type=get_json_object, + help="JSON encoded list of placement policies for the service, \ + and any associated domain names. Policies can be one or more \ + of: `NonPartiallyPlaceService`, `PreferPrimaryDomain`, \ + `RequireDomain`, `requireDomainDistribution`") + +with ParametersContext(command="sf service update") as c: + c.register("load_metrics", ("--load_metrics",), + type=get_json_object, + help="JSON encoded list of metrics used when load balancing \ + services across nodes.") + +with ParametersContext(command="sf service update") as c: + c.register("placement_policy_list", ("--placement_policy_list",), + type=get_json_object, + help="JSON encoded list of placement policies for the service, \ + and any associated domain names. 
Policies can be one or more \ + of: `NonPartiallyPlaceService`, `PreferPrimaryDomain`, \ + `RequireDomain`, `requireDomainDistribution`") + +with ParametersContext(command="sf chaos start") as c: + c.register("application_type_health_policy_map", + ("--application_type_health_policy_map",), + type=get_json_object, + help="JSON encoded list with max percentage unhealthy \ + applications for specific application types. Each entry \ + specifies as a key the application type name and as a value \ + an integer that represents the MaxPercentUnhealthyApplications \ + percentage used to evaluate the applications of the specified \ + application type.") + +with ParametersContext(command="sf node service-package-upload") as c: + c.register("share_policy", + ("--share_policy",), + type=get_json_object, + help="JSON encoded list of sharing policies. Each sharing \ + policy element is composed of a 'name' and 'scope'. The name \ + corresponds to the name of the code, configuration, or data \ + package that is to be shared. The scope can be either 'None', \ + 'All', 'Code', 'Config' or 'Data'.") diff --git a/src/command_modules/azure-cli-sf/azure/cli/command_modules/sf/cluster_auth.py b/src/command_modules/azure-cli-sf/azure/cli/command_modules/sf/cluster_auth.py new file mode 100644 index 00000000000..48c0674c16c --- /dev/null +++ b/src/command_modules/azure-cli-sf/azure/cli/command_modules/sf/cluster_auth.py @@ -0,0 +1,32 @@ +# ----------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# ----------------------------------------------------------------------------- + +from msrest.authentication import Authentication + + +# pylint: disable=too-few-public-methods +class ClientCertAuthentication(Authentication): + """Client certificate authentication for Service Fabric clusters""" + def __init__(self, cert=None, ca_cert=None, no_verify=False): + self.cert = cert + self.ca_cert = ca_cert + self.no_verify = no_verify + + def signed_session(self): + """Create requests session with any required auth headers + applied. + + :rtype: requests.Session. + """ + session = super(ClientCertAuthentication, self).signed_session() + if self.cert is not None: + session.cert = self.cert + if self.ca_cert is not None: + session.verify = self.ca_cert + if self.no_verify: + session.verify = False + + return session diff --git a/src/command_modules/azure-cli-sf/azure/cli/command_modules/sf/commands.py b/src/command_modules/azure-cli-sf/azure/cli/command_modules/sf/commands.py new file mode 100644 index 00000000000..98ae24be547 --- /dev/null +++ b/src/command_modules/azure-cli-sf/azure/cli/command_modules/sf/commands.py @@ -0,0 +1,108 @@ +# ----------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# ----------------------------------------------------------------------------- + +from azure.cli.core.commands import cli_command +from azure.cli.command_modules.sf._factory import cf_sf_client +from azure.cli.core.sdk.util import ( + create_service_adapter, + ServiceGroup +) + +custom_path = "azure.cli.command_modules.sf.custom#{}" +cluster_operations = create_service_adapter("azure.servicefabric", + "ServiceFabricClientAPIs") + +# No client for specific custom commands +cli_command(__name__, "sf cluster select", + "azure.cli.command_modules.sf.custom#sf_select") +cli_command(__name__, "sf application upload", + "azure.cli.command_modules.sf.custom#sf_upload_app") + +with ServiceGroup(__name__, cf_sf_client, cluster_operations, + custom_path) as sg: + # Cluster level commands + with sg.group("sf cluster") as cl_group: + cl_group.command("manifest", "get_cluster_manifest") + cl_group.command("code-version", + "get_provisioned_fabric_code_version_info_list") + cl_group.command("config-version", + "get_provisioned_fabric_config_version_info_list") + cl_group.command("health", "get_cluster_health") + + # Application level commands + with sg.group("sf application") as app_group: + app_group.custom_command("create", "sf_create_app") + app_group.custom_command("report-health", "sf_report_app_health") + app_group.custom_command("upgrade", "sf_upgrade_app") + app_group.command("health", "get_application_health") + app_group.command("manifest", "get_application_manifest") + app_group.command("provision", "provision_application_type") + app_group.command("delete", "delete_application") + app_group.command("unprovision", "unprovision_application_type") + app_group.command("package-delete", "delete_image_store_content") + app_group.command("type", "get_application_type_info_list") + app_group.command("list", "get_application_info_list") + + # Service level commands + with sg.group("sf service") as svc_group: + svc_group.custom_command("create", "sf_create_service") + 
svc_group.custom_command("update", "sf_update_service") + svc_group.custom_command("report-health", "sf_report_svc_health") + svc_group.command("list", "get_service_info_list") + svc_group.command("manifest", "get_service_manifest") + svc_group.command("application-name", "get_application_name_info") + svc_group.command("description", "get_service_description") + svc_group.command("health", "get_service_health") + svc_group.command("resolve", "resolve_service") + + # Partition level commands + with sg.group("sf partition") as partition_group: + partition_group.custom_command("report-health", + "sf_report_partition_health") + partition_group.command("info", "get_partition_info") + partition_group.command("service-name", "get_service_name_info") + partition_group.command("health", "get_partition_health") + + # Replica level commands + with sg.group("sf replica") as replica_group: + replica_group.custom_command("report-health", + "sf_report_replica_health") + replica_group.command("health", "get_replica_health") + + # Node level commands + with sg.group("sf node") as node_group: + node_group.custom_command("report-health", "sf_report_node_health") + node_group.custom_command("service-package-upload", + "sf_service_package_upload") + node_group.command("list", "get_node_info_list") + node_group.command("remove-state", "remove_node_state") + node_group.command("stop", "stop_node") + node_group.command("restart", "restart_node") + node_group.command("start", "start_node") + node_group.command("replica-list", + "get_deployed_service_replica_info_list") + node_group.command("load", "get_node_load_info") + node_group.command("service-package-list", + "get_deployed_service_package_info_list") + node_group.command("service-package", + "get_deployed_service_package_info_list_by_name") + node_group.command("service-type-list", + "get_deployed_service_type_info_list") + node_group.command("service-type", + "get_deployed_service_type_info_by_name") + 
node_group.command("code-package", + "get_deployed_code_package_info_list") + + # Docker Compose commands + with sg.group("sf compose") as compose_group: + compose_group.custom_command("create", "sf_create_compose_application") + compose_group.command("status", "get_compose_application_status") + compose_group.command("list", "get_compose_application_status_list") + compose_group.command("remove", "remove_compose_application") + + # Chaos test commands + with sg.group("sf chaos") as chaos_group: + chaos_group.custom_command("start", "sf_start_chaos") diff --git a/src/command_modules/azure-cli-sf/azure/cli/command_modules/sf/custom.py b/src/command_modules/azure-cli-sf/azure/cli/command_modules/sf/custom.py new file mode 100644 index 00000000000..c4117c114f6 --- /dev/null +++ b/src/command_modules/azure-cli-sf/azure/cli/command_modules/sf/custom.py @@ -0,0 +1,1499 @@ +# ----------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# ----------------------------------------------------------------------------- +# pylint: disable=too-many-lines + +import os +import sys +import urllib.parse +import requests + +import azure.cli.core.azlogging as azlogging + +from azure.cli.core._environment import get_config_dir +from azure.cli.core._config import AzConfig +from azure.cli.core.util import CLIError + +# Really the CLI should do this for us but I cannot see how to get it to +CONFIG_PATH = os.path.join(get_config_dir(), "config") +az_config = AzConfig() + +logger = azlogging.get_az_logger(__name__) + + +def sf_create_compose_application( # pylint: disable=too-many-arguments + file, application_id, repo_user=None, encrypted=False, repo_pass=None, + timeout=60): + # We need to read from a file which makes this a custom command + # Encrypted param to indicate a password will be prompted + """ + Creates a Service Fabric application from a Compose file + + :param str application_id: The id of application to create from + Compose file. This is typically the full id of the application + including "fabric:" URI scheme + :param str file: Path to the Compose file to use + :param str repo_user: Container repository user name if needed for + authentication + :param bool encrypted: If true, indicate to use an encrypted password + rather than prompting for a plaintext one + :param str repo_pass: Encrypted container repository password + :param long timeout: The server timeout for performing the operation in + seconds. This specifies the time duration that the client is willing to + wait for the requested operation to complete. The default value for this + parameter is 60 seconds. 
+ """ + from azure.cli.core.util import read_file_content + from azure.cli.command_modules.sf._factory import cf_sf_client + from azure.cli.core.prompting import prompt_pass + # pylint: disable=line-too-long + from azure.servicefabric.models.create_compose_application_description import ( # noqa: justification, no way to shorten + CreateComposeApplicationDescription + ) + from azure.servicefabric.models.repository_credential import ( + RepositoryCredential + ) + + if (any([encrypted, repo_pass]) and + not all([encrypted, repo_pass, repo_user])): + raise CLIError( + "Invalid arguments: [ --application_id --file | " + "--application_id --file --repo_user | --application_id --file " + "--repo_user --encrypted --repo_pass ])" + ) + + if repo_user: + plaintext_pass = prompt_pass("Container repository password: ", False, + "Password for container repository " + "containing container images") + repo_pass = plaintext_pass + + repo_cred = RepositoryCredential(repo_user, repo_pass, encrypted) + + file_contents = read_file_content(file) + + model = CreateComposeApplicationDescription(application_id, file_contents, + repo_cred) + + sf_client = cf_sf_client(None) + sf_client.create_compose_application(model, timeout) + + +def sf_select(endpoint, cert=None, # pylint: disable=too-many-arguments + key=None, pem=None, ca=None, no_verify=False): + """ + Connects to a Service Fabric cluster endpoint. + + + If connecting to secure cluster specify a cert (.crt) and key file (.key) + or a single file with both (.pem). Do not specify both. Optionally, if + connecting to a secure cluster, specify also a path to a CA bundle file + or directory of trusted CA certs. 
+ + :param str endpoint: Cluster endpoint URL, including port and HTTP or HTTPS + prefix + :param str cert: Path to a client certificate file + :param str key: Path to client certificate key file + :param str pem: Path to client certificate, as a .pem file + :param str ca: Path to CA certs directory to treat as valid or CA bundle + file + :param bool no_verify: Disable verification for certificates when using + HTTPS, note: this is an insecure option and should not be used for + production environments + """ + from azure.cli.core._config import set_global_config_value + + usage = ("Valid syntax : --endpoint [ [ --key --cert | --pem ] " + "[ --ca | --no-verify ] ]") + + if ca and not (pem or all([key, cert])): + raise CLIError(usage) + + if no_verify and not (pem or all([key, cert])): + raise CLIError(usage) + + if no_verify and ca: + raise CLIError(usage) + + if any([cert, key]) and not all([cert, key]): + raise CLIError(usage) + + if pem and any([cert, key]): + raise CLIError(usage) + + if pem: + set_global_config_value("servicefabric", "pem_path", pem) + set_global_config_value("servicefabric", "security", "pem") + elif cert: + set_global_config_value("servicefabric", "cert_path", cert) + set_global_config_value("servicefabric", "key_path", key) + set_global_config_value("servicefabric", "security", "cert") + else: + set_global_config_value("servicefabric", "security", "none") + + if ca: + set_global_config_value("servicefabric", "ca_path", ca) + + if no_verify: + set_global_config_value("servicefabric", "no_verify", "True") + else: + set_global_config_value("servicefabric", "no_verify", "False") + + set_global_config_value("servicefabric", "endpoint", endpoint) + + +def sf_get_verify_setting(): + az_config.config_parser.read(CONFIG_PATH) + no_verify = az_config.get("servicefabric", "no_verify", fallback="False") + return no_verify == "True" + + +def sf_get_ca_cert_info(): + az_config.config_parser.read(CONFIG_PATH) + ca_cert = az_config.get("servicefabric", 
"ca_path", fallback=None) + return ca_cert + + +def sf_get_connection_endpoint(): + az_config.config_parser.read(CONFIG_PATH) + return az_config.get("servicefabric", "endpoint", fallback=None) + + +def sf_get_cert_info(): + az_config.config_parser.read(CONFIG_PATH) + security_type = str(az_config.get("servicefabric", + "security", fallback="")) + if security_type == "pem": + pem_path = az_config.get("servicefabric", "pem_path", fallback=None) + return pem_path + elif security_type == "cert": + cert_path = az_config.get("servicefabric", "cert_path", fallback=None) + key_path = az_config.get("servicefabric", "key_path", fallback=None) + return cert_path, key_path + elif security_type == "none": + return None + else: + raise CLIError("Cluster security type not set") + + +class FileIter: # pylint: disable=too-few-public-methods + def __init__(self, file, rel_file_path, print_progress): + self.file = file + self.rel_file_path = rel_file_path + self.print_progress = print_progress + + def __iter__(self): + return self + + def __next__(self): + chunk = self.file.read(100000) + if chunk == b'': + raise StopIteration + else: + self.print_progress(len(chunk), self.rel_file_path) + return chunk + + +def sf_upload_app(path, show_progress=False): + """ + Copies a Service Fabric application package to the image store. + + + The cmdlet copies a Service Fabric application package to the image store. + After copying the application package, use the sf application provision + cmdlet to register the application type. + + Can optionally display upload progress for each file in the package. + Upload progress is sent to `stderr`. 
+ + :param str path: The path to your local application package + :param bool show_progress: Show file upload progress + """ + abspath = os.path.abspath(path) + basename = os.path.basename(abspath) + endpoint = sf_get_connection_endpoint() + cert = sf_get_cert_info() + ca_cert = False + if cert is not None: + ca_cert = sf_get_ca_cert_info() + total_files_count = 0 + current_files_count = 0 + total_files_size = 0 + # For py2 we use dictionary instead of nonlocal + current_files_size = {"size": 0} + + for root, _, files in os.walk(abspath): + total_files_count += (len(files) + 1) + for file in files: + t = os.stat(os.path.join(root, file)) + total_files_size += t.st_size + + def print_progress(size, rel_file_path): + current_files_size["size"] += size + if show_progress: + print("[{}/{}] files, [{}/{}] bytes, {}".format( + current_files_count, + total_files_count, + current_files_size["size"], + total_files_size, + rel_file_path), file=sys.stderr) + + for root, _, files in os.walk(abspath): + rel_path = os.path.normpath(os.path.relpath(root, abspath)) + for file in files: + url_path = ( + os.path.normpath(os.path.join("ImageStore", basename, + rel_path, file)) + ).replace("\\", "/") + fp = os.path.normpath(os.path.join(root, file)) + with open(fp, 'rb') as file_opened: + url_parsed = list(urllib.parse.urlparse(endpoint)) + url_parsed[2] = url_path + url_parsed[4] = urllib.parse.urlencode( + {"api-version": "3.0-preview"}) + url = urllib.parse.urlunparse(url_parsed) + file_iter = FileIter(file_opened, os.path.normpath( + os.path.join(rel_path, file) + ), print_progress) + requests.put(url, data=file_iter, cert=cert, + verify=ca_cert) + current_files_count += 1 + print_progress(0, os.path.normpath( + os.path.join(rel_path, file) + )) + url_path = ( + os.path.normpath(os.path.join("ImageStore", basename, + rel_path, "_.dir")) + ).replace("\\", "/") + url_parsed = list(urllib.parse.urlparse(endpoint)) + url_parsed[2] = url_path + url_parsed[4] = 
urllib.parse.urlencode({"api-version": "3.0-preview"}) + url = urllib.parse.urlunparse(url_parsed) + requests.put(url, cert=cert, verify=ca_cert) + current_files_count += 1 + print_progress(0, os.path.normpath(os.path.join(rel_path, '_.dir'))) + + if show_progress: + print("[{}/{}] files, [{}/{}] bytes sent".format( + current_files_count, + total_files_count, + current_files_size["size"], + total_files_size), file=sys.stderr) + + +def sf_create_app(name, # pylint: disable=too-many-locals,too-many-arguments + app_type, version, parameters=None, min_node_count=0, + max_node_count=0, metrics=None, timeout=60): + """ + Creates a Service Fabric application using the specified description. + + :param str name: Application name + :param str app_type: Application type + :param str version: Application version + :param long min_node_count: The minimum number of nodes where Service + Fabric will reserve capacity for this application. Note that this does not + mean that the services of this application will be placed on all of those + nodes. + :param long max_node_count: The maximum number of nodes where Service + Fabric will reserve capacity for this application. Note that this does not + mean that the services of this application will be placed on all of those + nodes. + :param long timeout: The server timeout for performing the operation in + seconds. This specifies the time duration that the client is willing to + wait for the requested operation to complete. The default value for this + parameter is 60 seconds. 
+ """ + from azure.servicefabric.models.application_description import ( + ApplicationDescription + ) + from azure.servicefabric.models.application_parameter import ( + ApplicationParameter + ) + from azure.servicefabric.models.application_capacity_description import ( + ApplicationCapacityDescription + ) + from azure.servicefabric.models.application_metric_description import ( + ApplicationMetricDescription + ) + from azure.cli.command_modules.sf._factory import cf_sf_client + + if min_node_count > max_node_count: + raise CLIError("Note, the minimum node reserve capacity count cannot " + "be more than the maximum node count") + + app_params = None + if parameters: + app_params = [] + for k in parameters: + # Create an application parameter for every of these + p = ApplicationParameter(k, parameters[k]) + app_params.append(p) + + # For simplicity, we assume user pass in valid key names in the list, or + # ignore the input + app_metrics = None + if metrics: + app_metrics = [] + for k in metrics: + metric = metrics[k] + metric_name = metric.get("name", None) + if metric_name is None: + raise CLIError("Could not decode required application metric " + "name") + metric_max_cap = metric.get("maximum_capacity", 0) + metric_reserve_cap = metric.get("reservation_capacity", 0) + metric_total_cap = metric.get("total_application_capacity", 0) + metric_desc = ApplicationMetricDescription(metric_name, + metric_max_cap, + metric_reserve_cap, + metric_total_cap) + app_metrics.append(metric_desc) + + app_cap_desc = ApplicationCapacityDescription(min_node_count, + max_node_count, + app_metrics) + + app_desc = ApplicationDescription(name, app_type, version, app_params, + app_cap_desc) + + sf_client = cf_sf_client(None) + sf_client.create_application(app_desc, timeout) + + +def sf_upgrade_app( # pylint: disable=too-many-arguments,too-many-locals + name, version, parameters, mode="UnmonitoredAuto", + replica_set_check_timeout=None, force_restart=None, + failure_action=None, 
health_check_wait_duration=None, + health_check_stable_duration=None, + health_check_retry_timeout=None, upgrade_timeout=None, + upgrade_domain_timeout=None, warning_as_error=False, + max_unhealthy_apps=0, default_service_health_policy=None, + service_health_policy=None, timeout=60): + """ + Starts upgrading an application in the Service Fabric cluster. + + Validates the supplied application upgrade parameters and starts upgrading + the application if the parameters are valid. + + :param str name: Application name. The name of the target application, + including the 'fabric' URI scheme. + :param str version: The target application type version (found in the + application manifest) for the application upgrade. + :param str mode: The mode used to monitor health during a rolling upgrade. + :param long replica_set_check_timeout: The maximum amount of time to block + processing of an upgrade domain and prevent loss of availability when + there are unexpected issues. Measured in seconds. + :param bool force_restart: Forcefully restart processes during upgrade even + when the code version has not changed. + :param str failure_action: The action to perform when a Monitored upgrade + encounters monitoring policy or health policy violations. + :param int health_check_wait_duration: The amount of time to wait after + completing an upgrade domain before applying health policies. Measured in + milliseconds. + :param int health_check_stable_duration: The amount of time that the + application or cluster must remain healthy before the upgrade proceeds + to the next upgrade domain. Measured in milliseconds. + :param int health_check_retry_timeout: The amount of time to retry health + evaluations when the application or cluster is unhealthy before the failure + action is executed. Measured in milliseconds. + :param int upgrade_timeout: The amount of time the overall upgrade has to + complete before FailureAction is executed. Measured in milliseconds. 
+ :param int upgrade_domain_timeout: The amount of time each upgrade domain + has to complete before FailureAction is executed. Measured in milliseconds. + :param bool warning_as_error: Treat health evaluation warnings with the + same severity as errors. + :param int max_unhealthy_apps: The maximum allowed percentage of unhealthy + deployed applications. Represented as a number between 0 and 100. + :param long timeout: The server timeout for performing the operation in + seconds. This specifies the time duration that the client is willing to + wait for the requested operation to complete. The default value for this + parameter is 60 seconds. + """ + from azure.servicefabric.models.application_upgrade_description import ( + ApplicationUpgradeDescription + ) + from azure.servicefabric.models.application_parameter import ( + ApplicationParameter + ) + from azure.servicefabric.models.monitoring_policy_description import ( + MonitoringPolicyDescription + ) + from azure.servicefabric.models.application_health_policy import ( + ApplicationHealthPolicy + ) + from azure.servicefabric.models.service_type_health_policy import ( + ServiceTypeHealthPolicy + ) + # pylint: disable=line-too-long + from azure.servicefabric.models.service_type_health_policy_map_item import ( # noqa: justification, no way to shorten + ServiceTypeHealthPolicyMapItem + ) + from azure.cli.command_modules.sf._factory import cf_sf_client + + monitoring_policy = MonitoringPolicyDescription( + failure_action, health_check_wait_duration, + health_check_stable_duration, health_check_retry_timeout, + upgrade_timeout, upgrade_domain_timeout + ) + + app_params = None + if parameters: + app_params = [] + for k in parameters: + # Create an application parameter for every of these + p = ApplicationParameter(k, parameters[k]) + app_params.append(p) + + def_shp = None + if default_service_health_policy: + # Extract properties from dict using previously defined names + shp = default_service_health_policy.get( + 
"max_percent_unhealthy_partitions_per_service", 0 + ) + rhp = default_service_health_policy.get( + "max_percent_unhealthy_replicas_per_partition", 0 + ) + ushp = default_service_health_policy.get( + "max_percent_unhealthy_services", 0 + ) + def_shp = ServiceTypeHealthPolicy(shp, rhp, ushp) + + map_shp = None + if service_health_policy: + map_shp = [] + for st_desc in service_health_policy: + st_name = st_desc.get("Key", None) + if st_name is None: + raise CLIError("Could not find service type name in service " + "health policy map") + st_policy = st_desc.get("Value", None) + if st_policy is None: + raise CLIError("Could not find service type policy in service " + "health policy map") + st_shp = st_policy.get( + "max_percent_unhealthy_partitions_per_service", 0 + ) + st_rhp = st_policy.get( + "max_percent_unhealthy_replicas_per_partition", 0 + ) + st_ushp = st_policy.get( + "max_percent_unhealthy_services", 0 + ) + + std_policy = ServiceTypeHealthPolicy(st_shp, st_rhp, st_ushp) + std_list_item = ServiceTypeHealthPolicyMapItem(st_name, std_policy) + + map_shp.append(std_list_item) + + app_health_policy = ApplicationHealthPolicy(warning_as_error, + max_unhealthy_apps, def_shp, + map_shp) + + desc = ApplicationUpgradeDescription(name, version, app_params, "Rolling", + mode, replica_set_check_timeout, + force_restart, monitoring_policy, + app_health_policy) + + sf_client = cf_sf_client(None) + sf_client.start_application_upgrade(name, desc, timeout) + # TODO consider additional parameter validation here rather than allowing + # the gateway to reject it and return failure response + + +def sup_correlation_scheme(correlated_service, correlation): + from azure.servicefabric.models.service_correlation_description import ( + ServiceCorrelationDescription + ) + + if (any([correlated_service, correlation]) and + not all([correlated_service, correlation])): + raise CLIError("Must specify both a correlation service and " + "correlation scheme") + + return 
def sup_load_metrics(formatted_metrics):
    """
    Build a list of ServiceLoadMetricDescription objects from pre-parsed
    metric dicts, or return None when no metrics were supplied.

    Each dict must contain "name"; "weight", "primary_default_load",
    "secondary_default_load" and "default_load" are optional.
    """
    from azure.servicefabric.models.service_load_metric_description import (
        ServiceLoadMetricDescription
    )

    if not formatted_metrics:
        return None

    descriptions = []
    for metric in formatted_metrics:
        metric_name = metric.get("name", None)
        if metric_name is None:
            raise CLIError("Could not find specified load metric name")
        descriptions.append(ServiceLoadMetricDescription(
            metric_name,
            metric.get("weight", None),
            metric.get("primary_default_load", None),
            metric.get("secondary_default_load", None),
            metric.get("default_load", None)
        ))
    return descriptions


def sup_placement_policies(formatted_placement_policies):
    """
    Build placement policy description objects from pre-parsed policy dicts,
    or return None when no policies were supplied.

    Each dict must contain "type"; all types except NonPartiallyPlaceService
    also require "domain_name".
    """
    # pylint: disable=line-too-long
    from azure.servicefabric.models.service_placement_non_partially_place_service_policy_description import (  # noqa: justification, no way to shorten
        ServicePlacementNonPartiallyPlaceServicePolicyDescription
    )
    # pylint: disable=line-too-long
    from azure.servicefabric.models.service_placement_prefer_primary_domain_policy_description import (  # noqa: justification, no way to shorten
        ServicePlacementPreferPrimaryDomainPolicyDescription
    )
    # pylint: disable=line-too-long
    from azure.servicefabric.models.service_placement_required_domain_policy_description import (  # noqa: justification, no way to shorten
        ServicePlacementRequiredDomainPolicyDescription
    )
    # pylint: disable=line-too-long
    from azure.servicefabric.models.service_placement_require_domain_distribution_policy_description import (  # noqa: justification, no way to shorten
        ServicePlacementRequireDomainDistributionPolicyDescription
    )

    if not formatted_placement_policies:
        return None

    policies = []
    # Not entirely documented but similar to the property names
    for policy in formatted_placement_policies:
        policy_type = policy.get("type", None)
        if policy_type is None:
            raise CLIError(
                "Could not determine type of specified placement policy"
            )
        if policy_type not in ["NonPartiallyPlaceService",
                               "PreferPrimaryDomain", "RequireDomain",
                               "RequireDomainDistribution"]:
            raise CLIError("Invalid type of placement policy specified")
        domain_name = policy.get("domain_name", None)
        if domain_name is None and policy_type != "NonPartiallyPlaceService":
            raise CLIError(
                "Placement policy type requires target domain name"
            )
        if policy_type == "NonPartiallyPlaceService":
            policies.append(
                ServicePlacementNonPartiallyPlaceServicePolicyDescription()
            )
        elif policy_type == "PreferPrimaryDomain":
            policies.append(
                ServicePlacementPreferPrimaryDomainPolicyDescription(domain_name)  # noqa: justification, no way to shorten
            )
        elif policy_type == "RequireDomain":
            policies.append(
                ServicePlacementRequiredDomainPolicyDescription(domain_name)  # noqa: justification, no way to shorten
            )
        else:  # RequireDomainDistribution, validated above
            policies.append(
                ServicePlacementRequireDomainDistributionPolicyDescription(domain_name)  # noqa: justification, no way to shorten
            )
    return policies


def sup_validate_move_cost(move_cost):
    """Raise CLIError unless move_cost is None or a recognized level."""
    if move_cost not in [None, "Zero", "Low", "Medium", "High"]:
        raise CLIError("Invalid move cost specified")


def sup_stateful_flags(rep_restart_wait=None, quorum_loss_wait=None,
                       standby_replica_keep=None):
    """
    Compute the bitmask indicating which optional stateful service
    durations were supplied (1 = replica restart wait, 2 = quorum loss
    wait, 4 = standby replica keep).
    """
    flags = 0
    if rep_restart_wait is not None:
        flags |= 1
    if quorum_loss_wait is not None:
        flags |= 2
    if standby_replica_keep is not None:
        flags |= 4
    return flags
def sf_create_service(  # pylint: disable=too-many-arguments, too-many-locals
        app_id, name, service_type, stateful=False, stateless=False,
        singleton_scheme=False, named_scheme=False, int_scheme=False,
        named_scheme_list=None, int_scheme_low=None, int_scheme_high=None,
        int_scheme_count=None, constraints=None, correlated_service=None,
        correlation=None, load_metrics=None, placement_policy_list=None,
        move_cost=None, activation_mode=None, dns_name=None,
        target_replica_set_size=None, min_replica_set_size=None,
        replica_restart_wait=None, quorum_loss_wait=None,
        stand_by_replica_keep=None, no_persisted_state=False,
        instance_count=None, timeout=60):
    """
    Creates the specified Service Fabric service from the description.

    :param str app_id: The identity of the parent application. This is
    typically the full id of the application without the 'fabric:' URI scheme.
    :param str name: Name of the service.
    :param str service_type: Name of the service type.
    :param bool stateful: Indicates the service is a stateful service.
    :param bool stateless: Indicates the service is a stateless service.
    :param bool singleton_scheme: Indicates the service should have a single
    partition or be a non-partitioned service.
    :param bool named_scheme: Indicates the service should have multiple named
    partitions.
    :param list of str named_scheme_list: The list of names to partition the
    service across, if using the named partition scheme.
    :param bool int_scheme: Indicates the service should be uniformly
    partitioned across a range of unsigned integers.
    :param str int_scheme_low: The start of the key integer range, if using an
    uniform integer partition scheme.
    :param str int_scheme_high: The end of the key integer range, if using an
    uniform integer partition scheme.
    :param str int_scheme_count: The number of partitions inside the integer
    key range to create, if using an uniform integer partition scheme.
    :param str constraints: The placement constraints as a string. Placement
    constraints are boolean expressions on node properties and allow for
    restricting a service to particular nodes based on the service
    requirements. For example, to place a service on nodes where NodeType
    is blue specify the following:"NodeColor == blue".
    :param str correlated_service: Name of the target service to correlate
    with.
    :param str correlation: Correlate the service with an existing service
    using an alignment affinity. Possible values include: 'Invalid',
    'Affinity', 'AlignedAffinity', 'NonAlignedAffinity'.
    :param list load_metrics: Load metric descriptions for the service.
    :param list placement_policy_list: Placement policy descriptions.
    :param str move_cost: Specifies the move cost for the service. Possible
    values are: 'Zero', 'Low', 'Medium', 'High'.
    :param str activation_mode: The activation mode for the service package.
    Possible values include: 'SharedProcess', 'ExclusiveProcess'.
    :param str dns_name: The DNS name of the service to be created. The
    Service Fabric DNS system service must be enabled for this setting.
    :param int target_replica_set_size: The target replica set size as a
    number. This applies to stateful services only.
    :param int min_replica_set_size: The minimum replica set size as a number.
    This applies to stateful services only.
    :param int replica_restart_wait: The duration, in seconds, between when a
    replica goes down and when a new replica is created. This applies to
    stateful services only.
    :param int quorum_loss_wait: The maximum duration, in seconds, for which a
    partition is allowed to be in a state of quorum loss. This applies to
    stateful services only.
    :param int stand_by_replica_keep: The maximum duration, in seconds, for
    which StandBy replicas will be maintained before being removed. This
    applies to stateful services only.
    :param bool no_persisted_state: If true, this indicates the service has no
    persistent state stored on the local disk, or it only stores state in
    memory.
    :param int instance_count: The instance count. This applies to stateless
    services only.
    :param long timeout: The server timeout for performing the operation in
    seconds. This specifies the time duration that the client is willing to
    wait for the requested operation to complete. The default value for this
    parameter is 60 seconds.
    """
    from azure.servicefabric.models.stateless_service_description import (
        StatelessServiceDescription
    )
    from azure.servicefabric.models.stateful_service_description import (
        StatefulServiceDescription
    )
    from azure.servicefabric.models.named_partition_scheme_description import (
        NamedPartitionSchemeDescription
    )
    # pylint: disable=line-too-long
    from azure.servicefabric.models.singleton_partition_scheme_description import (  # noqa: justification, no way to shorten
        SingletonPartitionSchemeDescription
    )
    # pylint: disable=line-too-long
    from azure.servicefabric.models.uniform_int64_range_partition_scheme_description import (  # noqa: justification, no way to shorten
        UniformInt64RangePartitionSchemeDescription
    )
    from azure.cli.command_modules.sf._factory import cf_sf_client

    # Fix: compare with != rather than "is not 1". Identity comparison
    # against an int literal only works by accident of CPython small-int
    # interning and raises SyntaxWarning on newer interpreters.
    if sum([singleton_scheme, named_scheme, int_scheme]) != 1:
        raise CLIError("Specify exactly one partition scheme")

    # Fix: require exactly one of stateful/stateless, consistent with
    # sf_update_service. Previously both branches could run (stateless
    # silently winning), or neither, sending a None description to the
    # cluster.
    if sum([stateful, stateless]) != 1:
        raise CLIError("Must specify either stateful or stateless, not both")

    part_schema = None
    if singleton_scheme:
        # pylint: disable=redefined-variable-type
        part_schema = SingletonPartitionSchemeDescription()
    elif named_scheme:
        if not named_scheme_list:
            raise CLIError(
                "When specifying named partition scheme, must include list "
                "of names"
            )
        # pylint: disable=redefined-variable-type
        part_schema = NamedPartitionSchemeDescription(len(named_scheme_list),
                                                      named_scheme_list)
    elif int_scheme:
        if not all([int_scheme_low, int_scheme_high, int_scheme_count]):
            raise CLIError(
                "Must specify the full integer range and partition count when "
                "using an uniform integer partition scheme"
            )
        # pylint: disable=redefined-variable-type
        part_schema = UniformInt64RangePartitionSchemeDescription(
            int_scheme_count,
            int_scheme_low,
            int_scheme_high
        )

    corre = sup_correlation_scheme(correlated_service, correlation)
    load_list = sup_load_metrics(load_metrics)
    place_policy = sup_placement_policies(placement_policy_list)
    flags = sup_stateful_flags(replica_restart_wait, quorum_loss_wait,
                               stand_by_replica_keep)

    # API weirdness where we both have to specify a move cost, and indicate
    # the existence of a default move cost
    move_cost_specified = None
    if move_cost is not None:
        sup_validate_move_cost(move_cost)
        move_cost_specified = True

    if activation_mode not in [None, "SharedProcess", "ExclusiveProcess"]:
        raise CLIError("Invalid activation mode specified")

    sd = None
    if stateful:
        if instance_count is not None:
            # Fix: this CLIError (and those in the stateless branch below)
            # was previously constructed but never raised, so invalid
            # argument combinations were silently ignored.
            raise CLIError(
                "Cannot specify instance count for a stateful service"
            )
        sd = StatefulServiceDescription(name, service_type, part_schema,
                                        target_replica_set_size,
                                        min_replica_set_size,
                                        not no_persisted_state,
                                        app_id, None, constraints,
                                        corre, load_list, place_policy,
                                        move_cost, move_cost_specified,
                                        activation_mode, dns_name, flags,
                                        replica_restart_wait, quorum_loss_wait,
                                        stand_by_replica_keep)

    if stateless:
        if target_replica_set_size is not None:
            raise CLIError(
                "Cannot specify target replica set size for stateless service"
            )
        if min_replica_set_size is not None:
            raise CLIError(
                "Cannot specify minimum replica set size for stateless service"
            )
        if replica_restart_wait is not None:
            raise CLIError(
                "Cannot specify replica restart wait duration for stateless "
                "service"
            )
        if quorum_loss_wait is not None:
            raise CLIError(
                "Cannot specify quorum loss wait duration for stateless "
                "service"
            )
        if stand_by_replica_keep is not None:
            raise CLIError(
                "Cannot specify standby replica keep duration for stateless "
                "service"
            )
        # pylint: disable=redefined-variable-type
        sd = StatelessServiceDescription(name, service_type, part_schema,
                                         instance_count, app_id, None,
                                         constraints, corre, load_list,
                                         place_policy, move_cost,
                                         move_cost_specified, activation_mode,
                                         dns_name)

    sf_client = cf_sf_client(None)
    sf_client.create_service(app_id, sd, timeout)
    # TODO Improve parameter set usage display and also validation
    # TODO Consider supporting initialization data for service create
def sf_update_service(service_id,  # pylint: disable=too-many-arguments
                      stateless=False, stateful=False,
                      constraints=None,
                      correlation=None, correlated_service=None,
                      load_metrics=None, placement_policy_list=None,
                      move_cost=None, target_replica_set_size=None,
                      min_replica_set_size=None, replica_restart_wait=None,
                      quorum_loss_wait=None, stand_by_replica_keep=None,
                      instance_count=None, timeout=60):
    """
    Updates the specified service using the given update description.

    :param str service_id: Target service to update. This is typically the
    full id of the service without the 'fabric:' URI scheme.
    :param bool stateless: Indicates the target service is a stateless
    service.
    :param bool stateful: Indicates the target service is a stateful service.
    :param str constraints: The placement constraints as a string. Placement
    constraints are boolean expressions on node properties and allow for
    restricting a service to particular nodes based on the service
    requirements. For example, to place a service on nodes where NodeType is
    blue specify the following:"NodeColor == blue".
    :param str correlation: Correlate the service with an existing service
    using an alignment affinity. Possible values include: 'Invalid',
    'Affinity', 'AlignedAffinity', 'NonAlignedAffinity'.
    :param str correlated_service: Name of the target service to correlate
    with.
    :param list load_metrics: Load metric descriptions for the service.
    :param list placement_policy_list: Placement policy descriptions.
    :param str move_cost: Specifies the move cost for the service. Possible
    values are: 'Zero', 'Low', 'Medium', 'High'.
    :param int target_replica_set_size: The target replica set size as a
    number. This applies to stateful services only.
    :param int min_replica_set_size: The minimum replica set size as a number.
    This applies to stateful services only.
    :param int replica_restart_wait: The duration, in seconds, between when a
    replica goes down and when a new replica is created. This applies to
    stateful services only.
    :param int quorum_loss_wait: The maximum duration, in seconds, for which a
    partition is allowed to be in a state of quorum loss. This applies to
    stateful services only.
    :param int stand_by_replica_keep: The maximum duration, in seconds, for
    which StandBy replicas will be maintained before being removed. This
    applies to stateful services only.
    :param int instance_count: The instance count. This applies to stateless
    services only.
    :param long timeout: The server timeout for performing the operation in
    seconds. This specifies the time duration that the client is willing to
    wait for the requested operation to complete. The default value for this
    parameter is 60 seconds.
    """
    # TODO a few of these parameters are shared across commands, should be
    # moved to not be bound to individual commands
    # TODO Validation for replica numbers inputs

    # pylint: disable=line-too-long
    from azure.servicefabric.models.stateful_service_update_description import (  # noqa: justification, no way to shorten
        StatefulServiceUpdateDescription
    )
    # Fix: the stateless branch must build an *update* description; the
    # previous code imported and constructed StatelessServiceDescription
    # with ServiceUpdateDescription-shaped arguments.
    # pylint: disable=line-too-long
    from azure.servicefabric.models.stateless_service_update_description import (  # noqa: justification, no way to shorten
        StatelessServiceUpdateDescription
    )
    from azure.cli.command_modules.sf._factory import cf_sf_client

    if sum([stateless, stateful]) != 1:
        raise CLIError("Must specify either stateful or stateless, not both")

    corre = sup_correlation_scheme(correlated_service, correlation)
    load_list = sup_load_metrics(load_metrics)
    place_policy = sup_placement_policies(placement_policy_list)

    if move_cost is not None:
        sup_validate_move_cost(move_cost)

    flags = sup_service_update_flags(target_replica_set_size, instance_count,
                                     replica_restart_wait, quorum_loss_wait,
                                     stand_by_replica_keep,
                                     min_replica_set_size, constraints,
                                     place_policy, corre, load_list,
                                     move_cost)

    sud = None
    if stateful:
        if instance_count is not None:
            # Fix: this CLIError (and those in the stateless branch below)
            # was previously constructed but never raised, so invalid
            # argument combinations were silently ignored.
            raise CLIError(
                "Cannot specify instance count for a stateful service"
            )
        sud = StatefulServiceUpdateDescription(flags, constraints, corre,
                                               load_list, place_policy,
                                               move_cost,
                                               target_replica_set_size,
                                               min_replica_set_size,
                                               replica_restart_wait,
                                               quorum_loss_wait,
                                               stand_by_replica_keep)

    if stateless:
        if target_replica_set_size is not None:
            raise CLIError(
                "Cannot specify target replica set size for stateless service"
            )
        if min_replica_set_size is not None:
            raise CLIError(
                "Cannot specify minimum replica set size for stateless service"
            )
        if replica_restart_wait is not None:
            raise CLIError(
                "Cannot specify replica restart wait duration for stateless "
                "service"
            )
        if quorum_loss_wait is not None:
            raise CLIError(
                "Cannot specify quorum loss wait duration for stateless "
                "service"
            )
        if stand_by_replica_keep is not None:
            raise CLIError(
                "Cannot specify standby replica keep duration for stateless "
                "service"
            )
        # pylint: disable=redefined-variable-type
        sud = StatelessServiceUpdateDescription(flags, constraints, corre,
                                                load_list, place_policy,
                                                move_cost, instance_count)

    sf_client = cf_sf_client(None)
    sf_client.update_service(service_id, sud, timeout)
def sf_start_chaos(  # pylint: disable=too-many-arguments
        time_to_run="4294967295", max_cluster_stabilization=60,
        max_concurrent_faults=1, disable_move_replica_faults=False,
        wait_time_between_faults=20,
        wait_time_between_iterations=30, warning_as_error=False,
        max_percent_unhealthy_nodes=0,
        max_percent_unhealthy_applications=0,
        application_type_health_policy_map=None, timeout=60):
    """
    If Chaos is not already running in the cluster, starts running Chaos with
    the specified in Chaos parameters.

    :param str time_to_run: Total time (in seconds) for which Chaos will run
    before automatically stopping. The maximum allowed value is 4,294,967,295
    (System.UInt32.MaxValue).
    :param long max_cluster_stabilization: The maximum amount of time to wait
    for all cluster entities to become stable and healthy.
    :param long max_concurrent_faults: The maximum number of concurrent faults
    induced per iteration.
    :param bool disable_move_replica_faults: Disables the move primary and
    move secondary faults.
    :param long wait_time_between_faults: Wait time (in seconds) between
    consecutive faults within a single iteration.
    :param long wait_time_between_iterations: Time-separation (in seconds)
    between two consecutive iterations of Chaos.
    :param bool warning_as_error: When evaluating cluster health during
    Chaos, treat warnings with the same severity as errors.
    :param int max_percent_unhealthy_nodes: When evaluating cluster health
    during Chaos, the maximum allowed percentage of unhealthy nodes before
    reporting an error.
    :param int max_percent_unhealthy_applications: When evaluating cluster
    health during Chaos, the maximum allowed percentage of unhealthy
    applications before reporting an error.
    :param list application_type_health_policy_map: Per-application-type
    unhealthy percentages, a list of {"key": type name, "value": percent}.
    :param long timeout: The server timeout for performing the operation in
    seconds. This specifies the time duration that the client is willing to
    wait for the requested operation to complete. The default value for this
    parameter is 60 seconds.
    """
    # pylint: disable=line-too-long
    from azure.servicefabric.models.application_type_health_policy_map_item import (  # noqa: justification, no way to shorten
        ApplicationTypeHealthPolicyMapItem
    )
    from azure.servicefabric.models.chaos_parameters import ChaosParameters
    from azure.servicefabric.models.cluster_health_policy import (
        ClusterHealthPolicy
    )
    from azure.cli.command_modules.sf._factory import cf_sf_client

    # Each map entry must carry both the application type name ("key") and
    # its allowed unhealthy percentage ("value")
    policy_items = None
    if application_type_health_policy_map:
        policy_items = []
        for entry in application_type_health_policy_map:
            app_type_name = entry.get("key", None)
            unhealthy_percent = entry.get("value", None)
            if app_type_name is None:
                raise CLIError(
                    "Cannot find application type health policy map name"
                )
            if unhealthy_percent is None:
                raise CLIError(
                    "Cannot find application type health policy map unhealthy "
                    "percent"
                )
            policy_items.append(
                ApplicationTypeHealthPolicyMapItem(app_type_name,
                                                   unhealthy_percent)
            )

    cluster_policy = ClusterHealthPolicy(warning_as_error,
                                         max_percent_unhealthy_nodes,
                                         max_percent_unhealthy_applications,
                                         policy_items)

    # Chaos Context is not currently supported, hence the trailing None
    chaos_params = ChaosParameters(time_to_run, max_cluster_stabilization,
                                   max_concurrent_faults,
                                   not disable_move_replica_faults,
                                   wait_time_between_faults,
                                   wait_time_between_iterations,
                                   cluster_policy,
                                   None)

    sf_client = cf_sf_client(None)
    sf_client.start_chaos(chaos_params, timeout)
def sf_report_app_health(application_id,  # pylint: disable=too-many-arguments
                         source_id, health_property,
                         health_state, ttl=None, description=None,
                         sequence_number=None, remove_when_expired=None,
                         timeout=60):
    """
    Sends a health report on the Service Fabric application.

    Reports health state of the specified Service Fabric application. The
    report is sent to a Service Fabric gateway, which forwards it to the
    health store. The report may be accepted by the gateway but rejected by
    the health store after extra validation, for example because of a stale
    sequence number. To see whether the report was applied in the health
    store, check that it appears in the HealthEvents section.

    :param str application_id: The identity of the application. This is
    typically the full name of the application without the 'fabric:' URI
    scheme.
    :param str source_id: The source name which identifies the
    client/watchdog/system component which generated the health information.
    :param str health_property: The property of the health information. An
    entity can have health reports for different properties. The property is
    a free-form string, so a reporter can categorize the state condition that
    triggers the report; together with source_id it uniquely identifies the
    health information.
    :param str health_state: Possible values include: 'Invalid', 'Ok',
    'Warning', 'Error', 'Unknown'
    :param int ttl: The duration, in milliseconds, for which this health
    report is valid. If not specified, time to live defaults to infinite
    value.
    :param str description: The description of the health information, free
    text up to 4096 characters; longer strings are automatically truncated
    with a "[Truncated]" marker.
    :param str sequence_number: The sequence number for this health report as
    a numeric string, used by the health store to detect stale reports. If
    not specified, a sequence number is auto-generated by the health client.
    :param bool remove_when_expired: If true, the report is removed from the
    health store when it expires; if false (the default), an expired report
    is treated as an error.
    :param long timeout: The server timeout for performing the operation in
    seconds. The default value for this parameter is 60 seconds.
    """

    from azure.servicefabric.models.health_information import HealthInformation
    from azure.cli.command_modules.sf._factory import cf_sf_client

    report = HealthInformation(source_id, health_property, health_state, ttl,
                               description, sequence_number,
                               remove_when_expired)

    cf_sf_client(None).report_application_health(application_id, report,
                                                 timeout)
The property is a + string and not a fixed enumeration to allow the reporter flexibility to + categorize the state condition that triggers the report. For example, a + reporter with SourceId "LocalWatchdog" can monitor the state of the + available disk on a node, so it can report "AvailableDisk" property on + that node. The same reporter can monitor the node connectivity, so it can + report a property "Connectivity" on the same node. In the health store, + these reports are treated as separate health events for the specified node. + Together with the SourceId, the property uniquely identifies the health + information. + :param str health_state: Possible values include: 'Invalid', 'Ok', + 'Warning', 'Error', 'Unknown' + :param int ttl: The duration, in milliseconds, for which this health report + is valid. When clients report periodically, they should send reports with + higher frequency than time to live. If not specified, time to live defaults + to infinite value. + :param str description: The description of the health information. It + represents free text used to add human readable information about the + report. The maximum string length for the description is 4096 characters. + If the provided string is longer, it will be automatically truncated. + When truncated, the last characters of the description contain a marker + "[Truncated]", and total string size is 4096 characters. The presence of + the marker indicates to users that truncation occurred. Note that when + truncated, the description has less than 4096 characters from the original + string. + :param str sequence_number: The sequence number for this health report as a + numeric string. The report sequence number is used by the health store to + detect stale reports. If not specified, a sequence number is auto-generated + by the health client when a report is added. + :param bool remove_when_expired: Value that indicates whether the report is + removed from health store when it expires. 
If set to true, the report is + removed from the health store after it expires. If set to false, the report + is treated as an error when expired. The value of this property is false by + default. When clients report periodically, they should set this value to + false (default). This way, if the reporter has issues (e.g. deadlock) and + can't report, the entity is evaluated at error when the health report + expires. This flags the entity as being in Error health state. + :param long timeout: The server timeout for performing the operation in + seconds. This specifies the time duration that the client is willing to + wait for the requested operation to complete. The default value + for this parameter is 60 seconds. + """ + + # TODO Move common HealthInformation params to _params + + from azure.servicefabric.models.health_information import HealthInformation + from azure.cli.command_modules.sf._factory import cf_sf_client + + info = HealthInformation(source_id, health_property, health_state, ttl, + description, sequence_number, remove_when_expired) + + sf_client = cf_sf_client(None) + sf_client.report_service_health(service_id, info, timeout) + + +def sf_report_partition_health( # pylint: disable=too-many-arguments + partition_id, source_id, health_property, health_state, ttl=None, + description=None, sequence_number=None, remove_when_expired=None, + timeout=60): + """ + Sends a health report on the Service Fabric partition. + + Reports health state of the specified Service Fabric partition. The + report must contain the information about the source of the health + report and property on which it is reported. + The report is sent to a Service Fabric gateway Partition, which + forwards to the health store. + The report may be accepted by the gateway, but rejected by the health + store after extra validation. + For example, the health store may reject the report because of an + invalid parameter, like a stale sequence number. 
+ To see whether the report was applied in the health store, run + GetPartitionHealth and check that the report appears in the + HealthEvents section. + + :param str partition_id: The identity of the partition. + :param str source_id: The source name which identifies the + client/watchdog/system component which generated the health information. + :param str health_property: The property of the health information. An + entity can have health reports for different properties. The property is a + string and not a fixed enumeration to allow the reporter flexibility to + categorize the state condition that triggers the report. For example, a + reporter with SourceId "LocalWatchdog" can monitor the state of the + available disk on a node, so it can report "AvailableDisk" property on + that node. The same reporter can monitor the node connectivity, so it can + report a property "Connectivity" on the same node. In the health store, + these reports are treated as separate health events for the specified node. + Together with the SourceId, the property uniquely identifies the health + information. + :param str health_state: Possible values include: 'Invalid', 'Ok', + 'Warning', 'Error', 'Unknown' + :param int ttl: The duration, in milliseconds, for which this health report + is valid. When clients report periodically, they should send reports with + higher frequency than time to live. If not specified, time to live defaults + to infinite value. + :param str description: The description of the health information. It + represents free text used to add human readable information about the + report. The maximum string length for the description is 4096 characters. + If the provided string is longer, it will be automatically truncated. + When truncated, the last characters of the description contain a marker + "[Truncated]", and total string size is 4096 characters. The presence of + the marker indicates to users that truncation occurred. 
Note that when + truncated, the description has less than 4096 characters from the original + string. + :param str sequence_number: The sequence number for this health report as a + numeric string. The report sequence number is used by the health store to + detect stale reports. If not specified, a sequence number is auto-generated + by the health client when a report is added. + :param bool remove_when_expired: Value that indicates whether the report is + removed from health store when it expires. If set to true, the report is + removed from the health store after it expires. If set to false, the report + is treated as an error when expired. The value of this property is false by + default. When clients report periodically, they should set this value to + false (default). This way, if the reporter has issues (e.g. deadlock) and + can't report, the entity is evaluated at error when the health report + expires. This flags the entity as being in Error health state. + :param long timeout: The server timeout for performing the operation in + seconds. This specifies the time duration that the client is willing to + wait for the requested operation to complete. The default value + for this parameter is 60 seconds. + """ + + # TODO Move common HealthInformation params to _params + + from azure.servicefabric.models.health_information import HealthInformation + from azure.cli.command_modules.sf._factory import cf_sf_client + + info = HealthInformation(source_id, health_property, health_state, ttl, + description, sequence_number, remove_when_expired) + + sf_client = cf_sf_client(None) + sf_client.report_partition_health(partition_id, info, timeout) + + +def sf_report_replica_health( # pylint: disable=too-many-arguments + partition_id, replica_id, source_id, health_state, health_property, + service_kind="Stateful", ttl=None, description=None, + sequence_number=None, remove_when_expired=None, timeout=60): + """ + Sends a health report on the Service Fabric replica. 
+ + Reports health state of the specified Service Fabric replica. The + report must contain the information about the source of the health + report and property on which it is reported. + The report is sent to a Service Fabric gateway Replica, which forwards + to the health store. + The report may be accepted by the gateway, but rejected by the health + store after extra validation. + For example, the health store may reject the report because of an + invalid parameter, like a stale sequence number. + To see whether the report was applied in the health store, run + GetReplicaHealth and check that the report appears in the + HealthEvents section. + + :param str partition_id: The identity of the partition. + :param str replica_id: The identifier of the replica. + :param str source_id: The source name which identifies the + client/watchdog/system component which generated the health information. + :param str health_property: The property of the health information. An + entity can have health reports for different properties. The property is a + string and not a fixed enumeration to allow the reporter flexibility to + categorize the state condition that triggers the report. For example, a + reporter with SourceId "LocalWatchdog" can monitor the state of the + available disk on a node, so it can report "AvailableDisk" property on + that node. The same reporter can monitor the node connectivity, so it can + report a property "Connectivity" on the same node. In the health store, + these reports are treated as separate health events for the specified node. + Together with the SourceId, the property uniquely identifies the health + information. + :param str health_state: Possible values include: 'Invalid', 'Ok', + 'Warning', 'Error', 'Unknown' + :param str service_kind: The kind of service replica (Stateless or + Stateful) for which the health is being reported. Following are the + possible values. 
+ - Stateless - Does not use Service Fabric to make its state highly + available or reliable. The value is 1 + - Stateful - Uses Service Fabric to make its state or part of its + state highly available and reliable. The value is 2. + :param int ttl: The duration, in milliseconds, for which this health report + is valid. When clients report periodically, they should send reports with + higher frequency than time to live. If not specified, time to live defaults + to infinite value. + :param str description: The description of the health information. It + represents free text used to add human readable information about the + report. The maximum string length for the description is 4096 characters. + If the provided string is longer, it will be automatically truncated. + When truncated, the last characters of the description contain a marker + "[Truncated]", and total string size is 4096 characters. The presence of + the marker indicates to users that truncation occurred. Note that when + truncated, the description has less than 4096 characters from the original + string. + :param str sequence_number: The sequence number for this health report as a + numeric string. The report sequence number is used by the health store to + detect stale reports. If not specified, a sequence number is auto-generated + by the health client when a report is added. + :param bool remove_when_expired: Value that indicates whether the report is + removed from health store when it expires. If set to true, the report is + removed from the health store after it expires. If set to false, the report + is treated as an error when expired. The value of this property is false by + default. When clients report periodically, they should set this value to + false (default). This way, if the reporter has issues (e.g. deadlock) and + can't report, the entity is evaluated at error when the health report + expires. This flags the entity as being in Error health state. 
+ :param long timeout: The server timeout for performing the operation in + seconds. This specifies the time duration that the client is willing to + wait for the requested operation to complete. The default value + for this parameter is 60 seconds. + """ + + # TODO Move common HealthInformation params to _params + + from azure.servicefabric.models.health_information import HealthInformation + from azure.cli.command_modules.sf._factory import cf_sf_client + + info = HealthInformation(source_id, health_property, health_state, ttl, + description, sequence_number, remove_when_expired) + + sf_client = cf_sf_client(None) + sf_client.report_replica_health(partition_id, replica_id, info, + service_kind, timeout) + + +def sf_report_node_health(node_name, # pylint: disable=too-many-arguments + source_id, health_property, health_state, + ttl=None, description=None, sequence_number=None, + remove_when_expired=None, timeout=60): + """ + Sends a health report on the Service Fabric node. + + Reports health state of the specified Service Fabric node. The report + must contain the information about the source of the health report + and property on which it is reported. + The report is sent to a Service Fabric gateway node, which forwards to + the health store. + The report may be accepted by the gateway, but rejected by the health + store after extra validation. + For example, the health store may reject the report because of an + invalid parameter, like a stale sequence number. + To see whether the report was applied in the health store, run + GetNodeHealth and check that the report appears in the HealthEvents + section. + + :param str node_name: The name of the node. + :param str source_id: The source name which identifies the + client/watchdog/system component which generated the health information. + :param str health_property: The property of the health information. An + entity can have health reports for different properties. 
The property is a + string and not a fixed enumeration to allow the reporter flexibility to + categorize the state condition that triggers the report. For example, a + reporter with SourceId "LocalWatchdog" can monitor the state of the + available disk on a node, so it can report "AvailableDisk" property on + that node. The same reporter can monitor the node connectivity, so it can + report a property "Connectivity" on the same node. In the health store, + these reports are treated as separate health events for the specified node. + Together with the SourceId, the property uniquely identifies the health + information. + :param str health_state: Possible values include: 'Invalid', 'Ok', + 'Warning', 'Error', 'Unknown' + :param int ttl: The duration, in milliseconds, for which this health report + is valid. When clients report periodically, they should send reports with + higher frequency than time to live. If not specified, time to live defaults + to infinite value. + :param str description: The description of the health information. It + represents free text used to add human readable information about the + report. The maximum string length for the description is 4096 characters. + If the provided string is longer, it will be automatically truncated. + When truncated, the last characters of the description contain a marker + "[Truncated]", and total string size is 4096 characters. The presence of + the marker indicates to users that truncation occurred. Note that when + truncated, the description has less than 4096 characters from the original + string. + :param str sequence_number: The sequence number for this health report as a + numeric string. The report sequence number is used by the health store to + detect stale reports. If not specified, a sequence number is auto-generated + by the health client when a report is added. + :param bool remove_when_expired: Value that indicates whether the report is + removed from health store when it expires. 
If set to true, the report is + removed from the health store after it expires. If set to false, the report + is treated as an error when expired. The value of this property is false by + default. When clients report periodically, they should set this value to + false (default). This way, if the reporter has issues (e.g. deadlock) and + can't report, the entity is evaluated at error when the health report + expires. This flags the entity as being in Error health state. + :param long timeout: The server timeout for performing the operation in + seconds. This specifies the time duration that the client is willing to + wait for the requested operation to complete. The default value + for this parameter is 60 seconds. + """ + + # TODO Move common HealthInformation params to _params + + from azure.servicefabric.models.health_information import HealthInformation + from azure.cli.command_modules.sf._factory import cf_sf_client + + info = HealthInformation(source_id, health_property, health_state, ttl, + description, sequence_number, remove_when_expired) + + sf_client = cf_sf_client(None) + sf_client.report_node_health(node_name, info, timeout) + + +def sf_service_package_upload(node_name, # pylint: disable=too-many-arguments + service_manifest_name, + application_type_name, application_type_version, + share_policy=None, timeout=60): + """ + Downloads packages associated with specified service manifest to the image + cache on specified node. + + :param str node_name: The name of the node. + :param str service_manifest_name: The name of service manifest associated + with the packages that will be downloaded. + :param str application_type_name: The name of the application manifest for + the corresponding requested service manifest. + :param str application_type_version: The version of the application + manifest for the corresponding requested service manifest. + :param long timeout: The server timeout for performing the operation in + seconds. 
This specifies the time duration that the client is willing + to wait for the requested operation to complete. The default value + for this parameter is 60 seconds. + """ + # pylint: disable=line-too-long + from azure.servicefabric.models.deploy_service_package_to_node_description import ( # noqa: justification, no way to shorten + DeployServicePackageToNodeDescription + ) + from azure.servicefabric.models.package_sharing_policy_info import ( + PackageSharingPolicyInfo + ) + from azure.cli.command_modules.sf._factory import cf_sf_client + + list_psps = None + if share_policy: + list_psps = [] + for p in share_policy: + policy_name = p.get("name", None) + if policy_name is None: + raise CLIError("Could not find name of sharing policy element") + policy_scope = p.get("scope", None) + if policy_scope not in ["None", "All", "Code", "Config", "Data"]: + raise CLIError("Invalid policy scope specified") + list_psps.append(PackageSharingPolicyInfo(policy_name, + policy_scope)) + + desc = DeployServicePackageToNodeDescription(service_manifest_name, + application_type_name, + application_type_version, + node_name, list_psps) + sf_client = cf_sf_client(None) + sf_client.deployed_service_package_to_node(node_name, desc, timeout) diff --git a/src/command_modules/azure-cli-sf/azure_bdist_wheel.py b/src/command_modules/azure-cli-sf/azure_bdist_wheel.py new file mode 100644 index 00000000000..3ffa5ea50a9 --- /dev/null +++ b/src/command_modules/azure-cli-sf/azure_bdist_wheel.py @@ -0,0 +1,533 @@ +""" +"wheel" copyright (c) 2012-2017 Daniel Holth and +contributors. 
+ +The MIT License + +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the "Software"), +to deal in the Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, sublicense, +and/or sell copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR +OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. + +Create a Azure wheel (.whl) distribution (a wheel is a built archive format). 
+ +This file is a copy of the official bdist_wheel file from wheel 0.30.0a0, enhanced +of the bottom with some Microsoft extension for Azure SDK for Python + +""" + +import csv +import hashlib +import os +import subprocess +import warnings +import shutil +import json +import sys + +try: + import sysconfig +except ImportError: # pragma nocover + # Python < 2.7 + import distutils.sysconfig as sysconfig + +import pkg_resources + +safe_name = pkg_resources.safe_name +safe_version = pkg_resources.safe_version + +from shutil import rmtree +from email.generator import Generator + +from distutils.core import Command +from distutils.sysconfig import get_python_version + +from distutils import log as logger + +from wheel.pep425tags import get_abbr_impl, get_impl_ver, get_abi_tag, get_platform +from wheel.util import native, open_for_csv +from wheel.archive import archive_wheelfile +from wheel.pkginfo import read_pkg_info, write_pkg_info +from wheel.metadata import pkginfo_to_dict +from wheel import pep425tags, metadata +from wheel import __version__ as wheel_version + +def safer_name(name): + return safe_name(name).replace('-', '_') + +def safer_version(version): + return safe_version(version).replace('-', '_') + +class bdist_wheel(Command): + + description = 'create a wheel distribution' + + user_options = [('bdist-dir=', 'b', + "temporary directory for creating the distribution"), + ('plat-name=', 'p', + "platform name to embed in generated filenames " + "(default: %s)" % get_platform()), + ('keep-temp', 'k', + "keep the pseudo-installation tree around after " + + "creating the distribution archive"), + ('dist-dir=', 'd', + "directory to put final built distributions in"), + ('skip-build', None, + "skip rebuilding everything (for testing/debugging)"), + ('relative', None, + "build the archive using relative paths" + "(default: false)"), + ('owner=', 'u', + "Owner name used when creating a tar file" + " [default: current user]"), + ('group=', 'g', + "Group name used when 
creating a tar file" + " [default: current group]"), + ('universal', None, + "make a universal wheel" + " (default: false)"), + ('python-tag=', None, + "Python implementation compatibility tag" + " (default: py%s)" % get_impl_ver()[0]), + ] + + boolean_options = ['keep-temp', 'skip-build', 'relative', 'universal'] + + def initialize_options(self): + self.bdist_dir = None + self.data_dir = None + self.plat_name = None + self.plat_tag = None + self.format = 'zip' + self.keep_temp = False + self.dist_dir = None + self.distinfo_dir = None + self.egginfo_dir = None + self.root_is_pure = None + self.skip_build = None + self.relative = False + self.owner = None + self.group = None + self.universal = False + self.python_tag = 'py' + get_impl_ver()[0] + self.plat_name_supplied = False + + def finalize_options(self): + if self.bdist_dir is None: + bdist_base = self.get_finalized_command('bdist').bdist_base + self.bdist_dir = os.path.join(bdist_base, 'wheel') + + self.data_dir = self.wheel_dist_name + '.data' + self.plat_name_supplied = self.plat_name is not None + + need_options = ('dist_dir', 'plat_name', 'skip_build') + + self.set_undefined_options('bdist', + *zip(need_options, need_options)) + + self.root_is_pure = not (self.distribution.has_ext_modules() + or self.distribution.has_c_libraries()) + + # Support legacy [wheel] section for setting universal + wheel = self.distribution.get_option_dict('wheel') + if 'universal' in wheel: + # please don't define this in your global configs + val = wheel['universal'][1].strip() + if val.lower() in ('1', 'true', 'yes'): + self.universal = True + + @property + def wheel_dist_name(self): + """Return distribution full name with - replaced with _""" + return '-'.join((safer_name(self.distribution.get_name()), + safer_version(self.distribution.get_version()))) + + def get_tag(self): + # bdist sets self.plat_name if unset, we should only use it for purepy + # wheels if the user supplied it. 
+ if self.plat_name_supplied: + plat_name = self.plat_name + elif self.root_is_pure: + plat_name = 'any' + else: + plat_name = self.plat_name or get_platform() + if plat_name in ('linux-x86_64', 'linux_x86_64') and sys.maxsize == 2147483647: + plat_name = 'linux_i686' + plat_name = plat_name.replace('-', '_').replace('.', '_') + + + if self.root_is_pure: + if self.universal: + impl = 'py2.py3' + else: + impl = self.python_tag + tag = (impl, 'none', plat_name) + else: + impl_name = get_abbr_impl() + impl_ver = get_impl_ver() + # PEP 3149 + abi_tag = str(get_abi_tag()).lower() + tag = (impl_name + impl_ver, abi_tag, plat_name) + supported_tags = pep425tags.get_supported( + supplied_platform=plat_name if self.plat_name_supplied else None) + # XXX switch to this alternate implementation for non-pure: + assert tag == supported_tags[0], "%s != %s" % (tag, supported_tags[0]) + return tag + + def get_archive_basename(self): + """Return archive name without extension""" + + impl_tag, abi_tag, plat_tag = self.get_tag() + + archive_basename = "%s-%s-%s-%s" % ( + self.wheel_dist_name, + impl_tag, + abi_tag, + plat_tag) + return archive_basename + + def run(self): + build_scripts = self.reinitialize_command('build_scripts') + build_scripts.executable = 'python' + + if not self.skip_build: + self.run_command('build') + + install = self.reinitialize_command('install', + reinit_subcommands=True) + install.root = self.bdist_dir + install.compile = False + install.skip_build = self.skip_build + install.warn_dir = False + + # A wheel without setuptools scripts is more cross-platform. + # Use the (undocumented) `no_ep` option to setuptools' + # install_scripts command to avoid creating entry point scripts. + install_scripts = self.reinitialize_command('install_scripts') + install_scripts.no_ep = True + + # Use a custom scheme for the archive, because we have to decide + # at installation time which scheme to use. 
+ for key in ('headers', 'scripts', 'data', 'purelib', 'platlib'): + setattr(install, + 'install_' + key, + os.path.join(self.data_dir, key)) + + basedir_observed = '' + + if os.name == 'nt': + # win32 barfs if any of these are ''; could be '.'? + # (distutils.command.install:change_roots bug) + basedir_observed = os.path.normpath(os.path.join(self.data_dir, '..')) + self.install_libbase = self.install_lib = basedir_observed + + setattr(install, + 'install_purelib' if self.root_is_pure else 'install_platlib', + basedir_observed) + + logger.info("installing to %s", self.bdist_dir) + + self.run_command('install') + + archive_basename = self.get_archive_basename() + + pseudoinstall_root = os.path.join(self.dist_dir, archive_basename) + if not self.relative: + archive_root = self.bdist_dir + else: + archive_root = os.path.join( + self.bdist_dir, + self._ensure_relative(install.install_base)) + + self.set_undefined_options( + 'install_egg_info', ('target', 'egginfo_dir')) + self.distinfo_dir = os.path.join(self.bdist_dir, + '%s.dist-info' % self.wheel_dist_name) + self.egg2dist(self.egginfo_dir, + self.distinfo_dir) + + self.write_wheelfile(self.distinfo_dir) + + self.write_record(self.bdist_dir, self.distinfo_dir) + + # Make the archive + if not os.path.exists(self.dist_dir): + os.makedirs(self.dist_dir) + wheel_name = archive_wheelfile(pseudoinstall_root, archive_root) + + # Sign the archive + if 'WHEEL_TOOL' in os.environ: + subprocess.call([os.environ['WHEEL_TOOL'], 'sign', wheel_name]) + + # Add to 'Distribution.dist_files' so that the "upload" command works + getattr(self.distribution, 'dist_files', []).append( + ('bdist_wheel', get_python_version(), wheel_name)) + + if not self.keep_temp: + if self.dry_run: + logger.info('removing %s', self.bdist_dir) + else: + rmtree(self.bdist_dir) + + def write_wheelfile(self, wheelfile_base, generator='bdist_wheel (' + wheel_version + ')'): + from email.message import Message + msg = Message() + msg['Wheel-Version'] = '1.0' # 
of the spec + msg['Generator'] = generator + msg['Root-Is-Purelib'] = str(self.root_is_pure).lower() + + # Doesn't work for bdist_wininst + impl_tag, abi_tag, plat_tag = self.get_tag() + for impl in impl_tag.split('.'): + for abi in abi_tag.split('.'): + for plat in plat_tag.split('.'): + msg['Tag'] = '-'.join((impl, abi, plat)) + + wheelfile_path = os.path.join(wheelfile_base, 'WHEEL') + logger.info('creating %s', wheelfile_path) + with open(wheelfile_path, 'w') as f: + Generator(f, maxheaderlen=0).flatten(msg) + + def _ensure_relative(self, path): + # copied from dir_util, deleted + drive, path = os.path.splitdrive(path) + if path[0:1] == os.sep: + path = drive + path[1:] + return path + + def _pkginfo_to_metadata(self, egg_info_path, pkginfo_path): + return metadata.pkginfo_to_metadata(egg_info_path, pkginfo_path) + + def license_file(self): + """Return license filename from a license-file key in setup.cfg, or None.""" + metadata = self.distribution.get_option_dict('metadata') + if not 'license_file' in metadata: + return None + return metadata['license_file'][1] + + def setupcfg_requirements(self): + """Generate requirements from setup.cfg as + ('Requires-Dist', 'requirement; qualifier') tuples. 
From a metadata + section in setup.cfg: + + [metadata] + provides-extra = extra1 + extra2 + requires-dist = requirement; qualifier + another; qualifier2 + unqualified + + Yields + + ('Provides-Extra', 'extra1'), + ('Provides-Extra', 'extra2'), + ('Requires-Dist', 'requirement; qualifier'), + ('Requires-Dist', 'another; qualifier2'), + ('Requires-Dist', 'unqualified') + """ + metadata = self.distribution.get_option_dict('metadata') + + # our .ini parser folds - to _ in key names: + for key, title in (('provides_extra', 'Provides-Extra'), + ('requires_dist', 'Requires-Dist')): + if not key in metadata: + continue + field = metadata[key] + for line in field[1].splitlines(): + line = line.strip() + if not line: + continue + yield (title, line) + + def add_requirements(self, metadata_path): + """Add additional requirements from setup.cfg to file metadata_path""" + additional = list(self.setupcfg_requirements()) + if not additional: return + pkg_info = read_pkg_info(metadata_path) + if 'Provides-Extra' in pkg_info or 'Requires-Dist' in pkg_info: + warnings.warn('setup.cfg requirements overwrite values from setup.py') + del pkg_info['Provides-Extra'] + del pkg_info['Requires-Dist'] + for k, v in additional: + pkg_info[k] = v + write_pkg_info(metadata_path, pkg_info) + + def egg2dist(self, egginfo_path, distinfo_path): + """Convert an .egg-info directory into a .dist-info directory""" + def adios(p): + """Appropriately delete directory, file or link.""" + if os.path.exists(p) and not os.path.islink(p) and os.path.isdir(p): + shutil.rmtree(p) + elif os.path.exists(p): + os.unlink(p) + + adios(distinfo_path) + + if not os.path.exists(egginfo_path): + # There is no egg-info. This is probably because the egg-info + # file/directory is not named matching the distribution name used + # to name the archive file. Check for this case and report + # accordingly. 
+ import glob + pat = os.path.join(os.path.dirname(egginfo_path), '*.egg-info') + possible = glob.glob(pat) + err = "Egg metadata expected at %s but not found" % (egginfo_path,) + if possible: + alt = os.path.basename(possible[0]) + err += " (%s found - possible misnamed archive file?)" % (alt,) + + raise ValueError(err) + + if os.path.isfile(egginfo_path): + # .egg-info is a single file + pkginfo_path = egginfo_path + pkg_info = self._pkginfo_to_metadata(egginfo_path, egginfo_path) + os.mkdir(distinfo_path) + else: + # .egg-info is a directory + pkginfo_path = os.path.join(egginfo_path, 'PKG-INFO') + pkg_info = self._pkginfo_to_metadata(egginfo_path, pkginfo_path) + + # ignore common egg metadata that is useless to wheel + shutil.copytree(egginfo_path, distinfo_path, + ignore=lambda x, y: set(('PKG-INFO', + 'requires.txt', + 'SOURCES.txt', + 'not-zip-safe',))) + + # delete dependency_links if it is only whitespace + dependency_links_path = os.path.join(distinfo_path, 'dependency_links.txt') + with open(dependency_links_path, 'r') as dependency_links_file: + dependency_links = dependency_links_file.read().strip() + if not dependency_links: + adios(dependency_links_path) + + write_pkg_info(os.path.join(distinfo_path, 'METADATA'), pkg_info) + + # XXX deprecated. Still useful for current distribute/setuptools. + metadata_path = os.path.join(distinfo_path, 'METADATA') + self.add_requirements(metadata_path) + + # XXX intentionally a different path than the PEP. 
+ metadata_json_path = os.path.join(distinfo_path, 'metadata.json') + pymeta = pkginfo_to_dict(metadata_path, + distribution=self.distribution) + + if 'description' in pymeta: + description_filename = 'DESCRIPTION.rst' + description_text = pymeta.pop('description') + description_path = os.path.join(distinfo_path, + description_filename) + with open(description_path, "wb") as description_file: + description_file.write(description_text.encode('utf-8')) + pymeta['extensions']['python.details']['document_names']['description'] = description_filename + + # XXX heuristically copy any LICENSE/LICENSE.txt? + license = self.license_file() + if license: + license_filename = 'LICENSE.txt' + shutil.copy(license, os.path.join(self.distinfo_dir, license_filename)) + pymeta['extensions']['python.details']['document_names']['license'] = license_filename + + with open(metadata_json_path, "w") as metadata_json: + json.dump(pymeta, metadata_json, sort_keys=True) + + adios(egginfo_path) + + def write_record(self, bdist_dir, distinfo_dir): + from wheel.util import urlsafe_b64encode + + record_path = os.path.join(distinfo_dir, 'RECORD') + record_relpath = os.path.relpath(record_path, bdist_dir) + + def walk(): + for dir, dirs, files in os.walk(bdist_dir): + dirs.sort() + for f in sorted(files): + yield os.path.join(dir, f) + + def skip(path): + """Wheel hashes every possible file.""" + return (path == record_relpath) + + with open_for_csv(record_path, 'w+') as record_file: + writer = csv.writer(record_file) + for path in walk(): + relpath = os.path.relpath(path, bdist_dir) + if skip(relpath): + hash = '' + size = '' + else: + with open(path, 'rb') as f: + data = f.read() + digest = hashlib.sha256(data).digest() + hash = 'sha256=' + native(urlsafe_b64encode(digest)) + size = len(data) + record_path = os.path.relpath( + path, bdist_dir).replace(os.path.sep, '/') + writer.writerow((record_path, hash, size)) + + +#------------------------------------------------------------------------- +# 
Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +#-------------------------------------------------------------------------- + +from distutils import log as logger +import os.path + +#from wheel.bdist_wheel import bdist_wheel +class azure_bdist_wheel(bdist_wheel): + + description = "Create an Azure wheel distribution" + + user_options = bdist_wheel.user_options + \ + [('azure-namespace-package=', None, + "Name of the deepest nspkg used")] + + def initialize_options(self): + bdist_wheel.initialize_options(self) + self.azure_namespace_package = None + + def finalize_options(self): + bdist_wheel.finalize_options(self) + if self.azure_namespace_package and not self.azure_namespace_package.endswith("-nspkg"): + raise ValueError("azure_namespace_package must finish by -nspkg") + + def run(self): + if not self.distribution.install_requires: + self.distribution.install_requires = [] + self.distribution.install_requires.append( + "{}>=2.0.0".format(self.azure_namespace_package.replace('_', '-'))) + bdist_wheel.run(self) + + def write_record(self, bdist_dir, distinfo_dir): + if self.azure_namespace_package: + # Split and remove last part, assuming it's "nspkg" + subparts = self.azure_namespace_package.split('-')[0:-1] + folder_with_init = [os.path.join(*subparts[0:i+1]) for i in range(len(subparts))] + for azure_sub_package in folder_with_init: + init_file = os.path.join(bdist_dir, azure_sub_package, '__init__.py') + if os.path.isfile(init_file): + logger.info("manually remove {} while building the wheel".format(init_file)) + os.remove(init_file) + else: + raise ValueError("Unable to find {}. 
Are you sure of your namespace package?".format(init_file)) + bdist_wheel.write_record(self, bdist_dir, distinfo_dir) +cmdclass = { + 'bdist_wheel': azure_bdist_wheel, +} diff --git a/src/command_modules/azure-cli-sf/setup.cfg b/src/command_modules/azure-cli-sf/setup.cfg new file mode 100644 index 00000000000..3326c62a76e --- /dev/null +++ b/src/command_modules/azure-cli-sf/setup.cfg @@ -0,0 +1,3 @@ +[bdist_wheel] +universal=1 +azure-namespace-package=azure-cli-command_modules-nspkg diff --git a/src/command_modules/azure-cli-sf/setup.py b/src/command_modules/azure-cli-sf/setup.py new file mode 100644 index 00000000000..4527e405927 --- /dev/null +++ b/src/command_modules/azure-cli-sf/setup.py @@ -0,0 +1,65 @@ +#!/usr/bin/env python + +# ----------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# ----------------------------------------------------------------------------- + +from codecs import open +from setuptools import setup +try: + from azure_bdist_wheel import cmdclass +except ImportError: + from distutils import log as logger + logger.warn("Wheel is not available, disabling bdist_wheel hook") + cmdclass = {} + +VERSION = "1.0.0+dev" + +# The full list of classifiers is available at +# https://pypi.python.org/pypi?%3Aaction=list_classifiers +CLASSIFIERS = [ + 'Development Status :: 4 - Beta', + 'Intended Audience :: Developers', + 'Intended Audience :: System Administrators', + 'Programming Language :: Python', + 'Programming Language :: Python :: 2', + 'Programming Language :: Python :: 2.7', + 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3.4', + 'Programming Language :: Python :: 3.5', + 'Programming Language :: Python :: 3.6', + 'License :: OSI Approved :: MIT License', +] + +DEPENDENCIES = [ + 'azure-servicefabric==5.6.130', + 'azure-cli-core', + 
'adal==0.4.3' +] + +with open('README.rst', 'r', encoding='utf-8') as f: + README = f.read() +with open('HISTORY.rst', 'r', encoding='utf-8') as f: + HISTORY = f.read() + +setup( + name='azure-cli-sf', + version=VERSION, + description='Microsoft Azure Service Fabric Client Command-Line Tools', + long_description=README + '\n\n' + HISTORY, + license='MIT', + author='Microsoft Corporation', + author_email='azpycli@microsoft.com', + url='https://github.com/Azure/azure-cli', + classifiers=CLASSIFIERS, + packages=[ + 'azure', + 'azure.cli', + 'azure.cli.command_modules', + 'azure.cli.command_modules.sf' + ], + install_requires=DEPENDENCIES, + cmdclass=cmdclass +) diff --git a/src/command_modules/azure-cli-sf/tests/__init__.py b/src/command_modules/azure-cli-sf/tests/__init__.py new file mode 100644 index 00000000000..34913fb394d --- /dev/null +++ b/src/command_modules/azure-cli-sf/tests/__init__.py @@ -0,0 +1,4 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------------------------- diff --git a/src/command_modules/azure-cli-sf/tests/manual_scenario_sf_commands.py b/src/command_modules/azure-cli-sf/tests/manual_scenario_sf_commands.py new file mode 100644 index 00000000000..816930e4d55 --- /dev/null +++ b/src/command_modules/azure-cli-sf/tests/manual_scenario_sf_commands.py @@ -0,0 +1,173 @@ +# ----------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# ----------------------------------------------------------------------------- + +import os + +from azure.cli.testsdk import ( + ScenarioTest, JMESPathCheck, JMESPathCheckExists, NoneCheck +) +from azure.cli.testsdk.base import execute +from azure.cli.testsdk.preparers import AbstractPreparer +import azure.cli.core.azlogging as azlogging + +logger = azlogging.get_az_logger(__name__) + + +class SelectNoSecClusterPreparer(AbstractPreparer): + def __init__(self, parameter_name="endpoint", + endpoint="http://127.0.0.1:10550", + env_variable_name="AZURE_CLI_SF_ENDPOINT"): + # Name randomization unnecessary + super(SelectNoSecClusterPreparer, self).__init__("test", 10) + self.endpoint = endpoint + self.parameter_name = parameter_name + self.env_variable_name = env_variable_name + + def create_resource(self, _, **kwargs): + # Omit name here since there is no randomization required + endpoint = os.environ.get(self.env_variable_name, self.endpoint) + logger.debug("endpoint %s", endpoint) + template = "az sf cluster select --endpoint {}" + execute(template.format(endpoint)) + return {self.parameter_name: endpoint} + + +class ServiceFabricTests(ScenarioTest): + + # Application tests + + @SelectNoSecClusterPreparer() + def sf_test_good_system_app_health(self): + self.cmd( + "az sf application health --application-id \"System\"", + checks=[ + JMESPathCheck("name", "fabric:/System"), + JMESPathCheckExists("aggregatedHealthState") + ] + ) + + @SelectNoSecClusterPreparer() + def sf_test_good_none_app_type(self): + self.cmd("az sf application type", checks=[NoneCheck()]) + + @SelectNoSecClusterPreparer() + def sf_test_good_none_app_list(self): + self.cmd("az sf application list", checks=[ + JMESPathCheck("items", []) + ]) + + # Service tests + + @SelectNoSecClusterPreparer() + def sf_test_good_system_service_list(self): + self.cmd("az sf service list --application-id \"System\"", checks=[ + JMESPathCheck( + "items[? 
id == `\"System/ClusterManagerService\"`].name | [0]", + "fabric:/System/ClusterManagerService"), + ]) + + @SelectNoSecClusterPreparer() + def sf_test_good_system_service_app_name(self): + self.cmd("az sf service application-name --service-id " + "System/ClusterManagerService", checks=[ + JMESPathCheck("id", "System"), + JMESPathCheck("name", "fabric:/System")]) + + @SelectNoSecClusterPreparer() + def sf_test_good_system_service_health(self): + self.cmd("az sf service health --service-id " + "System/ClusterManagerService", checks=[ + JMESPathCheck("name", + "fabric:/System/ClusterManagerService"), + JMESPathCheckExists("partitionHealthStates"), + JMESPathCheckExists("healthEvents")]) + + @SelectNoSecClusterPreparer() + def sf_test_good_resolve_system_service(self): + self.cmd("az sf service resolve --service-id " + "System/FailoverManagerService") + + # Partition tests + + @SelectNoSecClusterPreparer() + def sf_test_good_system_partition_info(self): + self.cmd( + "az sf partition info --partition-id " + "00000000-0000-0000-0000-000000000001", + checks=[ + JMESPathCheckExists("ServiceKind"), + JMESPathCheck("partitionInformation.id", + "00000000-0000-0000-0000-000000000001") + ] + ) + + @SelectNoSecClusterPreparer() + def sf_test_good_system_partition_service_name(self): + self.cmd( + "az sf partition service-name --partition-id " + "00000000-0000-0000-0000-000000000001", + checks=[ + JMESPathCheck("name", + "fabric:/System/FailoverManagerService"), + JMESPathCheck("id", "System/FailoverManagerService") + ] + ) + + @SelectNoSecClusterPreparer() + def sf_test_good_system_partition_health(self): + self.cmd( + "az sf partition health --partition-id " + "00000000-0000-0000-0000-000000000001", + checks=[ + JMESPathCheck("partitionId", + "00000000-0000-0000-0000-000000000001"), + JMESPathCheckExists("replicaHealthStates"), + JMESPathCheckExists("healthEvents") + ] + ) + + # Node tests + + @SelectNoSecClusterPreparer() + def sf_test_good_node_list(self): + self.cmd("az sf 
node list", checks=[ + JMESPathCheckExists("items[0].id.id"), + JMESPathCheckExists("items[0].name") + ]) + + # Cluster tests + + @SelectNoSecClusterPreparer() + def sf_test_good_cluster_manifest(self): + self.cmd("az sf cluster manifest", checks=[ + JMESPathCheckExists("manifest") + ]) + + @SelectNoSecClusterPreparer() + def sf_test_good_cluster_code_version(self): + self.cmd("az sf cluster code-version", checks=[ + JMESPathCheckExists("[0].codeVersion") + ]) + + @SelectNoSecClusterPreparer() + def sf_test_good_cluster_config_version(self): + self.cmd("az sf cluster config-version", checks=[ + JMESPathCheckExists("[0].configVersion") + ]) + + @SelectNoSecClusterPreparer() + def sf_test_good_cluster_health(self): + self.cmd("az sf cluster health", checks=[ + JMESPathCheckExists("aggregatedHealthState"), + JMESPathCheck("applicationHealthStates[0].name", "fabric:/System"), + JMESPathCheckExists("nodeHealthStates") + ]) + + # Compose tests + + @SelectNoSecClusterPreparer() + def sf_test_good_none_compose_list(self): + self.cmd("az sf compose list", checks=[NoneCheck()]) diff --git a/src/command_modules/azure-cli-sf/tests/manual_sf_commands.py b/src/command_modules/azure-cli-sf/tests/manual_sf_commands.py new file mode 100644 index 00000000000..66fe51b49a0 --- /dev/null +++ b/src/command_modules/azure-cli-sf/tests/manual_sf_commands.py @@ -0,0 +1,113 @@ +# ----------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# ----------------------------------------------------------------------------- + +import os + +from azure.cli.testsdk import ( + ScenarioTest, JMESPathCheck, JMESPathCheckExists +) +from azure.cli.testsdk.base import execute +from azure.cli.testsdk.preparers import AbstractPreparer +import azure.cli.core.azlogging as azlogging + +logger = azlogging.get_az_logger(__name__) + + +class SelectNoSecClusterPreparer(AbstractPreparer): + def __init__(self, parameter_name="endpoint", + endpoint="http://127.0.0.1:10550", + env_variable_name="AZURE_CLI_SF_ENDPOINT"): + # Name randomization unnecessary + super(SelectNoSecClusterPreparer, self).__init__("test", 10) + self.endpoint = endpoint + self.parameter_name = parameter_name + self.env_variable_name = env_variable_name + + def create_resource(self, _, **kwargs): + # Omit name here since there is no randomization required + endpoint = os.environ.get(self.env_variable_name, self.endpoint) + logger.debug("endpoint %s", endpoint) + template = "az sf cluster select --endpoint {}" + execute(template.format(endpoint)) + return {self.parameter_name: endpoint} + + +class ServiceFabricTests(ScenarioTest): + + package_path = "/media/share/EchoServerApplication3" + package_name = "EchoServerApplication3" + application_type_name = "EchoServerApp" + application_type_version = "3.0" + application_name = "fabric:/app1" + application_id = "app1" + + # Application tests + + @SelectNoSecClusterPreparer() + def sf_test_application_lifecycle(self): + self.cmd("az sf application upload --path {}".format( + self.package_path + )) + + self.cmd( + "az sf application provision " + "--application-type-build-path {}".format( + self.package_name + ) + ) + + self.cmd( + "az sf application type", + checks=[ + JMESPathCheck( + "items[0].name", + self.application_type_name + ), + JMESPathCheck( + "items[0].version", + self.application_type_version + ) + ] + ) + + self.cmd( + "az sf application create " + "--app-type {} --version {} --name {}".format( + 
self.application_type_name, + self.application_type_version, + self.application_name + ) + ) + + self.cmd( + "az sf application list", + checks=[ + JMESPathCheck("items[0].id", self.application_id) + ] + ) + + self.cmd( + "az sf application health " + "--application-id {}".format(self.application_id), + checks=[ + JMESPathCheck("name", self.application_name), + JMESPathCheckExists("aggregatedHealthState") + ] + ) + + self.cmd( + "az sf application delete --application-id {}".format( + self.application_id + ) + ) + + self.cmd( + "az sf application unprovision " + "--application-type-name {} " + "--application-type-version {}".format( + self.application_type_name, self.application_type_version + ) + )