From 883e96db8e45b56823aa312b63263c899eae34e4 Mon Sep 17 00:00:00 2001 From: Travis Prescott Date: Mon, 18 Dec 2017 10:36:07 -0800 Subject: [PATCH] [BatchAI] BatchAI module Knack conversion (#5120) * BatchAi work * BatchAI module conversion. * Fix rebase issues with Backup. --- .flake8 | 2 - scripts/ci/build.sh | 2 +- scripts/ci/test_static.sh | 4 +- .../cli/command_modules/backup/_params.py | 4 +- .../cli/command_modules/backup/custom.py | 16 +- .../cli/command_modules/batchai/__init__.py | 26 +- .../batchai/_client_factory.py | 16 +- .../cli/command_modules/batchai/_params.py | 285 ++++++------------ .../cli/command_modules/batchai/commands.py | 65 ++-- .../cli/command_modules/batchai/custom.py | 69 +++-- .../batchai/tests/test_batchai_custom.py | 86 +++--- .../test_mysql_proxy_resources_mgmt.yaml | 56 ++++ .../test_postgres_proxy_resources_mgmt.yaml | 56 ++++ 13 files changed, 374 insertions(+), 313 deletions(-) create mode 100644 src/command_modules/azure-cli-rdbms/azure/cli/command_modules/rdbms/tests/recordings/latest/test_mysql_proxy_resources_mgmt.yaml create mode 100644 src/command_modules/azure-cli-rdbms/azure/cli/command_modules/rdbms/tests/recordings/latest/test_postgres_proxy_resources_mgmt.yaml diff --git a/.flake8 b/.flake8 index 10e6fe8740b..b9e80c7a24f 100644 --- a/.flake8 +++ b/.flake8 @@ -14,7 +14,5 @@ exclude = scripts doc build_scripts - src/command_modules/azure-cli-backup - src/command_modules/azure-cli-batchai src/command_modules/azure-cli-cosmosdb src/command_modules/azure-cli-monitor diff --git a/scripts/ci/build.sh b/scripts/ci/build.sh index 8f350cb9957..11d6025ef7a 100755 --- a/scripts/ci/build.sh +++ b/scripts/ci/build.sh @@ -107,7 +107,7 @@ EOL for name in $(ls src/command_modules | grep azure-cli-); do if [ "$name" == "azure-cli-appservice" ]; then continue; fi if [ "$name" == "azure-cli-backup" ]; then continue; fi - if [ "$name" == "azure-cli-batchai" ]; then continue; fi + if [ "$name" == "azure-cli-batchai" ]; then continue; fi 
if [ "$name" == "azure-cli-cosmosdb" ]; then continue; fi if [ "$name" == "azure-cli-dla" ]; then continue; fi if [ "$name" == "azure-cli-dls" ]; then continue; fi diff --git a/scripts/ci/test_static.sh b/scripts/ci/test_static.sh index f20a585e3e3..470799de992 100755 --- a/scripts/ci/test_static.sh +++ b/scripts/ci/test_static.sh @@ -38,9 +38,9 @@ run_style azure.cli.command_modules.acr run_style azure.cli.command_modules.acs run_style azure.cli.command_modules.advisor #run_style azure.cli.command_modules.appservice -#run_style azure.cli.command_modules.backup +run_style azure.cli.command_modules.backup run_style azure.cli.command_modules.batch -#run_style azure.cli.command_modules.batchai +run_style azure.cli.command_modules.batchai run_style azure.cli.command_modules.billing run_style azure.cli.command_modules.cdn run_style azure.cli.command_modules.cloud diff --git a/src/command_modules/azure-cli-backup/azure/cli/command_modules/backup/_params.py b/src/command_modules/azure-cli-backup/azure/cli/command_modules/backup/_params.py index 18f1467955b..8356e55d5be 100644 --- a/src/command_modules/azure-cli-backup/azure/cli/command_modules/backup/_params.py +++ b/src/command_modules/azure-cli-backup/azure/cli/command_modules/backup/_params.py @@ -6,13 +6,15 @@ # pylint: disable=line-too-long from argcomplete.completers import FilesCompleter + +from knack.arguments import CLIArgumentType + from azure.cli.core.commands.parameters import \ (get_resource_name_completion_list, file_type, get_location_type, get_three_state_flag, get_enum_type) from azure.cli.command_modules.backup._validators import \ (datetime_type) -from knack.arguments import CLIArgumentType # ARGUMENT DEFINITIONS diff --git a/src/command_modules/azure-cli-backup/azure/cli/command_modules/backup/custom.py b/src/command_modules/azure-cli-backup/azure/cli/command_modules/backup/custom.py index 2017838f8d9..cff593ed32c 100644 --- 
a/src/command_modules/azure-cli-backup/azure/cli/command_modules/backup/custom.py +++ b/src/command_modules/azure-cli-backup/azure/cli/command_modules/backup/custom.py @@ -9,7 +9,11 @@ import os from datetime import datetime, timedelta from six.moves.urllib.parse import urlparse # pylint: disable=import-error + +from knack.log import get_logger + from msrest.paging import Paged +from msrestazure.tools import parse_resource_id, is_valid_resource_id from azure.mgmt.recoveryservices.models import Vault, VaultProperties, Sku, SkuName, BackupStorageConfig from azure.mgmt.recoveryservicesbackup.models import ProtectedItemResource, AzureIaaSComputeVMProtectedItem, \ @@ -23,10 +27,6 @@ backup_protectable_items_cf, resources_cf, backup_operation_statuses_cf, job_details_cf, \ protection_container_refresh_operation_results_cf, backup_protection_containers_cf -from knack.log import get_logger - -from msrestazure.tools import parse_resource_id, is_valid_resource_id - logger = get_logger(__name__) fabric_name = "Azure" @@ -455,6 +455,7 @@ def _get_storage_account_id(cli_ctx, storage_account_name, storage_account_rg): return storage_account.id +# pylint: disable=inconsistent-return-statements def _get_disable_protection_request(item): if item.properties.workload_type == WorkloadType.vm.value: vm_item_properties = _get_vm_item_properties_from_vm_id(item.properties.virtual_machine_id) @@ -465,6 +466,7 @@ def _get_disable_protection_request(item): return vm_item +# pylint: disable=inconsistent-return-statements def _get_vm_item_properties_from_vm_type(vm_type): if vm_type == 'Microsoft.Compute/virtualMachines': return AzureIaaSComputeVMProtectedItem() @@ -472,6 +474,7 @@ def _get_vm_item_properties_from_vm_type(vm_type): return AzureIaaSClassicComputeVMProtectedItem() +# pylint: disable=inconsistent-return-statements def _get_vm_item_properties_from_vm_id(vm_id): if 'Microsoft.Compute/virtualMachines' in vm_id: return AzureIaaSComputeVMProtectedItem() @@ -572,9 +575,9 @@ def 
_get_resource_name_and_rg(resource_group_name, name_or_id): resource_group = resource_group_name return name, resource_group -# Tracking Utilities - +# Tracking Utilities +# pylint: disable=inconsistent-return-statements def _track_backup_ilr(cli_ctx, result, vault_name, resource_group): operation_status = _track_backup_operation(cli_ctx, resource_group, result, vault_name) @@ -583,6 +586,7 @@ def _track_backup_ilr(cli_ctx, result, vault_name, resource_group): return recovery_target.client_scripts +# pylint: disable=inconsistent-return-statements def _track_backup_job(cli_ctx, result, vault_name, resource_group): job_details_client = job_details_cf(None) diff --git a/src/command_modules/azure-cli-batchai/azure/cli/command_modules/batchai/__init__.py b/src/command_modules/azure-cli-batchai/azure/cli/command_modules/batchai/__init__.py index 319344eaf03..7b8951b4393 100644 --- a/src/command_modules/azure-cli-batchai/azure/cli/command_modules/batchai/__init__.py +++ b/src/command_modules/azure-cli-batchai/azure/cli/command_modules/batchai/__init__.py @@ -3,12 +3,28 @@ # Licensed under the MIT License. See License.txt in the project root for license information. 
# -------------------------------------------------------------------------------------------- -import azure.cli.command_modules.batchai._help # pylint: disable=unused-import +from azure.cli.core import AzCommandsLoader +from azure.cli.command_modules.batchai._help import helps # pylint: disable=unused-import -def load_params(_): - import azure.cli.command_modules.batchai._params # pylint: disable=redefined-outer-name, unused-variable +class BatchAiCommandsLoader(AzCommandsLoader): -def load_commands(): - import azure.cli.command_modules.batchai.commands # pylint: disable=redefined-outer-name, unused-variable + def __init__(self, cli_ctx=None): + from azure.cli.core.commands import CliCommandType + batchai_custom = CliCommandType(operations_tmpl='azure.cli.command_modules.batchai.custom#{}') + super(BatchAiCommandsLoader, self).__init__(cli_ctx=cli_ctx, + min_profile='2017-03-10-profile', + custom_command_type=batchai_custom) + + def load_command_table(self, args): + from azure.cli.command_modules.batchai.commands import load_command_table + load_command_table(self, args) + return self.command_table + + def load_arguments(self, command): + from azure.cli.command_modules.batchai._params import load_arguments + load_arguments(self, command) + + +COMMAND_LOADER_CLS = BatchAiCommandsLoader diff --git a/src/command_modules/azure-cli-batchai/azure/cli/command_modules/batchai/_client_factory.py b/src/command_modules/azure-cli-batchai/azure/cli/command_modules/batchai/_client_factory.py index 1352aeeac4d..26d30adde64 100644 --- a/src/command_modules/azure-cli-batchai/azure/cli/command_modules/batchai/_client_factory.py +++ b/src/command_modules/azure-cli-batchai/azure/cli/command_modules/batchai/_client_factory.py @@ -4,19 +4,19 @@ # -------------------------------------------------------------------------------------------- -def batchai_client_factory(_=None): +def batchai_client_factory(cli_ctx, _=None): from azure.mgmt.batchai import BatchAIManagementClient from 
azure.cli.core.commands.client_factory import get_mgmt_service_client - return get_mgmt_service_client(BatchAIManagementClient) + return get_mgmt_service_client(cli_ctx, BatchAIManagementClient) -def cluster_client_factory(_): - return batchai_client_factory().clusters +def cluster_client_factory(cli_ctx, _): + return batchai_client_factory(cli_ctx).clusters -def job_client_factory(_): - return batchai_client_factory().jobs +def job_client_factory(cli_ctx, _): + return batchai_client_factory(cli_ctx).jobs -def file_server_client_factory(_): - return batchai_client_factory().file_servers +def file_server_client_factory(cli_ctx, _): + return batchai_client_factory(cli_ctx).file_servers diff --git a/src/command_modules/azure-cli-batchai/azure/cli/command_modules/batchai/_params.py b/src/command_modules/azure-cli-batchai/azure/cli/command_modules/batchai/_params.py index 928cebb18a3..fe4bd0aa263 100644 --- a/src/command_modules/azure-cli-batchai/azure/cli/command_modules/batchai/_params.py +++ b/src/command_modules/azure-cli-batchai/azure/cli/command_modules/batchai/_params.py @@ -6,17 +6,18 @@ from azure.cli.command_modules.vm._actions import get_vm_sizes from azure.cli.core.commands.parameters import ( - ignore_type, location_type, resource_group_name_type, enum_choice_list, get_one_of_subscription_locations) -from azure.cli.core.commands import ParametersContext + get_enum_type, get_one_of_subscription_locations, resource_group_name_type) +from azure.cli.core.decorators import Completer from azure.mgmt.storage.models import SkuName -def get_vm_size_completion_list(prefix, action, parsed_args, **kwargs): # pylint: disable=unused-argument +@Completer +def get_vm_size_completion_list(cmd, prefix, namespace, **kwargs): # pylint: disable=unused-argument try: - location = parsed_args.location + location = namespace.location except AttributeError: - location = get_one_of_subscription_locations() - result = get_vm_sizes(location) + location = 
get_one_of_subscription_locations(cmd.cli_ctx) + result = get_vm_sizes(cmd.cli_ctx, location) return [r.name for r in result] @@ -25,186 +26,92 @@ class SupportedImages(Enum): # pylint: disable=too-few-public-methods ubuntu_dsvm = "UbuntuDSVM" -with ParametersContext(command='batchai cluster create') as c: - c.register_alias('resource_group', options_list=('--resource-group', '-g'), arg_type=resource_group_name_type) - c.argument('location', options_list=('--location', '-l'), arg_type=location_type, - help='Location. You can configure the default location using `az configure --defaults ' - 'location=` or specify it in the cluster configuration file.') - c.register_alias('cluster_name', options_list=('--name', '-n'), help='Name of the cluster.') - c.argument('user_name', options_list=('--user-name', '-u'), - help='Name of the admin user to be created on every compute node.', arg_group='Admin Account') - c.argument('ssh_key', options_list=('--ssh-key', '-k'), - help='SSH public key value or path.', arg_group='Admin Account') - c.argument('password', options_list=('--password', '-p'), - help='Password.', arg_group='Admin Account') - c.argument('image', options_list=('--image', '-i'), arg_group='Nodes', - help='Operation system.', **enum_choice_list(SupportedImages)) - c.argument('vm_size', options_list=('--vm-size', '-s'), - help='VM size (e.g. Standard_NC6 for 1 GPU node)', completer=get_vm_size_completion_list, - arg_group='Nodes') - c.argument('min_nodes', options_list=('--min',), - help='Min nodes count.', type=int, arg_group='Nodes') - c.argument('max_nodes', options_list=('--max',), - help='Max nodes count.', type=int, arg_group='Nodes') - c.argument('nfs_name', options_list=('--nfs',), - help='Name of a file server to mount. 
If you need to mount more than one file server, configure them in ' - 'a configuration file and use --config option.', - arg_group='File Server Mount') - c.argument('nfs_resource_group', options_list=('--nfs-resource-group',), - help='Resource group in which file server is created. Can be omitted if the file server and the cluster ' - 'belong to the same resource group', - arg_group='File Server Mount') - c.argument('nfs_mount_path', options_list=('--nfs-mount-path',), - help='Relative mount path for nfs. The nfs will be available at ' - '$AZ_BATCHAI_MOUNT_ROOT/ folder.', - arg_group='File Server Mount') - c.argument('account_name', options_list=('--storage-account-name',), - help='Storage account name for Azure File Shares and/or Azure Storage Containers mounting. Related ' - 'environment variable: AZURE_BATCHAI_STORAGE_ACCOUNT. Must be used in conjunction with ' - '--storage-account-key. If the key is not provided, the command will try to query the storage ' - 'account key using the authenticated Azure account.', - arg_group='Storage Account') - c.argument('account_key', options_list=('--storage-account-key',), - help='Storage account key. Must be used in conjunction with --storage-account-name. Environment ' - 'variable: AZURE_BATCHAI_STORAGE_KEY.', - arg_group='Storage Account') - c.argument('azure_file_share', options_list=('--afs-name',), - help='Name of the azure file share to mount. Must be used in conjunction with --storage-account-name ' - 'and --storage-account-key arguments or AZURE_BATCHAI_STORAGE_ACCOUNT and ' - 'AZURE_BATCHAI_STORAGE_KEY environment variables. If you need to mount more than one Azure File ' - 'share, configure them in a configuration file and use --config option.', - arg_group='Azure File Share Mount') - c.argument('afs_mount_path', options_list=('--afs-mount-path',), - help='Relative mount path for Azure File share. 
The file share will be available at ' - '$AZ_BATCHAI_MOUNT_ROOT/ folder.', - arg_group='Azure File Share Mount') - c.argument('container_name', options_list=('--container-name',), - help='Name of Azure Storage container to mount. Must be used in conjunction with --storage-account-name ' - 'and --storage-account-key arguments or AZURE_BATCHAI_STORAGE_ACCOUNT and ' - 'AZURE_BATCHAI_STORAGE_KEY environment variables. If you need to mount more than one Azure Storage ' - 'container, configure them in a configuration file and use --config option.', - arg_group='Azure Storage Container Mount') - c.argument('container_mount_path', options_list=('--container-mount-path',), - help='Relative mount path for Azure Storage container. The container will be available at ' - '$AZ_BATCHAI_MOUNT_ROOT/ folder.', - arg_group='Azure Storage Container Mount') - c.argument('json_file', options_list=('--config', '-c'), - help='A path to a json file containing cluster create parameters ' - '(json representation of azure.mgmt.batchai.models.ClusterCreateParameters).', - arg_group='Advanced') - -with ParametersContext(command='batchai cluster resize') as c: - c.register_alias('resource_group', options_list=('--resource-group', '-g'), arg_type=resource_group_name_type) - c.register_alias('cluster_name', options_list=('--name', '-n'), help='Name of the cluster.') - c.argument('target', options_list=('--target', '-t'), help='Target number of compute nodes.') - -with ParametersContext(command='batchai cluster auto-scale') as c: - c.register_alias('resource_group', options_list=('--resource-group', '-g'), arg_type=resource_group_name_type) - c.register_alias('cluster_name', options_list=('--name', '-n'), help='Name of the cluster.') - c.argument('min_nodes', options_list=('--min',), help='Minimum number of nodes.') - c.argument('max_nodes', options_list=('--max',), help='Maximum number of nodes.') - -with ParametersContext(command='batchai cluster delete') as c: - 
c.register_alias('resource_group', options_list=('--resource-group', '-g'), arg_type=resource_group_name_type) - c.register_alias('cluster_name', options_list=('--name', '-n'), help='Name of the cluster.') - -with ParametersContext(command='batchai cluster show') as c: - c.register_alias('resource_group', options_list=('--resource-group', '-g'), arg_type=resource_group_name_type) - c.register_alias('cluster_name', options_list=('--name', '-n'), help='Name of the cluster.') - -with ParametersContext(command='batchai cluster list') as c: - c.argument('resource_group', options_list=('--resource-group', '-g'), arg_type=resource_group_name_type) - # Not implemented yet - c.register_alias('clusters_list_options', options_list=('--clusters-list-options',), arg_type=ignore_type) - -with ParametersContext(command='batchai cluster list-nodes') as c: - c.register_alias('resource_group', options_list=('--resource-group', '-g'), arg_type=resource_group_name_type) - c.register_alias('cluster_name', options_list=('--name', '-n'), help='Name of the cluster.') - -with ParametersContext(command='batchai job create') as c: - c.register_alias('resource_group', options_list=('--resource-group', '-g'), arg_type=resource_group_name_type) - c.argument('location', options_list=('--location', '-l'), arg_type=location_type, - help='Location. 
You can configure the default location using `az configure --defaults ' - 'location=` or specify it in the job configuration file.') - c.register_alias('job_name', options_list=('--name', '-n'), help='Name of the job.') - c.argument('json_file', options_list=('--config', '-c'), - help='A path to a json file containing job create parameters ' - '(json representation of azure.mgmt.batchai.models.JobCreateParameters).') - c.argument('cluster_name', options_list=('--cluster-name', '-r'), - help='If specified, the job will run on the given cluster instead of the ' - 'one configured in the json file.') - c.argument('cluster_resource_group', options_list=('--cluster-resource-group',), - help='Specifies a resource group for the cluster given with --cluster-name parameter. ' - 'If omitted, --resource-group value will be used.') - -with ParametersContext(command='batchai job terminate') as c: - c.register_alias('resource_group', options_list=('--resource-group', '-g'), arg_type=resource_group_name_type) - c.register_alias('job_name', options_list=('--name', '-n'), help='Name of the job.') - -with ParametersContext(command='batchai job delete') as c: - c.register_alias('resource_group', options_list=('--resource-group', '-g'), arg_type=resource_group_name_type) - c.register_alias('job_name', options_list=('--name', '-n'), help='Name of the job.') - -with ParametersContext(command='batchai job show') as c: - c.register_alias('resource_group', options_list=('--resource-group', '-g'), arg_type=resource_group_name_type) - c.register_alias('job_name', options_list=('--name', '-n'), help='Name of the job.') - -with ParametersContext(command='batchai job list') as c: - c.argument('resource_group', options_list=('--resource-group', '-g'), arg_type=resource_group_name_type) - # Not implemented yet - c.register_alias('jobs_list_options', options_list=('--jobs-list-options',), arg_type=ignore_type) - -with ParametersContext(command='batchai job list-nodes') as c: - 
c.register_alias('resource_group', options_list=('--resource-group', '-g'), arg_type=resource_group_name_type) - c.register_alias('job_name', options_list=('--name', '-n'), help='Name of the job.') - -with ParametersContext(command='batchai job list-files') as c: - c.register_alias('resource_group', options_list=('--resource-group', '-g'), arg_type=resource_group_name_type) - c.register_alias('job_name', options_list=('--name', '-n'), help='Name of the job.') - c.register_alias('directory', options_list=('--output-directory-id', '-d'), - help='The Id of the Job output directory (as specified by "id" element in outputDirectories ' - 'collection in job create parameters). Use "stdouterr" to access job stdout and stderr ' - 'files.') - -with ParametersContext(command='batchai job stream-file') as c: - c.register_alias('resource_group', options_list=('--resource-group', '-g'), arg_type=resource_group_name_type) - c.register_alias('job_name', options_list=('--job-name', '-j'), help='Name of the job.') - c.register_alias('directory', options_list=('--output-directory-id', '-d'), - help='The Id of the Job output directory (as specified by "id" element in outputDirectories ' - 'collection in job create parameters). Use "stdouterr" to access job stdout and stderr ' - 'files.') - c.argument('file_name', options_list=('--name', '-n'), help='The name of the file to stream.') - -with ParametersContext(command='batchai file-server create') as c: - c.register_alias('resource_group', options_list=('--resource-group', '-g'), arg_type=resource_group_name_type) - c.argument('location', options_list=('--location', '-l'), arg_type=location_type, - help='Location. 
You can configure the default location using `az configure --defaults ' - 'location=` or specify it in the file server configuration file.') - c.register_alias('file_server_name', options_list=('--name', '-n'), help='Name of the file server.') - c.argument('vm_size', options_list=('--vm-size', '-s'), help='VM size.', completer=get_vm_size_completion_list) - c.argument('disk_count', help='Number of disks.', type=int, arg_group='Storage') - c.argument('disk_size', help='Disk size in Gb.', type=int, arg_group='Storage') - c.argument('storage_sku', help='The sku of storage account to persist VM.', - arg_group='Storage', **enum_choice_list(SkuName)) - c.argument('user_name', options_list=('--admin-user-name', '-u'), - help='Name of the admin user to be created on every compute node.', arg_group='Admin Account') - c.argument('ssh_key', options_list=('--ssh-key', '-k'), - help='SSH public key value or path.', arg_group='Admin Account') - c.argument('password', options_list=('--password', '-p'), help='Password.', arg_group='Admin Account') - c.argument('json_file', options_list=('--config', '-c'), - help='A path to a json file containing file server create parameters (json representation of ' - 'azure.mgmt.batchai.models.FileServerCreateParameters). 
Note, parameters given via command line ' - 'will overwrite parameters specified in the configuration file.', arg_group='Advanced') - -with ParametersContext(command='batchai file-server show') as c: - c.register_alias('resource_group', options_list=('--resource-group', '-g'), arg_type=resource_group_name_type) - c.register_alias('file_server_name', options_list=('--name', '-n'), help='Name of the file server.') - -with ParametersContext(command='batchai file-server delete') as c: - c.register_alias('resource_group', options_list=('--resource-group', '-g'), arg_type=resource_group_name_type) - c.register_alias('file_server_name', options_list=('--name', '-n'), help='Name of the file server.') - -with ParametersContext(command='batchai file-server list') as c: - c.argument('resource_group', options_list=('--resource-group', '-g'), arg_type=resource_group_name_type) - # Not implemented yet - c.register_alias('file_servers_list_options', options_list=('--file-servers-list-options',), arg_type=ignore_type) +# pylint: disable=line-too-long, too-many-statements +def load_arguments(self, _): + + with self.argument_context('batchai cluster') as c: + c.argument('resource_group', resource_group_name_type) + + with self.argument_context('batchai cluster') as c: + c.argument('cluster_name', options_list=['--name', '-n'], help='Name of the cluster.') + + with self.argument_context('batchai cluster create') as c: + c.argument('json_file', options_list=['--config', '-c'], help='A path to a json file containing cluster create parameters (json representation of azure.mgmt.batchai.models.ClusterCreateParameters).', arg_group='Advanced') + + with self.argument_context('batchai cluster create', arg_group='Admin Account') as c: + c.argument('user_name', options_list=['--user-name', '-u'], help='Name of the admin user to be created on every compute node.') + c.argument('ssh_key', options_list=['--ssh-key', '-k'], help='SSH public key value or path.') + c.argument('password', 
options_list=['--password', '-p'], help='Password.') + + with self.argument_context('batchai cluster create', arg_group='Nodes') as c: + c.argument('image', arg_type=get_enum_type(SupportedImages), options_list=['--image', '-i'], help='Operation system.') + c.argument('vm_size', options_list=['--vm-size', '-s'], help='VM size (e.g. Standard_NC6 for 1 GPU node)', completer=get_vm_size_completion_list) + c.argument('min_nodes', options_list=['--min'], help='Min nodes count.', type=int) + c.argument('max_nodes', options_list=['--max'], help='Max nodes count.', type=int) + + with self.argument_context('batchai cluster create', arg_group='File Server Mount') as c: + c.argument('nfs_name', options_list=['--nfs'], help='Name of a file server to mount. If you need to mount more than one file server, configure them in a configuration file and use --config option.') + c.argument('nfs_resource_group', options_list=['--nfs-resource-group'], help='Resource group in which file server is created. Can be omitted if the file server and the cluster belong to the same resource group') + c.argument('nfs_mount_path', options_list=['--nfs-mount-path'], help='Relative mount path for nfs. The nfs will be available at $AZ_BATCHAI_MOUNT_ROOT/ folder.') + + with self.argument_context('batchai cluster create', arg_group='Storage Account') as c: + c.argument('account_name', options_list=['--storage-account-name'], help='Storage account name for Azure File Shares and/or Azure Storage Containers mounting. Related environment variable: AZURE_BATCHAI_STORAGE_ACCOUNT. Must be used in conjunction with --storage-account-key. If the key is not provided, the command will try to query the storage account key using the authenticated Azure account.') + c.argument('account_key', options_list=['--storage-account-key'], help='Storage account key. Must be used in conjunction with --storage-account-name. 
Environment variable: AZURE_BATCHAI_STORAGE_KEY.') + + with self.argument_context('batchai cluster create', arg_group='Azure File Share Mount') as c: + c.argument('azure_file_share', options_list=['--afs-name'], help='Name of the azure file share to mount. Must be used in conjunction with --storage-account-name and --storage-account-key arguments or AZURE_BATCHAI_STORAGE_ACCOUNT and AZURE_BATCHAI_STORAGE_KEY environment variables. If you need to mount more than one Azure File share, configure them in a configuration file and use --config option.') + c.argument('afs_mount_path', options_list=['--afs-mount-path'], help='Relative mount path for Azure File share. The file share will be available at $AZ_BATCHAI_MOUNT_ROOT/ folder.') + + with self.argument_context('batchai cluster create', arg_group='Azure Storage Container Mount') as c: + c.argument('container_name', options_list=['--container-name'], help='Name of Azure Storage container to mount. Must be used in conjunction with --storage-account-name and --storage-account-key arguments or AZURE_BATCHAI_STORAGE_ACCOUNT and AZURE_BATCHAI_STORAGE_KEY environment variables. If you need to mount more than one Azure Storage container, configure them in a configuration file and use --config option.') + c.argument('container_mount_path', options_list=['--container-mount-path'], help='Relative mount path for Azure Storage container. 
The container will be available at $AZ_BATCHAI_MOUNT_ROOT/ folder.') + + with self.argument_context('batchai cluster resize') as c: + c.argument('target', options_list=['--target', '-t'], help='Target number of compute nodes.') + + with self.argument_context('batchai cluster auto-scale') as c: + c.argument('min_nodes', options_list=['--min'], help='Minimum number of nodes.') + c.argument('max_nodes', options_list=['--max'], help='Maximum number of nodes.') + + with self.argument_context('batchai cluster list') as c: + # Not implemented yet + c.ignore('clusters_list_options') + + with self.argument_context('batchai job') as c: + c.argument('job_name', options_list=['--name', '-n'], help='Name of the job.') + c.argument('cluster_name', options_list=['--cluster-name', '-r'], help='Name of the cluster.') + c.argument('directory', options_list=['--output-directory-id', '-d'], help='The Id of the Job output directory (as specified by "id" element in outputDirectories collection in job create parameters). Use "stdouterr" to access job stdout and stderr files.') + + with self.argument_context('batchai job create') as c: + c.argument('json_file', options_list=['--config', '-c'], help='A path to a json file containing job create parameters (json representation of azure.mgmt.batchai.models.JobCreateParameters).') + c.argument('cluster_name', options_list=['--cluster-name', '-r'], help='If specified, the job will run on the given cluster instead of the one configured in the json file.') + c.argument('cluster_resource_group', options_list=['--cluster-resource-group'], help='Specifies a resource group for the cluster given with --cluster-name parameter. 
If omitted, --resource-group value will be used.') + + with self.argument_context('batchai job list') as c: + # Not implemented yet + c.ignore('jobs_list_options') + + with self.argument_context('batchai job stream-file') as c: + c.argument('job_name', options_list=['--job-name', '-j'], help='Name of the job.') + c.argument('file_name', options_list=['--name', '-n'], help='The name of the file to stream.') + + with self.argument_context('batchai file-server') as c: + c.argument('file_server_name', options_list=['--name', '-n'], help='Name of the file server.') + + with self.argument_context('batchai file-server create') as c: + c.argument('vm_size', options_list=['--vm-size', '-s'], help='VM size.', completer=get_vm_size_completion_list) + c.argument('json_file', options_list=['--config', '-c'], help='A path to a json file containing file server create parameters (json representation of azure.mgmt.batchai.models.FileServerCreateParameters). Note, parameters given via command line will overwrite parameters specified in the configuration file.', arg_group='Advanced') + + with self.argument_context('batchai file-server create', arg_group='Storage') as c: + c.argument('disk_count', help='Number of disks.', type=int) + c.argument('disk_size', help='Disk size in Gb.', type=int) + c.argument('storage_sku', arg_type=get_enum_type(SkuName), help='The sku of storage account to persist VM.') + + with self.argument_context('batchai file-server create', arg_group='Admin Account') as c: + c.argument('user_name', options_list=['--admin-user-name', '-u'], help='Name of the admin user to be created on every compute node.') + c.argument('ssh_key', options_list=['--ssh-key', '-k'], help='SSH public key value or path.') + c.argument('password', options_list=['--password', '-p'], help='Password.') + + with self.argument_context('batchai file-server list') as c: + # Not implemented yet + c.ignore('file_servers_list_options') diff --git 
a/src/command_modules/azure-cli-batchai/azure/cli/command_modules/batchai/commands.py b/src/command_modules/azure-cli-batchai/azure/cli/command_modules/batchai/commands.py index 102ac59aa5d..f09cb46524b 100644 --- a/src/command_modules/azure-cli-batchai/azure/cli/command_modules/batchai/commands.py +++ b/src/command_modules/azure-cli-batchai/azure/cli/command_modules/batchai/commands.py @@ -17,29 +17,46 @@ file_server_table_format, remote_login_table_format, ) -from azure.cli.core.commands import cli_command +from azure.cli.core.commands import CliCommandType custom_path = 'azure.cli.command_modules.batchai.custom#{}' -mgmt_path = 'azure.mgmt.batchai.operations.{}_operations#{}.{}' - -cli_command(__name__, 'batchai cluster create', custom_path.format('create_cluster'), batchai_client_factory, no_wait_param='raw') -cli_command(__name__, 'batchai cluster delete', mgmt_path.format('clusters', 'ClustersOperations', 'delete'), cluster_client_factory, confirmation=True, no_wait_param='raw') -cli_command(__name__, 'batchai cluster show', mgmt_path.format('clusters', 'ClustersOperations', 'get'), cluster_client_factory) -cli_command(__name__, 'batchai cluster list', custom_path.format('list_clusters'), cluster_client_factory, table_transformer=cluster_list_table_format) -cli_command(__name__, 'batchai cluster list-nodes', mgmt_path.format('clusters', 'ClustersOperations', 'list_remote_login_information'), cluster_client_factory, table_transformer=remote_login_table_format) -cli_command(__name__, 'batchai cluster resize', custom_path.format('resize_cluster'), cluster_client_factory) -cli_command(__name__, 'batchai cluster auto-scale', custom_path.format('set_cluster_auto_scale_parameters'), cluster_client_factory) - -cli_command(__name__, 'batchai job create', custom_path.format('create_job'), batchai_client_factory, no_wait_param='raw') -cli_command(__name__, 'batchai job delete', mgmt_path.format('jobs', 'JobsOperations', 'delete'), job_client_factory, confirmation=True, 
no_wait_param='raw') -cli_command(__name__, 'batchai job terminate', mgmt_path.format('jobs', 'JobsOperations', 'terminate'), job_client_factory, no_wait_param='raw') -cli_command(__name__, 'batchai job show', mgmt_path.format('jobs', 'JobsOperations', 'get'), job_client_factory) -cli_command(__name__, 'batchai job list', custom_path.format('list_jobs'), job_client_factory, table_transformer=job_list_table_format) -cli_command(__name__, 'batchai job list-nodes', mgmt_path.format('jobs', 'JobsOperations', 'list_remote_login_information'), job_client_factory, table_transformer=remote_login_table_format) -cli_command(__name__, 'batchai job list-files', custom_path.format('list_files'), job_client_factory, table_transformer=file_list_table_format) -cli_command(__name__, 'batchai job stream-file', custom_path.format('tail_file'), job_client_factory) - -cli_command(__name__, 'batchai file-server create', custom_path.format('create_file_server'), file_server_client_factory, no_wait_param='raw') -cli_command(__name__, 'batchai file-server delete', mgmt_path.format('file_servers', 'FileServersOperations', 'delete'), file_server_client_factory, confirmation=True, no_wait_param='raw') -cli_command(__name__, 'batchai file-server show', mgmt_path.format('file_servers', 'FileServersOperations', 'get'), file_server_client_factory) -cli_command(__name__, 'batchai file-server list', custom_path.format('list_file_servers'), file_server_client_factory, table_transformer=file_server_table_format) + +batchai_cluster_sdk = CliCommandType( + operations_tmpl='azure.mgmt.batchai.operations.clusters_operations#ClustersOperations.{}', + client_factory=cluster_client_factory) + +batchai_job_sdk = CliCommandType( + operations_tmpl='azure.mgmt.batchai.operations.jobs_operations#JobsOperations.{}', + client_factory=job_client_factory) + +batchai_server_sdk = CliCommandType( + operations_tmpl='azure.mgmt.batchai.operations.file_servers_operations#FileServersOperations.{}', + 
client_factory=file_server_client_factory) + + +def load_command_table(self, _): + + with self.command_group('batchai cluster', batchai_cluster_sdk, client_factory=cluster_client_factory) as g: + g.custom_command('create', 'create_cluster', client_factory=batchai_client_factory, no_wait_param='raw') + g.command('delete', 'delete', confirmation=True, no_wait_param='raw') + g.command('show', 'get') + g.custom_command('list', 'list_clusters', table_transformer=cluster_list_table_format) + g.command('list-nodes', 'list_remote_login_information', table_transformer=remote_login_table_format) + g.custom_command('resize', 'resize_cluster') + g.custom_command('auto-scale', 'set_cluster_auto_scale_parameters') + + with self.command_group('batchai job', batchai_job_sdk, client_factory=job_client_factory) as g: + g.custom_command('create', 'create_job', client_factory=batchai_client_factory, no_wait_param='raw') + g.command('delete', 'delete', confirmation=True, no_wait_param='raw') + g.command('terminate', 'terminate', no_wait_param='raw') + g.command('show', 'get') + g.custom_command('list', 'list_jobs', table_transformer=job_list_table_format) + g.command('list-nodes', 'list_remote_login_information', table_transformer=remote_login_table_format) + g.custom_command('list-files', 'list_files', table_transformer=file_list_table_format) + g.custom_command('stream-file', 'tail_file') + + with self.command_group('batchai file-server', batchai_server_sdk, client_factory=file_server_client_factory) as g: + g.custom_command('create', 'create_file_server', no_wait_param='raw') + g.command('delete', 'delete', confirmation=True, no_wait_param='raw') + g.command('show', 'get') + g.custom_command('list', 'list_file_servers', table_transformer=file_server_table_format) diff --git a/src/command_modules/azure-cli-batchai/azure/cli/command_modules/batchai/custom.py b/src/command_modules/azure-cli-batchai/azure/cli/command_modules/batchai/custom.py index 4da1e509fac..e984511e42f 100644 --- 
a/src/command_modules/azure-cli-batchai/azure/cli/command_modules/batchai/custom.py +++ b/src/command_modules/azure-cli-batchai/azure/cli/command_modules/batchai/custom.py @@ -11,14 +11,16 @@ import signal import time import requests -from azure.cli.core._config import az_config -from azure.cli.core.keys import is_valid_ssh_rsa_public_key -from azure.cli.core.util import CLIError -import azure.mgmt.batchai.models as models +from knack.util import CLIError from msrest.serialization import Deserializer +import azure.mgmt.batchai.models as models + +from azure.cli.core.keys import is_valid_ssh_rsa_public_key + + # Environment variables for specifying azure storage account and key. We want the user to make explicit # decision about which storage account to use instead of using his default account specified via AZURE_STORAGE_ACCOUNT # and AZURE_STORAGE_KEY. @@ -53,7 +55,7 @@ def _get_deserializer(): return Deserializer(client_models) -def get_storage_account_key(account_name, account_key): +def _get_storage_account_key(cli_ctx, account_name, account_key): """Returns account key for the given storage account. :param str account_name: storage account name. 
@@ -63,7 +65,7 @@ def get_storage_account_key(account_name, account_key): return account_key from azure.mgmt.storage import StorageManagementClient from azure.cli.core.commands.client_factory import get_mgmt_service_client - storage_client = get_mgmt_service_client(StorageManagementClient) + storage_client = get_mgmt_service_client(cli_ctx, StorageManagementClient) account = [a.id for a in list(storage_client.storage_accounts.list()) if a.name == account_name] if not account: raise CLIError('Cannot find "{0}" storage account.'.format(account_name)) @@ -74,18 +76,18 @@ def get_storage_account_key(account_name, account_key): return keys_list_result.keys[0].value -def get_effective_storage_account_name_and_key(account_name, account_key): +def _get_effective_storage_account_name_and_key(cli_ctx, account_name, account_key): """Returns storage account name and key to be used. :param str or None account_name: storage account name provided as command line argument. :param str or None account_key: storage account key provided as command line argument. """ if account_name: - return account_name, get_storage_account_key(account_name, account_key) or '' - return az_config.get('batchai', 'storage_account', ''), az_config.get('batchai', 'storage_key', '') + return account_name, _get_storage_account_key(cli_ctx, account_name, account_key) or '' + return cli_ctx.config.get('batchai', 'storage_account', ''), cli_ctx.config.get('batchai', 'storage_key', '') -def update_cluster_create_parameters_with_env_variables(params, account_name=None, account_key=None): +def _update_cluster_create_parameters_with_env_variables(cli_ctx, params, account_name=None, account_key=None): """Replaces placeholders with information from the environment variables. Currently we support replacing of storage account name and key in mount volumes. @@ -96,7 +98,8 @@ def update_cluster_create_parameters_with_env_variables(params, account_name=Non :return models.ClusterCreateParameters: updated parameters. 
""" result = copy.deepcopy(params) - storage_account_name, storage_account_key = get_effective_storage_account_name_and_key(account_name, account_key) + storage_account_name, storage_account_key = _get_effective_storage_account_name_and_key( + cli_ctx, account_name, account_key) require_storage_account = False require_storage_account_key = False @@ -142,7 +145,7 @@ def update_cluster_create_parameters_with_env_variables(params, account_name=Non return result -def update_user_account_settings(params, admin_user_name, ssh_key, password): +def _update_user_account_settings(params, admin_user_name, ssh_key, password): """Update account settings of cluster or file server creation parameters :param models.ClusterCreateParameters or models.FileServerCreateParameters params: params to update @@ -187,7 +190,7 @@ def update_user_account_settings(params, admin_user_name, ssh_key, password): return result -def add_nfs_to_cluster_create_parameters(params, file_server_id, mount_path): +def _add_nfs_to_cluster_create_parameters(params, file_server_id, mount_path): """Adds NFS to the cluster create parameters. :param model.ClusterCreateParameters params: cluster create parameters. @@ -211,8 +214,8 @@ def add_nfs_to_cluster_create_parameters(params, file_server_id, mount_path): return result -def add_azure_file_share_to_cluster_create_parameters(params, azure_file_share, mount_path, account_name=None, - account_key=None): +def _add_azure_file_share_to_cluster_create_parameters(cli_ctx, params, azure_file_share, mount_path, account_name=None, + account_key=None): """Add Azure File share to the cluster create parameters. :param model.ClusterCreateParameters params: cluster create parameters. 
@@ -231,7 +234,8 @@ def add_azure_file_share_to_cluster_create_parameters(params, azure_file_share, result.node_setup.mount_volumes = models.MountVolumes() if result.node_setup.mount_volumes.azure_file_shares is None: result.node_setup.mount_volumes.azure_file_shares = [] - storage_account_name, storage_account_key = get_effective_storage_account_name_and_key(account_name, account_key) + storage_account_name, storage_account_key = _get_effective_storage_account_name_and_key(cli_ctx, account_name, + account_key) if not storage_account_name: raise CLIError(MSG_CONFIGURE_STORAGE_ACCOUNT) if not storage_account_key: @@ -244,8 +248,8 @@ def add_azure_file_share_to_cluster_create_parameters(params, azure_file_share, return result -def add_azure_container_to_cluster_create_parameters(params, container_name, mount_path, account_name=None, - account_key=None): +def _add_azure_container_to_cluster_create_parameters(cli_ctx, params, container_name, mount_path, account_name=None, + account_key=None): """Add Azure Storage container to the cluster create parameters. :param model.ClusterCreateParameters params: cluster create parameters. 
@@ -264,7 +268,8 @@ def add_azure_container_to_cluster_create_parameters(params, container_name, mou result.node_setup.mount_volumes = models.MountVolumes() if result.node_setup.mount_volumes.azure_blob_file_systems is None: result.node_setup.mount_volumes.azure_blob_file_systems = [] - storage_account_name, storage_account_key = get_effective_storage_account_name_and_key(account_name, account_key) + storage_account_name, storage_account_key = _get_effective_storage_account_name_and_key(cli_ctx, account_name, + account_key) if not storage_account_name: raise CLIError(MSG_CONFIGURE_STORAGE_ACCOUNT) if not storage_account_key: @@ -277,7 +282,7 @@ def add_azure_container_to_cluster_create_parameters(params, container_name, mou return result -def get_image_reference_or_die(image): +def _get_image_reference_or_die(image): """Returns image reference for the given image alias. :param str image: image alias. @@ -290,7 +295,7 @@ def get_image_reference_or_die(image): return reference -def update_nodes_information(params, image, vm_size, min_nodes, max_nodes): +def _update_nodes_information(params, image, vm_size, min_nodes, max_nodes): """Updates cluster's nodes information. :param models.ClusterCreateParameters params: cluster create parameters. 
@@ -306,7 +311,7 @@ def update_nodes_information(params, image, vm_size, min_nodes, max_nodes): if not result.vm_size: raise CLIError('Please provide VM size') if image: - result.virtual_machine_configuration = models.VirtualMachineConfiguration(get_image_reference_or_die(image)) + result.virtual_machine_configuration = models.VirtualMachineConfiguration(_get_image_reference_or_die(image)) if min_nodes == max_nodes: result.scale_settings = models.ScaleSettings(manual=models.ManualScaleSettings(min_nodes)) elif max_nodes is not None: @@ -317,7 +322,7 @@ return result -def create_cluster(client, # pylint: disable=too-many-locals +def create_cluster(cmd, client, # pylint: disable=too-many-locals resource_group, cluster_name, json_file=None, location=None, user_name=None, ssh_key=None, password=None, image='UbuntuLTS', vm_size=None, min_nodes=0, max_nodes=None, nfs_name=None, nfs_resource_group=None, nfs_mount_path='nfs', azure_file_share=None, @@ -329,22 +334,22 @@ def create_cluster(client, # pylint: disable=too-many-locals params = _get_deserializer()('ClusterCreateParameters', json_obj) else: params = models.ClusterCreateParameters(None, None, None) - params = update_cluster_create_parameters_with_env_variables(params, account_name, account_key) - params = update_user_account_settings(params, user_name, ssh_key, password) + params = _update_cluster_create_parameters_with_env_variables(cmd.cli_ctx, params, account_name, account_key) + params = _update_user_account_settings(params, user_name, ssh_key, password) if location: params.location = location if not params.location: raise CLIError('Please provide location for cluster creation.') - params = update_nodes_information(params, image, vm_size, min_nodes, max_nodes) + params = _update_nodes_information(params, image, vm_size, min_nodes, max_nodes) if nfs_name: file_server = client.file_servers.get(nfs_resource_group if nfs_resource_group else
resource_group, nfs_name) - params = add_nfs_to_cluster_create_parameters(params, file_server.id, nfs_mount_path) + params = _add_nfs_to_cluster_create_parameters(params, file_server.id, nfs_mount_path) if azure_file_share: - params = add_azure_file_share_to_cluster_create_parameters(params, azure_file_share, afs_mount_path, - account_name, account_key) + params = _add_azure_file_share_to_cluster_create_parameters(cmd.cli_ctx, params, azure_file_share, + afs_mount_path, account_name, account_key) if container_name: - params = add_azure_container_to_cluster_create_parameters(params, container_name, container_mount_path, - account_name, account_key) + params = _add_azure_container_to_cluster_create_parameters(cmd.cli_ctx, params, container_name, + container_mount_path, account_name, account_key) return client.clusters.create(resource_group, cluster_name, params, raw=raw) @@ -442,7 +447,7 @@ def create_file_server(client, resource_group, file_server_name, json_file=None, else: parameters = models.FileServerCreateParameters(None, None, None, None) - parameters = update_user_account_settings(parameters, user_name, ssh_key, password) + parameters = _update_user_account_settings(parameters, user_name, ssh_key, password) if location: parameters.location = location if not parameters.location: diff --git a/src/command_modules/azure-cli-batchai/azure/cli/command_modules/batchai/tests/test_batchai_custom.py b/src/command_modules/azure-cli-batchai/azure/cli/command_modules/batchai/tests/test_batchai_custom.py index 3930ab29cfa..4d2c0b64e63 100644 --- a/src/command_modules/azure-cli-batchai/azure/cli/command_modules/batchai/tests/test_batchai_custom.py +++ b/src/command_modules/azure-cli-batchai/azure/cli/command_modules/batchai/tests/test_batchai_custom.py @@ -8,12 +8,12 @@ import unittest from azure.cli.command_modules.batchai.custom import ( - update_cluster_create_parameters_with_env_variables, - update_nodes_information, - update_user_account_settings, - 
add_azure_container_to_cluster_create_parameters, - add_azure_file_share_to_cluster_create_parameters, - add_nfs_to_cluster_create_parameters) + _update_cluster_create_parameters_with_env_variables, + _update_nodes_information, + _update_user_account_settings, + _add_azure_container_to_cluster_create_parameters, + _add_azure_file_share_to_cluster_create_parameters, + _add_nfs_to_cluster_create_parameters) from azure.cli.core.util import CLIError from azure.mgmt.batchai.models import ( ClusterCreateParameters, UserAccountSettings, NodeSetup, MountVolumes, FileServerCreateParameters, @@ -45,14 +45,14 @@ def test_batchai_update_cluster_create_parameters_with_user_account_settings(sel admin_user_password='password')) # No update. - result = update_user_account_settings(params, None, None, None) + result = _update_user_account_settings(params, None, None, None) self.assertEquals(result.user_account_settings.admin_user_name, 'name') self.assertEquals(result.user_account_settings.admin_user_password, 'password') self.assertIsNone(result.user_account_settings.admin_user_ssh_public_key) # Updating. params.user_account_settings = None - result = update_user_account_settings(params, 'user', SSH_KEY, None) + result = _update_user_account_settings(params, 'user', SSH_KEY, None) self.assertEquals(result.user_account_settings.admin_user_name, 'user') self.assertIsNone(result.user_account_settings.admin_user_password) self.assertEquals(result.user_account_settings.admin_user_ssh_public_key, SSH_KEY) @@ -60,20 +60,20 @@ def test_batchai_update_cluster_create_parameters_with_user_account_settings(sel # Incorrect ssh public key. params.user_account_settings = None # user may emit user account settings in config file with self.assertRaises(CLIError): - update_user_account_settings(params, 'user', 'wrong' + SSH_KEY, 'password') + _update_user_account_settings(params, 'user', 'wrong' + SSH_KEY, 'password') # No user account. 
params.user_account_settings = None with self.assertRaises(CLIError): - update_user_account_settings(params, None, SSH_KEY, None) + _update_user_account_settings(params, None, SSH_KEY, None) # No credentials. params.user_account_settings = None with self.assertRaises(CLIError): - update_user_account_settings(params, 'user', None, None) + _update_user_account_settings(params, 'user', None, None) # ssh public key from a file. - result = update_user_account_settings(params, 'user', _data_file('key.txt'), None) + result = _update_user_account_settings(params, 'user', _data_file('key.txt'), None) self.assertEquals(result.user_account_settings.admin_user_ssh_public_key, SSH_KEY) def test_batchai_update_file_server_create_parameters_with_user_account_settings(self): @@ -85,19 +85,19 @@ def test_batchai_update_file_server_create_parameters_with_user_account_settings data_disks=DataDisks(10, 2, 'Standard_LRS')) # No update. - result = update_user_account_settings(params, None, None, None) + result = _update_user_account_settings(params, None, None, None) self.assertEqual(params, result) # Updating when user_account_setting are omitted. params.ssh_configuration.user_account_settings = None - result = update_user_account_settings(params, 'user', SSH_KEY, None) + result = _update_user_account_settings(params, 'user', SSH_KEY, None) self.assertEquals(result.ssh_configuration.user_account_settings.admin_user_name, 'user') self.assertIsNone(result.ssh_configuration.user_account_settings.admin_user_password) self.assertEquals(result.ssh_configuration.user_account_settings.admin_user_ssh_public_key, SSH_KEY) # Updating when ssh_configuration is omitted. 
params.ssh_configuration = None - result = update_user_account_settings(params, 'user', SSH_KEY, 'password') + result = _update_user_account_settings(params, 'user', SSH_KEY, 'password') self.assertEquals(result.ssh_configuration.user_account_settings.admin_user_name, 'user') self.assertEquals(result.ssh_configuration.user_account_settings.admin_user_password, 'password') self.assertEquals(result.ssh_configuration.user_account_settings.admin_user_ssh_public_key, SSH_KEY) @@ -105,26 +105,26 @@ def test_batchai_update_file_server_create_parameters_with_user_account_settings # Incorrect ssh public key. params.ssh_configuration = SshConfiguration(None) # user may emit user account settings in config file with self.assertRaises(CLIError): - update_user_account_settings(params, 'user', 'wrong' + SSH_KEY, None) + _update_user_account_settings(params, 'user', 'wrong' + SSH_KEY, None) # No user account. params.ssh_configuration.user_account_settings = None with self.assertRaises(CLIError): - update_user_account_settings(params, None, SSH_KEY, None) + _update_user_account_settings(params, None, SSH_KEY, None) # No credentials. params.ssh_configuration.user_account_settings = None with self.assertRaises(CLIError): - update_user_account_settings(params, 'user', None, None) + _update_user_account_settings(params, 'user', None, None) # Only password. params.ssh_configuration.user_account_settings = None - result = update_user_account_settings(params, 'user', None, 'password') + result = _update_user_account_settings(params, 'user', None, 'password') self.assertEquals(result.ssh_configuration.user_account_settings.admin_user_name, 'user') self.assertEquals(result.ssh_configuration.user_account_settings.admin_user_password, 'password') # ssh public key from a file. 
- result = update_user_account_settings(params, 'user', _data_file('key.txt'), None) + result = _update_user_account_settings(params, 'user', _data_file('key.txt'), None) self.assertEquals(result.ssh_configuration.user_account_settings.admin_user_ssh_public_key, SSH_KEY) def test_batchai_cluster_parameter_update_with_environment_variables(self): @@ -152,12 +152,12 @@ def test_batchai_cluster_parameter_update_with_environment_variables(self): os.environ.pop('AZURE_BATCHAI_STORAGE_ACCOUNT', None) os.environ.pop('AZURE_BATCHAI_STORAGE_KEY', None) with self.assertRaises(CLIError): - update_cluster_create_parameters_with_env_variables(params) + _update_cluster_create_parameters_with_env_variables(params) # Set environment variables and check patching results. os.environ['AZURE_BATCHAI_STORAGE_ACCOUNT'] = 'account' os.environ['AZURE_BATCHAI_STORAGE_KEY'] = 'key' - result = update_cluster_create_parameters_with_env_variables(params) + result = _update_cluster_create_parameters_with_env_variables(params) self.assertEquals(result.node_setup.mount_volumes.azure_file_shares[0].account_name, 'account') self.assertEquals(result.node_setup.mount_volumes.azure_file_shares[0].credentials.account_key, 'key') self.assertEquals(result.node_setup.mount_volumes.azure_blob_file_systems[0].account_name, 'account') @@ -170,7 +170,7 @@ def test_batchai_cluster_parameter_update_with_environment_variables(self): params.node_setup.mount_volumes.azure_blob_file_systems[0].credentials.account_key = 'some_other_key' os.environ['AZURE_BATCHAI_STORAGE_ACCOUNT'] = 'account' os.environ['AZURE_BATCHAI_STORAGE_KEY'] = 'key' - result = update_cluster_create_parameters_with_env_variables(params) + result = _update_cluster_create_parameters_with_env_variables(params) self.assertEquals(result.node_setup.mount_volumes.azure_file_shares[0].account_name, 'some_account') self.assertEquals(result.node_setup.mount_volumes.azure_file_shares[0].credentials.account_key, 'some_key') 
self.assertEquals(result.node_setup.mount_volumes.azure_blob_file_systems[0].account_name, @@ -184,7 +184,7 @@ def test_batchai_cluster_parameter_update_with_environment_variables(self): params.node_setup.mount_volumes.azure_blob_file_systems[0].account_name = '' params.node_setup.mount_volumes.azure_blob_file_systems[0].credentials.account_key = \ '' - result = update_cluster_create_parameters_with_env_variables(params, 'account_from_cmd', 'key_from_cmd') + result = _update_cluster_create_parameters_with_env_variables(params, 'account_from_cmd', 'key_from_cmd') self.assertEquals(result.node_setup.mount_volumes.azure_file_shares[0].account_name, 'account_from_cmd') self.assertEquals(result.node_setup.mount_volumes.azure_file_shares[0].credentials.account_key, 'key_from_cmd') self.assertEquals(result.node_setup.mount_volumes.azure_blob_file_systems[0].account_name, 'account_from_cmd') @@ -195,7 +195,7 @@ def test_batchai_cluster_parameter_update_with_environment_variables(self): params.node_setup.mount_volumes.azure_file_shares[0].account_name = '' params.node_setup.mount_volumes.azure_file_shares[0].credentials.account_key = '' with self.assertRaises(CLIError): - update_cluster_create_parameters_with_env_variables(params, str(uuid.uuid4()), None) + _update_cluster_create_parameters_with_env_variables(params, str(uuid.uuid4()), None) def test_batchai_add_nfs_to_cluster_create_parameters(self): """Test adding of nfs into cluster create parameters.""" @@ -205,10 +205,10 @@ def test_batchai_add_nfs_to_cluster_create_parameters(self): # No relative mount path provided. with self.assertRaises(CLIError): - add_nfs_to_cluster_create_parameters(params, 'id', '') + _add_nfs_to_cluster_create_parameters(params, 'id', '') # Check valid update. 
- result = add_nfs_to_cluster_create_parameters(params, 'id', 'relative_path') + result = _add_nfs_to_cluster_create_parameters(params, 'id', 'relative_path') self.assertEquals(result.node_setup.mount_volumes.file_servers[0].file_server.id, 'id') self.assertEquals(result.node_setup.mount_volumes.file_servers[0].relative_mount_path, 'relative_path') self.assertEquals(result.node_setup.mount_volumes.file_servers[0].mount_options, 'rw') @@ -224,17 +224,17 @@ def test_batchai_add_azure_file_share_to_cluster_create_parameters(self): os.environ.pop('AZURE_BATCHAI_STORAGE_ACCOUNT', None) os.environ.pop('AZURE_BATCHAI_STORAGE_KEY', None) with self.assertRaises(CLIError): - add_azure_file_share_to_cluster_create_parameters(params, 'share', 'relative_path') + _add_azure_file_share_to_cluster_create_parameters(params, 'share', 'relative_path') os.environ['AZURE_BATCHAI_STORAGE_ACCOUNT'] = 'account' os.environ['AZURE_BATCHAI_STORAGE_KEY'] = 'key' # No relative mount path provided. with self.assertRaises(CLIError): - add_azure_file_share_to_cluster_create_parameters(params, 'share', '') + _add_azure_file_share_to_cluster_create_parameters(params, 'share', '') # Check valid update. - result = add_azure_file_share_to_cluster_create_parameters(params, 'share', 'relative_path') + result = _add_azure_file_share_to_cluster_create_parameters(params, 'share', 'relative_path') self.assertEquals(result.node_setup.mount_volumes.azure_file_shares[0].account_name, 'account') self.assertEquals(result.node_setup.mount_volumes.azure_file_shares[0].azure_file_url, 'https://account.file.core.windows.net/share') @@ -244,7 +244,7 @@ def test_batchai_add_azure_file_share_to_cluster_create_parameters(self): # Account name and key provided via command line args. 
os.environ.pop('AZURE_BATCHAI_STORAGE_ACCOUNT', None) os.environ.pop('AZURE_BATCHAI_STORAGE_KEY', None) - result = add_azure_file_share_to_cluster_create_parameters(params, 'share', 'relative_path', 'account', 'key') + result = _add_azure_file_share_to_cluster_create_parameters(params, 'share', 'relative_path', 'account', 'key') self.assertEquals(result.node_setup.mount_volumes.azure_file_shares[0].account_name, 'account') self.assertEquals(result.node_setup.mount_volumes.azure_file_shares[0].azure_file_url, 'https://account.file.core.windows.net/share') @@ -261,17 +261,17 @@ def test_batchai_add_azure_container_to_cluster_create_parameters(self): os.environ.pop('AZURE_BATCHAI_STORAGE_ACCOUNT', None) os.environ.pop('AZURE_BATCHAI_STORAGE_KEY', None) with self.assertRaises(CLIError): - add_azure_container_to_cluster_create_parameters(params, 'container', 'relative_path') + _add_azure_container_to_cluster_create_parameters(params, 'container', 'relative_path') os.environ['AZURE_BATCHAI_STORAGE_ACCOUNT'] = 'account' os.environ['AZURE_BATCHAI_STORAGE_KEY'] = 'key' # No relative mount path provided. with self.assertRaises(CLIError): - add_azure_container_to_cluster_create_parameters(params, 'container', '') + _add_azure_container_to_cluster_create_parameters(params, 'container', '') # Check valid update. - result = add_azure_container_to_cluster_create_parameters(params, 'container', 'relative_path') + result = _add_azure_container_to_cluster_create_parameters(params, 'container', 'relative_path') self.assertEquals(result.node_setup.mount_volumes.azure_blob_file_systems[0].account_name, 'account') self.assertEquals(result.node_setup.mount_volumes.azure_blob_file_systems[0].container_name, 'container') @@ -283,8 +283,8 @@ def test_batchai_add_azure_container_to_cluster_create_parameters(self): # Account name and key provided via command line args. 
os.environ.pop('AZURE_BATCHAI_STORAGE_ACCOUNT', None) os.environ.pop('AZURE_BATCHAI_STORAGE_KEY', None) - result = add_azure_container_to_cluster_create_parameters(params, 'container', 'relative_path', - 'account', 'key') + result = _add_azure_container_to_cluster_create_parameters(params, 'container', 'relative_path', + 'account', 'key') self.assertEquals(result.node_setup.mount_volumes.azure_blob_file_systems[0].account_name, 'account') self.assertEquals(result.node_setup.mount_volumes.azure_blob_file_systems[0].container_name, 'container') @@ -299,14 +299,14 @@ def test_batchai_update_nodes_information(self): user_account_settings=UserAccountSettings(admin_user_name='name', admin_user_password='password')) # Update to autoscale Ubuntu DSVM. - result = update_nodes_information(params, 'ubuntudsvm', 'Standard_NC6', 1, 3) + result = _update_nodes_information(params, 'ubuntudsvm', 'Standard_NC6', 1, 3) self.assertEquals(result.vm_size, 'Standard_NC6') self.assertEquals(result.virtual_machine_configuration.image_reference, ImageReference('microsoft-ads', 'linux-data-science-vm-ubuntu', 'linuxdsvmubuntu')) self.assertEquals(result.scale_settings, ScaleSettings(auto_scale=AutoScaleSettings(1, 3))) # Update to manual scale Ubuntu LTS. - result = update_nodes_information(params, 'UbuntuLTS', 'Standard_NC6', 2, 2) + result = _update_nodes_information(params, 'UbuntuLTS', 'Standard_NC6', 2, 2) self.assertEquals(result.vm_size, 'Standard_NC6') self.assertEquals(result.virtual_machine_configuration.image_reference, ImageReference('Canonical', 'UbuntuServer', '16.04-LTS')) @@ -314,26 +314,26 @@ def test_batchai_update_nodes_information(self): # Update image. 
params.scale_settings = ScaleSettings(manual=ManualScaleSettings(2)) - result = update_nodes_information(params, 'UbuntuDsvm', None, 0, None) + result = _update_nodes_information(params, 'UbuntuDsvm', None, 0, None) self.assertEquals(result.virtual_machine_configuration.image_reference, ImageReference('microsoft-ads', 'linux-data-science-vm-ubuntu', 'linuxdsvmubuntu')) self.assertEquals(result.scale_settings, ScaleSettings(manual=ManualScaleSettings(2))) # Update nothing. - result = update_nodes_information(params, None, None, 0, None) + result = _update_nodes_information(params, None, None, 0, None) self.assertEqual(params, result) # Wrong image. with self.assertRaises(CLIError): - update_nodes_information(params, 'unsupported', None, 0, None) + _update_nodes_information(params, 'unsupported', None, 0, None) # No VM size. params.vm_size = None with self.assertRaises(CLIError): - update_nodes_information(params, 'unsupported', None, 0, None) + _update_nodes_information(params, 'unsupported', None, 0, None) # No scale settings. 
params.vm_size = 'Standard_NC6' params.scale_settings = None with self.assertRaises(CLIError): - update_nodes_information(params, None, None, 0, None) + _update_nodes_information(params, None, None, 0, None) diff --git a/src/command_modules/azure-cli-rdbms/azure/cli/command_modules/rdbms/tests/recordings/latest/test_mysql_proxy_resources_mgmt.yaml b/src/command_modules/azure-cli-rdbms/azure/cli/command_modules/rdbms/tests/recordings/latest/test_mysql_proxy_resources_mgmt.yaml new file mode 100644 index 00000000000..6dde92a8d9d --- /dev/null +++ b/src/command_modules/azure-cli-rdbms/azure/cli/command_modules/rdbms/tests/recordings/latest/test_mysql_proxy_resources_mgmt.yaml @@ -0,0 +1,56 @@ +interactions: +- request: + body: '{"location": "westus", "tags": {"use": "az-test"}}' + headers: + Accept: [application/json] + Accept-Encoding: ['gzip, deflate'] + CommandName: [group create] + Connection: [keep-alive] + Content-Length: ['50'] + Content-Type: [application/json; charset=utf-8] + User-Agent: [python/3.5.3 (Windows-10-10.0.16299-SP0) requests/2.18.4 msrest/0.4.19 + msrest_azure/0.4.17 resourcemanagementclient/1.2.1 Azure-SDK-For-Python + AZURECLI/2.0.22] + accept-language: [en-US] + method: PUT + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/clitest.rg000001?api-version=2017-05-10 + response: + body: {string: '{"id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/clitest.rg000001","name":"clitest.rg000001","location":"westus","tags":{"use":"az-test"},"properties":{"provisioningState":"Succeeded"}}'} + headers: + cache-control: [no-cache] + content-length: ['328'] + content-type: [application/json; charset=utf-8] + date: ['Mon, 18 Dec 2017 17:55:34 GMT'] + expires: ['-1'] + pragma: [no-cache] + strict-transport-security: [max-age=31536000; includeSubDomains] + x-ms-ratelimit-remaining-subscription-writes: ['1197'] + status: {code: 201, message: Created} +- request: + body: null + headers: + 
Accept: [application/json] + Accept-Encoding: ['gzip, deflate'] + CommandName: [group delete] + Connection: [keep-alive] + Content-Length: ['0'] + Content-Type: [application/json; charset=utf-8] + User-Agent: [python/3.5.3 (Windows-10-10.0.16299-SP0) requests/2.18.4 msrest/0.4.19 + msrest_azure/0.4.17 resourcemanagementclient/1.2.1 Azure-SDK-For-Python + AZURECLI/2.0.22] + accept-language: [en-US] + method: DELETE + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/clitest.rg000001?api-version=2017-05-10 + response: + body: {string: ''} + headers: + cache-control: [no-cache] + content-length: ['0'] + date: ['Mon, 18 Dec 2017 17:55:34 GMT'] + expires: ['-1'] + location: ['https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/operationresults/eyJqb2JJZCI6IlJFU09VUkNFR1JPVVBERUxFVElPTkpPQi1DTElURVNUOjJFUkdIV1VEVE9UVDZGUDJMSjVYS1NVQ1JYTVNCSElDSU5HSlZJUnxBNEY4MTQzODEyRkM2QkVGLVdFU1RVUyIsImpvYkxvY2F0aW9uIjoid2VzdHVzIn0?api-version=2017-05-10'] + pragma: [no-cache] + strict-transport-security: [max-age=31536000; includeSubDomains] + x-ms-ratelimit-remaining-subscription-writes: ['1197'] + status: {code: 202, message: Accepted} +version: 1 diff --git a/src/command_modules/azure-cli-rdbms/azure/cli/command_modules/rdbms/tests/recordings/latest/test_postgres_proxy_resources_mgmt.yaml b/src/command_modules/azure-cli-rdbms/azure/cli/command_modules/rdbms/tests/recordings/latest/test_postgres_proxy_resources_mgmt.yaml new file mode 100644 index 00000000000..cf1b54381d9 --- /dev/null +++ b/src/command_modules/azure-cli-rdbms/azure/cli/command_modules/rdbms/tests/recordings/latest/test_postgres_proxy_resources_mgmt.yaml @@ -0,0 +1,56 @@ +interactions: +- request: + body: '{"location": "westus", "tags": {"use": "az-test"}}' + headers: + Accept: [application/json] + Accept-Encoding: ['gzip, deflate'] + CommandName: [group create] + Connection: [keep-alive] + Content-Length: ['50'] + Content-Type: 
[application/json; charset=utf-8] + User-Agent: [python/3.5.3 (Windows-10-10.0.16299-SP0) requests/2.18.4 msrest/0.4.19 + msrest_azure/0.4.17 resourcemanagementclient/1.2.1 Azure-SDK-For-Python + AZURECLI/2.0.22] + accept-language: [en-US] + method: PUT + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/clitest.rg000001?api-version=2017-05-10 + response: + body: {string: '{"id":"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/clitest.rg000001","name":"clitest.rg000001","location":"westus","tags":{"use":"az-test"},"properties":{"provisioningState":"Succeeded"}}'} + headers: + cache-control: [no-cache] + content-length: ['328'] + content-type: [application/json; charset=utf-8] + date: ['Mon, 18 Dec 2017 17:55:38 GMT'] + expires: ['-1'] + pragma: [no-cache] + strict-transport-security: [max-age=31536000; includeSubDomains] + x-ms-ratelimit-remaining-subscription-writes: ['1191'] + status: {code: 201, message: Created} +- request: + body: null + headers: + Accept: [application/json] + Accept-Encoding: ['gzip, deflate'] + CommandName: [group delete] + Connection: [keep-alive] + Content-Length: ['0'] + Content-Type: [application/json; charset=utf-8] + User-Agent: [python/3.5.3 (Windows-10-10.0.16299-SP0) requests/2.18.4 msrest/0.4.19 + msrest_azure/0.4.17 resourcemanagementclient/1.2.1 Azure-SDK-For-Python + AZURECLI/2.0.22] + accept-language: [en-US] + method: DELETE + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/clitest.rg000001?api-version=2017-05-10 + response: + body: {string: ''} + headers: + cache-control: [no-cache] + content-length: ['0'] + date: ['Mon, 18 Dec 2017 17:55:39 GMT'] + expires: ['-1'] + location: 
['https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/operationresults/eyJqb2JJZCI6IlJFU09VUkNFR1JPVVBERUxFVElPTkpPQi1DTElURVNUOjJFUkdIMzZKVU5RVlRCSVRERTJQQVlOVktLNjZTR0paT0JLRE42VXwxM0ZDNDQ0M0MyRDA3RkM2LVdFU1RVUyIsImpvYkxvY2F0aW9uIjoid2VzdHVzIn0?api-version=2017-05-10'] + pragma: [no-cache] + strict-transport-security: [max-age=31536000; includeSubDomains] + x-ms-ratelimit-remaining-subscription-writes: ['1195'] + status: {code: 202, message: Accepted} +version: 1