diff --git a/src/containerapp/HISTORY.rst b/src/containerapp/HISTORY.rst index f05316e2680..70ffa6993c4 100644 --- a/src/containerapp/HISTORY.rst +++ b/src/containerapp/HISTORY.rst @@ -3,6 +3,13 @@ Release History =============== +0.3.2 +++++++ +* Added 'az containerapp up' to create or update a container app and all associated resources (container app environment, ACR, Github Actions, resource group, etc.) +* Open an ssh-like shell in a Container App with 'az containerapp exec' +* Support for log streaming with 'az containerapp logs show' +* Replica show and list commands + 0.3.1 ++++++ * Update "az containerapp github-action add" parameters: replace --docker-file-path with --context-path, add --image. diff --git a/src/containerapp/azext_containerapp/.flake8 b/src/containerapp/azext_containerapp/.flake8 new file mode 100644 index 00000000000..777d0ca9ecd --- /dev/null +++ b/src/containerapp/azext_containerapp/.flake8 @@ -0,0 +1,4 @@ +[flake8] +ignore = + W503 # line break before binary operator, not compliant with PEP 8 + E203 # whitespace before ':', not compliant with PEP 8 \ No newline at end of file diff --git a/src/containerapp/azext_containerapp/_acr_run_polling.py b/src/containerapp/azext_containerapp/_acr_run_polling.py new file mode 100644 index 00000000000..1a71a87c99a --- /dev/null +++ b/src/containerapp/azext_containerapp/_acr_run_polling.py @@ -0,0 +1,112 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long, consider-using-f-string

import time

from msrest import Deserializer
from msrestazure.azure_exceptions import CloudError
from azure.cli.core.profiles import ResourceType
from azure.cli.command_modules.acr._constants import get_acr_task_models
from azure.core.polling import PollingMethod, LROPoller


def get_run_with_polling(cmd,
                         client,
                         run_id,
                         registry_name,
                         resource_group_name):
    """Return an LROPoller that tracks an ACR task run until it reaches a terminal status."""
    model_classes = {
        name: model
        for name, model in vars(get_acr_task_models(cmd)).items()
        if isinstance(model, type)
    }
    run_deserializer = Deserializer(model_classes)

    def deserialize_run(response):
        return run_deserializer('Run', response)

    initial_response = client.get(
        resource_group_name, registry_name, run_id, cls=lambda x, y, z: x)
    polling = RunPolling(cmd=cmd, registry_name=registry_name, run_id=run_id)
    return LROPoller(client=client,
                     initial_response=initial_response,
                     deserialization_callback=deserialize_run,
                     polling_method=polling)


class RunPolling(PollingMethod):  # pylint: disable=too-many-instance-attributes
    """Polling method that re-fetches an ACR run and raises if it ends unsuccessfully."""

    def __init__(self, cmd, registry_name, run_id, timeout=30):
        self._cmd = cmd
        self._registry_name = registry_name
        self._run_id = run_id
        self._timeout = timeout  # seconds to sleep between polls
        self._client = None
        self._response = None  # Will hold latest received response
        self._url = None  # The URL used to get the run
        self._deserialize = None  # The deserializer for Run
        self.operation_status = ""
        self.operation_result = None

    def initialize(self, client, initial_response, deserialization_callback):
        self._client = client._client  # pylint: disable=protected-access
        self._response = initial_response
        self._url = initial_response.http_request.url
        self._deserialize = deserialization_callback
        self._set_operation_status(initial_response)

    def run(self):
        # Poll until the run reaches a terminal status, then validate the outcome.
        while True:
            if self.finished():
                break
            time.sleep(self._timeout)
            self._update_status()

        if self.operation_status not in get_succeeded_run_status(self._cmd):
            from knack.util import CLIError
            raise CLIError("The run with ID '{}' finished with unsuccessful status '{}'. "
                           "Show run details by 'az acr task show-run -r {} --run-id {}'. "
                           "Show run logs by 'az acr task logs -r {} --run-id {}'.".format(
                               self._run_id,
                               self.operation_status,
                               self._registry_name,
                               self._run_id,
                               self._registry_name,
                               self._run_id
                           ))

    def status(self):
        return self.operation_status

    def finished(self):
        return self.operation_status in get_finished_run_status(self._cmd)

    def resource(self):
        return self.operation_result

    def _set_operation_status(self, response):
        # Anything other than HTTP 200 is surfaced as a CloudError.
        if response.http_response.status_code != 200:
            raise CloudError(response)
        self.operation_result = self._deserialize(response)
        self.operation_status = self.operation_result.status

    def _update_status(self):
        self._response = self._client._pipeline.run(  # pylint: disable=protected-access
            self._client.get(self._url), stream=False)
        self._set_operation_status(self._response)


def get_succeeded_run_status(cmd):
    """Run statuses that count as success."""
    RunStatus = cmd.get_models('RunStatus', resource_type=ResourceType.MGMT_CONTAINERREGISTRY, operation_group='task_runs')
    return [RunStatus.succeeded.value]


def get_finished_run_status(cmd):
    """All terminal run statuses (success or failure)."""
    RunStatus = cmd.get_models('RunStatus', resource_type=ResourceType.MGMT_CONTAINERREGISTRY, operation_group='task_runs')
    return [RunStatus.succeeded.value,
            RunStatus.failed.value,
            RunStatus.canceled.value,
            RunStatus.error.value,
            RunStatus.timeout.value]
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=consider-using-f-string, consider-using-with, no-member

import tarfile
import os
import re
import codecs
from io import open
import requests
from knack.log import get_logger
from msrestazure.azure_exceptions import CloudError
from azure.cli.core.azclierror import (CLIInternalError)
from azure.cli.core.profiles import ResourceType, get_sdk
from azure.cli.command_modules.acr._azure_utils import get_blob_info
from azure.cli.command_modules.acr._constants import TASK_VALID_VSTS_URLS

logger = get_logger(__name__)


def upload_source_code(cmd, client,
                       registry_name,
                       resource_group_name,
                       source_location,
                       tar_file_path,
                       docker_file_path,
                       docker_file_in_tar):
    """Pack the build context into a tarball and upload it to the registry's
    source-upload blob. Returns the relative path of the uploaded blob.

    Raises CLIInternalError if a SAS upload URL cannot be obtained."""
    _pack_source_code(source_location,
                      tar_file_path,
                      docker_file_path,
                      docker_file_in_tar)

    # Compute a human-readable size for the log message below.
    size = os.path.getsize(tar_file_path)
    unit = 'GiB'
    for S in ['Bytes', 'KiB', 'MiB', 'GiB']:
        if size < 1024:
            unit = S
            break
        size = size / 1024.0

    logger.info("Uploading archived source code from '%s'...", tar_file_path)
    upload_url = None
    relative_path = None
    try:
        source_upload_location = client.get_build_source_upload_url(
            resource_group_name, registry_name)
        upload_url = source_upload_location.upload_url
        relative_path = source_upload_location.relative_path
    except (AttributeError, CloudError) as e:
        raise CLIInternalError("Failed to get a SAS URL to upload context. Error: {}".format(e.message)) from e

    if not upload_url:
        raise CLIInternalError("Failed to get a SAS URL to upload context.")

    account_name, endpoint_suffix, container_name, blob_name, sas_token = get_blob_info(upload_url)
    BlockBlobService = get_sdk(cmd.cli_ctx, ResourceType.DATA_STORAGE, 'blob#BlockBlobService')
    BlockBlobService(account_name=account_name,
                     sas_token=sas_token,
                     endpoint_suffix=endpoint_suffix,
                     # Increase socket timeout from default of 20s for clients with slow network connection.
                     socket_timeout=300).create_blob_from_path(
                         container_name=container_name,
                         blob_name=blob_name,
                         file_path=tar_file_path)
    # Lazy %-formatting instead of str.format inside the logging call.
    logger.info("Sending context (%.3f %s) to registry: %s...",
                size, unit, registry_name)
    return relative_path


def _pack_source_code(source_location, tar_file_path, docker_file_path, docker_file_in_tar):
    """Create a gzipped tar of the build context at tar_file_path, honoring
    .dockerignore rules and always excluding common VCS metadata."""
    logger.info("Packing source code into tar to upload...")

    original_docker_file_name = os.path.basename(docker_file_path.replace("\\", os.sep))
    ignore_list, ignore_list_size = _load_dockerignore_file(source_location, original_docker_file_name)
    common_vcs_ignore_list = {'.git', '.gitignore', '.bzr', 'bzrignore', '.hg', '.hgignore', '.svn'}

    def _ignore_check(tarinfo, parent_ignored, parent_matching_rule_index):
        # ignore common vcs dir or file
        if tarinfo.name in common_vcs_ignore_list:
            logger.info("Excluding '%s' based on default ignore rules", tarinfo.name)
            return True, parent_matching_rule_index

        if ignore_list is None:
            # if .dockerignore doesn't exist, inherit from parent
            # eg, it will ignore the files under .git folder.
            return parent_ignored, parent_matching_rule_index

        for index, item in enumerate(ignore_list):
            # stop checking the remaining rules whose priorities are lower than the parent matching rule
            # at this point, current item should just inherit from parent
            if index >= parent_matching_rule_index:
                break
            if re.match(item.pattern, tarinfo.name):
                logger.debug(".dockerignore: rule '%s' matches '%s'.",
                             item.rule, tarinfo.name)
                return item.ignore, index

        logger.debug(".dockerignore: no rule for '%s'. parent ignore '%s'",
                     tarinfo.name, parent_ignored)
        # inherit from parent
        return parent_ignored, parent_matching_rule_index

    with tarfile.open(tar_file_path, "w:gz") as tar:
        # need to set arcname to empty string as the archive root path
        _archive_file_recursively(tar,
                                  source_location,
                                  arcname="",
                                  parent_ignored=False,
                                  parent_matching_rule_index=ignore_list_size,
                                  ignore_check=_ignore_check)

        # Add the Dockerfile if it's specified.
        # In the case of run, there will be no Dockerfile.
        if docker_file_path:
            docker_file_tarinfo = tar.gettarinfo(
                docker_file_path, docker_file_in_tar)
            with open(docker_file_path, "rb") as f:
                tar.addfile(docker_file_tarinfo, f)


class IgnoreRule:  # pylint: disable=too-few-public-methods
    """One .dockerignore rule compiled into an anchored regex (self.pattern).

    self.ignore is False when the rule is an exception (leading '!')."""

    def __init__(self, rule):

        self.rule = rule
        self.ignore = True
        # ! makes exceptions to exclusions
        if rule.startswith('!'):
            self.ignore = False
            rule = rule[1:]  # remove !
            # load path without leading slash in linux and windows
            # environments (interferes with dockerignore file)
            if rule.startswith('/'):
                rule = rule[1:]  # remove beginning '/'

        self.pattern = "^"
        tokens = rule.split('/')
        token_length = len(tokens)
        for index, token in enumerate(tokens, 1):
            # ** matches any number of directories
            if token == "**":
                self.pattern += ".*"  # treat **/ as **
            else:
                # * matches any sequence of non-separator characters
                # ? matches any single non-separator character
                # . matches dot character
                self.pattern += token.replace(
                    "*", "[^/]*").replace("?", "[^/]").replace(".", "\\.")
                if index < token_length:
                    self.pattern += "/"  # add back / if it's not the last
        self.pattern += "$"


def _load_dockerignore_file(source_location, original_docker_file_name):
    """Parse .dockerignore (or the <Dockerfile>.dockerignore override) into a
    priority-ordered list of IgnoreRule. Returns (rules, count) or (None, 0)."""
    # reference: https://docs.docker.com/engine/reference/builder/#dockerignore-file
    docker_ignore_file = os.path.join(source_location, ".dockerignore")
    docker_ignore_file_override = None
    if original_docker_file_name != "Dockerfile":
        docker_ignore_file_override = os.path.join(
            source_location, "{}.dockerignore".format(original_docker_file_name))
        if os.path.exists(docker_ignore_file_override):
            logger.info("Overriding .dockerignore with %s", docker_ignore_file_override)
            docker_ignore_file = docker_ignore_file_override

    if not os.path.exists(docker_ignore_file):
        return None, 0

    # Sniff for a UTF-8 BOM so the first rule isn't corrupted.
    # (Both opens below now use context managers; the handles were leaked before.)
    encoding = "utf-8"
    with open(docker_ignore_file, "rb") as probe:
        header = probe.read(len(codecs.BOM_UTF8))
    if header.startswith(codecs.BOM_UTF8):
        encoding = "utf-8-sig"

    ignore_list = []
    if docker_ignore_file == docker_ignore_file_override:
        ignore_list.append(IgnoreRule(".dockerignore"))

    with open(docker_ignore_file, 'r', encoding=encoding) as ignore_fp:
        for line in ignore_fp.readlines():
            rule = line.rstrip()

            # skip empty line and comment
            if not rule or rule.startswith('#'):
                continue

            # the ignore rule at the end has higher priority
            ignore_list = [IgnoreRule(rule)] + ignore_list

    return ignore_list, len(ignore_list)


def _archive_file_recursively(tar, name, arcname, parent_ignored, parent_matching_rule_index, ignore_check):
    """Add name (file or directory) to tar under arcname, consulting
    ignore_check for each entry; recurses into directories even when the
    directory itself is ignored, since children may be re-included."""
    # create a TarInfo object from the file
    tarinfo = tar.gettarinfo(name, arcname)

    if tarinfo is None:
        raise CLIInternalError("tarfile: unsupported type {}".format(name))

    # check if the file/dir is ignored
    ignored, matching_rule_index = ignore_check(
        tarinfo, parent_ignored, parent_matching_rule_index)

    if not ignored:
        # append the tar header and data to the archive
        if tarinfo.isreg():
            with open(name, "rb") as f:
                tar.addfile(tarinfo, f)
        else:
            tar.addfile(tarinfo)

    # even the dir is ignored, its child items can still be included, so continue to scan
    if tarinfo.isdir():
        for f in os.listdir(name):
            _archive_file_recursively(tar, os.path.join(name, f), os.path.join(arcname, f),
                                      parent_ignored=ignored, parent_matching_rule_index=matching_rule_index,
                                      ignore_check=ignore_check)


def check_remote_source_code(source_location):
    """Validate a remote build-context URL (git, HTTP(S) tarball, VSTS, or OCI)
    and return it unchanged; raises CLIInternalError if it doesn't exist."""
    lower_source_location = source_location.lower()

    # git
    if lower_source_location.startswith("git@") or lower_source_location.startswith("git://"):
        return source_location

    # http
    if lower_source_location.startswith("https://") or lower_source_location.startswith("http://") \
       or lower_source_location.startswith("github.com/"):
        isVSTS = any(url in lower_source_location for url in TASK_VALID_VSTS_URLS)
        if isVSTS or re.search(r"\.git(?:#.+)?$", lower_source_location):
            # git url must contain ".git" or be from VSTS/Azure DevOps.
            # This is because Azure DevOps doesn't follow the standard git server convention of putting
            # .git at the end of their URLs, so we have to special case them.
            return source_location
        if not lower_source_location.startswith("github.com/"):
            # Others are tarball
            if requests.head(source_location).status_code < 400:
                return source_location
            raise CLIInternalError("'{}' doesn't exist.".format(source_location))

    # oci
    if lower_source_location.startswith("oci://"):
        return source_location
    raise CLIInternalError("'{}' doesn't exist.".format(source_location))
"{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.App/containerApps/{}/revisions/{}/replicas?api-version={}" + request_url = url_fmt.format( + management_hostname.strip('/'), + sub_id, + resource_group_name, + container_app_name, + revision_name, + STABLE_API_VERSION) + + r = send_raw_request(cmd.cli_ctx, "GET", request_url) + j = r.json() + for replica in j["value"]: + replica_list.append(replica) + + while j.get("nextLink") is not None: + request_url = j["nextLink"] + r = send_raw_request(cmd.cli_ctx, "GET", request_url) + j = r.json() + for replica in j["value"]: + replica_list.append(replica) + + return replica_list + + @classmethod + def get_replica(cls, cmd, resource_group_name, container_app_name, revision_name, replica_name): + management_hostname = cmd.cli_ctx.cloud.endpoints.resource_manager + sub_id = get_subscription_id(cmd.cli_ctx) + url_fmt = "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.App/containerApps/{}/revisions/{}/replicas/{}/?api-version={}" + request_url = url_fmt.format( + management_hostname.strip('/'), + sub_id, + resource_group_name, + container_app_name, + revision_name, + replica_name, + STABLE_API_VERSION) + + r = send_raw_request(cmd.cli_ctx, "GET", request_url) + return r.json() + + @classmethod + def get_auth_token(cls, cmd, resource_group_name, name): + management_hostname = cmd.cli_ctx.cloud.endpoints.resource_manager + sub_id = get_subscription_id(cmd.cli_ctx) + url_fmt = "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.App/containerApps/{}/authtoken?api-version={}" + request_url = url_fmt.format( + management_hostname.strip('/'), + sub_id, + resource_group_name, + name, + STABLE_API_VERSION) + + r = send_raw_request(cmd.cli_ctx, "POST", request_url) + return r.json() + class ManagedEnvironmentClient(): @classmethod diff --git a/src/containerapp/azext_containerapp/_github_oauth.py b/src/containerapp/azext_containerapp/_github_oauth.py index 659d43afc39..96144b2d929 100644 --- 
a/src/containerapp/azext_containerapp/_github_oauth.py +++ b/src/containerapp/azext_containerapp/_github_oauth.py @@ -4,6 +4,7 @@ # -------------------------------------------------------------------------------------------- # pylint: disable=consider-using-f-string +from azure.cli.core.util import open_page_in_browser from azure.cli.core.azclierror import (ValidationError, CLIInternalError, UnclassifiedUserFault) from knack.log import get_logger @@ -24,7 +25,9 @@ ] -def get_github_access_token(cmd, scope_list=None): # pylint: disable=unused-argument +def get_github_access_token(cmd, scope_list=None, token=None): # pylint: disable=unused-argument + if token: + return token if scope_list: for scope in scope_list: if scope not in GITHUB_OAUTH_SCOPES: @@ -52,6 +55,7 @@ def get_github_access_token(cmd, scope_list=None): # pylint: disable=unused-arg expires_in_seconds = int(parsed_response['expires_in'][0]) logger.warning('Please navigate to %s and enter the user code %s to activate and ' 'retrieve your github personal access token', verification_uri, user_code) + open_page_in_browser("https://github.com/login/device") timeout = time.time() + expires_in_seconds logger.warning("Waiting up to '%s' minutes for activation", str(expires_in_seconds // 60)) diff --git a/src/containerapp/azext_containerapp/_help.py b/src/containerapp/azext_containerapp/_help.py index 895c769a2dc..ab4cde705eb 100644 --- a/src/containerapp/azext_containerapp/_help.py +++ b/src/containerapp/azext_containerapp/_help.py @@ -86,6 +86,98 @@ az containerapp list -g MyResourceGroup """ +helps['containerapp exec'] = """ + type: command + short-summary: Open an SSH-like interactive shell within a container app replica + examples: + - name: exec into a container app + text: | + az containerapp exec -n MyContainerapp -g MyResourceGroup + - name: exec into a particular container app replica and revision + text: | + az containerapp exec -n MyContainerapp -g MyResourceGroup --replica MyReplica --revision 
MyRevision + - name: open a bash shell in a containerapp + text: | + az containerapp exec -n MyContainerapp -g MyResourceGroup --command bash +""" + +helps['containerapp browse'] = """ + type: command + short-summary: Open a containerapp in the browser, if possible + examples: + - name: open a containerapp in the browser + text: | + az containerapp browse -n MyContainerapp -g MyResourceGroup +""" + +helps['containerapp up'] = """ + type: command + short-summary: Create or update a container app as well as any associated resources (ACR, resource group, container apps environment, Github Actions, etc.) + examples: + - name: Create a container app from a dockerfile in a Github repo (setting up github actions) + text: | + az containerapp up -n MyContainerapp --repo https://github.com/myAccount/myRepo + - name: Create a container app from a dockerfile in a local directory + text: | + az containerapp up -n MyContainerapp --source . + - name: Create a container app from an image in a registry + text: | + az containerapp up -n MyContainerapp --image myregistry.azurecr.io/myImage:myTag + - name: Create a container app from an image in a registry with ingress enabled and a specified environment + text: | + az containerapp up -n MyContainerapp --image myregistry.azurecr.io/myImage:myTag --ingress external --target-port 80 --environment MyEnv +""" + +helps['containerapp logs'] = """ + type: group + short-summary: Show container app logs +""" + +helps['containerapp logs show'] = """ + type: command + short-summary: Show past logs and/or print logs in real time (with the --follow parameter). Note that the logs are only taken from one revision, replica, and container. 
+ examples: + - name: Fetch the past 20 lines of logs from an app and return + text: | + az containerapp logs show -n MyContainerapp -g MyResourceGroup + - name: Fetch 30 lines of past logs from an app and print logs as they come in + text: | + az containerapp logs show -n MyContainerapp -g MyResourceGroup --follow --tail 30 + - name: Fetch logs for a particular revision, replica, and container + text: | + az containerapp logs show -n MyContainerapp -g MyResourceGroup --replica MyReplica --revision MyRevision --container MyContainer +""" + +# Replica Commands +helps['containerapp replica'] = """ + type: group + short-summary: Manage container app replicas +""" + +helps['containerapp replica list'] = """ + type: command + short-summary: List a container app revision's replicas + examples: + - name: List a container app's replicas in the latest revision + text: | + az containerapp replica list -n MyContainerapp -g MyResourceGroup + - name: List a container app's replicas in a particular revision + text: | + az containerapp replica list -n MyContainerapp -g MyResourceGroup --revision MyRevision +""" + +helps['containerapp replica show'] = """ + type: command + short-summary: Show a container app replica + examples: + - name: Show a replica from the latest revision + text: | + az containerapp replica show -n MyContainerapp -g MyResourceGroup --replica MyReplica + - name: Show a replica from a particular revision + text: | + az containerapp replica show -n MyContainerapp -g MyResourceGroup --replica MyReplica --revision MyRevision +""" + # Revision Commands helps['containerapp revision'] = """ type: group diff --git a/src/containerapp/azext_containerapp/_params.py b/src/containerapp/azext_containerapp/_params.py index 51d2d6c5739..a1f6b3e2471 100644 --- a/src/containerapp/azext_containerapp/_params.py +++ b/src/containerapp/azext_containerapp/_params.py @@ -2,7 +2,7 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License.
See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- -# pylint: disable=line-too-long, too-many-statements, consider-using-f-string, option-length-too-long +# pylint: disable=line-too-long, too-many-statements, consider-using-f-string from knack.arguments import CLIArgumentType @@ -24,15 +24,40 @@ def load_arguments(self, _): c.argument('name', name_type, metavar='NAME', id_part='name', help="The name of the Containerapp.") c.argument('resource_group_name', arg_type=resource_group_name_type) c.argument('location', arg_type=get_location_type(self.cli_ctx)) + c.ignore('disable_warnings') with self.argument_context('containerapp') as c: c.argument('tags', arg_type=tags_type) c.argument('managed_env', validator=validate_managed_env_name_or_id, options_list=['--environment'], help="Name or resource ID of the container app's environment.") c.argument('yaml', type=file_type, help='Path to a .yaml file with the configuration of a container app. All other parameters will be ignored. For an example, see https://docs.microsoft.com/azure/container-apps/azure-resource-manager-api-spec#examples') + with self.argument_context('containerapp exec') as c: + c.argument('container', help="The name of the container to ssh into") + c.argument('replica', help="The name of the replica to ssh into. List replicas with 'az containerapp replica list'. A replica may not exist if there is not traffic to your app.") + c.argument('revision', help="The name of the container app revision to ssh into. 
Defaults to the latest revision.") + c.argument('startup_command', options_list=["--command"], help="The startup command (bash, zsh, sh, etc.).") + c.argument('name', name_type, id_part=None, help="The name of the Containerapp.") + c.argument('resource_group_name', arg_type=resource_group_name_type, id_part=None) + + with self.argument_context('containerapp logs show') as c: + c.argument('follow', help="Print logs in real time if present.", arg_type=get_three_state_flag()) + c.argument('tail', help="The number of past logs to print (0-300)", type=int, default=20) + c.argument('container', help="The name of the container") + c.argument('output_format', options_list=["--format"], help="Log output format", arg_type=get_enum_type(["json", "text"]), default="json") + c.argument('replica', help="The name of the replica. List replicas with 'az containerapp replica list'. A replica may not exist if there is not traffic to your app.") + c.argument('revision', help="The name of the container app revision. Defaults to the latest revision.") + c.argument('name', name_type, id_part=None, help="The name of the Containerapp.") + c.argument('resource_group_name', arg_type=resource_group_name_type, id_part=None) + + # Replica + with self.argument_context('containerapp replica') as c: + c.argument('replica', help="The name of the replica. ") + c.argument('revision', help="The name of the container app revision. Defaults to the latest revision.") + c.argument('name', name_type, id_part=None, help="The name of the Containerapp.") + c.argument('resource_group_name', arg_type=resource_group_name_type, id_part=None) + # Container with self.argument_context('containerapp', arg_group='Container') as c: - c.argument('image', type=str, options_list=['--image', '-i'], help="Container image, e.g. 
publisher/image-name:tag.") c.argument('container_name', type=str, help="Name of the container.") c.argument('cpu', type=float, validator=validate_cpu, help="Required CPU in cores from 0.25 - 2.0, e.g. 0.5") c.argument('memory', type=str, validator=validate_memory, help="Required memory from 0.5 - 4.0 ending with \"Gi\", e.g. 1.0Gi") @@ -81,6 +106,12 @@ def load_arguments(self, _): c.argument('user_assigned', nargs='+', help="Space-separated user identities to be assigned.") c.argument('system_assigned', help="Boolean indicating whether to assign system-assigned identity.") + with self.argument_context('containerapp create', arg_group='Container') as c: + c.argument('image', type=str, options_list=['--image', '-i'], help="Container image, e.g. publisher/image-name:tag.") + + with self.argument_context('containerapp update', arg_group='Container') as c: + c.argument('image', type=str, options_list=['--image', '-i'], help="Container image, e.g. publisher/image-name:tag.") + with self.argument_context('containerapp scale') as c: c.argument('min_replicas', type=int, help="The minimum number of replicas.") c.argument('max_replicas', type=int, help="The maximum number of replicas.") @@ -146,6 +177,7 @@ def load_arguments(self, _): with self.argument_context('containerapp revision copy') as c: c.argument('from_revision', type=str, help='Revision to copy from. Default: latest revision.') + c.argument('image', type=str, options_list=['--image', '-i'], help="Container image, e.g. 
publisher/image-name:tag.") with self.argument_context('containerapp ingress') as c: c.argument('allow_insecure', help='Allow insecure connections for ingress traffic.') @@ -185,3 +217,27 @@ def load_arguments(self, _): with self.argument_context('containerapp revision list') as c: c.argument('name', id_part=None) + + with self.argument_context('containerapp up') as c: + c.argument('resource_group_name', configured_default='resource_group_name', id_part=None) + c.argument('location', configured_default='location') + c.argument('name', configured_default='name', id_part=None) + c.argument('managed_env', configured_default='managed_env') + c.argument('registry_server', configured_default='registry_server') + c.argument('source', type=str, help='Local directory path to upload to Azure container registry.') + c.argument('image', type=str, options_list=['--image', '-i'], help="Container image, e.g. publisher/image-name:tag.") + c.argument('browse', help='Open the app in a web browser after creation and deployment, if possible.') + + with self.argument_context('containerapp up', arg_group='Log Analytics (Environment)') as c: + c.argument('logs_customer_id', type=str, options_list=['--logs-workspace-id'], help='Name or resource ID of the Log Analytics workspace to send diagnostics logs to. You can use \"az monitor log-analytics workspace create\" to create one. Extra billing may apply.') + c.argument('logs_key', type=str, options_list=['--logs-workspace-key'], help='Log Analytics workspace key to configure your Log Analytics workspace. You can use \"az monitor log-analytics workspace get-shared-keys\" to retrieve the key.') + c.ignore('no_wait') + + with self.argument_context('containerapp up', arg_group='Github Repo') as c: + c.argument('repo', help='Create an app via Github Actions. In the format: https://github.com// or /') + c.argument('token', help='A Personal Access Token with write access to the specified repository. 
For more information: https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line. If missing (and using --repo), a browser page will be opened to authenticate with Github.') + c.argument('branch', options_list=['--branch', '-b'], help='The branch of the GitHub repo. Defaults to "main"') + c.argument('context_path', help='Path in the repo from which to run the docker build. Defaults to "./". Dockerfile is assumed to be named "Dockerfile" and in this directory.') + c.argument('service_principal_client_id', help='The service principal client ID. Used by Github Actions to authenticate with Azure.', options_list=["--service-principal-client-id", "--sp-cid"]) + c.argument('service_principal_client_secret', help='The service principal client secret. Used by Github Actions to authenticate with Azure.', options_list=["--service-principal-client-secret", "--sp-sec"]) + c.argument('service_principal_tenant_id', help='The service principal tenant ID. Used by Github Actions to authenticate with Azure.', options_list=["--service-principal-tenant-id", "--sp-tid"]) diff --git a/src/containerapp/azext_containerapp/_ssh_utils.py b/src/containerapp/azext_containerapp/_ssh_utils.py new file mode 100644 index 00000000000..a5a77a601c0 --- /dev/null +++ b/src/containerapp/azext_containerapp/_ssh_utils.py @@ -0,0 +1,179 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
"""Helpers for 'az containerapp exec': tunnel the local terminal to a container
over the Container Apps SSH proxy websocket."""

import os
import sys
import time
import threading
import urllib
import requests
import websocket

from knack.log import get_logger
from azure.cli.core.azclierror import CLIInternalError
from azure.cli.core.commands.client_factory import get_subscription_id

from ._clients import ContainerAppClient
from ._utils import safe_get, is_platform_windows

# pylint: disable=import-error,ungrouped-imports
if is_platform_windows():
    import msvcrt
    from azure.cli.command_modules.container._vt_helper import (enable_vt_mode, _get_conout_mode,
                                                                _set_conout_mode, _get_conin_mode,
                                                                _set_conin_mode)

logger = get_logger(__name__)

# SSH control byte values for container app proxy
SSH_PROXY_FORWARD = 0
SSH_PROXY_INFO = 1
SSH_PROXY_ERROR = 2

# SSH control byte values for container app cluster
SSH_CLUSTER_STDIN = 0
SSH_CLUSTER_STDOUT = 1
SSH_CLUSTER_STDERR = 2

# forward byte + stdin byte
SSH_INPUT_PREFIX = b"\x00\x00"

# forward byte + terminal resize byte
SSH_TERM_RESIZE_PREFIX = b"\x00\x04"

SSH_DEFAULT_ENCODING = "utf-8"
SSH_BACKUP_ENCODING = "latin_1"

SSH_CTRL_C_MSG = b"\x00\x00\x03"


class WebSocketConnection:
    """An authenticated websocket to a container's remote-exec endpoint.

    Fetches an auth token for the app, connects, and (on Windows) remembers the
    console modes so ``disconnect`` can restore them.
    """

    def __init__(self, cmd, resource_group_name, name, revision, replica, container, startup_command):
        token_response = ContainerAppClient.get_auth_token(cmd, resource_group_name, name)
        self._token = token_response["properties"]["token"]
        self._logstream_endpoint = token_response["properties"]["logStreamEndpoint"]
        self._url = self._get_url(cmd=cmd, resource_group_name=resource_group_name, name=name, revision=revision,
                                  replica=replica, container=container, startup_command=startup_command)
        self._socket = websocket.WebSocket(enable_multithread=True)
        logger.warning("Attempting to connect to %s", self._url)
        self._socket.connect(self._url, header=[f"Authorization: Bearer {self._token}"])

        self.is_connected = True
        # Saved Windows console modes; restored on disconnect (None on non-Windows).
        self._windows_conout_mode = None
        self._windows_conin_mode = None
        if is_platform_windows():
            self._windows_conout_mode = _get_conout_mode()
            self._windows_conin_mode = _get_conin_mode()

    def _get_url(self, cmd, resource_group_name, name, revision, replica, container, startup_command):
        """Build the wss:// exec URL from the log-stream endpoint's host."""
        sub = get_subscription_id(cmd.cli_ctx)
        base_url = self._logstream_endpoint
        # The exec proxy lives on the same host as the log-stream endpoint.
        proxy_api_url = base_url[:base_url.index("/subscriptions/")].replace("https://", "")
        encoded_cmd = urllib.parse.quote_plus(startup_command)

        return (f"wss://{proxy_api_url}/subscriptions/{sub}/resourceGroups/{resource_group_name}/containerApps/{name}"
                f"/revisions/{revision}/replicas/{replica}/containers/{container}/exec"
                f"?command={encoded_cmd}")

    def disconnect(self):
        """Close the socket and restore the Windows console modes if saved."""
        logger.warning("Disconnecting...")
        self.is_connected = False
        self._socket.close()
        if self._windows_conout_mode and self._windows_conin_mode:
            _set_conout_mode(self._windows_conout_mode)
            _set_conin_mode(self._windows_conin_mode)

    def send(self, *args, **kwargs):
        return self._socket.send(*args, **kwargs)

    def recv(self, *args, **kwargs):
        return self._socket.recv(*args, **kwargs)


def _decode_and_output_to_terminal(connection: WebSocketConnection, response, encodings):
    """Decode a forwarded stdout/stderr frame (payload starts at byte 2) and print it.

    Tries each encoding in order; if none succeeds, disconnects, logs diagnostics,
    and raises CLIInternalError.
    """
    for i, encoding in enumerate(encodings):
        try:
            print(response[2:].decode(encoding), end="", flush=True)
            break
        except UnicodeDecodeError as e:
            if i == len(encodings) - 1:  # ran out of encodings to try
                connection.disconnect()
                logger.info("Proxy Control Byte: %s", response[0])
                logger.info("Cluster Control Byte: %s", response[1])
                logger.info("Hexdump: %s", response[2:].hex())
                raise CLIInternalError("Failed to decode server data") from e
            logger.info("Failed to encode with encoding %s", encoding)


def read_ssh(connection: WebSocketConnection, response_encodings):
    """Pump frames from the websocket to the terminal until disconnected.

    response_encodings is the ordered list of Unicode encodings to try to decode
    with before raising an exception. Frame layout: byte 0 is the proxy status
    (info / error / forward), byte 1 the cluster stream (stdout / stderr).
    """
    while connection.is_connected:
        response = connection.recv()
        if not response:
            connection.disconnect()
        else:
            logger.info("Received raw response %s", response.hex())
            proxy_status = response[0]
            if proxy_status == SSH_PROXY_INFO:
                print(f"INFO: {response[1:].decode(SSH_DEFAULT_ENCODING)}")
            elif proxy_status == SSH_PROXY_ERROR:
                print(f"ERROR: {response[1:].decode(SSH_DEFAULT_ENCODING)}")
            elif proxy_status == SSH_PROXY_FORWARD:
                control_byte = response[1]
                if control_byte in (SSH_CLUSTER_STDOUT, SSH_CLUSTER_STDERR):
                    _decode_and_output_to_terminal(connection, response, response_encodings)
                else:
                    connection.disconnect()
                    raise CLIInternalError("Unexpected message received")


def _send_stdin(connection: WebSocketConnection, getch_fn):
    """Forward local keystrokes (read by getch_fn) to the remote container."""
    while connection.is_connected:
        _resize_terminal(connection)
        ch = getch_fn()
        _resize_terminal(connection)
        if connection.is_connected:
            connection.send(b"".join([SSH_INPUT_PREFIX, ch]))


def _resize_terminal(connection: WebSocketConnection):
    """Tell the remote side the current local terminal dimensions."""
    size = os.get_terminal_size()
    if connection.is_connected:
        connection.send(b"".join([SSH_TERM_RESIZE_PREFIX,
                                  f'{{"Width": {size.columns}, '
                                  f'"Height": {size.lines}}}'.encode(SSH_DEFAULT_ENCODING)]))


def _getch_unix():
    """Read one character from stdin (blocking) and return it as bytes."""
    return sys.stdin.read(1).encode(SSH_DEFAULT_ENCODING)


def _getch_windows():
    """Poll the Windows console for one keypress and return it as bytes."""
    while not msvcrt.kbhit():
        time.sleep(0.01)
    return msvcrt.getch()


def ping_container_app(app):
    """Best-effort GET against the app's ingress FQDN (warms up the replica)."""
    site = safe_get(app, "properties", "configuration", "ingress", "fqdn")
    if site:
        resp = requests.get(f'https://{site}')
        if not resp.ok:
            # FIX: was a plain string literal missing its f-prefix, so the
            # status code was never interpolated into the log message.
            logger.info("Got bad status pinging app: %s", resp.status_code)
    else:
        logger.info("Could not fetch site external URL")


def get_stdin_writer(connection: WebSocketConnection):
    """Return a (not yet started) thread that forwards local stdin to the connection."""
    if not is_platform_windows():
        import tty
        tty.setcbreak(sys.stdin.fileno())  # needed to prevent printing arrow key characters
        writer = threading.Thread(target=_send_stdin, args=(connection, _getch_unix))
    else:
        enable_vt_mode()  # needed for interactive commands (ie vim)
        writer = threading.Thread(target=_send_stdin, args=(connection, _getch_windows))

    return writer
class ResourceGroup:
    """An Azure resource group as seen by 'az containerapp up'.

    Tracks whether the group exists so the up flow can lazily create it.
    """

    def __init__(self, cmd, name: str, location: str, exists: bool = None):
        self.cmd = cmd
        self.name = name
        # Normalize to a region where Container Apps is available.
        self.location = _get_default_containerapps_location(cmd, location)
        self.exists = exists

        self.check_exists()

    def create(self):
        """Create the group, generating a default name from the profile user if needed."""
        if not self.name:
            self.name = get_randomized_name(get_profile_username())
        g = create_resource_group(self.cmd, self.name, self.location)
        self.exists = True
        return g

    def _get(self):
        return get_resource_group(self.cmd, self.name)

    def get(self):
        """Return the resource group, or None if it cannot be fetched (best-effort)."""
        r = None
        try:
            r = self._get()
        except:  # pylint: disable=bare-except
            pass
        return r

    def check_exists(self) -> bool:
        """Refresh and return self.exists (False when no name is set yet)."""
        if self.name is None:
            self.exists = False
        else:
            self.exists = check_resource_group_exists(self.cmd, self.name)
        return self.exists

    def create_if_needed(self):
        # FIX: corrected "resoure" -> "resource" in both user-facing messages.
        if not self.check_exists():
            logger.warning(f"Creating resource group '{self.name}'")
            self.create()
        else:
            logger.warning(f"Using resource group '{self.name}'")  # TODO use .info()


class Resource:
    """Base for lazily-created Azure resources used by 'az containerapp up'.

    Subclasses implement create()/_get(); this class provides best-effort
    existence checks and create-if-needed orchestration.
    """

    def __init__(
        self, cmd, name: str, resource_group: "ResourceGroup", exists: bool = None
    ):
        self.cmd = cmd
        self.name = name
        self.resource_group = resource_group
        self.exists = exists

        self.check_exists()

    def create(self, *args, **kwargs):
        raise NotImplementedError()

    def _get(self):
        raise NotImplementedError()

    def get(self):
        """Return the resource, or None if it cannot be fetched (best-effort)."""
        r = None
        try:
            r = self._get()
        except:  # pylint: disable=bare-except
            pass
        return r

    def check_exists(self):
        """Refresh and return self.exists; False until both names are known."""
        if self.name is None or self.resource_group.name is None:
            self.exists = False
        else:
            self.exists = self.get() is not None
        return self.exists

    def create_if_needed(self, *args, **kwargs):
        if not self.check_exists():
            logger.warning(
                f"Creating {type(self).__name__} '{self.name}' in resource group {self.resource_group.name}"
            )
            self.create(*args, **kwargs)
        else:
            logger.warning(
                f"Using {type(self).__name__} '{self.name}' in resource group {self.resource_group.name}"
            )  # TODO use .info()


class ContainerAppEnvironment(Resource):
    """A Container Apps managed environment; name may be a plain name or a resource ID."""

    def __init__(
        self,
        cmd,
        name: str,
        resource_group: "ResourceGroup",
        exists: bool = None,
        location=None,
        logs_key=None,
        logs_customer_id=None,
    ):

        super().__init__(cmd, name, resource_group, exists)
        # A full resource ID overrides both the name and the resource group.
        if is_valid_resource_id(name):
            self.name = parse_resource_id(name)["name"]
            rg = parse_resource_id(name)["resource_group"]
            if resource_group.name != rg:
                self.resource_group = ResourceGroup(cmd, rg, location)
        self.location = _get_default_containerapps_location(cmd, location)
        self.logs_key = logs_key
        self.logs_customer_id = logs_customer_id

    def set_name(self, name_or_rid):
        """Set the environment name; a resource ID also updates the resource group."""
        if is_valid_resource_id(name_or_rid):
            self.name = parse_resource_id(name_or_rid)["name"]
            rg = parse_resource_id(name_or_rid)["resource_group"]
            if self.resource_group.name != rg:
                self.resource_group = ResourceGroup(
                    self.cmd,
                    rg,
                    _get_default_containerapps_location(self.cmd, self.location),
                )
        else:
            self.name = name_or_rid

    def _get(self):
        return ManagedEnvironmentClient.show(
            self.cmd, self.resource_group.name, self.name
        )

    def create(self, app_name):
        """Create the environment, defaulting the name to '<app>-env' (underscores stripped)."""
        if self.name is None:
            self.name = "{}-env".format(app_name).replace("_", "-")
        env = create_managed_environment(
            self.cmd,
            self.name,
            location=self.location,
            resource_group_name=self.resource_group.name,
            logs_key=self.logs_key,
            logs_customer_id=self.logs_customer_id,
            disable_warnings=True,
        )
        self.exists = True
        return env

    def get_rid(self):
        """Return the environment's full ARM resource ID, building it if needed."""
        rid = self.name
        if not is_valid_resource_id(self.name):
            rid = resource_id(
                subscription=get_subscription_id(self.cmd.cli_ctx),
                resource_group=self.resource_group.name,
                namespace="Microsoft.App",
                type="managedEnvironments",
                name=self.name,
            )
        return rid
class AzureContainerRegistry(Resource):
    """Thin holder for an ACR's name and resource group.

    NOTE(review): deliberately skips Resource.__init__, so no existence check
    runs and ``cmd``/``exists`` are never set on instances of this type.
    """

    def __init__(self, name: str, resource_group: "ResourceGroup"):  # pylint: disable=super-init-not-called

        self.name = name
        self.resource_group = resource_group


class ContainerApp(Resource):  # pylint: disable=too-many-instance-attributes
    """A container app plus everything 'az containerapp up' needs to create or
    update it: image, environment, target port, registry credentials, env vars,
    and ingress settings."""

    def __init__(
        self,
        cmd,
        name: str,
        resource_group: "ResourceGroup",
        exists: bool = None,
        image=None,
        env: "ContainerAppEnvironment" = None,
        target_port=None,
        registry_server=None,
        registry_user=None,
        registry_pass=None,
        env_vars=None,
        ingress=None,
    ):

        super().__init__(cmd, name, resource_group, exists)
        self.image = image
        self.env = env
        self.target_port = target_port
        self.registry_server = registry_server
        self.registry_user = registry_user
        self.registry_pass = registry_pass
        self.env_vars = env_vars
        self.ingress = ingress

        # Set later by _get_registry_details when a new ACR must be provisioned.
        self.should_create_acr = False
        self.acr: "AzureContainerRegistry" = None

    def _get(self):
        return ContainerAppClient.show(self.cmd, self.resource_group.name, self.name)

    def create(self, no_registry=False):
        """Create or update the app via containerapp_up_logic.

        no_registry: don't pass in a registry during create even if the app has one
        (used for GH actions, where the workflow supplies the registry instead).
        """
        if get_container_app_if_exists(self.cmd, self.resource_group.name, self.name):
            logger.warning(
                f"Updating Containerapp {self.name} in resource group {self.resource_group.name}"
            )
        else:
            logger.warning(
                f"Creating Containerapp {self.name} in resource group {self.resource_group.name}"
            )

        return containerapp_up_logic(
            cmd=self.cmd,
            name=self.name,
            resource_group_name=self.resource_group.name,
            image=self.image,
            managed_env=self.env.get_rid(),
            target_port=self.target_port,
            registry_server=None if no_registry else self.registry_server,
            registry_pass=None if no_registry else self.registry_pass,
            registry_user=None if no_registry else self.registry_user,
            env_vars=self.env_vars,
            ingress=self.ingress,
        )

    def create_acr_if_needed(self):
        # should_create_acr is decided earlier (see _get_registry_details).
        if self.should_create_acr:
            logger.warning(
                f"Creating Azure Container Registry {self.acr.name} in resource group "
                f"{self.acr.resource_group.name}"
            )
            self.create_acr()

    def create_acr(self):
        """Create the ACR named by self.registry_server and cache its admin credentials."""
        registry_rg = self.resource_group
        url = self.registry_server
        # registry_server looks like "<name>.azurecr.io"; strip the suffix.
        registry_name = url[: url.rindex(".azurecr.io")]
        registry_def = create_new_acr(
            self.cmd, registry_name, registry_rg.name, self.env.location
        )
        self.registry_server = registry_def.login_server

        if not self.acr:
            self.acr = AzureContainerRegistry(registry_name, registry_rg)

        self.registry_user, self.registry_pass, _ = _get_acr_cred(
            self.cmd.cli_ctx, registry_name
        )

    def run_acr_build(self, dockerfile, source, quiet=False):
        """Queue an ACR build of `source` and point self.image at the resulting tag."""
        image_name = self.image if self.image is not None else self.name
        from datetime import datetime

        now = datetime.now()
        # Add version tag for acr image (timestamp with separators stripped).
        image_name += ":{}".format(
            str(now).replace(" ", "").replace("-", "").replace(".", "").replace(":", "")
        )

        self.image = self.registry_server + "/" + image_name
        queue_acr_build(
            self.cmd,
            self.acr.resource_group.name,
            self.acr.name,
            image_name,
            source,
            dockerfile,
            quiet,
        )


def _create_service_principal(cmd, resource_group_name, env_resource_group_name):
    """Create a contributor service principal scoped to the app's resource group
    (and the environment's group, when different). Returns (appId, password, tenant)."""
    logger.warning(
        "No valid service principal provided. Creating a new service principal..."
    )
    scopes = [
        f"/subscriptions/{get_subscription_id(cmd.cli_ctx)}/resourceGroups/{resource_group_name}"
    ]
    if (
        env_resource_group_name is not None
        and env_resource_group_name != resource_group_name
    ):
        scopes.append(
            f"/subscriptions/{get_subscription_id(cmd.cli_ctx)}/resourceGroups/{env_resource_group_name}"
        )
    sp = create_service_principal_for_rbac(cmd, scopes=scopes, role="contributor")

    logger.info(f"Created service principal: {sp['displayName']}")

    return sp["appId"], sp["password"], sp["tenant"]
+ ) + scopes = [ + f"/subscriptions/{get_subscription_id(cmd.cli_ctx)}/resourceGroups/{resource_group_name}" + ] + if ( + env_resource_group_name is not None + and env_resource_group_name != resource_group_name + ): + scopes.append( + f"/subscriptions/{get_subscription_id(cmd.cli_ctx)}/resourceGroups/{env_resource_group_name}" + ) + sp = create_service_principal_for_rbac(cmd, scopes=scopes, role="contributor") + + logger.info(f"Created service principal: {sp['displayName']}") + + return sp["appId"], sp["password"], sp["tenant"] + + +def _get_or_create_sp( # pylint: disable=inconsistent-return-statements + cmd, + resource_group_name, + env_resource_group_name, + name, + service_principal_client_id, + service_principal_client_secret, + service_principal_tenant_id, +): + try: + GitHubActionClient.show( + cmd=cmd, resource_group_name=resource_group_name, name=name + ) + return ( + service_principal_client_id, + service_principal_client_secret, + service_principal_tenant_id, + ) + except: # pylint: disable=bare-except + service_principal = None + + # TODO if possible, search for SPs with the right credentials + # I haven't found a way to get SP creds + secrets yet from the API + + if not service_principal: + return _create_service_principal( + cmd, resource_group_name, env_resource_group_name + ) + # return client_id, secret, tenant_id + + +def _get_dockerfile_content_from_repo( # pylint: disable=inconsistent-return-statements + repo_url, branch, token, context_path, dockerfile +): + from github import Github + + g = Github(token) + context_path = context_path or "." 
+ repo = repo_url_to_name(repo_url) + r = g.get_repo(repo) + files = r.get_contents(context_path, ref=branch) + for f in files: + if f.path == dockerfile or f.path.endswith(f"/{dockerfile}"): + resp = requests.get(f.download_url) + if resp.ok and resp.content: + return resp.content.decode("utf-8").split("\n") + + +def _get_ingress_and_target_port(ingress, target_port, dockerfile_content: "list[str]"): + if not target_port and not ingress and dockerfile_content is not None: # pylint: disable=too-many-nested-blocks + for line in dockerfile_content: + if line: + line = ( + line.upper() + .strip() + .replace("/TCP", "") + .replace("/UDP", "") + .replace("\n", "") + ) + if line and line[0] != "#": + if "EXPOSE" in line: + parts = line.split(" ") + for i, p in enumerate(parts[:-1]): + if "EXPOSE" in p and is_int(parts[i + 1]): + target_port = parts[i + 1] + ingress = "external" + logger.warning( + "Adding external ingress port {} based on dockerfile expose.".format( + target_port + ) + ) + ingress = "external" if target_port and not ingress else ingress + return ingress, target_port + + +def _validate_up_args(source, image, repo): + if not source and not image and not repo: + raise RequiredArgumentMissingError( + "You must specify either --source, --repo, or --image" + ) + if source and repo: + raise MutuallyExclusiveArgumentError( + "Cannot use --source and --repo togther. " + "Can either deploy from a local directory or a Github repo" + ) + + +def _reformat_image(source, repo, image): + if source and (image or repo): + image = image.split("/")[-1] # if link is given + image = image.replace(":", "") + return image + + +def _get_dockerfile_content_local(source, dockerfile): + lines = [] + if source: + dockerfile_location = f"{source}/{dockerfile}" + try: + with open(dockerfile_location, "r") as fh: # pylint: disable=unspecified-encoding + lines = list(fh) + except Exception as e: + raise InvalidArgumentValueError( + "Cannot open specified Dockerfile. 
def _get_dockerfile_content(repo, branch, token, source, context_path, dockerfile):
    """Return the Dockerfile's lines from local source or a GitHub repo; [] if neither given."""
    if source:
        return _get_dockerfile_content_local(source, dockerfile)
    elif repo:
        return _get_dockerfile_content_from_repo(
            repo, branch, token, source, context_path, dockerfile
        )
    return []


def _get_app_env_and_group(
    cmd, name, resource_group: "ResourceGroup", env: "ContainerAppEnvironment"
):
    """When no resource group was passed, look up an existing app with this name
    and adopt its resource group and environment (in place).

    Raises ValidationError when the name is ambiguous across the subscription.
    """
    if not resource_group.name and not resource_group.exists:
        matched_apps = [
            c for c in list_containerapp(cmd) if c["name"].lower() == name.lower()
        ]
        if len(matched_apps) == 1:
            if env.name:
                logger.warning(
                    "User passed custom environment name for an existing containerapp. Using existing environment."
                )
            resource_group.name = parse_resource_id(matched_apps[0]["id"])[
                "resource_group"
            ]
            env.set_name(matched_apps[0]["properties"]["managedEnvironmentId"])
        elif len(matched_apps) > 1:
            raise ValidationError(
                f"There are multiple containerapps with name {name} on the subscription. "
                "Please specify which resource group your Containerapp is in."
            )


def _get_env_and_group_from_log_analytics(
    cmd,
    resource_group_name,
    env: "ContainerAppEnvironment",
    resource_group: "ResourceGroup",
    logs_customer_id,
    location,
):
    """When no environment was passed, pick an existing one that matches the
    Log Analytics workspace / location filters, updating env and resource_group
    in place. Silently does nothing when no candidate is found."""
    # resource_group_name is the value the user passed in (if present)
    if not env.name:
        if (resource_group_name == resource_group.name and resource_group.exists) or (
            not resource_group_name
        ):
            env_list = list_managed_environments(
                cmd=cmd, resource_group_name=resource_group_name
            )
            if logs_customer_id:
                env_list = [
                    e
                    for e in env_list
                    if safe_get(
                        e,
                        "properties",
                        "appLogsConfiguration",
                        "logAnalyticsConfiguration",
                        "customerId",
                    )
                    == logs_customer_id
                ]
            if location:
                env_list = [e for e in env_list if e["location"] == location]
            if env_list:
                # TODO check how many CA in env
                env_details = parse_resource_id(env_list[0]["id"])
                env.set_name(env_details["name"])
                resource_group.name = env_details["resource_group"]


def _get_acr_from_image(cmd, app):
    """If the app's image lives in an ACR, populate app.registry_server,
    credentials, and app.acr — looking up admin credentials when none were given."""
    if app.image is not None and "azurecr.io" in app.image:
        app.registry_server = app.image.split("/")[
            0
        ]  # TODO what if this conflicts with registry_server param?
        parsed = urlparse(app.image)
        registry_name = (parsed.netloc if parsed.scheme else parsed.path).split(".")[0]
        if app.registry_user is None or app.registry_pass is None:
            logger.info(
                "No credential was provided to access Azure Container Registry. Trying to look up..."
            )
            try:
                app.registry_user, app.registry_pass, registry_rg = _get_acr_cred(
                    cmd.cli_ctx, registry_name
                )
                app.acr = AzureContainerRegistry(
                    registry_name, ResourceGroup(cmd, registry_rg, None, None)
                )
            except Exception as ex:
                raise RequiredArgumentMissingError(
                    "Failed to retrieve credentials for container registry. Please provide the registry username and password"
                ) from ex
        else:
            acr_rg = _get_acr_rg(app)
            app.acr = AzureContainerRegistry(
                name=registry_name,
                resource_group=ResourceGroup(app.cmd, acr_rg, None, None),
            )
def _get_registry_from_app(app):
    """Adopt the app's existing registry server when it has exactly one configured."""
    containerapp_def = app.get()
    if containerapp_def:
        if (
            len(
                safe_get(
                    containerapp_def,
                    "properties",
                    "configuration",
                    "registries",
                    default=[],
                )
            )
            == 1
        ):
            app.registry_server = containerapp_def["properties"]["configuration"][
                "registries"
            ][0]["server"]


def _get_acr_rg(app):
    """Look up the resource group that holds the ACR named by app.registry_server."""
    registry_name = app.registry_server[: app.registry_server.rindex(".azurecr.io")]
    client = get_mgmt_service_client(
        app.cmd.cli_ctx, ContainerRegistryManagementClient
    ).registries
    return parse_resource_id(acr_show(app.cmd, client, registry_name).id)[
        "resource_group"
    ]


def _get_registry_details(cmd, app: "ContainerApp"):
    """Resolve ACR details for the app: use the provided registry (looking up
    credentials if missing) or derive a new, deterministic ACR name and mark it
    for creation (app.should_create_acr)."""
    registry_rg = None
    registry_name = None
    if app.registry_server:
        if "azurecr.io" not in app.registry_server:
            raise ValidationError(
                "Cannot supply non-Azure registry when using --source."
            )
        parsed = urlparse(app.registry_server)
        registry_name = (parsed.netloc if parsed.scheme else parsed.path).split(".")[0]
        if app.registry_user is None or app.registry_pass is None:
            logger.info(
                "No credential was provided to access Azure Container Registry. Trying to look up..."
            )
            try:
                app.registry_user, app.registry_pass, registry_rg = _get_acr_cred(
                    cmd.cli_ctx, registry_name
                )
            except Exception as ex:
                raise RequiredArgumentMissingError(
                    "Failed to retrieve credentials for container registry. Please provide the registry username and password"
                ) from ex
        else:
            registry_rg = _get_acr_rg(app)
    else:
        registry_rg = app.resource_group.name
        user = get_profile_username()
        # Derive a deterministic registry name from (rg, user, app) so reruns reuse it.
        registry_name = app.name.replace("-", "").lower()
        registry_name = (
            registry_name
            + str(hash((registry_rg, user, app.name)))
            .replace("-", "")
            .replace(".", "")[:10]
        )  # cap at 15 characters total
        registry_name = (
            f"ca{registry_name}acr"  # ACR names must start + end in a letter
        )
        app.registry_server = registry_name + ".azurecr.io"
        app.should_create_acr = True

    app.acr = AzureContainerRegistry(
        registry_name, ResourceGroup(cmd, registry_rg, None, None)
    )


# attempt to populate defaults for managed env, RG, ACR, etc
def _set_up_defaults(
    cmd,
    name,
    resource_group_name,
    logs_customer_id,
    location,
    resource_group: "ResourceGroup",
    env: "ContainerAppEnvironment",
    app: "ContainerApp",
):
    """Fill in missing resource group / environment / ACR settings in place."""
    # If no RG passed in and a singular app exists with the same name, get its env and rg
    _get_app_env_and_group(cmd, name, resource_group, env)

    # If no env passed in (and not creating a new RG), then try getting an env by location / log analytics ID
    _get_env_and_group_from_log_analytics(
        cmd, resource_group_name, env, resource_group, logs_customer_id, location
    )

    # get ACR details from --image, if possible
    _get_acr_from_image(cmd, app)


def _create_github_action(
    app: "ContainerApp",
    env: "ContainerAppEnvironment",
    service_principal_client_id,
    service_principal_client_secret,
    service_principal_tenant_id,
    branch,
    token,
    repo,
    context_path,
):
    """Create (or update) the GitHub Actions workflow for the app, creating a
    service principal first when none was supplied."""
    sp = _get_or_create_sp(
        app.cmd,
        app.resource_group.name,
        env.resource_group.name,
        app.name,
        service_principal_client_id,
        service_principal_client_secret,
        service_principal_tenant_id,
    )
    (
        service_principal_client_id,
        service_principal_client_secret,
        service_principal_tenant_id,
    ) = sp
    create_or_update_github_action(
        cmd=app.cmd,
        name=app.name,
        resource_group_name=app.resource_group.name,
        repo_url=repo,
        registry_url=app.registry_server,
        registry_username=app.registry_user,
        registry_password=app.registry_pass,
        branch=branch,
        token=token,
        login_with_github=False,
        service_principal_client_id=service_principal_client_id,
        service_principal_client_secret=service_principal_client_secret,
        service_principal_tenant_id=service_principal_tenant_id,
        image=app.image,
        context_path=context_path,
    )


def up_output(app):
    """Print the post-deployment summary: browse URL and follow-up commands."""
    url = safe_get(
        ContainerAppClient.show(app.cmd, app.resource_group.name, app.name),
        "properties",
        "configuration",
        "ingress",
        "fqdn",
    )
    if url and not url.startswith("http"):
        url = f"http://{url}"

    # FIX: message read "created a deployed"; corrected to "created and deployed".
    logger.warning(
        f"\nYour container app ({app.name}) has been created and deployed! Congrats! \n"
    )
    url and logger.warning(f"Browse to your container app at: {url} \n")
    # FIX: the log-streaming command is "az containerapp logs show" (see HISTORY),
    # not "az containerapp logs".
    logger.warning(
        f"Stream logs for your container with: az containerapp logs show -n {app.name} -g {app.resource_group.name} \n"
    )
    logger.warning(
        f"See full output using: az containerapp show -n {app.name} -g {app.resource_group.name} \n"
    )
# original implementation at azure.cli.command_modules.role.custom.create_service_principal_for_rbac
# reimplemented to remove unnecessary warning statements
def create_service_principal_for_rbac(  # pylint:disable=too-many-statements,too-many-locals, too-many-branches, unused-argument, inconsistent-return-statements
        cmd, name=None, years=None, create_cert=False, cert=None, scopes=None, role=None,
        show_auth_for_sdk=None, skip_assignment=False, keyvault=None):
    """Create (or idempotently update) a service principal, optionally assigning
    `role` over `scopes`. Returns a dict with appId/password/displayName/tenant,
    or prints SDK-auth JSON and returns None when show_auth_for_sdk is set."""
    from azure.cli.command_modules.role.custom import (_graph_client_factory, TZ_UTC, _process_service_principal_creds,
                                                       _validate_app_dates, create_application,
                                                       _create_service_principal, _create_role_assignment,
                                                       _error_caused_by_role_assignment_exists)

    if role and not scopes or not role and scopes:
        raise ArgumentUsageError("Usage error: To create role assignments, specify both --role and --scopes.")

    graph_client = _graph_client_factory(cmd.cli_ctx)

    years = years or 1
    _RETRY_TIMES = 36
    existing_sps = None

    if not name:
        # No name is provided, create a new one.
        # FIX: this module does `from datetime import datetime`, so the upstream
        # `datetime.datetime.utcnow()` raised AttributeError here (this branch runs
        # on every call from _create_service_principal, which passes no name).
        app_display_name = 'azure-cli-' + datetime.utcnow().strftime('%Y-%m-%d-%H-%M-%S')
    else:
        app_display_name = name
        # patch existing app with the same displayName to make the command idempotent
        query_exp = "displayName eq '{}'".format(name)
        existing_sps = list(graph_client.service_principals.list(filter=query_exp))

    # FIX: same `datetime.datetime` -> `datetime` correction as above.
    app_start_date = datetime.now(TZ_UTC)
    app_end_date = app_start_date + relativedelta(years=years or 1)

    password, public_cert_string, cert_file, cert_start_date, cert_end_date = \
        _process_service_principal_creds(cmd.cli_ctx, years, app_start_date, app_end_date, cert, create_cert,
                                         None, keyvault)

    app_start_date, app_end_date, cert_start_date, cert_end_date = \
        _validate_app_dates(app_start_date, app_end_date, cert_start_date, cert_end_date)

    aad_application = create_application(cmd,
                                         display_name=app_display_name,
                                         available_to_other_tenants=False,
                                         password=password,
                                         key_value=public_cert_string,
                                         start_date=app_start_date,
                                         end_date=app_end_date,
                                         credential_description='rbac')
    # pylint: disable=no-member
    app_id = aad_application.app_id

    # retry till server replication is done
    aad_sp = existing_sps[0] if existing_sps else None
    if not aad_sp:
        for retry_time in range(0, _RETRY_TIMES):
            try:
                aad_sp = _create_service_principal(cmd.cli_ctx, app_id, resolve_app=False)
                break
            except Exception as ex:  # pylint: disable=broad-except
                err_msg = str(ex)
                if retry_time < _RETRY_TIMES and (
                        ' does not reference ' in err_msg or
                        ' does not exist ' in err_msg or
                        'service principal being created must in the local tenant' in err_msg):
                    logger.warning("Creating service principal failed with error '%s'. Retrying: %s/%s",
                                   err_msg, retry_time + 1, _RETRY_TIMES)
                    time.sleep(5)
                else:
                    logger.warning(
                        "Creating service principal failed for '%s'. Trace followed:\n%s",
                        app_id, ex.response.headers
                        if hasattr(ex, 'response') else ex)  # pylint: disable=no-member
                    raise
    sp_oid = aad_sp.object_id

    if role:
        for scope in scopes:
            # logger.warning("Creating '%s' role assignment under scope '%s'", role, scope)
            # retry till server replication is done
            for retry_time in range(0, _RETRY_TIMES):
                try:
                    _create_role_assignment(cmd.cli_ctx, role, sp_oid, None, scope, resolve_assignee=False,
                                            assignee_principal_type='ServicePrincipal')
                    break
                except Exception as ex:
                    if retry_time < _RETRY_TIMES and ' does not exist in the directory ' in str(ex):
                        time.sleep(5)
                        logger.warning('  Retrying role assignment creation: %s/%s', retry_time + 1,
                                       _RETRY_TIMES)
                        continue
                    if _error_caused_by_role_assignment_exists(ex):
                        logger.warning('  Role assignment already exists.\n')
                        break

                    # dump out history for diagnoses
                    logger.warning('  Role assignment creation failed.\n')
                    if getattr(ex, 'response', None) is not None:
                        logger.warning('  role assignment response headers: %s\n',
                                       ex.response.headers)  # pylint: disable=no-member
                    raise

    if show_auth_for_sdk:
        from azure.cli.core._profile import Profile
        profile = Profile(cli_ctx=cmd.cli_ctx)
        result = profile.get_sp_auth_info(scopes[0].split('/')[2] if scopes else None,
                                          app_id, password, cert_file)
        # sdk-auth file should be in json format all the time, hence the print
        print(json.dumps(result, indent=2))
        return

    result = {
        'appId': app_id,
        'password': password,
        'displayName': app_display_name,
        'tenant': graph_client.config.tenant_id
    }
    if cert_file:
        logger.warning(
            "Please copy %s to a safe place. When you run 'az login', provide the file path in the --password argument",
            cert_file)
        result['fileWithCertAndPrivateKey'] = cert_file
    return result


def is_int(s):
    """Return True when int(s) succeeds (e.g. "8080"); False on ValueError."""
    try:
        int(s)
        return True
    except ValueError:
        pass
    return False


def await_github_action(cmd, token, repo, branch, name, resource_group_name, timeout_secs=300):
    """Block until the app's GitHub Actions deployment workflow finishes.

    Polls for the workflow to appear, then for its first run to leave
    'queued'/'in_progress'. Raises CLIInternalError on timeout or a failed
    action, ValidationError when the run ends in a non-'completed' status.
    """
    from .custom import show_github_action
    from github import Github
    from time import sleep
    from ._clients import PollingAnimation

    start = datetime.utcnow()

    animation = PollingAnimation()
    animation.tick()
    g = Github(token)

    github_repo = g.get_repo(repo)

    # Phase 1: wait for the auto-deployment workflow to be registered.
    workflow = None
    while workflow is None:
        workflows = github_repo.get_workflows()
        animation.flush()
        for wf in workflows:
            if wf.path.startswith(f".github/workflows/{name}") and "Trigger auto deployment for containerapp" in wf.name:
                workflow = wf
                break

        gh_action_status = safe_get(show_github_action(cmd, name, resource_group_name), "properties", "operationState")
        if gh_action_status == "Failed":
            raise CLIInternalError("The Github Action creation failed.")
        sleep(1)
        animation.tick()

        if (datetime.utcnow() - start).seconds >= timeout_secs:
            raise CLIInternalError("Timed out while waiting for the Github action to start.")

    animation.flush()
    animation.tick()
    animation.flush()
    # Phase 2: follow the latest run of that workflow until it finishes.
    run = workflow.get_runs()[0]
    logger.warning(f"Github action run: https://github.com/{repo}/actions/runs/{run.id}")
    logger.warning("Waiting for deployment to complete...")
    run_id = run.id
    status = run.status
    while status in ('queued', 'in_progress'):
        sleep(3)
        animation.tick()
        status = [wf.status for wf in workflow.get_runs() if wf.id == run_id][0]
        animation.flush()
        if (datetime.utcnow() - start).seconds >= timeout_secs:
            raise CLIInternalError("Timed out while waiting for the Github action to start.")

    if status != "completed":
        raise ValidationError(f"Github action deployment ended with status: {status}")
repo_url_to_name(repo_url): + repo = None + repo = repo_url.split('/') + if len(repo) >= 2: + repo = '/'.join(repo[-2:]) + return repo + + def _get_location_from_resource_group(cli_ctx, resource_group_name): client = cf_resource_groups(cli_ctx) group = client.get(resource_group_name) @@ -132,7 +329,7 @@ def _update_revision_env_secretrefs(containers, name): var["secretRef"] = var["secretRef"].replace("{}-".format(name), "") -def store_as_secret_and_return_secret_ref(secrets_list, registry_user, registry_server, registry_pass, update_existing_secret=False): +def store_as_secret_and_return_secret_ref(secrets_list, registry_user, registry_server, registry_pass, update_existing_secret=False, disable_warnings=False): if registry_pass.startswith("secretref:"): # If user passed in registry password using a secret @@ -162,7 +359,8 @@ def store_as_secret_and_return_secret_ref(secrets_list, registry_user, registry_ raise ValidationError('Found secret with name \"{}\" but value does not equal the supplied registry password.'.format(registry_secret_name)) return registry_secret_name - logger.warning('Adding registry password as a secret with name \"{}\"'.format(registry_secret_name)) # pylint: disable=logging-format-interpolation + if not disable_warnings: + logger.warning('Adding registry password as a secret with name \"{}\"'.format(registry_secret_name)) # pylint: disable=logging-format-interpolation secrets_list.append({ "name": registry_secret_name, "value": registry_pass @@ -205,6 +403,44 @@ def _get_default_log_analytics_location(cmd): return default_location +def get_container_app_if_exists(cmd, resource_group_name, name): + app = None + try: + app = ContainerAppClient.show(cmd=cmd, resource_group_name=resource_group_name, name=name) + except: # pylint: disable=bare-except + pass + return app + + +def _get_name(name_or_rid): + if is_valid_resource_id(name_or_rid): + return parse_resource_id(name_or_rid)["name"] + return name_or_rid + + +def 
_get_default_containerapps_location(cmd, location=None): + if location: + return location + default_location = "eastus" + providers_client = None + try: + providers_client = providers_client_factory(cmd.cli_ctx, get_subscription_id(cmd.cli_ctx)) + resource_types = getattr(providers_client.get("Microsoft.App"), 'resource_types', []) + res_locations = [] + for res in resource_types: + if res and getattr(res, 'resource_type', "") == "workspaces": + res_locations = getattr(res, 'locations', []) + + if len(res_locations) > 0: + location = res_locations[0].lower().replace(" ", "").replace("(", "").replace(")", "") + if location: + return location + + except Exception: # pylint: disable=broad-except + return default_location + return default_location + + # Generate random 4 character string def _new_tiny_guid(): import random @@ -232,6 +468,15 @@ def _generate_log_analytics_workspace_name(resource_group_name): return name +def _get_log_analytics_workspace_name(cmd, logs_customer_id, resource_group_name): + log_analytics_client = log_analytics_client_factory(cmd.cli_ctx) + logs_list = log_analytics_client.list_by_resource_group(resource_group_name) + for log in logs_list: + if log.customer_id.lower() == logs_customer_id.lower(): + return log.name + raise ResourceNotFoundError("Cannot find Log Analytics workspace with customer ID {}".format(logs_customer_id)) + + def _generate_log_analytics_if_not_provided(cmd, logs_customer_id, logs_key, location, resource_group_name): if logs_customer_id is None and logs_key is None: logger.warning("No Log Analytics workspace provided.") @@ -306,7 +551,6 @@ def _get_existing_secrets(cmd, resource_group_name, name, containerapp_def): def _ensure_identity_resource_id(subscription_id, resource_group, resource): - from msrestazure.tools import resource_id, is_valid_resource_id if is_valid_resource_id(resource): return resource @@ -428,8 +672,6 @@ def _add_or_update_tags(containerapp_def, tags): def _object_to_dict(obj): - import json - import 
datetime def default_handler(x): if isinstance(x, datetime.datetime): @@ -562,16 +804,16 @@ def _get_app_from_revision(revision): return revision -def _infer_acr_credentials(cmd, registry_server): +def _infer_acr_credentials(cmd, registry_server, disable_warnings=False): # If registry is Azure Container Registry, we can try inferring credentials if '.azurecr.io' not in registry_server: raise RequiredArgumentMissingError('Registry username and password are required if not using Azure Container Registry.') - logger.warning('No credential was provided to access Azure Container Registry. Trying to look up credentials...') + not disable_warnings and logger.warning('No credential was provided to access Azure Container Registry. Trying to look up credentials...') parsed = urlparse(registry_server) registry_name = (parsed.netloc if parsed.scheme else parsed.path).split('.')[0] try: - registry_user, registry_pass = _get_acr_cred(cmd.cli_ctx, registry_name) + registry_user, registry_pass, registry_rg = _get_acr_cred(cmd.cli_ctx, registry_name) # pylint: disable=unused-variable return (registry_user, registry_pass) except Exception as ex: raise RequiredArgumentMissingError('Failed to retrieve credentials for container registry {}. 
Please provide the registry username and password'.format(registry_name)) from ex @@ -585,3 +827,190 @@ def _registry_exists(containerapp_def, registry_server): exists = True break return exists + + +# get a value from nested dict without getting IndexError (returns None instead) +# for example, model["key1"]["key2"]["key3"] would become safe_get(model, "key1", "key2", "key3") +def safe_get(model, *keys, default=None): + for k in keys[:-1]: + model = model.get(k, {}) + return model.get(keys[-1], default) + + +def is_platform_windows(): + return platform.system() == "Windows" + + +def get_randomized_name(prefix, name=None, initial="rg"): + from random import randint + default = "{}_{}_{:04}".format(prefix, initial, randint(0, 9999)) + if name is not None: + return name + return default + + +def _set_webapp_up_default_args(cmd, resource_group_name, location, name, registry_server): + from azure.cli.core.util import ConfiguredDefaultSetter + with ConfiguredDefaultSetter(cmd.cli_ctx.config, True): + logger.warning("Setting 'az containerapp up' default arguments for current directory. 
" + "Manage defaults with 'az configure --scope local'") + + cmd.cli_ctx.config.set_value('defaults', 'resource_group_name', resource_group_name) + logger.warning("--resource-group/-g default: %s", resource_group_name) + + cmd.cli_ctx.config.set_value('defaults', 'location', location) + logger.warning("--location/-l default: %s", location) + + cmd.cli_ctx.config.set_value('defaults', 'name', name) + logger.warning("--name/-n default: %s", name) + + # cmd.cli_ctx.config.set_value('defaults', 'managed_env', managed_env) + # logger.warning("--environment default: %s", managed_env) + + cmd.cli_ctx.config.set_value('defaults', 'registry_server', registry_server) + logger.warning("--registry-server default: %s", registry_server) + + +def get_profile_username(): + from azure.cli.core._profile import Profile + user = Profile().get_current_account_user() + user = user.split('@', 1)[0] + if len(user.split('#', 1)) > 1: # on cloudShell user is in format live.com#user@domain.com + user = user.split('#', 1)[1] + return user + + +def create_resource_group(cmd, rg_name, location): + from azure.cli.core.profiles import ResourceType, get_sdk + rcf = _resource_client_factory(cmd.cli_ctx) + resource_group = get_sdk(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES, 'ResourceGroup', mod='models') + rg_params = resource_group(location=location) + return rcf.resource_groups.create_or_update(rg_name, rg_params) + + +def get_resource_group(cmd, rg_name): + rcf = _resource_client_factory(cmd.cli_ctx) + return rcf.resource_groups.get(rg_name) + + +def _resource_client_factory(cli_ctx, **_): + from azure.cli.core.commands.client_factory import get_mgmt_service_client + from azure.cli.core.profiles import ResourceType + return get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES) + + +def queue_acr_build(cmd, registry_rg, registry_name, img_name, src_dir, dockerfile="Dockerfile", quiet=False): + import os + import uuid + import tempfile + from ._archive_utils import 
upload_source_code + from azure.cli.command_modules.acr._stream_utils import stream_logs + from azure.cli.command_modules.acr._client_factory import cf_acr_registries_tasks + from azure.cli.core.commands import LongRunningOperation + + # client_registries = get_acr_service_client(cmd.cli_ctx).registries + client_registries = cf_acr_registries_tasks(cmd.cli_ctx) + + if not os.path.isdir(src_dir): + raise ValidationError("Source directory should be a local directory path.") + + docker_file_path = os.path.join(src_dir, dockerfile) + if not os.path.isfile(docker_file_path): + raise ValidationError("Unable to find '{}'.".format(docker_file_path)) + + # NOTE: os.path.basename is unable to parse "\" in the file path + original_docker_file_name = os.path.basename(docker_file_path.replace("\\", "/")) + docker_file_in_tar = '{}_{}'.format(uuid.uuid4().hex, original_docker_file_name) + tar_file_path = os.path.join(tempfile.gettempdir(), 'build_archive_{}.tar.gz'.format(uuid.uuid4().hex)) + + source_location = upload_source_code(cmd, client_registries, registry_name, registry_rg, src_dir, tar_file_path, docker_file_path, docker_file_in_tar) + + # For local source, the docker file is added separately into tar as the new file name (docker_file_in_tar) + # So we need to update the docker_file_path + docker_file_path = docker_file_in_tar + + from azure.cli.core.profiles import ResourceType + OS, Architecture = cmd.get_models('OS', 'Architecture', resource_type=ResourceType.MGMT_CONTAINERREGISTRY, operation_group='runs') + # Default platform values + platform_os = OS.linux.value + platform_arch = Architecture.amd64.value + platform_variant = None + + DockerBuildRequest, PlatformProperties = cmd.get_models('DockerBuildRequest', 'PlatformProperties', + resource_type=ResourceType.MGMT_CONTAINERREGISTRY, operation_group='runs') + docker_build_request = DockerBuildRequest( + image_names=[img_name], + is_push_enabled=True, + source_location=source_location, + platform=PlatformProperties( 
+ os=platform_os, + architecture=platform_arch, + variant=platform_variant + ), + docker_file_path=docker_file_path, + timeout=None, + arguments=[]) + + queued_build = LongRunningOperation(cmd.cli_ctx)(client_registries.begin_schedule_run( + resource_group_name=registry_rg, + registry_name=registry_name, + run_request=docker_build_request)) + + run_id = queued_build.run_id + logger.info("Queued a build with ID: %s", run_id) + not quiet and logger.info("Waiting for agent...") + + from azure.cli.command_modules.acr._client_factory import (cf_acr_runs) + from ._acr_run_polling import get_run_with_polling + client_runs = cf_acr_runs(cmd.cli_ctx) + + if quiet: + lro_poller = get_run_with_polling(cmd, client_runs, run_id, registry_name, registry_rg) + acr = LongRunningOperation(cmd.cli_ctx)(lro_poller) + logger.info("Build {}.".format(acr.status.lower())) # pylint: disable=logging-format-interpolation + if acr.status.lower() != "succeeded": + raise CLIInternalError("ACR build {}.".format(acr.status.lower())) + return acr + + return stream_logs(cmd, client_runs, run_id, registry_name, registry_rg, None, False, True) + + +def _get_acr_cred(cli_ctx, registry_name): + from azure.mgmt.containerregistry import ContainerRegistryManagementClient + from azure.cli.core.commands.parameters import get_resources_in_subscription + from azure.cli.core.commands.client_factory import get_mgmt_service_client + + client = get_mgmt_service_client(cli_ctx, ContainerRegistryManagementClient).registries + + result = get_resources_in_subscription(cli_ctx, 'Microsoft.ContainerRegistry/registries') + result = [item for item in result if item.name.lower() == registry_name] + if not result or len(result) > 1: + raise ResourceNotFoundError("No resource or more than one were found with name '{}'.".format(registry_name)) + resource_group_name = parse_resource_id(result[0].id)['resource_group'] + + registry = client.get(resource_group_name, registry_name) + + if registry.admin_user_enabled: # pylint: 
disable=no-member + cred = client.list_credentials(resource_group_name, registry_name) + return cred.username, cred.passwords[0].value, resource_group_name + raise ResourceNotFoundError("Failed to retrieve container registry credentials. Please either provide the " + "credentials or run 'az acr update -n {} --admin-enabled true' to enable " + "admin first.".format(registry_name)) + + +def create_new_acr(cmd, registry_name, resource_group_name, location=None, sku="Basic"): + # from azure.cli.command_modules.acr.custom import acr_create + from azure.cli.command_modules.acr._client_factory import cf_acr_registries + from azure.cli.core.profiles import ResourceType + from azure.cli.core.commands import LongRunningOperation + + client = cf_acr_registries(cmd.cli_ctx) + # return acr_create(cmd, client, registry_name, resource_group_name, sku, location) + + Registry, Sku = cmd.get_models('Registry', 'Sku', resource_type=ResourceType.MGMT_CONTAINERREGISTRY, operation_group="registries") + registry = Registry(location=location, sku=Sku(name=sku), admin_user_enabled=True, + zone_redundancy=None, tags=None) + + lro_poller = client.begin_create(resource_group_name, registry_name, registry) + acr = LongRunningOperation(cmd.cli_ctx)(lro_poller) + return acr diff --git a/src/containerapp/azext_containerapp/_validators.py b/src/containerapp/azext_containerapp/_validators.py index 861a7f049b6..9c1e53ed50e 100644 --- a/src/containerapp/azext_containerapp/_validators.py +++ b/src/containerapp/azext_containerapp/_validators.py @@ -4,7 +4,11 @@ # -------------------------------------------------------------------------------------------- # pylint: disable=line-too-long -from azure.cli.core.azclierror import (ValidationError) +from azure.cli.core.azclierror import (ValidationError, ResourceNotFoundError) + +from ._clients import ContainerAppClient +from ._ssh_utils import ping_container_app +from ._utils import safe_get def _is_number(s): @@ -87,3 +91,67 @@ def 
validate_ingress(namespace): if namespace.ingress: if not namespace.target_port: raise ValidationError("Usage error: must specify --target-port with --ingress") + + +def _set_ssh_defaults(cmd, namespace): + app = ContainerAppClient.show(cmd, namespace.resource_group_name, namespace.name) + if not app: + raise ResourceNotFoundError("Could not find a container app") + replicas = [] + if not namespace.revision: + namespace.revision = app.get("properties", {}).get("latestRevisionName") + if not namespace.revision: + raise ResourceNotFoundError("Could not find a revision") + if not namespace.replica: + # VVV this may not be necessary according to Anthony Chu + ping_container_app(app) # needed to get an alive replica + replicas = ContainerAppClient.list_replicas(cmd=cmd, + resource_group_name=namespace.resource_group_name, + container_app_name=namespace.name, + revision_name=namespace.revision) + if not replicas: + raise ResourceNotFoundError("Could not find a replica for this app") + namespace.replica = replicas[0]["name"] + if not namespace.container: + revision = ContainerAppClient.show_revision(cmd, resource_group_name=namespace.resource_group_name, + container_app_name=namespace.name, + name=namespace.revision) + revision_containers = safe_get(revision, "properties", "template", "containers") + if revision_containers: + namespace.container = revision_containers[0]["name"] + + +def _validate_revision_exists(cmd, namespace): + revision = ContainerAppClient.show_revision(cmd, resource_group_name=namespace.resource_group_name, + container_app_name=namespace.name, name=namespace.revision) + if not revision: + raise ResourceNotFoundError("Could not find revision") + + +def _validate_replica_exists(cmd, namespace): + replica = ContainerAppClient.get_replica(cmd=cmd, + resource_group_name=namespace.resource_group_name, + container_app_name=namespace.name, + revision_name=namespace.revision, + replica_name=namespace.replica) + if not replica: + raise 
ResourceNotFoundError("Could not find replica") + + +def _validate_container_exists(cmd, namespace): + replica_containers = ContainerAppClient.get_replica(cmd=cmd, + resource_group_name=namespace.resource_group_name, + container_app_name=namespace.name, + revision_name=namespace.revision, + replica_name=namespace.replica)["properties"]["containers"] + matches = [r for r in replica_containers if r["name"].lower() == namespace.container.lower()] + if not matches: + raise ResourceNotFoundError("Could not find container") + + +# also used to validate logstream +def validate_ssh(cmd, namespace): + _set_ssh_defaults(cmd, namespace) + _validate_revision_exists(cmd, namespace) + _validate_replica_exists(cmd, namespace) + _validate_container_exists(cmd, namespace) diff --git a/src/containerapp/azext_containerapp/commands.py b/src/containerapp/azext_containerapp/commands.py index dd6f2d067dc..57fd157ae7f 100644 --- a/src/containerapp/azext_containerapp/commands.py +++ b/src/containerapp/azext_containerapp/commands.py @@ -7,6 +7,7 @@ # from azure.cli.core.commands import CliCommandType # from msrestazure.tools import is_valid_resource_id, parse_resource_id from azext_containerapp._client_factory import ex_handler_factory +from ._validators import validate_ssh def transform_containerapp_output(app): @@ -49,6 +50,16 @@ def load_command_table(self, _): g.custom_command('create', 'create_containerapp', supports_no_wait=True, exception_handler=ex_handler_factory(), table_transformer=transform_containerapp_output) g.custom_command('update', 'update_containerapp', supports_no_wait=True, exception_handler=ex_handler_factory(), table_transformer=transform_containerapp_output) g.custom_command('delete', 'delete_containerapp', supports_no_wait=True, confirmation=True, exception_handler=ex_handler_factory()) + g.custom_command('exec', 'containerapp_ssh', validator=validate_ssh) + g.custom_command('up', 'containerapp_up', supports_no_wait=True, exception_handler=ex_handler_factory()) + 
g.custom_command('browse', 'open_containerapp_in_browser') + + with self.command_group('containerapp replica', is_preview=True) as g: + g.custom_show_command('show', 'get_replica') # TODO implement the table transformer + g.custom_command('list', 'list_replicas') + + with self.command_group('containerapp logs', is_preview=True) as g: + g.custom_show_command('show', 'stream_containerapp_logs', validator=validate_ssh) with self.command_group('containerapp env') as g: g.custom_show_command('show', 'show_managed_environment') diff --git a/src/containerapp/azext_containerapp/custom.py b/src/containerapp/azext_containerapp/custom.py index f990c5f6ff9..00a47081484 100644 --- a/src/containerapp/azext_containerapp/custom.py +++ b/src/containerapp/azext_containerapp/custom.py @@ -2,10 +2,14 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- -# pylint: disable=line-too-long, consider-using-f-string, logging-format-interpolation, inconsistent-return-statements, broad-except, bare-except, too-many-statements, too-many-locals, too-many-boolean-expressions, too-many-branches, too-many-nested-blocks, pointless-statement +# pylint: disable=line-too-long, consider-using-f-string, logging-format-interpolation, inconsistent-return-statements, broad-except, bare-except, too-many-statements, too-many-locals, too-many-boolean-expressions, too-many-branches, too-many-nested-blocks, pointless-statement, expression-not-assigned, unbalanced-tuple-unpacking +import threading +import sys +import time from urllib.parse import urlparse -from azure.cli.command_modules.appservice.custom import (_get_acr_cred) +import requests + from azure.cli.core.azclierror import ( RequiredArgumentMissingError, ValidationError, @@ -14,6 +18,7 @@ CLIInternalError, InvalidArgumentValueError) from 
azure.cli.core.commands.client_factory import get_subscription_id +from azure.cli.core.util import open_page_in_browser from knack.log import get_logger from msrestazure.tools import parse_resource_id, is_valid_resource_id @@ -47,7 +52,11 @@ _object_to_dict, _add_or_update_secrets, _remove_additional_attributes, _remove_readonly_attributes, _add_or_update_env_vars, _add_or_update_tags, update_nested_dictionary, _update_traffic_weights, _get_app_from_revision, raise_missing_token_suggestion, _infer_acr_credentials, _remove_registry_secret, _remove_secret, - _ensure_identity_resource_id, _remove_dapr_readonly_attributes, _registry_exists, _remove_env_vars, _update_revision_env_secretrefs) + _ensure_identity_resource_id, _remove_dapr_readonly_attributes, _remove_env_vars, + _update_revision_env_secretrefs, _get_acr_cred, safe_get, await_github_action, repo_url_to_name) + +from ._ssh_utils import (SSH_DEFAULT_ENCODING, WebSocketConnection, read_ssh, get_stdin_writer, SSH_CTRL_C_MSG, + SSH_BACKUP_ENCODING) logger = get_logger(__name__) @@ -57,7 +66,8 @@ def process_loaded_yaml(yaml_containerapp): if not yaml_containerapp.get('properties'): yaml_containerapp['properties'] = {} - nested_properties = ["provisioningState", "managedEnvironmentId", "latestRevisionName", "latestRevisionFqdn", "customDomainVerificationId", "configuration", "template", "outboundIPAddresses"] + nested_properties = ["provisioningState", "managedEnvironmentId", "latestRevisionName", "latestRevisionFqdn", + "customDomainVerificationId", "configuration", "template", "outboundIPAddresses"] for nested_property in nested_properties: tmp = yaml_containerapp.get(nested_property) if tmp: @@ -85,7 +95,6 @@ def load_yaml_file(file_name): def create_deserializer(): from ._sdk_models import ContainerApp # pylint: disable=unused-import from msrest import Deserializer - import sys import inspect sdkClasses = inspect.getmembers(sys.modules["azext_containerapp._sdk_models"]) @@ -294,6 +303,7 @@ def 
create_containerapp(cmd, tags=None, no_wait=False, system_assigned=False, + disable_warnings=False, user_assigned=None): _validate_subscription_registered(cmd, "Microsoft.App") @@ -302,7 +312,7 @@ def create_containerapp(cmd, revisions_mode or secrets or env_vars or cpu or memory or registry_server or\ registry_user or registry_pass or dapr_enabled or dapr_app_port or dapr_app_id or\ startup_command or args or tags: - logger.warning('Additional flags were passed along with --yaml. These flags will be ignored, and the configuration defined in the yaml will be used instead') + not disable_warnings and logger.warning('Additional flags were passed along with --yaml. These flags will be ignored, and the configuration defined in the yaml will be used instead') return create_containerapp_yaml(cmd=cmd, name=name, resource_group_name=resource_group_name, file_name=yaml, no_wait=no_wait) if not image: @@ -352,14 +362,14 @@ def create_containerapp(cmd, # Infer credentials if not supplied and its azurecr if registry_user is None or registry_pass is None: - registry_user, registry_pass = _infer_acr_credentials(cmd, registry_server) + registry_user, registry_pass = _infer_acr_credentials(cmd, registry_server, disable_warnings) registries_def["server"] = registry_server registries_def["username"] = registry_user if secrets_def is None: secrets_def = [] - registries_def["passwordSecretRef"] = store_as_secret_and_return_secret_ref(secrets_def, registry_user, registry_server, registry_pass) + registries_def["passwordSecretRef"] = store_as_secret_and_return_secret_ref(secrets_def, registry_user, registry_server, registry_pass, disable_warnings=disable_warnings) dapr_def = None if dapr_enabled: @@ -445,12 +455,12 @@ def create_containerapp(cmd, cmd=cmd, resource_group_name=resource_group_name, name=name, container_app_envelope=containerapp_def, no_wait=no_wait) if "properties" in r and "provisioningState" in r["properties"] and r["properties"]["provisioningState"].lower() == "waiting" 
and not no_wait: - logger.warning('Containerapp creation in progress. Please monitor the creation using `az containerapp show -n {} -g {}`'.format(name, resource_group_name)) + not disable_warnings and logger.warning('Containerapp creation in progress. Please monitor the creation using `az containerapp show -n {} -g {}`'.format(name, resource_group_name)) if "configuration" in r["properties"] and "ingress" in r["properties"]["configuration"] and "fqdn" in r["properties"]["configuration"]["ingress"]: - logger.warning("\nContainer app created. Access your app at https://{}/\n".format(r["properties"]["configuration"]["ingress"]["fqdn"])) + not disable_warnings and logger.warning("\nContainer app created. Access your app at https://{}/\n".format(r["properties"]["configuration"]["ingress"]["fqdn"])) else: - logger.warning("\nContainer app created. To access it over HTTPS, enable ingress: az containerapp ingress enable --help\n") + not disable_warnings and logger.warning("\nContainer app created. To access it over HTTPS, enable ingress: az containerapp ingress enable --help\n") return r except Exception as e: @@ -740,6 +750,7 @@ def create_managed_environment(cmd, platform_reserved_dns_ip=None, internal_only=False, tags=None, + disable_warnings=False, no_wait=False): location = location or _get_location_from_resource_group(cmd.cli_ctx, resource_group_name) @@ -794,9 +805,9 @@ def create_managed_environment(cmd, cmd=cmd, resource_group_name=resource_group_name, name=name, managed_environment_envelope=managed_env_def, no_wait=no_wait) if "properties" in r and "provisioningState" in r["properties"] and r["properties"]["provisioningState"].lower() == "waiting" and not no_wait: - logger.warning('Containerapp environment creation in progress. Please monitor the creation using `az containerapp env show -n {} -g {}`'.format(name, resource_group_name)) + not disable_warnings and logger.warning('Containerapp environment creation in progress. 
Please monitor the creation using `az containerapp env show -n {} -g {}`'.format(name, resource_group_name)) - logger.warning("\nContainer Apps environment created. To deploy a container app, use: az containerapp create --help\n") + not disable_warnings and logger.warning("\nContainer Apps environment created. To deploy a container app, use: az containerapp create --help\n") return r except Exception as e: @@ -1021,6 +1032,36 @@ def show_managed_identity(cmd, name, resource_group_name): return r["identity"] +def _validate_github(repo, branch, token): + from github import Github, GithubException + from github.GithubException import BadCredentialsException + + if repo: + g = Github(token) + github_repo = None + try: + github_repo = g.get_repo(repo) + if not github_repo.permissions.push or not github_repo.permissions.maintain: + raise ValidationError("The token does not have appropriate access rights to repository {}.".format(repo)) + try: + github_repo.get_branch(branch=branch) + except GithubException as e: + error_msg = "Encountered GitHub error when accessing {} branch in {} repo.".format(branch, repo) + if e.data and e.data['message']: + error_msg += " Error: {}".format(e.data['message']) + raise CLIInternalError(error_msg) from e + logger.warning('Verified GitHub repo and branch') + except BadCredentialsException as e: + raise ValidationError("Could not authenticate to the repository. Please create a Personal Access Token and use " + "the --token argument. 
Run 'az webapp deployment github-actions add --help' " + "for more information.") from e + except GithubException as e: + error_msg = "Encountered GitHub error when accessing {} repo".format(repo) + if e.data and e.data['message']: + error_msg += " Error: {}".format(e.data['message']) + raise CLIInternalError(error_msg) from e + + def create_or_update_github_action(cmd, name, resource_group_name, @@ -1035,7 +1076,8 @@ def create_or_update_github_action(cmd, context_path=None, service_principal_client_id=None, service_principal_client_secret=None, - service_principal_tenant_id=None): + service_principal_tenant_id=None, + no_wait=False): if not token and not login_with_github: raise_missing_token_suggestion() elif not token: @@ -1044,45 +1086,10 @@ def create_or_update_github_action(cmd, elif token and login_with_github: logger.warning("Both token and --login-with-github flag are provided. Will use provided token") - try: - # Verify github repo - from github import Github, GithubException - from github.GithubException import BadCredentialsException - - repo = None - repo = repo_url.split('/') - if len(repo) >= 2: - repo = '/'.join(repo[-2:]) + repo = repo_url_to_name(repo_url) + repo_url = f"https://github.com/{repo}" # allow specifying repo as / without the full github url - if repo: - g = Github(token) - github_repo = None - try: - github_repo = g.get_repo(repo) - if not github_repo.permissions.push or not github_repo.permissions.maintain: - raise ValidationError("The token does not have appropriate access rights to repository {}.".format(repo)) - try: - github_repo.get_branch(branch=branch) - except GithubException as e: - error_msg = "Encountered GitHub error when accessing {} branch in {} repo.".format(branch, repo) - if e.data and e.data['message']: - error_msg += " Error: {}".format(e.data['message']) - raise CLIInternalError(error_msg) from e - logger.warning('Verified GitHub repo and branch') - except BadCredentialsException as e: - raise 
ValidationError("Could not authenticate to the repository. Please create a Personal Access Token and use " - "the --token argument. Run 'az webapp deployment github-actions add --help' " - "for more information.") from e - except GithubException as e: - error_msg = "Encountered GitHub error when accessing {} repo".format(repo) - if e.data and e.data['message']: - error_msg += " Error: {}".format(e.data['message']) - raise CLIInternalError(error_msg) from e - except CLIError as clierror: - raise clierror - except Exception: - # If exception due to github package missing, etc just continue without validating the repo and rely on api validation - pass + _validate_github(repo, branch, token) source_control_info = None @@ -1120,7 +1127,7 @@ def create_or_update_github_action(cmd, registry_name = (parsed.netloc if parsed.scheme else parsed.path).split('.')[0] try: - registry_username, registry_password = _get_acr_cred(cmd.cli_ctx, registry_name) + registry_username, registry_password, _ = _get_acr_cred(cmd.cli_ctx, registry_name) except Exception as ex: raise RequiredArgumentMissingError('Failed to retrieve credentials for container registry. 
Please provide the registry username and password') from ex @@ -1140,7 +1147,10 @@ def create_or_update_github_action(cmd, headers = ["x-ms-github-auxiliary={}".format(token)] try: - r = GitHubActionClient.create_or_update(cmd=cmd, resource_group_name=resource_group_name, name=name, github_action_envelope=source_control_info, headers=headers) + logger.warning("Creating Github action...") + r = GitHubActionClient.create_or_update(cmd=cmd, resource_group_name=resource_group_name, name=name, github_action_envelope=source_control_info, headers=headers, no_wait=no_wait) + if not no_wait: + await_github_action(cmd, token, repo, branch, name, resource_group_name) return r except Exception as e: handle_raw_exception(e) @@ -1351,7 +1361,7 @@ def show_ingress(cmd, name, resource_group_name): raise ValidationError("The containerapp '{}' does not have ingress enabled.".format(name)) from e -def enable_ingress(cmd, name, resource_group_name, type, target_port, transport="auto", allow_insecure=False, no_wait=False): # pylint: disable=redefined-builtin +def enable_ingress(cmd, name, resource_group_name, type, target_port, transport="auto", allow_insecure=False, disable_warnings=False, no_wait=False): # pylint: disable=redefined-builtin _validate_subscription_registered(cmd, "Microsoft.App") containerapp_def = None @@ -1385,7 +1395,7 @@ def enable_ingress(cmd, name, resource_group_name, type, target_port, transport= try: r = ContainerAppClient.create_or_update( cmd=cmd, resource_group_name=resource_group_name, name=name, container_app_envelope=containerapp_def, no_wait=no_wait) - logger.warning("\nIngress enabled. Access your app at https://{}/\n".format(r["properties"]["configuration"]["ingress"]["fqdn"])) + not disable_warnings and logger.warning("\nIngress enabled. 
Access your app at https://{}/\n".format(r["properties"]["configuration"]["ingress"]["fqdn"])) return r["properties"]["configuration"]["ingress"] except Exception as e: handle_raw_exception(e) @@ -1507,7 +1517,7 @@ def list_registry(cmd, name, resource_group_name): raise ValidationError("The containerapp {} has no assigned registries.".format(name)) from e -def set_registry(cmd, name, resource_group_name, server, username=None, password=None, no_wait=False): +def set_registry(cmd, name, resource_group_name, server, username=None, password=None, disable_warnings=False, no_wait=False): _validate_subscription_registered(cmd, "Microsoft.App") containerapp_def = None @@ -1533,12 +1543,12 @@ def set_registry(cmd, name, resource_group_name, server, username=None, password # If registry is Azure Container Registry, we can try inferring credentials if '.azurecr.io' not in server: raise RequiredArgumentMissingError('Registry username and password are required if you are not using Azure Container Registry.') - logger.warning('No credential was provided to access Azure Container Registry. Trying to look up...') + not disable_warnings and logger.warning('No credential was provided to access Azure Container Registry. Trying to look up...') parsed = urlparse(server) registry_name = (parsed.netloc if parsed.scheme else parsed.path).split('.')[0] try: - username, password = _get_acr_cred(cmd.cli_ctx, registry_name) + username, password, _ = _get_acr_cred(cmd.cli_ctx, registry_name) except Exception as ex: raise RequiredArgumentMissingError('Failed to retrieve credentials for container registry. 
Please provide the registry username and password') from ex @@ -1546,7 +1556,7 @@ def set_registry(cmd, name, resource_group_name, server, username=None, password updating_existing_registry = False for r in registries_def: if r['server'].lower() == server.lower(): - logger.warning("Updating existing registry.") + not disable_warnings and logger.warning("Updating existing registry.") updating_existing_registry = True if username: r["username"] = username @@ -1712,7 +1722,6 @@ def set_secrets(cmd, name, resource_group_name, secrets, # yaml=None, no_wait=False): _validate_subscription_registered(cmd, "Microsoft.App") - # if not yaml and not secrets: # raise RequiredArgumentMissingError('Usage error: --secrets is required if not using --yaml') @@ -1880,3 +1889,279 @@ def remove_dapr_component(cmd, resource_group_name, dapr_component_name, environ return r except Exception as e: handle_raw_exception(e) + + +def list_replicas(cmd, resource_group_name, name, revision=None): + app = ContainerAppClient.show(cmd, resource_group_name, name) + if not revision: + revision = app["properties"]["latestRevisionName"] + return ContainerAppClient.list_replicas(cmd=cmd, + resource_group_name=resource_group_name, + container_app_name=name, + revision_name=revision) + + +def get_replica(cmd, resource_group_name, name, replica, revision=None): + app = ContainerAppClient.show(cmd, resource_group_name, name) + if not revision: + revision = app["properties"]["latestRevisionName"] + return ContainerAppClient.get_replica(cmd=cmd, + resource_group_name=resource_group_name, + container_app_name=name, + revision_name=revision, + replica_name=replica) + + +def containerapp_ssh(cmd, resource_group_name, name, container=None, revision=None, replica=None, startup_command="sh"): + if isinstance(startup_command, list): + startup_command = startup_command[0] # CLI seems a little buggy when calling a param "--command" + + conn = WebSocketConnection(cmd=cmd, resource_group_name=resource_group_name, 
name=name, revision=revision, + replica=replica, container=container, startup_command=startup_command) + + encodings = [SSH_DEFAULT_ENCODING, SSH_BACKUP_ENCODING] + reader = threading.Thread(target=read_ssh, args=(conn, encodings)) + reader.daemon = True + reader.start() + + writer = get_stdin_writer(conn) + writer.daemon = True + writer.start() + + logger.warning("Use ctrl + D to exit.") + while conn.is_connected: + try: + time.sleep(0.1) + except KeyboardInterrupt: + if conn.is_connected: + logger.info("Caught KeyboardInterrupt. Sending ctrl+c to server") + conn.send(SSH_CTRL_C_MSG) + + +def stream_containerapp_logs(cmd, resource_group_name, name, container=None, revision=None, replica=None, follow=False, + tail=None, output_format=None): + if tail: + if tail < 0 or tail > 300: + raise ValidationError("--tail must be between 0 and 300.") + + sub = get_subscription_id(cmd.cli_ctx) + token_response = ContainerAppClient.get_auth_token(cmd, resource_group_name, name) + token = token_response["properties"]["token"] + logstream_endpoint = token_response["properties"]["logStreamEndpoint"] + base_url = logstream_endpoint[:logstream_endpoint.index("/subscriptions/")] + + url = (f"{base_url}/subscriptions/{sub}/resourceGroups/{resource_group_name}/containerApps/{name}" + f"/revisions/{revision}/replicas/{replica}/containers/{container}/logstream") + + logger.warning("connecting to : %s", url) + request_params = {"follow": str(follow).lower(), "output": output_format, "tailLines": tail} + headers = {"Authorization": f"Bearer {token}"} + resp = requests.get(url, timeout=None, stream=True, params=request_params, headers=headers) + + if not resp.ok: + ValidationError(f"Got bad status from the logstream API: {resp.status_code}") + + for line in resp.iter_lines(): + if line: + logger.info("received raw log line: %s", line) + # these .replaces are needed to display color/quotations properly + # for some reason the API returns garbled unicode special characters (may need to add 
more in the future) + print(line.decode("utf-8").replace("\\u0022", "\u0022").replace("\\u001B", "\u001B").replace("\\u002B", "\u002B").replace("\\u0027", "\u0027")) + + +def open_containerapp_in_browser(cmd, name, resource_group_name): + app = ContainerAppClient.show(cmd, resource_group_name, name) + url = safe_get(app, "properties", "configuration", "ingress", "fqdn") + if not url: + raise ValidationError("Could not open in browser: no public URL for this app") + if not url.startswith("http"): + url = f"http://{url}" + open_page_in_browser(url) + + +def containerapp_up(cmd, + name, + resource_group_name=None, + managed_env=None, + location=None, + registry_server=None, + image=None, + source=None, + ingress=None, + target_port=None, + registry_user=None, + registry_pass=None, + env_vars=None, + logs_customer_id=None, + logs_key=None, + repo=None, + token=None, + branch=None, + browse=False, + context_path=None, + service_principal_client_id=None, + service_principal_client_secret=None, + service_principal_tenant_id=None): + from ._up_utils import (_validate_up_args, _reformat_image, _get_dockerfile_content, _get_ingress_and_target_port, + ResourceGroup, ContainerAppEnvironment, ContainerApp, _get_registry_from_app, + _get_registry_details, _create_github_action, _set_up_defaults, up_output, AzureContainerRegistry) + + dockerfile = "Dockerfile" # for now the dockerfile name must be "Dockerfile" (until GH actions API is updated) + + _validate_up_args(source, image, repo) + + image = _reformat_image(source, repo, image) + token = None if not repo else get_github_access_token(cmd, ["admin:repo_hook", "repo", "workflow"], token) + + if image and "mcr.microsoft.com/azuredocs/containerapps-helloworld:latest" in image.lower(): + ingress = "external" if not ingress else ingress + target_port = 80 if not target_port else target_port + + dockerfile_content = _get_dockerfile_content(repo, branch, token, source, context_path, dockerfile) + ingress, target_port = 
_get_ingress_and_target_port(ingress, target_port, dockerfile_content) + + resource_group = ResourceGroup(cmd, name=resource_group_name, location=location) + env = ContainerAppEnvironment(cmd, managed_env, resource_group, location=location, logs_key=logs_key, logs_customer_id=logs_customer_id) + app = ContainerApp(cmd, name, resource_group, None, image, env, target_port, registry_server, registry_user, registry_pass, env_vars, ingress) + + _set_up_defaults(cmd, name, resource_group_name, logs_customer_id, location, resource_group, env, app) + + if app.check_exists(): + if app.get()["properties"]["provisioningState"] == "InProgress": + raise ValidationError("Containerapp has an existing provisioning in progress. Please wait until provisioning has completed and rerun the command.") + + if source or repo: + _get_registry_from_app(app) # if the app exists, get the registry + _get_registry_details(cmd, app) # fetch ACR creds from arguments registry arguments + + resource_group.create_if_needed() + env.create_if_needed(name) + app.create_acr_if_needed() + + if source: + app.run_acr_build(dockerfile, source, False) + + app.create(no_registry=bool(repo)) + if repo: + _create_github_action(app, env, service_principal_client_id, service_principal_client_secret, + service_principal_tenant_id, branch, token, repo, context_path) + + if browse: + open_containerapp_in_browser(cmd, app.name, app.resource_group.name) + + up_output(app) + + +def containerapp_up_logic(cmd, resource_group_name, name, managed_env, image, env_vars, ingress, target_port, registry_server, registry_user, registry_pass): + containerapp_def = None + try: + containerapp_def = ContainerAppClient.show(cmd=cmd, resource_group_name=resource_group_name, name=name) + except: + pass + + try: + location = ManagedEnvironmentClient.show(cmd, resource_group_name, managed_env.split('/')[-1])["location"] + except: + pass + + ca_exists = False + if containerapp_def: + ca_exists = True + + if not ca_exists: + 
containerapp_def = None + containerapp_def = ContainerAppModel + containerapp_def["location"] = location + containerapp_def["properties"]["managedEnvironmentId"] = managed_env + containerapp_def["properties"]["configuration"] = ConfigurationModel + else: + # check provisioning state here instead of secrets so no error + _get_existing_secrets(cmd, resource_group_name, name, containerapp_def) + + container = ContainerModel + container["image"] = image + container["name"] = name + + if env_vars: + container["env"] = parse_env_var_flags(env_vars) + + external_ingress = None + if ingress is not None: + if ingress.lower() == "internal": + external_ingress = False + elif ingress.lower() == "external": + external_ingress = True + + ingress_def = None + if target_port is not None and ingress is not None: + ingress_def = IngressModel + ingress_def["external"] = external_ingress + ingress_def["targetPort"] = target_port + containerapp_def["properties"]["configuration"]["ingress"] = ingress_def + + # handle multi-container case + if ca_exists: + existing_containers = containerapp_def["properties"]["template"]["containers"] + if len(existing_containers) == 0: + # No idea how this would ever happen, failed provisioning maybe? 
+ containerapp_def["properties"]["template"] = TemplateModel + containerapp_def["properties"]["template"]["containers"] = [container] + if len(existing_containers) == 1: + # Assume they want it updated + existing_containers[0] = container + if len(existing_containers) > 1: + # Assume they want to update, if not existing just add it + existing_containers = [x for x in existing_containers if x['name'].lower() == name.lower()] + if len(existing_containers) == 1: + existing_containers[0] = container + else: + existing_containers.append(container) + containerapp_def["properties"]["template"]["containers"] = existing_containers + else: + containerapp_def["properties"]["template"] = TemplateModel + containerapp_def["properties"]["template"]["containers"] = [container] + + registries_def = None + registry = None + + if "secrets" not in containerapp_def["properties"]["configuration"] or containerapp_def["properties"]["configuration"]["secrets"] is None: + containerapp_def["properties"]["configuration"]["secrets"] = [] + + if "registries" not in containerapp_def["properties"]["configuration"] or containerapp_def["properties"]["configuration"]["registries"] is None: + containerapp_def["properties"]["configuration"]["registries"] = [] + + registries_def = containerapp_def["properties"]["configuration"]["registries"] + + if registry_server: + # Check if updating existing registry + updating_existing_registry = False + for r in registries_def: + if r['server'].lower() == registry_server.lower(): + updating_existing_registry = True + if registry_user: + r["username"] = registry_user + if registry_pass: + r["passwordSecretRef"] = store_as_secret_and_return_secret_ref( + containerapp_def["properties"]["configuration"]["secrets"], + r["username"], + r["server"], + registry_pass, + update_existing_secret=True) + + # If not updating existing registry, add as new registry + if not updating_existing_registry: + registry = RegistryCredentialsModel + registry["server"] = registry_server + 
registry["username"] = registry_user + registry["passwordSecretRef"] = store_as_secret_and_return_secret_ref( + containerapp_def["properties"]["configuration"]["secrets"], + registry_user, + registry_server, + registry_pass, + update_existing_secret=True) + + registries_def.append(registry) + + if ca_exists: + return ContainerAppClient.update(cmd, resource_group_name, name, containerapp_def) + return ContainerAppClient.create_or_update(cmd, resource_group_name, name, containerapp_def) diff --git a/src/containerapp/azext_containerapp/tests/latest/test_containerapp_scenario.py b/src/containerapp/azext_containerapp/tests/latest/test_containerapp_scenario.py index bc6d1133a1f..98992aa134c 100644 --- a/src/containerapp/azext_containerapp/tests/latest/test_containerapp_scenario.py +++ b/src/containerapp/azext_containerapp/tests/latest/test_containerapp_scenario.py @@ -4,9 +4,12 @@ # -------------------------------------------------------------------------------------------- import os +import platform +from unittest import mock import time -import unittest +from azext_containerapp.custom import containerapp_ssh +from azure.cli.testsdk.reverse_dependency import get_dummy_cli from azure.cli.testsdk.scenario_tests import AllowLargeResponse from azure.cli.testsdk import (ScenarioTest, ResourceGroupPreparer, JMESPathCheck, live_only) from knack.util import CLIError @@ -22,7 +25,7 @@ class ContainerappScenarioTest(ScenarioTest): def test_containerapp_e2e(self, resource_group): env_name = self.create_random_name(prefix='containerapp-e2e-env', length=24) - self.cmd('containerapp env create -g {} -n {}'.format(resource_group, env_name)) + self.cmd(f'containerapp env create -g {resource_group} -n {env_name}') # Ensure environment is completed containerapp_env = self.cmd('containerapp env show -g {} -n {}'.format(resource_group, env_name)).get_output_in_json() @@ -181,3 +184,77 @@ def test_containerapp_update(self, resource_group): 
JMESPathCheck('properties.template.containers[1].resources.cpu', '0.75'), JMESPathCheck('properties.template.containers[1].resources.memory', '1.5Gi'), ]) + + @live_only() # VCR.py can't seem to handle websockets (only --live works) + # @ResourceGroupPreparer(location="centraluseuap") + @mock.patch("azext_containerapp._ssh_utils._resize_terminal") + @mock.patch("sys.stdin") + def test_containerapp_ssh(self, resource_group=None, *args): + # containerapp_name = self.create_random_name(prefix='capp', length=24) + # env_name = self.create_random_name(prefix='env', length=24) + + # self.cmd(f'containerapp env create -g {resource_group} -n {env_name}') + # self.cmd(f'containerapp create -g {resource_group} -n {containerapp_name} --environment {env_name} --min-replicas 1 --ingress external') + + # TODO remove hardcoded app info (currently the SSH feature is only enabled in stage) + # these are only in my sub so they won't work on the CI / other people's machines + containerapp_name = "stage" + resource_group = "sca" + + stdout_buff = [] + + def mock_print(*args, end="\n", **kwargs): + out = " ".join([str(a) for a in args]) + if not stdout_buff: + stdout_buff.append(out) + elif end != "\n": + stdout_buff[-1] = f"{stdout_buff[-1]}{out}" + else: + stdout_buff.append(out) + + commands = "\n".join(["whoami", "pwd", "ls -l | grep index.js", "exit\n"]) + expected_output = ["root", "/usr/src/app", "-rw-r--r-- 1 root root 267 Oct 15 00:21 index.js"] + + idx = [0] + def mock_getch(): + ch = commands[idx[0]].encode("utf-8") + idx[0] = (idx[0] + 1) % len(commands) + return ch + + cmd = mock.MagicMock() + cmd.cli_ctx = get_dummy_cli() + from azext_containerapp._validators import validate_ssh + from azext_containerapp.custom import containerapp_ssh + + class Namespace: pass + namespace = Namespace() + setattr(namespace, "name", containerapp_name) + setattr(namespace, "resource_group_name", resource_group) + setattr(namespace, "revision", None) + setattr(namespace, "replica", None) + 
setattr(namespace, "container", None) + + validate_ssh(cmd=cmd, namespace=namespace) # needed to set values for container, replica, revision + + mock_lib = "tty.setcbreak" + if platform.system() == "Windows": + mock_lib = "azext_containerapp._ssh_utils.enable_vt_mode" + + with mock.patch("builtins.print", side_effect=mock_print), mock.patch(mock_lib): + with mock.patch("azext_containerapp._ssh_utils._getch_unix", side_effect=mock_getch), mock.patch("azext_containerapp._ssh_utils._getch_windows", side_effect=mock_getch): + containerapp_ssh(cmd=cmd, resource_group_name=namespace.resource_group_name, name=namespace.name, + container=namespace.container, revision=namespace.revision, replica=namespace.replica, startup_command="sh") + for line in expected_output: + self.assertIn(line, expected_output) + + + @live_only + @ResourceGroupPreparer(location="centraluseuap") + def test_containerapp_logstream(self, resource_group): + containerapp_name = self.create_random_name(prefix='capp', length=24) + env_name = self.create_random_name(prefix='env', length=24) + + self.cmd(f'containerapp env create -g {resource_group} -n {env_name}') + self.cmd(f'containerapp create -g {resource_group} -n {containerapp_name} --environment {env_name} --min-replicas 1 --ingress external --target-port 80') + + self.cmd(f'containerapp log tail -n {containerapp_name} -g {resource_group}') diff --git a/src/containerapp/setup.py b/src/containerapp/setup.py index 15717458301..d0f615849f3 100644 --- a/src/containerapp/setup.py +++ b/src/containerapp/setup.py @@ -16,7 +16,9 @@ # TODO: Confirm this is the right version number you want and it matches your # HISTORY.rst entry. -VERSION = '0.3.1' + +VERSION = '0.3.2' + # The full list of classifiers is available at # https://pypi.python.org/pypi?%3Aaction=list_classifiers