From d41cebb159bc5056e841dbb17c0417031716875e Mon Sep 17 00:00:00 2001
From: SDKAuto
Date: Thu, 29 Apr 2021 06:51:50 +0000
Subject: [PATCH] CodeGen from PR 14161 in Azure/azure-rest-api-specs

Merge 8ae3f9bc38d1e80ca62ea55dc52d1156c2c790ab into dad4abaabaf043b347551523185925fac5b72543
---
 sdk/media/azure-mgmt-media/MANIFEST.in | 1 +
 sdk/media/azure-mgmt-media/_meta.json | 8 +
 .../azure/mgmt/media/__init__.py | 3 +
 .../azure/mgmt/media/_azure_media_services.py | 19 +
 .../azure/mgmt/media/_configuration.py | 4 +-
 .../azure/mgmt/media/_metadata.json | 118 ++
 .../azure/mgmt/media/_version.py | 3 +-
 .../mgmt/media/aio/_azure_media_services.py | 18 +
 .../azure/mgmt/media/aio/_configuration.py | 4 +-
 .../operations/_account_filters_operations.py | 10 +-
 .../operations/_asset_filters_operations.py | 10 +-
 .../aio/operations/_assets_operations.py | 16 +-
 .../_content_key_policies_operations.py | 12 +-
 .../media/aio/operations/_jobs_operations.py | 12 +-
 .../aio/operations/_live_events_operations.py | 46 +-
 .../operations/_live_outputs_operations.py | 16 +-
 .../aio/operations/_locations_operations.py | 4 +-
 .../operations/_mediaservices_operations.py | 95 +-
 .../mgmt/media/aio/operations/_operations.py | 4 +-
 ...private_endpoint_connections_operations.py | 16 +-
 .../_private_link_resources_operations.py | 8 +-
 .../_streaming_endpoints_operations.py | 40 +-
 .../_streaming_locators_operations.py | 12 +-
 .../_streaming_policies_operations.py | 8 +-
 .../aio/operations/_transforms_operations.py | 10 +-
 .../azure/mgmt/media/models/__init__.py | 77 ++
 .../models/_azure_media_services_enums.py | 720 +++++++---
 .../azure/mgmt/media/models/_models.py | 1122 ++++++++++++++--
 .../azure/mgmt/media/models/_models_py3.py | 1194 +++++++++++++++--
 .../operations/_account_filters_operations.py | 10 +-
 .../operations/_asset_filters_operations.py | 10 +-
 .../media/operations/_assets_operations.py | 16 +-
 .../_content_key_policies_operations.py | 12 +-
 .../mgmt/media/operations/_jobs_operations.py | 12 +-
 .../operations/_live_events_operations.py | 46 +-
 .../operations/_live_outputs_operations.py | 16 +-
 .../media/operations/_locations_operations.py | 4 +-
 .../operations/_mediaservices_operations.py | 96 +-
 .../mgmt/media/operations/_operations.py | 4 +-
 ...private_endpoint_connections_operations.py | 16 +-
 .../_private_link_resources_operations.py | 8 +-
 .../_streaming_endpoints_operations.py | 40 +-
 .../_streaming_locators_operations.py | 12 +-
 .../_streaming_policies_operations.py | 8 +-
 .../operations/_transforms_operations.py | 10 +-
 45 files changed, 3191 insertions(+), 739 deletions(-)
 create mode 100644 sdk/media/azure-mgmt-media/_meta.json
 create mode 100644 sdk/media/azure-mgmt-media/azure/mgmt/media/_metadata.json

diff --git a/sdk/media/azure-mgmt-media/MANIFEST.in b/sdk/media/azure-mgmt-media/MANIFEST.in
index a3cb07df8765..3a9b6517412b 100644
--- a/sdk/media/azure-mgmt-media/MANIFEST.in
+++ b/sdk/media/azure-mgmt-media/MANIFEST.in
@@ -1,3 +1,4 @@
+include _meta.json
 recursive-include tests *.py *.yaml
 include *.md
 include azure/__init__.py
diff --git a/sdk/media/azure-mgmt-media/_meta.json b/sdk/media/azure-mgmt-media/_meta.json
new file mode 100644
index 000000000000..b8c4ea75fe42
--- /dev/null
+++ b/sdk/media/azure-mgmt-media/_meta.json
@@ -0,0 +1,8 @@
+{
+ "autorest": "3.3.0",
+ "use": "@autorest/python@5.6.6",
+ "commit": "f4681bf7e3bf977e1187b8b904f25d0fe671e10d",
+ "repository_url": "https://github.com/Azure/azure-rest-api-specs",
+ "autorest_command": "autorest 
specification/mediaservices/resource-manager/readme.md --multiapi --python --python-mode=update --python-sdks-folder=/home/vsts/work/1/s/azure-sdk-for-python/sdk --track2 --use=@autorest/python@5.6.6 --version=3.3.0", + "readme": "specification/mediaservices/resource-manager/readme.md" +} \ No newline at end of file diff --git a/sdk/media/azure-mgmt-media/azure/mgmt/media/__init__.py b/sdk/media/azure-mgmt-media/azure/mgmt/media/__init__.py index 69767e61f86f..3f74a737cb45 100644 --- a/sdk/media/azure-mgmt-media/azure/mgmt/media/__init__.py +++ b/sdk/media/azure-mgmt-media/azure/mgmt/media/__init__.py @@ -7,6 +7,9 @@ # -------------------------------------------------------------------------- from ._azure_media_services import AzureMediaServices +from ._version import VERSION + +__version__ = VERSION __all__ = ['AzureMediaServices'] try: diff --git a/sdk/media/azure-mgmt-media/azure/mgmt/media/_azure_media_services.py b/sdk/media/azure-mgmt-media/azure/mgmt/media/_azure_media_services.py index 03c1a67a4d45..9124613973de 100644 --- a/sdk/media/azure-mgmt-media/azure/mgmt/media/_azure_media_services.py +++ b/sdk/media/azure-mgmt-media/azure/mgmt/media/_azure_media_services.py @@ -16,6 +16,7 @@ from typing import Any, Optional from azure.core.credentials import TokenCredential + from azure.core.pipeline.transport import HttpRequest, HttpResponse from ._configuration import AzureMediaServicesConfiguration from .operations import AccountFiltersOperations @@ -131,6 +132,24 @@ def __init__( self.streaming_endpoints = StreamingEndpointsOperations( self._client, self._config, self._serialize, self._deserialize) + def _send_request(self, http_request, **kwargs): + # type: (HttpRequest, Any) -> HttpResponse + """Runs the network request through the client's chained policies. + + :param http_request: The network request you want to make. Required. + :type http_request: ~azure.core.pipeline.transport.HttpRequest + :keyword bool stream: Whether the response payload will be streamed. Defaults to True. + :return: The response of your network call. Does not do error handling on your response. + :rtype: ~azure.core.pipeline.transport.HttpResponse + """ + path_format_arguments = { + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + } + http_request.url = self._client.format_url(http_request.url, **path_format_arguments) + stream = kwargs.pop("stream", True) + pipeline_response = self._client._pipeline.run(http_request, stream=stream, **kwargs) + return pipeline_response.http_response + def close(self): # type: () -> None self._client.close() diff --git a/sdk/media/azure-mgmt-media/azure/mgmt/media/_configuration.py b/sdk/media/azure-mgmt-media/azure/mgmt/media/_configuration.py index c2d56eb3fc63..fe65c943e84a 100644 --- a/sdk/media/azure-mgmt-media/azure/mgmt/media/_configuration.py +++ b/sdk/media/azure-mgmt-media/azure/mgmt/media/_configuration.py @@ -12,13 +12,14 @@ from azure.core.pipeline import policies from azure.mgmt.core.policies import ARMHttpLoggingPolicy +from ._version import VERSION + if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports from typing import Any from azure.core.credentials import TokenCredential -VERSION = "unknown" class AzureMediaServicesConfiguration(Configuration): """Configuration for AzureMediaServices. 
@@ -47,7 +48,6 @@ def __init__( self.credential = credential self.subscription_id = subscription_id - self.api_version = "2020-05-01" self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default']) kwargs.setdefault('sdk_moniker', 'mgmt-media/{}'.format(VERSION)) self._configure(**kwargs) diff --git a/sdk/media/azure-mgmt-media/azure/mgmt/media/_metadata.json b/sdk/media/azure-mgmt-media/azure/mgmt/media/_metadata.json new file mode 100644 index 000000000000..c5abfd2cb715 --- /dev/null +++ b/sdk/media/azure-mgmt-media/azure/mgmt/media/_metadata.json @@ -0,0 +1,118 @@ +{ + "chosen_version": "", + "total_api_version_list": ["2020-05-01", "2021-05-01"], + "client": { + "name": "AzureMediaServices", + "filename": "_azure_media_services", + "description": "This Swagger was generated by the API Framework.", + "base_url": "\u0027https://management.azure.com\u0027", + "custom_base_url": null, + "azure_arm": true, + "has_lro_operations": true, + "client_side_validation": false, + "sync_imports": "{\"typing\": {\"azurecore\": {\"azure.core.credentials\": [\"TokenCredential\"]}}, \"regular\": {\"azurecore\": {\"azure.profiles\": [\"KnownProfiles\", \"ProfileDefinition\"], \"azure.profiles.multiapiclient\": [\"MultiApiClientMixin\"], \"msrest\": [\"Deserializer\", \"Serializer\"], \"azure.mgmt.core\": [\"ARMPipelineClient\"]}, \"local\": {\"._configuration\": [\"AzureMediaServicesConfiguration\"]}}, \"conditional\": {\"stdlib\": {\"typing\": [\"Any\", \"Optional\"]}, \"azurecore\": {\"azure.core.pipeline.transport\": [\"HttpRequest\", \"HttpResponse\"]}}}", + "async_imports": "{\"typing\": {\"azurecore\": {\"azure.core.credentials_async\": [\"AsyncTokenCredential\"]}}, \"regular\": {\"azurecore\": {\"azure.profiles\": [\"KnownProfiles\", \"ProfileDefinition\"], \"azure.profiles.multiapiclient\": [\"MultiApiClientMixin\"], \"msrest\": [\"Deserializer\", \"Serializer\"], \"azure.mgmt.core\": [\"AsyncARMPipelineClient\"]}, \"local\": {\"._configuration\": [\"AzureMediaServicesConfiguration\"]}}, \"conditional\": {\"stdlib\": {\"typing\": [\"Any\", \"Optional\"]}, \"azurecore\": {\"azure.core.pipeline.transport\": [\"AsyncHttpResponse\", \"HttpRequest\"]}}}" + }, + "global_parameters": { + "sync": { + "credential": { + "signature": "credential, # type: \"TokenCredential\"", + "description": "Credential needed for the client to connect to Azure.", + "docstring_type": "~azure.core.credentials.TokenCredential", + "required": true + }, + "subscription_id": { + "signature": "subscription_id, # type: str", + "description": "The unique identifier for a Microsoft Azure subscription.", + "docstring_type": "str", + "required": true + } + }, + "async": { + "credential": { + "signature": "credential: \"AsyncTokenCredential\",", + "description": "Credential needed for the client to connect to Azure.", + "docstring_type": "~azure.core.credentials_async.AsyncTokenCredential", + "required": true + }, + "subscription_id": { + "signature": "subscription_id: str,", + "description": "The unique identifier for a Microsoft Azure subscription.", + "docstring_type": "str", + "required": true + } + }, + "constant": { + }, + "call": "credential, subscription_id", + "service_client_specific": { + "sync": { + "api_version": { + "signature": "api_version=None, # type: Optional[str]", + "description": "API version to use if no profile is provided, or if missing in profile.", + "docstring_type": "str", + "required": false + }, + "base_url": { + "signature": "base_url=None, # type: Optional[str]", + 
"description": "Service URL", + "docstring_type": "str", + "required": false + }, + "profile": { + "signature": "profile=KnownProfiles.default, # type: KnownProfiles", + "description": "A profile definition, from KnownProfiles to dict.", + "docstring_type": "azure.profiles.KnownProfiles", + "required": false + } + }, + "async": { + "api_version": { + "signature": "api_version: Optional[str] = None,", + "description": "API version to use if no profile is provided, or if missing in profile.", + "docstring_type": "str", + "required": false + }, + "base_url": { + "signature": "base_url: Optional[str] = None,", + "description": "Service URL", + "docstring_type": "str", + "required": false + }, + "profile": { + "signature": "profile: KnownProfiles = KnownProfiles.default,", + "description": "A profile definition, from KnownProfiles to dict.", + "docstring_type": "azure.profiles.KnownProfiles", + "required": false + } + } + } + }, + "config": { + "credential": true, + "credential_scopes": ["https://management.azure.com/.default"], + "credential_default_policy_type": "BearerTokenCredentialPolicy", + "credential_default_policy_type_has_async_version": true, + "credential_key_header_name": null, + "sync_imports": "{\"regular\": {\"azurecore\": {\"azure.core.configuration\": [\"Configuration\"], \"azure.core.pipeline\": [\"policies\"], \"azure.mgmt.core.policies\": [\"ARMHttpLoggingPolicy\"]}, \"local\": {\"._version\": [\"VERSION\"]}}, \"conditional\": {\"stdlib\": {\"typing\": [\"Any\"]}}, \"typing\": {\"azurecore\": {\"azure.core.credentials\": [\"TokenCredential\"]}}}", + "async_imports": "{\"regular\": {\"azurecore\": {\"azure.core.configuration\": [\"Configuration\"], \"azure.core.pipeline\": [\"policies\"], \"azure.mgmt.core.policies\": [\"ARMHttpLoggingPolicy\"]}, \"local\": {\".._version\": [\"VERSION\"]}}, \"conditional\": {\"stdlib\": {\"typing\": [\"Any\"]}}, \"typing\": {\"azurecore\": {\"azure.core.credentials_async\": [\"AsyncTokenCredential\"]}}}" + }, + "operation_groups": { + "account_filters": "AccountFiltersOperations", + "operations": "Operations", + "mediaservices": "MediaservicesOperations", + "private_link_resources": "PrivateLinkResourcesOperations", + "private_endpoint_connections": "PrivateEndpointConnectionsOperations", + "locations": "LocationsOperations", + "assets": "AssetsOperations", + "asset_filters": "AssetFiltersOperations", + "content_key_policies": "ContentKeyPoliciesOperations", + "transforms": "TransformsOperations", + "jobs": "JobsOperations", + "streaming_policies": "StreamingPoliciesOperations", + "streaming_locators": "StreamingLocatorsOperations", + "live_events": "LiveEventsOperations", + "live_outputs": "LiveOutputsOperations", + "streaming_endpoints": "StreamingEndpointsOperations" + } +} \ No newline at end of file diff --git a/sdk/media/azure-mgmt-media/azure/mgmt/media/_version.py b/sdk/media/azure-mgmt-media/azure/mgmt/media/_version.py index 20e28141456a..e5754a47ce68 100644 --- a/sdk/media/azure-mgmt-media/azure/mgmt/media/_version.py +++ b/sdk/media/azure-mgmt-media/azure/mgmt/media/_version.py @@ -6,5 +6,4 @@ # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- -VERSION = "7.0.0b1" - +VERSION = "1.0.0b1" diff --git a/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/_azure_media_services.py b/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/_azure_media_services.py index 87f512de71fd..ec11cb6398c1 100644 --- a/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/_azure_media_services.py +++ b/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/_azure_media_services.py @@ -8,6 +8,7 @@ from typing import Any, Optional, TYPE_CHECKING +from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest from azure.mgmt.core import AsyncARMPipelineClient from msrest import Deserializer, Serializer @@ -128,6 +129,23 @@ def __init__( self.streaming_endpoints = StreamingEndpointsOperations( self._client, self._config, self._serialize, self._deserialize) + async def _send_request(self, http_request: HttpRequest, **kwargs: Any) -> AsyncHttpResponse: + """Runs the network request through the client's chained policies. + + :param http_request: The network request you want to make. Required. + :type http_request: ~azure.core.pipeline.transport.HttpRequest + :keyword bool stream: Whether the response payload will be streamed. Defaults to True. + :return: The response of your network call. Does not do error handling on your response. + :rtype: ~azure.core.pipeline.transport.AsyncHttpResponse + """ + path_format_arguments = { + 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), + } + http_request.url = self._client.format_url(http_request.url, **path_format_arguments) + stream = kwargs.pop("stream", True) + pipeline_response = await self._client._pipeline.run(http_request, stream=stream, **kwargs) + return pipeline_response.http_response + async def close(self) -> None: await self._client.close() diff --git a/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/_configuration.py b/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/_configuration.py index f2f21a322e49..3219fa71818e 100644 --- a/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/_configuration.py +++ b/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/_configuration.py @@ -12,11 +12,12 @@ from azure.core.pipeline import policies from azure.mgmt.core.policies import ARMHttpLoggingPolicy +from .._version import VERSION + if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports from azure.core.credentials_async import AsyncTokenCredential -VERSION = "unknown" class AzureMediaServicesConfiguration(Configuration): """Configuration for AzureMediaServices. 
@@ -44,7 +45,6 @@ def __init__( self.credential = credential self.subscription_id = subscription_id - self.api_version = "2020-05-01" self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default']) kwargs.setdefault('sdk_moniker', 'mgmt-media/{}'.format(VERSION)) self._configure(**kwargs) diff --git a/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/operations/_account_filters_operations.py b/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/operations/_account_filters_operations.py index 9d40ff13c12f..b4029572d360 100644 --- a/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/operations/_account_filters_operations.py +++ b/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/operations/_account_filters_operations.py @@ -107,7 +107,7 @@ async def get_next(next_link=None): response = pipeline_response.http_response if response.status_code not in [200]: - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) @@ -172,7 +172,7 @@ async def get( if response.status_code not in [200, 404]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = None @@ -247,7 +247,7 @@ async def create_or_update( if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if response.status_code == 200: @@ -316,7 +316,7 @@ async def delete( if response.status_code not in [200, 204]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: @@ -386,7 +386,7 @@ async def update( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('AccountFilter', pipeline_response) diff --git a/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/operations/_asset_filters_operations.py b/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/operations/_asset_filters_operations.py index 5d77a844b2f7..4def84178adb 100644 --- a/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/operations/_asset_filters_operations.py +++ b/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/operations/_asset_filters_operations.py @@ -111,7 +111,7 @@ async def get_next(next_link=None): response = pipeline_response.http_response if response.status_code not in [200]: - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) 
map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) @@ -180,7 +180,7 @@ async def get( if response.status_code not in [200, 404]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = None @@ -259,7 +259,7 @@ async def create_or_update( if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if response.status_code == 200: @@ -332,7 +332,7 @@ async def delete( if response.status_code not in [200, 204]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: @@ -406,7 +406,7 @@ async def update( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('AssetFilter', pipeline_response) diff --git a/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/operations/_assets_operations.py b/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/operations/_assets_operations.py index f3d339f0c70a..b400b99451be 100644 --- a/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/operations/_assets_operations.py +++ b/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/operations/_assets_operations.py @@ -124,7 +124,7 @@ async def get_next(next_link=None): response = pipeline_response.http_response if response.status_code not in [200]: - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) @@ -189,7 +189,7 @@ async def get( if response.status_code not in [200, 404]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = None @@ -264,7 +264,7 @@ async def create_or_update( if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if response.status_code == 200: @@ -333,7 +333,7 @@ async def delete( if response.status_code not in 
[200, 204]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: @@ -403,7 +403,7 @@ async def update( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('Asset', pipeline_response) @@ -477,7 +477,7 @@ async def list_container_sas( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('AssetContainerSas', pipeline_response) @@ -543,7 +543,7 @@ async def get_encryption_key( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('StorageEncryptedAssetDecryptionData', pipeline_response) @@ -608,7 +608,7 @@ async def list_streaming_locators( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('ListStreamingLocatorsResponse', pipeline_response) diff --git a/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/operations/_content_key_policies_operations.py b/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/operations/_content_key_policies_operations.py index 8e293bce0ffb..1b41973bf6cb 100644 --- a/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/operations/_content_key_policies_operations.py +++ b/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/operations/_content_key_policies_operations.py @@ -124,7 +124,7 @@ async def get_next(next_link=None): response = pipeline_response.http_response if response.status_code not in [200]: - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) @@ -189,7 +189,7 @@ async def get( if response.status_code not in [200, 404]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = None @@ -264,7 +264,7 @@ async def create_or_update( if response.status_code not in 
[200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if response.status_code == 200: @@ -333,7 +333,7 @@ async def delete( if response.status_code not in [200, 204]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: @@ -403,7 +403,7 @@ async def update( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('ContentKeyPolicy', pipeline_response) @@ -468,7 +468,7 @@ async def get_policy_properties_with_secrets( if response.status_code not in [200, 404]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = None diff --git a/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/operations/_jobs_operations.py b/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/operations/_jobs_operations.py index 9a0021051309..59a0bdf5d593 100644 --- a/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/operations/_jobs_operations.py +++ b/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/operations/_jobs_operations.py @@ -121,7 +121,7 @@ async def get_next(next_link=None): response = pipeline_response.http_response if response.status_code not in [200]: - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) @@ -190,7 +190,7 @@ async def get( if response.status_code not in [200, 404]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = None @@ -269,7 +269,7 @@ async def create( if response.status_code not in [201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('Job', pipeline_response) @@ -338,7 +338,7 @@ async def delete( if response.status_code not in [200, 204]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = 
self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: @@ -414,7 +414,7 @@ async def update( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('Job', pipeline_response) @@ -483,7 +483,7 @@ async def cancel_job( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: diff --git a/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/operations/_live_events_operations.py b/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/operations/_live_events_operations.py index 00701a82aeb2..60d89ad920cb 100644 --- a/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/operations/_live_events_operations.py +++ b/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/operations/_live_events_operations.py @@ -109,7 +109,7 @@ async def get_next(next_link=None): response = pipeline_response.http_response if response.status_code not in [200]: - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) @@ -174,7 +174,7 @@ async def get( if response.status_code not in [200, 404]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = None @@ -235,7 +235,7 @@ async def _create_initial( if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if response.status_code == 200: @@ -276,8 +276,8 @@ async def begin_create( :type auto_start: bool :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. - :keyword polling: True for ARMPolling, False for no polling, or a - polling object for personal polling strategy + :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method, + False for no polling, or your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
:return: An instance of AsyncLROPoller that returns either LiveEvent or the result of cls(response) @@ -378,7 +378,7 @@ async def _update_initial( if response.status_code not in [200, 202]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if response.status_code == 200: @@ -413,8 +413,8 @@ async def begin_update( :type parameters: ~azure.mgmt.media.models.LiveEvent :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. - :keyword polling: True for ARMPolling, False for no polling, or a - polling object for personal polling strategy + :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method, + False for no polling, or your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of AsyncLROPoller that returns either LiveEvent or the result of cls(response) @@ -508,7 +508,7 @@ async def _delete_initial( if response.status_code not in [200, 202, 204]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: @@ -535,8 +535,8 @@ async def begin_delete( :type live_event_name: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. - :keyword polling: True for ARMPolling, False for no polling, or a - polling object for personal polling strategy + :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method, + False for no polling, or your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of AsyncLROPoller that returns either None or the result of cls(response) @@ -626,7 +626,7 @@ async def _allocate_initial( if response.status_code not in [200, 202]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: @@ -653,8 +653,8 @@ async def begin_allocate( :type live_event_name: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. 
- :keyword polling: True for ARMPolling, False for no polling, or a - polling object for personal polling strategy + :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method, + False for no polling, or your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of AsyncLROPoller that returns either None or the result of cls(response) @@ -744,7 +744,7 @@ async def _start_initial( if response.status_code not in [200, 202]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: @@ -772,8 +772,8 @@ async def begin_start( :type live_event_name: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. - :keyword polling: True for ARMPolling, False for no polling, or a - polling object for personal polling strategy + :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method, + False for no polling, or your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of AsyncLROPoller that returns either None or the result of cls(response) @@ -869,7 +869,7 @@ async def _stop_initial( if response.status_code not in [200, 202]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: @@ -899,8 +899,8 @@ async def begin_stop( :type parameters: ~azure.mgmt.media.models.LiveEventActionInput :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. - :keyword polling: True for ARMPolling, False for no polling, or a - polling object for personal polling strategy + :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method, + False for no polling, or your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response) @@ -991,7 +991,7 @@ async def _reset_initial( if response.status_code not in [200, 202]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: @@ -1020,8 +1020,8 @@ async def begin_reset( :type live_event_name: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. - :keyword polling: True for ARMPolling, False for no polling, or a - polling object for personal polling strategy + :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method, + False for no polling, or your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of AsyncLROPoller that returns either None or the result of cls(response) diff --git a/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/operations/_live_outputs_operations.py b/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/operations/_live_outputs_operations.py index ec299a871cd8..520c3e61ddda 100644 --- a/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/operations/_live_outputs_operations.py +++ b/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/operations/_live_outputs_operations.py @@ -113,7 +113,7 @@ async def get_next(next_link=None): response = pipeline_response.http_response if response.status_code not in [200]: - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) @@ -182,7 +182,7 @@ async def get( if response.status_code not in [200, 404]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = None @@ -242,7 +242,7 @@ async def _create_initial( if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if response.status_code == 200: @@ -282,8 +282,8 @@ async def begin_create( :type parameters: ~azure.mgmt.media.models.LiveOutput :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. 
- :keyword polling: True for ARMPolling, False for no polling, or a - polling object for personal polling strategy + :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method, + False for no polling, or your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of AsyncLROPoller that returns either LiveOutput or the result of cls(response) @@ -381,7 +381,7 @@ async def _delete_initial( if response.status_code not in [200, 202, 204]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: @@ -412,8 +412,8 @@ async def begin_delete( :type live_output_name: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. - :keyword polling: True for ARMPolling, False for no polling, or a - polling object for personal polling strategy + :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method, + False for no polling, or your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of AsyncLROPoller that returns either None or the result of cls(response) diff --git a/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/operations/_locations_operations.py b/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/operations/_locations_operations.py index f5c88e9c9b81..5eea4f0c7e50 100644 --- a/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/operations/_locations_operations.py +++ b/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/operations/_locations_operations.py @@ -64,7 +64,7 @@ async def check_name_availability( 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) - api_version = "2020-05-01" + api_version = "2021-05-01" content_type = kwargs.pop("content_type", "application/json") accept = "application/json" @@ -94,7 +94,7 @@ async def check_name_availability( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('EntityNameAvailabilityCheckOutput', pipeline_response) diff --git a/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/operations/_mediaservices_operations.py b/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/operations/_mediaservices_operations.py index ef7d71adabbc..c95090812dea 100644 --- a/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/operations/_mediaservices_operations.py +++ b/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/operations/_mediaservices_operations.py @@ -62,7 +62,7 @@ def list( 401: ClientAuthenticationError, 404: 
ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) - api_version = "2020-05-01" + api_version = "2021-05-01" accept = "application/json" def prepare_request(next_link=None): @@ -103,7 +103,7 @@ async def get_next(next_link=None): response = pipeline_response.http_response if response.status_code not in [200]: - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) @@ -138,7 +138,7 @@ async def get( 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) - api_version = "2020-05-01" + api_version = "2021-05-01" accept = "application/json" # Construct URL @@ -164,7 +164,7 @@ async def get( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('MediaService', pipeline_response) @@ -202,7 +202,7 @@ async def create_or_update( 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) - api_version = "2020-05-01" + api_version = "2021-05-01" content_type = kwargs.pop("content_type", "application/json") accept = "application/json" @@ -233,7 +233,7 @@ async def create_or_update( if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if response.status_code == 200: @@ -272,7 +272,7 @@ async def delete( 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) - api_version = "2020-05-01" + api_version = "2021-05-01" accept = "application/json" # Construct URL @@ -298,7 +298,7 @@ async def delete( if response.status_code not in [200, 204]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: @@ -310,7 +310,7 @@ async def update( self, resource_group_name: str, account_name: str, - parameters: "_models.MediaService", + parameters: "_models.MediaServiceUpdate", **kwargs ) -> "_models.MediaService": """Update a Media Services account. @@ -322,7 +322,7 @@ async def update( :param account_name: The Media Services account name. :type account_name: str :param parameters: The request parameters. 
- :type parameters: ~azure.mgmt.media.models.MediaService + :type parameters: ~azure.mgmt.media.models.MediaServiceUpdate :keyword callable cls: A custom type or function that will be passed the direct response :return: MediaService, or the result of cls(response) :rtype: ~azure.mgmt.media.models.MediaService @@ -333,7 +333,7 @@ async def update( 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) - api_version = "2020-05-01" + api_version = "2021-05-01" content_type = kwargs.pop("content_type", "application/json") accept = "application/json" @@ -356,7 +356,7 @@ async def update( header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(parameters, 'MediaService') + body_content = self._serialize.body(parameters, 'MediaServiceUpdate') body_content_kwargs['content'] = body_content request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) @@ -364,7 +364,7 @@ async def update( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('MediaService', pipeline_response) @@ -403,7 +403,7 @@ async def sync_storage_keys( 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) - api_version = "2020-05-01" + api_version = "2021-05-01" content_type = kwargs.pop("content_type", "application/json") accept = "application/json" @@ -434,7 +434,7 @@ async def sync_storage_keys( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: @@ -469,7 +469,7 @@ async def list_edge_policies( 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) - api_version = "2020-05-01" + api_version = "2021-05-01" content_type = kwargs.pop("content_type", "application/json") accept = "application/json" @@ -500,7 +500,7 @@ async def list_edge_policies( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('EdgePolicies', pipeline_response) @@ -529,7 +529,7 @@ def list_by_subscription( 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) - api_version = "2020-05-01" + api_version = "2021-05-01" accept = "application/json" def prepare_request(next_link=None): @@ -569,7 +569,7 @@ async def get_next(next_link=None): response = pipeline_response.http_response if response.status_code not in 
[200]: - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) @@ -579,60 +579,3 @@ async def get_next(next_link=None): get_next, extract_data ) list_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Media/mediaservices'} # type: ignore - - async def get_by_subscription( - self, - account_name: str, - **kwargs - ) -> "_models.MediaService": - """Get a Media Services account. - - Get the details of a Media Services account. - - :param account_name: The Media Services account name. - :type account_name: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: MediaService, or the result of cls(response) - :rtype: ~azure.mgmt.media.models.MediaService - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.MediaService"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "2020-05-01" - accept = "application/json" - - # Construct URL - url = self.get_by_subscription.metadata['url'] # type: ignore - path_format_arguments = { - 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), - 'accountName': self._serialize.url("account_name", account_name, 'str'), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) - raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) - - deserialized = self._deserialize('MediaService', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Media/mediaservices/{accountName}'} # type: ignore diff --git a/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/operations/_operations.py b/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/operations/_operations.py index 8803efde27f5..06f414605452 100644 --- a/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/operations/_operations.py +++ b/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/operations/_operations.py @@ -59,7 +59,7 @@ def list( 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) - api_version = "2020-05-01" + api_version = "2021-05-01" accept = "application/json" def prepare_request(next_link=None): @@ -95,7 +95,7 @@ async def get_next(next_link=None): response = pipeline_response.http_response if 
response.status_code not in [200]: - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) diff --git a/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/operations/_private_endpoint_connections_operations.py b/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/operations/_private_endpoint_connections_operations.py index 57d57d8c4a59..52eeed308e7e 100644 --- a/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/operations/_private_endpoint_connections_operations.py +++ b/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/operations/_private_endpoint_connections_operations.py @@ -64,7 +64,7 @@ async def list( 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) - api_version = "2020-05-01" + api_version = "2021-05-01" accept = "application/json" # Construct URL @@ -90,7 +90,7 @@ async def list( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('PrivateEndpointConnectionListResult', pipeline_response) @@ -128,7 +128,7 @@ async def get( 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) - api_version = "2020-05-01" + api_version = "2021-05-01" accept = "application/json" # Construct URL @@ -155,7 +155,7 @@ async def get( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response) @@ -196,7 +196,7 @@ async def create_or_update( 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) - api_version = "2020-05-01" + api_version = "2021-05-01" content_type = kwargs.pop("content_type", "application/json") accept = "application/json" @@ -228,7 +228,7 @@ async def create_or_update( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response) @@ -266,7 +266,7 @@ async def delete( 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) - api_version = "2020-05-01" + api_version = "2021-05-01" accept = "application/json" # Construct URL @@ -293,7 +293,7 @@ async def delete( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, 
response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: diff --git a/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/operations/_private_link_resources_operations.py b/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/operations/_private_link_resources_operations.py index beeb5b60aeb2..ea41ce6ecc53 100644 --- a/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/operations/_private_link_resources_operations.py +++ b/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/operations/_private_link_resources_operations.py @@ -64,7 +64,7 @@ async def list( 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) - api_version = "2020-05-01" + api_version = "2021-05-01" accept = "application/json" # Construct URL @@ -90,7 +90,7 @@ async def list( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('PrivateLinkResourceListResult', pipeline_response) @@ -128,7 +128,7 @@ async def get( 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) - api_version = "2020-05-01" + api_version = "2021-05-01" accept = "application/json" # Construct URL @@ -155,7 +155,7 @@ async def get( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('PrivateLinkResource', pipeline_response) diff --git a/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/operations/_streaming_endpoints_operations.py b/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/operations/_streaming_endpoints_operations.py index b22df4a06ad8..b4b6c64972b8 100644 --- a/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/operations/_streaming_endpoints_operations.py +++ b/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/operations/_streaming_endpoints_operations.py @@ -109,7 +109,7 @@ async def get_next(next_link=None): response = pipeline_response.http_response if response.status_code not in [200]: - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) @@ -174,7 +174,7 @@ async def get( if response.status_code not in [200, 404]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = None @@ -235,7 +235,7 @@ async def _create_initial( if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = 
self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if response.status_code == 200: @@ -276,8 +276,8 @@ async def begin_create( :type auto_start: bool :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. - :keyword polling: True for ARMPolling, False for no polling, or a - polling object for personal polling strategy + :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method, + False for no polling, or your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of AsyncLROPoller that returns either StreamingEndpoint or the result of cls(response) @@ -378,7 +378,7 @@ async def _update_initial( if response.status_code not in [200, 202]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if response.status_code == 200: @@ -415,8 +415,8 @@ async def begin_update( :type parameters: ~azure.mgmt.media.models.StreamingEndpoint :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. - :keyword polling: True for ARMPolling, False for no polling, or a - polling object for personal polling strategy + :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method, + False for no polling, or your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of AsyncLROPoller that returns either StreamingEndpoint or the result of cls(response) @@ -510,7 +510,7 @@ async def _delete_initial( if response.status_code not in [200, 202, 204]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: @@ -537,8 +537,8 @@ async def begin_delete( :type streaming_endpoint_name: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. - :keyword polling: True for ARMPolling, False for no polling, or a - polling object for personal polling strategy + :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method, + False for no polling, or your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
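The reworded `polling` docstrings describe the three accepted values for these long-running operations: True (the default, which selects AsyncARMPolling), False (no polling), or an initialized AsyncPollingMethod for a custom strategy. A minimal async sketch, assuming azure-identity is installed and using placeholder resource-group, account and endpoint names:

    import asyncio

    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.media.aio import AzureMediaServices
    from azure.mgmt.media import models

    async def create_streaming_endpoint():
        async with DefaultAzureCredential() as credential:
            async with AzureMediaServices(credential, "<subscription-id>") as client:
                poller = await client.streaming_endpoints.begin_create(
                    "<resource-group>", "<account-name>", "myendpoint",
                    # StreamingEndpoint keyword arguments here are placeholders.
                    models.StreamingEndpoint(location="westus"),
                    polling=True,        # default: AsyncARMPolling
                    # polling=False      # send the request without polling
                    # polling=my_method  # any initialized AsyncPollingMethod
                )
                endpoint = await poller.result()
                print(endpoint.name, endpoint.resource_state)

    asyncio.run(create_streaming_endpoint())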
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response) @@ -628,7 +628,7 @@ async def _start_initial( if response.status_code not in [200, 202]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: @@ -655,8 +655,8 @@ async def begin_start( :type streaming_endpoint_name: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. - :keyword polling: True for ARMPolling, False for no polling, or a - polling object for personal polling strategy + :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method, + False for no polling, or your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of AsyncLROPoller that returns either None or the result of cls(response) @@ -746,7 +746,7 @@ async def _stop_initial( if response.status_code not in [200, 202]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: @@ -773,8 +773,8 @@ async def begin_stop( :type streaming_endpoint_name: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. - :keyword polling: True for ARMPolling, False for no polling, or a - polling object for personal polling strategy + :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method, + False for no polling, or your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of AsyncLROPoller that returns either None or the result of cls(response) @@ -870,7 +870,7 @@ async def _scale_initial( if response.status_code not in [200, 202]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: @@ -900,8 +900,8 @@ async def begin_scale( :type parameters: ~azure.mgmt.media.models.StreamingEntityScaleUnit :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. 
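begin_start, begin_stop and begin_scale follow the same pattern but their pollers resolve to None, so the useful signal is completion rather than a payload. A short sketch, assuming `client` is an already-constructed azure.mgmt.media.aio.AzureMediaServices and the resource names are placeholders:

    async def restart_streaming_endpoint(client):
        # Stop, then start, the streaming endpoint; result() only waits for completion.
        stop_poller = await client.streaming_endpoints.begin_stop(
            "<resource-group>", "<account-name>", "myendpoint")
        await stop_poller.result()

        start_poller = await client.streaming_endpoints.begin_start(
            "<resource-group>", "<account-name>", "myendpoint")
        await start_poller.result()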
- :keyword polling: True for ARMPolling, False for no polling, or a - polling object for personal polling strategy + :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method, + False for no polling, or your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of AsyncLROPoller that returns either None or the result of cls(response) diff --git a/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/operations/_streaming_locators_operations.py b/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/operations/_streaming_locators_operations.py index c03c181a7c1c..5d1e9e4a9d89 100644 --- a/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/operations/_streaming_locators_operations.py +++ b/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/operations/_streaming_locators_operations.py @@ -124,7 +124,7 @@ async def get_next(next_link=None): response = pipeline_response.http_response if response.status_code not in [200]: - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) @@ -189,7 +189,7 @@ async def get( if response.status_code not in [200, 404]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = None @@ -264,7 +264,7 @@ async def create( if response.status_code not in [201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('StreamingLocator', pipeline_response) @@ -329,7 +329,7 @@ async def delete( if response.status_code not in [200, 204]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: @@ -391,7 +391,7 @@ async def list_content_keys( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('ListContentKeysResponse', pipeline_response) @@ -456,7 +456,7 @@ async def list_paths( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) 
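On success list_paths returns a ListPathsResponse and list_content_keys returns a ListContentKeysResponse; on failure the ApiError body is now read with failsafe_deserialize, so a malformed error payload no longer raises a secondary deserialization error. A hedged sketch of reading a locator's paths and content keys, with placeholder names:

    async def show_locator_paths(client):
        paths = await client.streaming_locators.list_paths(
            "<resource-group>", "<account-name>", "mylocator")
        for streaming_path in paths.streaming_paths or []:
            # Attribute names follow the StreamingPath model.
            print(streaming_path.streaming_protocol, streaming_path.paths)
        for download_path in paths.download_paths or []:
            print(download_path)

        keys = await client.streaming_locators.list_content_keys(
            "<resource-group>", "<account-name>", "mylocator")
        for key in keys.content_keys or []:
            print(key.id, key.type)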
deserialized = self._deserialize('ListPathsResponse', pipeline_response) diff --git a/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/operations/_streaming_policies_operations.py b/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/operations/_streaming_policies_operations.py index 5472b3c7a31f..d536a9c765f8 100644 --- a/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/operations/_streaming_policies_operations.py +++ b/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/operations/_streaming_policies_operations.py @@ -124,7 +124,7 @@ async def get_next(next_link=None): response = pipeline_response.http_response if response.status_code not in [200]: - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) @@ -189,7 +189,7 @@ async def get( if response.status_code not in [200, 404]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = None @@ -264,7 +264,7 @@ async def create( if response.status_code not in [201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('StreamingPolicy', pipeline_response) @@ -329,7 +329,7 @@ async def delete( if response.status_code not in [200, 204]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: diff --git a/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/operations/_transforms_operations.py b/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/operations/_transforms_operations.py index d56ced16687c..e429116f5d23 100644 --- a/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/operations/_transforms_operations.py +++ b/sdk/media/azure-mgmt-media/azure/mgmt/media/aio/operations/_transforms_operations.py @@ -117,7 +117,7 @@ async def get_next(next_link=None): response = pipeline_response.http_response if response.status_code not in [200]: - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) @@ -182,7 +182,7 @@ async def get( if response.status_code not in [200, 404]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = None @@ -257,7 +257,7 @@ async def create_or_update( if response.status_code not in [200, 201]: 
map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if response.status_code == 200: @@ -326,7 +326,7 @@ async def delete( if response.status_code not in [200, 204]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: @@ -396,7 +396,7 @@ async def update( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('Transform', pipeline_response) diff --git a/sdk/media/azure-mgmt-media/azure/mgmt/media/models/__init__.py b/sdk/media/azure-mgmt-media/azure/mgmt/media/models/__init__.py index aec1da56df38..40499b50de42 100644 --- a/sdk/media/azure-mgmt-media/azure/mgmt/media/models/__init__.py +++ b/sdk/media/azure-mgmt-media/azure/mgmt/media/models/__init__.py @@ -9,6 +9,7 @@ try: from ._models_py3 import AacAudio from ._models_py3 import AbsoluteClipTime + from ._models_py3 import AccessControl from ._models_py3 import AccountEncryption from ._models_py3 import AccountFilter from ._models_py3 import AccountFilterCollection @@ -25,6 +26,7 @@ from ._models_py3 import Audio from ._models_py3 import AudioAnalyzerPreset from ._models_py3 import AudioOverlay + from ._models_py3 import AudioTrackDescriptor from ._models_py3 import BuiltInStandardEncoderPreset from ._models_py3 import CbcsDrmConfiguration from ._models_py3 import CencDrmConfiguration @@ -76,13 +78,20 @@ from ._models_py3 import Filters from ._models_py3 import FirstQuality from ._models_py3 import Format + from ._models_py3 import FromAllInputFile + from ._models_py3 import FromEachInputFile from ._models_py3 import H264Layer from ._models_py3 import H264Video + from ._models_py3 import H265Layer + from ._models_py3 import H265Video + from ._models_py3 import H265VideoLayer from ._models_py3 import Hls from ._models_py3 import IPAccessControl from ._models_py3 import IPRange from ._models_py3 import Image from ._models_py3 import ImageFormat + from ._models_py3 import InputDefinition + from ._models_py3 import InputFile from ._models_py3 import Job from ._models_py3 import JobCollection from ._models_py3 import JobError @@ -91,12 +100,14 @@ from ._models_py3 import JobInputAsset from ._models_py3 import JobInputClip from ._models_py3 import JobInputHttp + from ._models_py3 import JobInputSequence from ._models_py3 import JobInputs from ._models_py3 import JobOutput from ._models_py3 import JobOutputAsset from ._models_py3 import JpgFormat from ._models_py3 import JpgImage from ._models_py3 import JpgLayer + from ._models_py3 import KeyDelivery from ._models_py3 import KeyVaultProperties from ._models_py3 import Layer from ._models_py3 import ListContainerSasInput @@ -123,6 +134,7 @@ from ._models_py3 import MediaService from ._models_py3 import MediaServiceCollection from ._models_py3 import MediaServiceIdentity + from 
._models_py3 import MediaServiceUpdate from ._models_py3 import MetricDimension from ._models_py3 import MetricSpecification from ._models_py3 import Mp4Format @@ -150,6 +162,10 @@ from ._models_py3 import ProxyResource from ._models_py3 import Rectangle from ._models_py3 import Resource + from ._models_py3 import SelectAudioTrackByAttribute + from ._models_py3 import SelectAudioTrackById + from ._models_py3 import SelectVideoTrackByAttribute + from ._models_py3 import SelectVideoTrackById from ._models_py3 import ServiceSpecification from ._models_py3 import StandardEncoderPreset from ._models_py3 import StorageAccount @@ -170,6 +186,8 @@ from ._models_py3 import StreamingPolicyPlayReadyConfiguration from ._models_py3 import StreamingPolicyWidevineConfiguration from ._models_py3 import SyncStorageKeysInput + from ._models_py3 import SystemData + from ._models_py3 import TrackDescriptor from ._models_py3 import TrackPropertyCondition from ._models_py3 import TrackSelection from ._models_py3 import TrackedResource @@ -182,9 +200,11 @@ from ._models_py3 import VideoAnalyzerPreset from ._models_py3 import VideoLayer from ._models_py3 import VideoOverlay + from ._models_py3 import VideoTrackDescriptor except (SyntaxError, ImportError): from ._models import AacAudio # type: ignore from ._models import AbsoluteClipTime # type: ignore + from ._models import AccessControl # type: ignore from ._models import AccountEncryption # type: ignore from ._models import AccountFilter # type: ignore from ._models import AccountFilterCollection # type: ignore @@ -201,6 +221,7 @@ from ._models import Audio # type: ignore from ._models import AudioAnalyzerPreset # type: ignore from ._models import AudioOverlay # type: ignore + from ._models import AudioTrackDescriptor # type: ignore from ._models import BuiltInStandardEncoderPreset # type: ignore from ._models import CbcsDrmConfiguration # type: ignore from ._models import CencDrmConfiguration # type: ignore @@ -252,13 +273,20 @@ from ._models import Filters # type: ignore from ._models import FirstQuality # type: ignore from ._models import Format # type: ignore + from ._models import FromAllInputFile # type: ignore + from ._models import FromEachInputFile # type: ignore from ._models import H264Layer # type: ignore from ._models import H264Video # type: ignore + from ._models import H265Layer # type: ignore + from ._models import H265Video # type: ignore + from ._models import H265VideoLayer # type: ignore from ._models import Hls # type: ignore from ._models import IPAccessControl # type: ignore from ._models import IPRange # type: ignore from ._models import Image # type: ignore from ._models import ImageFormat # type: ignore + from ._models import InputDefinition # type: ignore + from ._models import InputFile # type: ignore from ._models import Job # type: ignore from ._models import JobCollection # type: ignore from ._models import JobError # type: ignore @@ -267,12 +295,14 @@ from ._models import JobInputAsset # type: ignore from ._models import JobInputClip # type: ignore from ._models import JobInputHttp # type: ignore + from ._models import JobInputSequence # type: ignore from ._models import JobInputs # type: ignore from ._models import JobOutput # type: ignore from ._models import JobOutputAsset # type: ignore from ._models import JpgFormat # type: ignore from ._models import JpgImage # type: ignore from ._models import JpgLayer # type: ignore + from ._models import KeyDelivery # type: ignore from ._models import KeyVaultProperties # type: ignore 
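Both the typed (_models_py3) and the fallback (_models) definitions are re-exported from azure.mgmt.media.models, so newly added classes such as MediaServiceUpdate and KeyDelivery are imported from the package namespace regardless of which branch loaded. A hedged sketch of the new account-update shape; the keyword names below are assumptions based on the model names in this list:

    from azure.mgmt.media.models import (
        AccessControl,
        DefaultAction,
        KeyDelivery,
        MediaServiceUpdate,
    )

    # Patch only the tags and the Key Delivery access control of an existing account.
    # Keyword names (tags, key_delivery, access_control, default_action) are assumptions.
    update = MediaServiceUpdate(
        tags={"environment": "test"},
        key_delivery=KeyDelivery(
            access_control=AccessControl(default_action=DefaultAction.ALLOW),
        ),
    )
    # media_service = await client.mediaservices.update(
    #     "<resource-group>", "<account-name>", update)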
from ._models import Layer # type: ignore from ._models import ListContainerSasInput # type: ignore @@ -299,6 +329,7 @@ from ._models import MediaService # type: ignore from ._models import MediaServiceCollection # type: ignore from ._models import MediaServiceIdentity # type: ignore + from ._models import MediaServiceUpdate # type: ignore from ._models import MetricDimension # type: ignore from ._models import MetricSpecification # type: ignore from ._models import Mp4Format # type: ignore @@ -326,6 +357,10 @@ from ._models import ProxyResource # type: ignore from ._models import Rectangle # type: ignore from ._models import Resource # type: ignore + from ._models import SelectAudioTrackByAttribute # type: ignore + from ._models import SelectAudioTrackById # type: ignore + from ._models import SelectVideoTrackByAttribute # type: ignore + from ._models import SelectVideoTrackById # type: ignore from ._models import ServiceSpecification # type: ignore from ._models import StandardEncoderPreset # type: ignore from ._models import StorageAccount # type: ignore @@ -346,6 +381,8 @@ from ._models import StreamingPolicyPlayReadyConfiguration # type: ignore from ._models import StreamingPolicyWidevineConfiguration # type: ignore from ._models import SyncStorageKeysInput # type: ignore + from ._models import SystemData # type: ignore + from ._models import TrackDescriptor # type: ignore from ._models import TrackPropertyCondition # type: ignore from ._models import TrackSelection # type: ignore from ._models import TrackedResource # type: ignore @@ -358,28 +395,38 @@ from ._models import VideoAnalyzerPreset # type: ignore from ._models import VideoLayer # type: ignore from ._models import VideoOverlay # type: ignore + from ._models import VideoTrackDescriptor # type: ignore from ._azure_media_services_enums import ( AacAudioProfile, AccountEncryptionKeyType, + ActionType, AnalysisResolution, AssetContainerPermission, AssetStorageEncryptionFormat, + AttributeFilter, AudioAnalysisMode, + BlurType, + ChannelMapping, ContentKeyPolicyFairPlayRentalAndLeaseKeyType, ContentKeyPolicyPlayReadyContentType, ContentKeyPolicyPlayReadyLicenseType, ContentKeyPolicyPlayReadyUnknownOutputPassingOption, ContentKeyPolicyRestrictionTokenType, + CreatedByType, + DefaultAction, DeinterlaceMode, DeinterlaceParity, EncoderNamedPreset, EncryptionScheme, EntropyMode, + FaceRedactorMode, FilterTrackPropertyCompareOperation, FilterTrackPropertyType, H264Complexity, H264VideoProfile, + H265Complexity, + H265VideoProfile, InsightsType, JobErrorCategory, JobErrorCode, @@ -404,6 +451,7 @@ StreamingLocatorContentKeyType, StreamingPolicyStreamingProtocol, StretchMode, + TrackAttribute, TrackPropertyCompareOperation, TrackPropertyType, VideoSyncMode, @@ -412,6 +460,7 @@ __all__ = [ 'AacAudio', 'AbsoluteClipTime', + 'AccessControl', 'AccountEncryption', 'AccountFilter', 'AccountFilterCollection', @@ -428,6 +477,7 @@ 'Audio', 'AudioAnalyzerPreset', 'AudioOverlay', + 'AudioTrackDescriptor', 'BuiltInStandardEncoderPreset', 'CbcsDrmConfiguration', 'CencDrmConfiguration', @@ -479,13 +529,20 @@ 'Filters', 'FirstQuality', 'Format', + 'FromAllInputFile', + 'FromEachInputFile', 'H264Layer', 'H264Video', + 'H265Layer', + 'H265Video', + 'H265VideoLayer', 'Hls', 'IPAccessControl', 'IPRange', 'Image', 'ImageFormat', + 'InputDefinition', + 'InputFile', 'Job', 'JobCollection', 'JobError', @@ -494,12 +551,14 @@ 'JobInputAsset', 'JobInputClip', 'JobInputHttp', + 'JobInputSequence', 'JobInputs', 'JobOutput', 'JobOutputAsset', 'JpgFormat', 'JpgImage', 
'JpgLayer', + 'KeyDelivery', 'KeyVaultProperties', 'Layer', 'ListContainerSasInput', @@ -526,6 +585,7 @@ 'MediaService', 'MediaServiceCollection', 'MediaServiceIdentity', + 'MediaServiceUpdate', 'MetricDimension', 'MetricSpecification', 'Mp4Format', @@ -553,6 +613,10 @@ 'ProxyResource', 'Rectangle', 'Resource', + 'SelectAudioTrackByAttribute', + 'SelectAudioTrackById', + 'SelectVideoTrackByAttribute', + 'SelectVideoTrackById', 'ServiceSpecification', 'StandardEncoderPreset', 'StorageAccount', @@ -573,6 +637,8 @@ 'StreamingPolicyPlayReadyConfiguration', 'StreamingPolicyWidevineConfiguration', 'SyncStorageKeysInput', + 'SystemData', + 'TrackDescriptor', 'TrackPropertyCondition', 'TrackSelection', 'TrackedResource', @@ -585,26 +651,36 @@ 'VideoAnalyzerPreset', 'VideoLayer', 'VideoOverlay', + 'VideoTrackDescriptor', 'AacAudioProfile', 'AccountEncryptionKeyType', + 'ActionType', 'AnalysisResolution', 'AssetContainerPermission', 'AssetStorageEncryptionFormat', + 'AttributeFilter', 'AudioAnalysisMode', + 'BlurType', + 'ChannelMapping', 'ContentKeyPolicyFairPlayRentalAndLeaseKeyType', 'ContentKeyPolicyPlayReadyContentType', 'ContentKeyPolicyPlayReadyLicenseType', 'ContentKeyPolicyPlayReadyUnknownOutputPassingOption', 'ContentKeyPolicyRestrictionTokenType', + 'CreatedByType', + 'DefaultAction', 'DeinterlaceMode', 'DeinterlaceParity', 'EncoderNamedPreset', 'EncryptionScheme', 'EntropyMode', + 'FaceRedactorMode', 'FilterTrackPropertyCompareOperation', 'FilterTrackPropertyType', 'H264Complexity', 'H264VideoProfile', + 'H265Complexity', + 'H265VideoProfile', 'InsightsType', 'JobErrorCategory', 'JobErrorCode', @@ -629,6 +705,7 @@ 'StreamingLocatorContentKeyType', 'StreamingPolicyStreamingProtocol', 'StretchMode', + 'TrackAttribute', 'TrackPropertyCompareOperation', 'TrackPropertyType', 'VideoSyncMode', diff --git a/sdk/media/azure-mgmt-media/azure/mgmt/media/models/_azure_media_services_enums.py b/sdk/media/azure-mgmt-media/azure/mgmt/media/models/_azure_media_services_enums.py index c62b5331de3e..36f003647135 100644 --- a/sdk/media/azure-mgmt-media/azure/mgmt/media/models/_azure_media_services_enums.py +++ b/sdk/media/azure-mgmt-media/azure/mgmt/media/models/_azure_media_services_enums.py @@ -30,16 +30,28 @@ class AacAudioProfile(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The encoding profile to be used when encoding audio with AAC. """ - AAC_LC = "AacLc" #: Specifies that the output audio is to be encoded into AAC Low Complexity profile (AAC-LC). - HE_AAC_V1 = "HeAacV1" #: Specifies that the output audio is to be encoded into HE-AAC v1 profile. - HE_AAC_V2 = "HeAacV2" #: Specifies that the output audio is to be encoded into HE-AAC v2 profile. + #: Specifies that the output audio is to be encoded into AAC Low Complexity profile (AAC-LC). + AAC_LC = "AacLc" + #: Specifies that the output audio is to be encoded into HE-AAC v1 profile. + HE_AAC_V1 = "HeAacV1" + #: Specifies that the output audio is to be encoded into HE-AAC v2 profile. + HE_AAC_V2 = "HeAacV2" class AccountEncryptionKeyType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The type of key used to encrypt the Account Key. """ - SYSTEM_KEY = "SystemKey" #: The Account Key is encrypted with a System Key. - CUSTOMER_KEY = "CustomerKey" #: The Account Key is encrypted with a Customer Key. + #: The Account Key is encrypted with a System Key. + SYSTEM_KEY = "SystemKey" + #: The Account Key is encrypted with a Customer Key. 
+ CUSTOMER_KEY = "CustomerKey" + +class ActionType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Indicates the action type. + """ + + #: An internal action. + INTERNAL = "Internal" class AnalysisResolution(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Specifies the maximum resolution at which your video is analyzed. The default behavior is @@ -48,9 +60,9 @@ class AnalysisResolution(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): appropriate aspect ratio. It will only resize if the video is of higher resolution. For example, a 1920x1080 input would be scaled to 640x360 before processing. Switching to "StandardDefinition" will reduce the time it takes to process high resolution video. It may - also reduce the cost of using this component (see https://azure.microsoft.com/en- - us/pricing/details/media-services/#analytics for details). However, faces that end up being too - small in the resized video may not be detected. + also reduce the cost of using this component (see + https://azure.microsoft.com/en-us/pricing/details/media-services/#analytics for details). + However, faces that end up being too small in the resized video may not be detected. """ SOURCE_RESOLUTION = "SourceResolution" @@ -60,153 +72,384 @@ class AssetContainerPermission(with_metaclass(_CaseInsensitiveEnumMeta, str, Enu """The permissions to set on the SAS URL. """ - READ = "Read" #: The SAS URL will allow read access to the container. - READ_WRITE = "ReadWrite" #: The SAS URL will allow read and write access to the container. - READ_WRITE_DELETE = "ReadWriteDelete" #: The SAS URL will allow read, write and delete access to the container. + #: The SAS URL will allow read access to the container. + READ = "Read" + #: The SAS URL will allow read and write access to the container. + READ_WRITE = "ReadWrite" + #: The SAS URL will allow read, write and delete access to the container. + READ_WRITE_DELETE = "ReadWriteDelete" class AssetStorageEncryptionFormat(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The Asset encryption format. One of None or MediaStorageEncryption. """ - NONE = "None" #: The Asset does not use client-side storage encryption (this is the only allowed value for new Assets). - MEDIA_STORAGE_CLIENT_ENCRYPTION = "MediaStorageClientEncryption" #: The Asset is encrypted with Media Services client-side encryption. + #: The Asset does not use client-side storage encryption (this is the only allowed value for new + #: Assets). + NONE = "None" + #: The Asset is encrypted with Media Services client-side encryption. + MEDIA_STORAGE_CLIENT_ENCRYPTION = "MediaStorageClientEncryption" + +class AttributeFilter(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The type of AttributeFilter to apply to the TrackAttribute in order to select the tracks. + """ + + #: All tracks will be included. + ALL = "All" + #: The first track will be included when the attribute is sorted in descending order. Generally + #: used to select the largest bitrate. + TOP = "Top" + #: The first track will be included when the attribute is sorted in ascending order. Generally + #: used to select the smallest bitrate. + BOTTOM = "Bottom" + #: Any tracks that have an attribute equal to the value given will be included. + VALUE_EQUALS = "ValueEquals" class AudioAnalysisMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Determines the set of audio analysis operations to be performed. If unspecified, the Standard AudioAnalysisMode would be chosen. 
""" - STANDARD = "Standard" #: Performs all operations included in the Basic mode, additionally performing language detection and speaker diarization. - BASIC = "Basic" #: This mode performs speech-to-text transcription and generation of a VTT subtitle/caption file. The output of this mode includes an Insights JSON file including only the keywords, transcription,and timing information. Automatic language detection and speaker diarization are not included in this mode. + #: Performs all operations included in the Basic mode, additionally performing language detection + #: and speaker diarization. + STANDARD = "Standard" + #: This mode performs speech-to-text transcription and generation of a VTT subtitle/caption file. + #: The output of this mode includes an Insights JSON file including only the keywords, + #: transcription,and timing information. Automatic language detection and speaker diarization are + #: not included in this mode. + BASIC = "Basic" + +class BlurType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Blur type + """ + + #: Box: debug filter, bounding box only. + BOX = "Box" + #: Low: box-car blur filter. + LOW = "Low" + #: Med: Gaussian blur filter. + MED = "Med" + #: High: Confuse blur filter. + HIGH = "High" + #: Black: Black out filter. + BLACK = "Black" + +class ChannelMapping(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Optional designation for single channel audio tracks. Can be used to combine the tracks into + stereo or multi-channel audio tracks. + """ + + #: The Front Left Channel. + FRONT_LEFT = "FrontLeft" + #: The Front Right Channel. + FRONT_RIGHT = "FrontRight" + #: The Center Channel. + CENTER = "Center" + #: Low Frequency Effects Channel. Sometimes referred to as the Subwoofer. + LOW_FREQUENCY_EFFECTS = "LowFrequencyEffects" + #: The Back Left Channel. Sometimes referred to as the Left Surround Channel. + BACK_LEFT = "BackLeft" + #: The Back Right Channel. Sometimes referred to as the Right Surround Channel. + BACK_RIGHT = "BackRight" + #: The Left Stereo channel. Sometimes referred to as Down Mix Left. + STEREO_LEFT = "StereoLeft" + #: The Right Stereo channel. Sometimes referred to as Down Mix Right. + STEREO_RIGHT = "StereoRight" class ContentKeyPolicyFairPlayRentalAndLeaseKeyType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The rental and lease key type. """ - UNKNOWN = "Unknown" #: Represents a ContentKeyPolicyFairPlayRentalAndLeaseKeyType that is unavailable in current API version. - UNDEFINED = "Undefined" #: Key duration is not specified. - DUAL_EXPIRY = "DualExpiry" #: Dual expiry for offline rental. - PERSISTENT_UNLIMITED = "PersistentUnlimited" #: Content key can be persisted with an unlimited duration. - PERSISTENT_LIMITED = "PersistentLimited" #: Content key can be persisted and the valid duration is limited by the Rental Duration value. + #: Represents a ContentKeyPolicyFairPlayRentalAndLeaseKeyType that is unavailable in current API + #: version. + UNKNOWN = "Unknown" + #: Key duration is not specified. + UNDEFINED = "Undefined" + #: Dual expiry for offline rental. + DUAL_EXPIRY = "DualExpiry" + #: Content key can be persisted with an unlimited duration. + PERSISTENT_UNLIMITED = "PersistentUnlimited" + #: Content key can be persisted and the valid duration is limited by the Rental Duration value. + PERSISTENT_LIMITED = "PersistentLimited" class ContentKeyPolicyPlayReadyContentType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The PlayReady content type. 
""" - UNKNOWN = "Unknown" #: Represents a ContentKeyPolicyPlayReadyContentType that is unavailable in current API version. - UNSPECIFIED = "Unspecified" #: Unspecified content type. - ULTRA_VIOLET_DOWNLOAD = "UltraVioletDownload" #: Ultraviolet download content type. - ULTRA_VIOLET_STREAMING = "UltraVioletStreaming" #: Ultraviolet streaming content type. + #: Represents a ContentKeyPolicyPlayReadyContentType that is unavailable in current API version. + UNKNOWN = "Unknown" + #: Unspecified content type. + UNSPECIFIED = "Unspecified" + #: Ultraviolet download content type. + ULTRA_VIOLET_DOWNLOAD = "UltraVioletDownload" + #: Ultraviolet streaming content type. + ULTRA_VIOLET_STREAMING = "UltraVioletStreaming" class ContentKeyPolicyPlayReadyLicenseType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The license type. """ - UNKNOWN = "Unknown" #: Represents a ContentKeyPolicyPlayReadyLicenseType that is unavailable in current API version. - NON_PERSISTENT = "NonPersistent" #: Non persistent license. - PERSISTENT = "Persistent" #: Persistent license. Allows offline playback. + #: Represents a ContentKeyPolicyPlayReadyLicenseType that is unavailable in current API version. + UNKNOWN = "Unknown" + #: Non persistent license. + NON_PERSISTENT = "NonPersistent" + #: Persistent license. Allows offline playback. + PERSISTENT = "Persistent" class ContentKeyPolicyPlayReadyUnknownOutputPassingOption(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Configures Unknown output handling settings of the license. """ - UNKNOWN = "Unknown" #: Represents a ContentKeyPolicyPlayReadyUnknownOutputPassingOption that is unavailable in current API version. - NOT_ALLOWED = "NotAllowed" #: Passing the video portion of protected content to an Unknown Output is not allowed. - ALLOWED = "Allowed" #: Passing the video portion of protected content to an Unknown Output is allowed. - ALLOWED_WITH_VIDEO_CONSTRICTION = "AllowedWithVideoConstriction" #: Passing the video portion of protected content to an Unknown Output is allowed but with constrained resolution. + #: Represents a ContentKeyPolicyPlayReadyUnknownOutputPassingOption that is unavailable in current + #: API version. + UNKNOWN = "Unknown" + #: Passing the video portion of protected content to an Unknown Output is not allowed. + NOT_ALLOWED = "NotAllowed" + #: Passing the video portion of protected content to an Unknown Output is allowed. + ALLOWED = "Allowed" + #: Passing the video portion of protected content to an Unknown Output is allowed but with + #: constrained resolution. + ALLOWED_WITH_VIDEO_CONSTRICTION = "AllowedWithVideoConstriction" class ContentKeyPolicyRestrictionTokenType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The type of token. """ - UNKNOWN = "Unknown" #: Represents a ContentKeyPolicyRestrictionTokenType that is unavailable in current API version. - SWT = "Swt" #: Simple Web Token. - JWT = "Jwt" #: JSON Web Token. + #: Represents a ContentKeyPolicyRestrictionTokenType that is unavailable in current API version. + UNKNOWN = "Unknown" + #: Simple Web Token. + SWT = "Swt" + #: JSON Web Token. + JWT = "Jwt" + +class CreatedByType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The type of identity that created the resource. + """ + + USER = "User" + APPLICATION = "Application" + MANAGED_IDENTITY = "ManagedIdentity" + KEY = "Key" + +class DefaultAction(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The behavior for IP access control in Key Delivery. + """ + + #: All public IP addresses are allowed. 
+ ALLOW = "Allow" + #: Public IP addresses are blocked. + DENY = "Deny" class DeinterlaceMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The deinterlacing mode. Defaults to AutoPixelAdaptive. """ - OFF = "Off" #: Disables de-interlacing of the source video. - AUTO_PIXEL_ADAPTIVE = "AutoPixelAdaptive" #: Apply automatic pixel adaptive de-interlacing on each frame in the input video. + #: Disables de-interlacing of the source video. + OFF = "Off" + #: Apply automatic pixel adaptive de-interlacing on each frame in the input video. + AUTO_PIXEL_ADAPTIVE = "AutoPixelAdaptive" class DeinterlaceParity(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The field parity for de-interlacing, defaults to Auto. """ - AUTO = "Auto" #: Automatically detect the order of fields. - TOP_FIELD_FIRST = "TopFieldFirst" #: Apply top field first processing of input video. - BOTTOM_FIELD_FIRST = "BottomFieldFirst" #: Apply bottom field first processing of input video. + #: Automatically detect the order of fields. + AUTO = "Auto" + #: Apply top field first processing of input video. + TOP_FIELD_FIRST = "TopFieldFirst" + #: Apply bottom field first processing of input video. + BOTTOM_FIELD_FIRST = "BottomFieldFirst" class EncoderNamedPreset(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The built-in preset to be used for encoding videos. """ - H264_SINGLE_BITRATE_SD = "H264SingleBitrateSD" #: Produces an MP4 file where the video is encoded with H.264 codec at 2200 kbps and a picture height of 480 pixels, and the stereo audio is encoded with AAC-LC codec at 128 kbps. - H264_SINGLE_BITRATE720_P = "H264SingleBitrate720p" #: Produces an MP4 file where the video is encoded with H.264 codec at 4500 kbps and a picture height of 720 pixels, and the stereo audio is encoded with AAC-LC codec at 128 kbps. - H264_SINGLE_BITRATE1080_P = "H264SingleBitrate1080p" #: Produces an MP4 file where the video is encoded with H.264 codec at 6750 kbps and a picture height of 1080 pixels, and the stereo audio is encoded with AAC-LC codec at 128 kbps. - ADAPTIVE_STREAMING = "AdaptiveStreaming" #: Produces a set of GOP aligned MP4 files with H.264 video and stereo AAC audio. Auto-generates a bitrate ladder based on the input resolution, bitrate and frame rate. The auto-generated preset will never exceed the input resolution. For example, if the input is 720p, output will remain 720p at best. - AAC_GOOD_QUALITY_AUDIO = "AACGoodQualityAudio" #: Produces a single MP4 file containing only stereo audio encoded at 192 kbps. - CONTENT_AWARE_ENCODING_EXPERIMENTAL = "ContentAwareEncodingExperimental" #: Exposes an experimental preset for content-aware encoding. Given any input content, the service attempts to automatically determine the optimal number of layers, appropriate bitrate and resolution settings for delivery by adaptive streaming. The underlying algorithms will continue to evolve over time. The output will contain MP4 files with video and audio interleaved. - CONTENT_AWARE_ENCODING = "ContentAwareEncoding" #: Produces a set of GOP-aligned MP4s by using content-aware encoding. Given any input content, the service performs an initial lightweight analysis of the input content, and uses the results to determine the optimal number of layers, appropriate bitrate and resolution settings for delivery by adaptive streaming. This preset is particularly effective for low and medium complexity videos, where the output files will be at lower bitrates but at a quality that still delivers a good experience to viewers. 
The output will contain MP4 files with video and audio interleaved. - COPY_ALL_BITRATE_NON_INTERLEAVED = "CopyAllBitrateNonInterleaved" #: Copy all video and audio streams from the input asset as non-interleaved video and audio output files. This preset can be used to clip an existing asset or convert a group of key frame (GOP) aligned MP4 files as an asset that can be streamed. - H264_MULTIPLE_BITRATE1080_P = "H264MultipleBitrate1080p" #: Produces a set of 8 GOP-aligned MP4 files, ranging from 6000 kbps to 400 kbps, and stereo AAC audio. Resolution starts at 1080p and goes down to 180p. - H264_MULTIPLE_BITRATE720_P = "H264MultipleBitrate720p" #: Produces a set of 6 GOP-aligned MP4 files, ranging from 3400 kbps to 400 kbps, and stereo AAC audio. Resolution starts at 720p and goes down to 180p. - H264_MULTIPLE_BITRATE_SD = "H264MultipleBitrateSD" #: Produces a set of 5 GOP-aligned MP4 files, ranging from 1900kbps to 400 kbps, and stereo AAC audio. Resolution starts at 480p and goes down to 240p. + #: Produces an MP4 file where the video is encoded with H.264 codec at 2200 kbps and a picture + #: height of 480 pixels, and the stereo audio is encoded with AAC-LC codec at 128 kbps. + H264_SINGLE_BITRATE_SD = "H264SingleBitrateSD" + #: Produces an MP4 file where the video is encoded with H.264 codec at 4500 kbps and a picture + #: height of 720 pixels, and the stereo audio is encoded with AAC-LC codec at 128 kbps. + H264_SINGLE_BITRATE720_P = "H264SingleBitrate720p" + #: Produces an MP4 file where the video is encoded with H.264 codec at 6750 kbps and a picture + #: height of 1080 pixels, and the stereo audio is encoded with AAC-LC codec at 128 kbps. + H264_SINGLE_BITRATE1080_P = "H264SingleBitrate1080p" + #: Produces a set of GOP aligned MP4 files with H.264 video and stereo AAC audio. Auto-generates a + #: bitrate ladder based on the input resolution, bitrate and frame rate. The auto-generated preset + #: will never exceed the input resolution. For example, if the input is 720p, output will remain + #: 720p at best. + ADAPTIVE_STREAMING = "AdaptiveStreaming" + #: Produces a single MP4 file containing only stereo audio encoded at 192 kbps. + AAC_GOOD_QUALITY_AUDIO = "AACGoodQualityAudio" + #: Exposes an experimental preset for content-aware encoding. Given any input content, the service + #: attempts to automatically determine the optimal number of layers, appropriate bitrate and + #: resolution settings for delivery by adaptive streaming. The underlying algorithms will continue + #: to evolve over time. The output will contain MP4 files with video and audio interleaved. + CONTENT_AWARE_ENCODING_EXPERIMENTAL = "ContentAwareEncodingExperimental" + #: Produces a set of GOP-aligned MP4s by using content-aware encoding. Given any input content, + #: the service performs an initial lightweight analysis of the input content, and uses the results + #: to determine the optimal number of layers, appropriate bitrate and resolution settings for + #: delivery by adaptive streaming. This preset is particularly effective for low and medium + #: complexity videos, where the output files will be at lower bitrates but at a quality that still + #: delivers a good experience to viewers. The output will contain MP4 files with video and audio + #: interleaved. + CONTENT_AWARE_ENCODING = "ContentAwareEncoding" + #: Copy all video and audio streams from the input asset as non-interleaved video and audio output + #: files. 
This preset can be used to clip an existing asset or convert a group of key frame (GOP) + #: aligned MP4 files as an asset that can be streamed. + COPY_ALL_BITRATE_NON_INTERLEAVED = "CopyAllBitrateNonInterleaved" + #: Produces a set of 8 GOP-aligned MP4 files, ranging from 6000 kbps to 400 kbps, and stereo AAC + #: audio. Resolution starts at 1080p and goes down to 180p. + H264_MULTIPLE_BITRATE1080_P = "H264MultipleBitrate1080p" + #: Produces a set of 6 GOP-aligned MP4 files, ranging from 3400 kbps to 400 kbps, and stereo AAC + #: audio. Resolution starts at 720p and goes down to 180p. + H264_MULTIPLE_BITRATE720_P = "H264MultipleBitrate720p" + #: Produces a set of 5 GOP-aligned MP4 files, ranging from 1900kbps to 400 kbps, and stereo AAC + #: audio. Resolution starts at 480p and goes down to 240p. + H264_MULTIPLE_BITRATE_SD = "H264MultipleBitrateSD" + #: Produces a set of GOP-aligned MP4s by using content-aware encoding. Given any input content, + #: the service performs an initial lightweight analysis of the input content, and uses the results + #: to determine the optimal number of layers, appropriate bitrate and resolution settings for + #: delivery by adaptive streaming. This preset is particularly effective for low and medium + #: complexity videos, where the output files will be at lower bitrates but at a quality that still + #: delivers a good experience to viewers. The output will contain MP4 files with video and audio + #: interleaved. + H265_CONTENT_AWARE_ENCODING = "H265ContentAwareEncoding" + #: Produces a set of GOP aligned MP4 files with H.265 video and stereo AAC audio. Auto-generates a + #: bitrate ladder based on the input resolution, bitrate and frame rate. The auto-generated preset + #: will never exceed the input resolution. For example, if the input is 720p, output will remain + #: 720p at best. + H265_ADAPTIVE_STREAMING = "H265AdaptiveStreaming" + #: Produces an MP4 file where the video is encoded with H.265 codec at 1800 kbps and a picture + #: height of 720 pixels, and the stereo audio is encoded with AAC-LC codec at 128 kbps. + H265_SINGLE_BITRATE720_P = "H265SingleBitrate720p" + #: Produces an MP4 file where the video is encoded with H.265 codec at 3500 kbps and a picture + #: height of 1080 pixels, and the stereo audio is encoded with AAC-LC codec at 128 kbps. + H265_SINGLE_BITRATE1080_P = "H265SingleBitrate1080p" + #: Produces an MP4 file where the video is encoded with H.265 codec at 9500 kbps and a picture + #: height of 2160 pixels, and the stereo audio is encoded with AAC-LC codec at 128 kbps. + H265_SINGLE_BITRATE4_K = "H265SingleBitrate4K" class EncryptionScheme(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Encryption scheme """ - NO_ENCRYPTION = "NoEncryption" #: NoEncryption scheme. - ENVELOPE_ENCRYPTION = "EnvelopeEncryption" #: EnvelopeEncryption scheme. - COMMON_ENCRYPTION_CENC = "CommonEncryptionCenc" #: CommonEncryptionCenc scheme. - COMMON_ENCRYPTION_CBCS = "CommonEncryptionCbcs" #: CommonEncryptionCbcs scheme. + #: NoEncryption scheme. + NO_ENCRYPTION = "NoEncryption" + #: EnvelopeEncryption scheme. + ENVELOPE_ENCRYPTION = "EnvelopeEncryption" + #: CommonEncryptionCenc scheme. + COMMON_ENCRYPTION_CENC = "CommonEncryptionCenc" + #: CommonEncryptionCbcs scheme. + COMMON_ENCRYPTION_CBCS = "CommonEncryptionCbcs" class EntropyMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The entropy mode to be used for this layer. If not specified, the encoder chooses the mode that is appropriate for the profile and level. 
""" - CABAC = "Cabac" #: Context Adaptive Binary Arithmetic Coder (CABAC) entropy encoding. - CAVLC = "Cavlc" #: Context Adaptive Variable Length Coder (CAVLC) entropy encoding. + #: Context Adaptive Binary Arithmetic Coder (CABAC) entropy encoding. + CABAC = "Cabac" + #: Context Adaptive Variable Length Coder (CAVLC) entropy encoding. + CAVLC = "Cavlc" + +class FaceRedactorMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """This mode provides the ability to choose between the following settings: 1) Analyze - For + detection only.This mode generates a metadata JSON file marking appearances of faces throughout + the video.Where possible, appearances of the same person are assigned the same ID. 2) Combined + - Additionally redacts(blurs) detected faces. 3) Redact - This enables a 2-pass process, + allowing for selective redaction of a subset of detected faces.It takes in the metadata file + from a prior analyze pass, along with the source video, and a user-selected subset of IDs that + require redaction. + """ + + #: Analyze mode detects faces and outputs a metadata file with the results. Allows editing of the + #: metadata file before faces are blurred with Redact mode. + ANALYZE = "Analyze" + #: Redact mode consumes the metadata file from Analyze mode and redacts the faces found. + REDACT = "Redact" + #: Combined mode does the Analyze and Redact steps in one pass when editing the analyzed faces is + #: not desired. + COMBINED = "Combined" class FilterTrackPropertyCompareOperation(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The track property condition operation. """ - EQUAL = "Equal" #: The equal operation. - NOT_EQUAL = "NotEqual" #: The not equal operation. + #: The equal operation. + EQUAL = "Equal" + #: The not equal operation. + NOT_EQUAL = "NotEqual" class FilterTrackPropertyType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The track property type. """ - UNKNOWN = "Unknown" #: The unknown track property type. - TYPE = "Type" #: The type. - NAME = "Name" #: The name. - LANGUAGE = "Language" #: The language. - FOUR_CC = "FourCC" #: The fourCC. - BITRATE = "Bitrate" #: The bitrate. + #: The unknown track property type. + UNKNOWN = "Unknown" + #: The type. + TYPE = "Type" + #: The name. + NAME = "Name" + #: The language. + LANGUAGE = "Language" + #: The fourCC. + FOUR_CC = "FourCC" + #: The bitrate. + BITRATE = "Bitrate" class H264Complexity(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Tells the encoder how to choose its encoding settings. The default value is Balanced. """ - SPEED = "Speed" #: Tells the encoder to use settings that are optimized for faster encoding. Quality is sacrificed to decrease encoding time. - BALANCED = "Balanced" #: Tells the encoder to use settings that achieve a balance between speed and quality. - QUALITY = "Quality" #: Tells the encoder to use settings that are optimized to produce higher quality output at the expense of slower overall encode time. + #: Tells the encoder to use settings that are optimized for faster encoding. Quality is sacrificed + #: to decrease encoding time. + SPEED = "Speed" + #: Tells the encoder to use settings that achieve a balance between speed and quality. + BALANCED = "Balanced" + #: Tells the encoder to use settings that are optimized to produce higher quality output at the + #: expense of slower overall encode time. + QUALITY = "Quality" class H264VideoProfile(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """We currently support Baseline, Main, High, High422, High444. 
Default is Auto. """ - AUTO = "Auto" #: Tells the encoder to automatically determine the appropriate H.264 profile. - BASELINE = "Baseline" #: Baseline profile. - MAIN = "Main" #: Main profile. - HIGH = "High" #: High profile. - HIGH422 = "High422" #: High 4:2:2 profile. - HIGH444 = "High444" #: High 4:4:4 predictive profile. + #: Tells the encoder to automatically determine the appropriate H.264 profile. + AUTO = "Auto" + #: Baseline profile. + BASELINE = "Baseline" + #: Main profile. + MAIN = "Main" + #: High profile. + HIGH = "High" + #: High 4:2:2 profile. + HIGH422 = "High422" + #: High 4:4:4 predictive profile. + HIGH444 = "High444" + +class H265Complexity(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """Tells the encoder how to choose its encoding settings. Quality will provide for a higher + compression ratio but at a higher cost and longer compute time. Speed will produce a + relatively larger file but is faster and more economical. The default value is Balanced. + """ + + #: Tells the encoder to use settings that are optimized for faster encoding. Quality is sacrificed + #: to decrease encoding time. + SPEED = "Speed" + #: Tells the encoder to use settings that achieve a balance between speed and quality. + BALANCED = "Balanced" + #: Tells the encoder to use settings that are optimized to produce higher quality output at the + #: expense of slower overall encode time. + QUALITY = "Quality" + +class H265VideoProfile(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """We currently support Main. Default is Auto. + """ + + #: Tells the encoder to automatically determine the appropriate H.265 profile. + AUTO = "Auto" + #: Main profile + #: (https://x265.readthedocs.io/en/default/cli.html?highlight=profile#profile-level-tier). + MAIN = "Main" class InsightsType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Defines the type of insights that you want the service to generate. The allowed values are @@ -218,53 +461,90 @@ class InsightsType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): conditions would error out. """ - AUDIO_INSIGHTS_ONLY = "AudioInsightsOnly" #: Generate audio only insights. Ignore video even if present. Fails if no audio is present. - VIDEO_INSIGHTS_ONLY = "VideoInsightsOnly" #: Generate video only insights. Ignore audio if present. Fails if no video is present. - ALL_INSIGHTS = "AllInsights" #: Generate both audio and video insights. Fails if either audio or video Insights fail. + #: Generate audio only insights. Ignore video even if present. Fails if no audio is present. + AUDIO_INSIGHTS_ONLY = "AudioInsightsOnly" + #: Generate video only insights. Ignore audio if present. Fails if no video is present. + VIDEO_INSIGHTS_ONLY = "VideoInsightsOnly" + #: Generate both audio and video insights. Fails if either audio or video Insights fail. + ALL_INSIGHTS = "AllInsights" class JobErrorCategory(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Helps with categorization of errors. """ - SERVICE = "Service" #: The error is service related. - DOWNLOAD = "Download" #: The error is download related. - UPLOAD = "Upload" #: The error is upload related. - CONFIGURATION = "Configuration" #: The error is configuration related. - CONTENT = "Content" #: The error is related to data in the input files. + #: The error is service related. + SERVICE = "Service" + #: The error is download related. + DOWNLOAD = "Download" + #: The error is upload related. + UPLOAD = "Upload" + #: The error is configuration related. 
+ CONFIGURATION = "Configuration" + #: The error is related to data in the input files. + CONTENT = "Content" class JobErrorCode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Error code describing the error. """ - SERVICE_ERROR = "ServiceError" #: Fatal service error, please contact support. - SERVICE_TRANSIENT_ERROR = "ServiceTransientError" #: Transient error, please retry, if retry is unsuccessful, please contact support. - DOWNLOAD_NOT_ACCESSIBLE = "DownloadNotAccessible" #: While trying to download the input files, the files were not accessible, please check the availability of the source. - DOWNLOAD_TRANSIENT_ERROR = "DownloadTransientError" #: While trying to download the input files, there was an issue during transfer (storage service, network errors), see details and check your source. - UPLOAD_NOT_ACCESSIBLE = "UploadNotAccessible" #: While trying to upload the output files, the destination was not reachable, please check the availability of the destination. - UPLOAD_TRANSIENT_ERROR = "UploadTransientError" #: While trying to upload the output files, there was an issue during transfer (storage service, network errors), see details and check your destination. - CONFIGURATION_UNSUPPORTED = "ConfigurationUnsupported" #: There was a problem with the combination of input files and the configuration settings applied, fix the configuration settings and retry with the same input, or change input to match the configuration. - CONTENT_MALFORMED = "ContentMalformed" #: There was a problem with the input content (for example: zero byte files, or corrupt/non-decodable files), check the input files. - CONTENT_UNSUPPORTED = "ContentUnsupported" #: There was a problem with the format of the input (not valid media file, or an unsupported file/codec), check the validity of the input files. + #: Fatal service error, please contact support. + SERVICE_ERROR = "ServiceError" + #: Transient error, please retry, if retry is unsuccessful, please contact support. + SERVICE_TRANSIENT_ERROR = "ServiceTransientError" + #: While trying to download the input files, the files were not accessible, please check the + #: availability of the source. + DOWNLOAD_NOT_ACCESSIBLE = "DownloadNotAccessible" + #: While trying to download the input files, there was an issue during transfer (storage service, + #: network errors), see details and check your source. + DOWNLOAD_TRANSIENT_ERROR = "DownloadTransientError" + #: While trying to upload the output files, the destination was not reachable, please check the + #: availability of the destination. + UPLOAD_NOT_ACCESSIBLE = "UploadNotAccessible" + #: While trying to upload the output files, there was an issue during transfer (storage service, + #: network errors), see details and check your destination. + UPLOAD_TRANSIENT_ERROR = "UploadTransientError" + #: There was a problem with the combination of input files and the configuration settings applied, + #: fix the configuration settings and retry with the same input, or change input to match the + #: configuration. + CONFIGURATION_UNSUPPORTED = "ConfigurationUnsupported" + #: There was a problem with the input content (for example: zero byte files, or + #: corrupt/non-decodable files), check the input files. + CONTENT_MALFORMED = "ContentMalformed" + #: There was a problem with the format of the input (not valid media file, or an unsupported + #: file/codec), check the validity of the input files. 
+ CONTENT_UNSUPPORTED = "ContentUnsupported" class JobRetry(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Indicates that it may be possible to retry the Job. If retry is unsuccessful, please contact Azure support via Azure Portal. """ - DO_NOT_RETRY = "DoNotRetry" #: Issue needs to be investigated and then the job resubmitted with corrections or retried once the underlying issue has been corrected. - MAY_RETRY = "MayRetry" #: Issue may be resolved after waiting for a period of time and resubmitting the same Job. + #: Issue needs to be investigated and then the job resubmitted with corrections or retried once + #: the underlying issue has been corrected. + DO_NOT_RETRY = "DoNotRetry" + #: Issue may be resolved after waiting for a period of time and resubmitting the same Job. + MAY_RETRY = "MayRetry" class JobState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Describes the state of the JobOutput. """ - CANCELED = "Canceled" #: The job was canceled. This is a final state for the job. - CANCELING = "Canceling" #: The job is in the process of being canceled. This is a transient state for the job. - ERROR = "Error" #: The job has encountered an error. This is a final state for the job. - FINISHED = "Finished" #: The job is finished. This is a final state for the job. - PROCESSING = "Processing" #: The job is processing. This is a transient state for the job. - QUEUED = "Queued" #: The job is in a queued state, waiting for resources to become available. This is a transient state. - SCHEDULED = "Scheduled" #: The job is being scheduled to run on an available resource. This is a transient state, between queued and processing states. + #: The job was canceled. This is a final state for the job. + CANCELED = "Canceled" + #: The job is in the process of being canceled. This is a transient state for the job. + CANCELING = "Canceling" + #: The job has encountered an error. This is a final state for the job. + ERROR = "Error" + #: The job is finished. This is a final state for the job. + FINISHED = "Finished" + #: The job is processing. This is a transient state for the job. + PROCESSING = "Processing" + #: The job is in a queued state, waiting for resources to become available. This is a transient + #: state. + QUEUED = "Queued" + #: The job is being scheduled to run on an available resource. This is a transient state, between + #: queued and processing states. + SCHEDULED = "Scheduled" class LiveEventEncodingType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Live event type. When encodingType is set to None, the service simply passes through the @@ -274,61 +554,101 @@ class LiveEventEncodingType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)) be modified after the live event is created. """ - NONE = "None" #: A contribution live encoder sends a multiple bitrate stream. The ingested stream passes through the live event without any further processing. It is also called the pass-through mode. - STANDARD = "Standard" #: A contribution live encoder sends a single bitrate stream to the live event and Media Services creates multiple bitrate streams. The output cannot exceed 720p in resolution. - PREMIUM1080_P = "Premium1080p" #: A contribution live encoder sends a single bitrate stream to the live event and Media Services creates multiple bitrate streams. The output cannot exceed 1080p in resolution. + #: A contribution live encoder sends a multiple bitrate stream. The ingested stream passes through + #: the live event without any further processing. 
It is also called the pass-through mode. + NONE = "None" + #: A contribution live encoder sends a single bitrate stream to the live event and Media Services + #: creates multiple bitrate streams. The output cannot exceed 720p in resolution. + STANDARD = "Standard" + #: A contribution live encoder sends a single bitrate stream to the live event and Media Services + #: creates multiple bitrate streams. The output cannot exceed 1080p in resolution. + PREMIUM1080_P = "Premium1080p" class LiveEventInputProtocol(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The input protocol for the live event. This is specified at creation time and cannot be updated. """ - FRAGMENTED_MP4 = "FragmentedMP4" #: Smooth Streaming input will be sent by the contribution encoder to the live event. - RTMP = "RTMP" #: RTMP input will be sent by the contribution encoder to the live event. + #: Smooth Streaming input will be sent by the contribution encoder to the live event. + FRAGMENTED_MP4 = "FragmentedMP4" + #: RTMP input will be sent by the contribution encoder to the live event. + RTMP = "RTMP" class LiveEventResourceState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The resource state of the live event. See https://go.microsoft.com/fwlink/?linkid=2139012 for more information. """ - STOPPED = "Stopped" #: This is the initial state of the live event after creation (unless autostart was set to true.) No billing occurs in this state. In this state, the live event properties can be updated but streaming is not allowed. - ALLOCATING = "Allocating" #: Allocate action was called on the live event and resources are being provisioned for this live event. Once allocation completes successfully, the live event will transition to StandBy state. - STAND_BY = "StandBy" #: Live event resources have been provisioned and is ready to start. Billing occurs in this state. Most properties can still be updated, however ingest or streaming is not allowed during this state. - STARTING = "Starting" #: The live event is being started and resources are being allocated. No billing occurs in this state. Updates or streaming are not allowed during this state. If an error occurs, the live event returns to the Stopped state. - RUNNING = "Running" #: The live event resources have been allocated, ingest and preview URLs have been generated, and it is capable of receiving live streams. At this point, billing is active. You must explicitly call Stop on the live event resource to halt further billing. - STOPPING = "Stopping" #: The live event is being stopped and resources are being de-provisioned. No billing occurs in this transient state. Updates or streaming are not allowed during this state. - DELETING = "Deleting" #: The live event is being deleted. No billing occurs in this transient state. Updates or streaming are not allowed during this state. + #: This is the initial state of the live event after creation (unless autostart was set to true.) + #: No billing occurs in this state. In this state, the live event properties can be updated but + #: streaming is not allowed. + STOPPED = "Stopped" + #: Allocate action was called on the live event and resources are being provisioned for this live + #: event. Once allocation completes successfully, the live event will transition to StandBy state. + ALLOCATING = "Allocating" + #: Live event resources have been provisioned and is ready to start. Billing occurs in this state. + #: Most properties can still be updated, however ingest or streaming is not allowed during this + #: state. 
+ STAND_BY = "StandBy" + #: The live event is being started and resources are being allocated. No billing occurs in this + #: state. Updates or streaming are not allowed during this state. If an error occurs, the live + #: event returns to the Stopped state. + STARTING = "Starting" + #: The live event resources have been allocated, ingest and preview URLs have been generated, and + #: it is capable of receiving live streams. At this point, billing is active. You must explicitly + #: call Stop on the live event resource to halt further billing. + RUNNING = "Running" + #: The live event is being stopped and resources are being de-provisioned. No billing occurs in + #: this transient state. Updates or streaming are not allowed during this state. + STOPPING = "Stopping" + #: The live event is being deleted. No billing occurs in this transient state. Updates or + #: streaming are not allowed during this state. + DELETING = "Deleting" class LiveOutputResourceState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The resource state of the live output. """ - CREATING = "Creating" #: Live output is being created. No content is archived in the asset until the live output is in running state. - RUNNING = "Running" #: Live output is running and archiving live streaming content to the asset if there is valid input from a contribution encoder. - DELETING = "Deleting" #: Live output is being deleted. The live asset is being converted from live to on-demand asset. Any streaming URLs created on the live output asset continue to work. + #: Live output is being created. No content is archived in the asset until the live output is in + #: running state. + CREATING = "Creating" + #: Live output is running and archiving live streaming content to the asset if there is valid + #: input from a contribution encoder. + RUNNING = "Running" + #: Live output is being deleted. The live asset is being converted from live to on-demand asset. + #: Any streaming URLs created on the live output asset continue to work. + DELETING = "Deleting" class ManagedIdentityType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The identity type. """ - SYSTEM_ASSIGNED = "SystemAssigned" #: A system-assigned managed identity. - NONE = "None" #: No managed identity. + #: A system-assigned managed identity. + SYSTEM_ASSIGNED = "SystemAssigned" + #: No managed identity. + NONE = "None" class MetricAggregationType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The metric aggregation type """ - AVERAGE = "Average" #: The average. - COUNT = "Count" #: The count of a number of items, usually requests. - TOTAL = "Total" #: The sum. + #: The average. + AVERAGE = "Average" + #: The count of a number of items, usually requests. + COUNT = "Count" + #: The sum. + TOTAL = "Total" class MetricUnit(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The metric unit """ - BYTES = "Bytes" #: The number of bytes. - COUNT = "Count" #: The count. - MILLISECONDS = "Milliseconds" #: The number of milliseconds. + #: The number of bytes. + BYTES = "Bytes" + #: The count. + COUNT = "Count" + #: The number of milliseconds. + MILLISECONDS = "Milliseconds" class OnErrorType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """A Transform can define more than one outputs. This property defines what the service should do @@ -337,17 +657,25 @@ class OnErrorType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): 'ContinueJob'. The default is 'StopProcessingJob'. 
""" - STOP_PROCESSING_JOB = "StopProcessingJob" #: Tells the service that if this TransformOutput fails, then any other incomplete TransformOutputs can be stopped. - CONTINUE_JOB = "ContinueJob" #: Tells the service that if this TransformOutput fails, then allow any other TransformOutput to continue. + #: Tells the service that if this TransformOutput fails, then any other incomplete + #: TransformOutputs can be stopped. + STOP_PROCESSING_JOB = "StopProcessingJob" + #: Tells the service that if this TransformOutput fails, then allow any other TransformOutput to + #: continue. + CONTINUE_JOB = "ContinueJob" class Priority(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Sets the relative priority of the TransformOutputs within a Transform. This sets the priority that the service uses for processing TransformOutputs. The default priority is Normal. """ - LOW = "Low" #: Used for TransformOutputs that can be generated after Normal and High priority TransformOutputs. - NORMAL = "Normal" #: Used for TransformOutputs that can be generated at Normal priority. - HIGH = "High" #: Used for TransformOutputs that should take precedence over others. + #: Used for TransformOutputs that can be generated after Normal and High priority + #: TransformOutputs. + LOW = "Low" + #: Used for TransformOutputs that can be generated at Normal priority. + NORMAL = "Normal" + #: Used for TransformOutputs that should take precedence over others. + HIGH = "High" class PrivateEndpointConnectionProvisioningState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The current provisioning state. @@ -370,86 +698,152 @@ class Rotation(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The rotation, if any, to be applied to the input video, before it is encoded. Default is Auto """ - AUTO = "Auto" #: Automatically detect and rotate as needed. - NONE = "None" #: Do not rotate the video. If the output format supports it, any metadata about rotation is kept intact. - ROTATE0 = "Rotate0" #: Do not rotate the video but remove any metadata about the rotation. - ROTATE90 = "Rotate90" #: Rotate 90 degrees clockwise. - ROTATE180 = "Rotate180" #: Rotate 180 degrees clockwise. - ROTATE270 = "Rotate270" #: Rotate 270 degrees clockwise. + #: Automatically detect and rotate as needed. + AUTO = "Auto" + #: Do not rotate the video. If the output format supports it, any metadata about rotation is kept + #: intact. + NONE = "None" + #: Do not rotate the video but remove any metadata about the rotation. + ROTATE0 = "Rotate0" + #: Rotate 90 degrees clockwise. + ROTATE90 = "Rotate90" + #: Rotate 180 degrees clockwise. + ROTATE180 = "Rotate180" + #: Rotate 270 degrees clockwise. + ROTATE270 = "Rotate270" class StorageAccountType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The type of the storage account. """ - PRIMARY = "Primary" #: The primary storage account for the Media Services account. - SECONDARY = "Secondary" #: A secondary storage account for the Media Services account. + #: The primary storage account for the Media Services account. + PRIMARY = "Primary" + #: A secondary storage account for the Media Services account. + SECONDARY = "Secondary" class StorageAuthentication(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - SYSTEM = "System" #: System authentication. - MANAGED_IDENTITY = "ManagedIdentity" #: Managed Identity authentication. + #: System authentication. + SYSTEM = "System" + #: Managed Identity authentication. 
+ MANAGED_IDENTITY = "ManagedIdentity" class StreamingEndpointResourceState(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The resource state of the streaming endpoint. """ - STOPPED = "Stopped" #: The initial state of a streaming endpoint after creation. Content is not ready to be streamed from this endpoint. - STARTING = "Starting" #: The streaming endpoint is transitioning to the running state. - RUNNING = "Running" #: The streaming endpoint is running. It is able to stream content to clients. - STOPPING = "Stopping" #: The streaming endpoint is transitioning to the stopped state. - DELETING = "Deleting" #: The streaming endpoint is being deleted. - SCALING = "Scaling" #: The streaming endpoint is increasing or decreasing scale units. + #: The initial state of a streaming endpoint after creation. Content is not ready to be streamed + #: from this endpoint. + STOPPED = "Stopped" + #: The streaming endpoint is transitioning to the running state. + STARTING = "Starting" + #: The streaming endpoint is running. It is able to stream content to clients. + RUNNING = "Running" + #: The streaming endpoint is transitioning to the stopped state. + STOPPING = "Stopping" + #: The streaming endpoint is being deleted. + DELETING = "Deleting" + #: The streaming endpoint is increasing or decreasing scale units. + SCALING = "Scaling" class StreamingLocatorContentKeyType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Encryption type of Content Key """ - COMMON_ENCRYPTION_CENC = "CommonEncryptionCenc" #: Common Encryption using CENC. - COMMON_ENCRYPTION_CBCS = "CommonEncryptionCbcs" #: Common Encryption using CBCS. - ENVELOPE_ENCRYPTION = "EnvelopeEncryption" #: Envelope Encryption. + #: Common Encryption using CENC. + COMMON_ENCRYPTION_CENC = "CommonEncryptionCenc" + #: Common Encryption using CBCS. + COMMON_ENCRYPTION_CBCS = "CommonEncryptionCbcs" + #: Envelope Encryption. + ENVELOPE_ENCRYPTION = "EnvelopeEncryption" class StreamingPolicyStreamingProtocol(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Streaming protocol """ - HLS = "Hls" #: HLS protocol. - DASH = "Dash" #: DASH protocol. - SMOOTH_STREAMING = "SmoothStreaming" #: SmoothStreaming protocol. - DOWNLOAD = "Download" #: Download protocol. + #: HLS protocol. + HLS = "Hls" + #: DASH protocol. + DASH = "Dash" + #: SmoothStreaming protocol. + SMOOTH_STREAMING = "SmoothStreaming" + #: Download protocol. + DOWNLOAD = "Download" class StreamOptionsFlag(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): - DEFAULT = "Default" #: Live streaming with no special latency optimizations. - LOW_LATENCY = "LowLatency" #: The live event provides lower end to end latency by reducing its internal buffers. This could result in more client buffering during playback if network bandwidth is low. + #: Live streaming with no special latency optimizations. + DEFAULT = "Default" + #: The live event provides lower end to end latency by reducing its internal buffers. This could + #: result in more client buffering during playback if network bandwidth is low. + LOW_LATENCY = "LowLatency" class StretchMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The resizing mode - how the input video will be resized to fit the desired output resolution(s). Default is AutoSize """ - NONE = "None" #: Strictly respect the output resolution without considering the pixel aspect ratio or display aspect ratio of the input video. 
- AUTO_SIZE = "AutoSize" #: Override the output resolution, and change it to match the display aspect ratio of the input, without padding. For example, if the input is 1920x1080 and the encoding preset asks for 1280x1280, then the value in the preset is overridden, and the output will be at 1280x720, which maintains the input aspect ratio of 16:9. - AUTO_FIT = "AutoFit" #: Pad the output (with either letterbox or pillar box) to honor the output resolution, while ensuring that the active video region in the output has the same aspect ratio as the input. For example, if the input is 1920x1080 and the encoding preset asks for 1280x1280, then the output will be at 1280x1280, which contains an inner rectangle of 1280x720 at aspect ratio of 16:9, and pillar box regions 280 pixels wide at the left and right. + #: Strictly respect the output resolution without considering the pixel aspect ratio or display + #: aspect ratio of the input video. + NONE = "None" + #: Override the output resolution, and change it to match the display aspect ratio of the input, + #: without padding. For example, if the input is 1920x1080 and the encoding preset asks for + #: 1280x1280, then the value in the preset is overridden, and the output will be at 1280x720, + #: which maintains the input aspect ratio of 16:9. + AUTO_SIZE = "AutoSize" + #: Pad the output (with either letterbox or pillar box) to honor the output resolution, while + #: ensuring that the active video region in the output has the same aspect ratio as the input. For + #: example, if the input is 1920x1080 and the encoding preset asks for 1280x1280, then the output + #: will be at 1280x1280, which contains an inner rectangle of 1280x720 at aspect ratio of 16:9, + #: and pillar box regions 280 pixels wide at the left and right. + AUTO_FIT = "AutoFit" + +class TrackAttribute(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): + """The TrackAttribute to filter the tracks by. + """ + + #: The bitrate of the track. + BITRATE = "Bitrate" + #: The language of the track. + LANGUAGE = "Language" class TrackPropertyCompareOperation(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Track property condition operation """ - UNKNOWN = "Unknown" #: Unknown track property compare operation. - EQUAL = "Equal" #: Equal operation. + #: Unknown track property compare operation. + UNKNOWN = "Unknown" + #: Equal operation. + EQUAL = "Equal" class TrackPropertyType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """Track property type """ - UNKNOWN = "Unknown" #: Unknown track property. - FOUR_CC = "FourCC" #: Track FourCC. + #: Unknown track property. + UNKNOWN = "Unknown" + #: Track FourCC. + FOUR_CC = "FourCC" class VideoSyncMode(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)): """The Video Sync Mode """ - AUTO = "Auto" #: This is the default method. Chooses between Cfr and Vfr depending on muxer capabilities. For output format MP4, the default mode is Cfr. - PASSTHROUGH = "Passthrough" #: The presentation timestamps on frames are passed through from the input file to the output file writer. Recommended when the input source has variable frame rate, and are attempting to produce multiple layers for adaptive streaming in the output which have aligned GOP boundaries. Note: if two or more frames in the input have duplicate timestamps, then the output will also have the same behavior. - CFR = "Cfr" #: Input frames will be repeated and/or dropped as needed to achieve exactly the requested constant frame rate. 
Recommended when the output frame rate is explicitly set at a specified value. - VFR = "Vfr" #: Similar to the Passthrough mode, but if the input has frames that have duplicate timestamps, then only one frame is passed through to the output, and others are dropped. Recommended when the number of output frames is expected to be equal to the number of input frames. For example, the output is used to calculate a quality metric like PSNR against the input. + #: This is the default method. Chooses between Cfr and Vfr depending on muxer capabilities. For + #: output format MP4, the default mode is Cfr. + AUTO = "Auto" + #: The presentation timestamps on frames are passed through from the input file to the output file + #: writer. Recommended when the input source has variable frame rate, and are attempting to + #: produce multiple layers for adaptive streaming in the output which have aligned GOP boundaries. + #: Note: if two or more frames in the input have duplicate timestamps, then the output will also + #: have the same behavior. + PASSTHROUGH = "Passthrough" + #: Input frames will be repeated and/or dropped as needed to achieve exactly the requested + #: constant frame rate. Recommended when the output frame rate is explicitly set at a specified + #: value. + CFR = "Cfr" + #: Similar to the Passthrough mode, but if the input has frames that have duplicate timestamps, + #: then only one frame is passed through to the output, and others are dropped. Recommended when + #: the number of output frames is expected to be equal to the number of input frames. For example, + #: the output is used to calculate a quality metric like PSNR against the input. + VFR = "Vfr" diff --git a/sdk/media/azure-mgmt-media/azure/mgmt/media/models/_models.py b/sdk/media/azure-mgmt-media/azure/mgmt/media/models/_models.py index 2c05b45f6b7a..b59667cdae7a 100644 --- a/sdk/media/azure-mgmt-media/azure/mgmt/media/models/_models.py +++ b/sdk/media/azure-mgmt-media/azure/mgmt/media/models/_models.py @@ -201,6 +201,31 @@ def __init__( self.time = kwargs['time'] +class AccessControl(msrest.serialization.Model): + """AccessControl. + + :param default_action: The behavior for IP access control in Key Delivery. Possible values + include: "Allow", "Deny". + :type default_action: str or ~azure.mgmt.media.models.DefaultAction + :param ip_allow_list: The IP allow list for access control in Key Delivery. If the default + action is set to 'Allow', the IP allow list must be empty. + :type ip_allow_list: list[str] + """ + + _attribute_map = { + 'default_action': {'key': 'defaultAction', 'type': 'str'}, + 'ip_allow_list': {'key': 'ipAllowList', 'type': '[str]'}, + } + + def __init__( + self, + **kwargs + ): + super(AccessControl, self).__init__(**kwargs) + self.default_action = kwargs.get('default_action', None) + self.ip_allow_list = kwargs.get('ip_allow_list', None) + + class AccountEncryption(msrest.serialization.Model): """AccountEncryption. @@ -268,7 +293,41 @@ def __init__( self.type = None -class AccountFilter(Resource): +class ProxyResource(Resource): + """The resource model definition for a Azure Resource Manager proxy resource. It will not have tags and a location. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar id: Fully qualified resource ID for the resource. Ex - + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. + :vartype id: str + :ivar name: The name of the resource. 
+ :vartype name: str + :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or + "Microsoft.Storage/storageAccounts". + :vartype type: str + """ + + _validation = { + 'id': {'readonly': True}, + 'name': {'readonly': True}, + 'type': {'readonly': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(ProxyResource, self).__init__(**kwargs) + + +class AccountFilter(ProxyResource): """An Account Filter. Variables are only populated by the server, and will be ignored when sending a request. @@ -281,6 +340,8 @@ class AccountFilter(Resource): :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts". :vartype type: str + :ivar system_data: The system metadata relating to this resource. + :vartype system_data: ~azure.mgmt.media.models.SystemData :param presentation_time_range: The presentation time range. :type presentation_time_range: ~azure.mgmt.media.models.PresentationTimeRange :param first_quality: The first quality. @@ -293,12 +354,14 @@ class AccountFilter(Resource): 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, + 'system_data': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, + 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'presentation_time_range': {'key': 'properties.presentationTimeRange', 'type': 'PresentationTimeRange'}, 'first_quality': {'key': 'properties.firstQuality', 'type': 'FirstQuality'}, 'tracks': {'key': 'properties.tracks', 'type': '[FilterTrackSelection]'}, @@ -309,6 +372,7 @@ def __init__( **kwargs ): super(AccountFilter, self).__init__(**kwargs) + self.system_data = None self.presentation_time_range = kwargs.get('presentation_time_range', None) self.first_quality = kwargs.get('first_quality', None) self.tracks = kwargs.get('tracks', None) @@ -404,7 +468,7 @@ def __init__( self.error = kwargs.get('error', None) -class Asset(Resource): +class Asset(ProxyResource): """An Asset. Variables are only populated by the server, and will be ignored when sending a request. @@ -417,6 +481,8 @@ class Asset(Resource): :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts". :vartype type: str + :ivar system_data: The system metadata relating to this resource. + :vartype system_data: ~azure.mgmt.media.models.SystemData :ivar asset_id: The Asset ID. :vartype asset_id: str :ivar created: The creation date of the Asset. 
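Note: the read-only system_data property added above surfaces the standard ARM creation/modification metadata (SystemData) on resources such as AccountFilter and Asset. A minimal usage sketch only, not part of the generated code: it assumes an already-authenticated AzureMediaServices client built with azure.identity.DefaultAzureCredential, and the subscription/resource-group/account/asset names are placeholders.

    from azure.identity import DefaultAzureCredential
    from azure.mgmt.media import AzureMediaServices

    # Placeholders; substitute real values for your environment.
    client = AzureMediaServices(DefaultAzureCredential(), "<subscription-id>")
    asset = client.assets.get("<resource-group>", "<media-account>", "<asset-name>")

    # system_data is read-only and populated by the service after this change.
    if asset.system_data is not None:
        print(asset.system_data.created_by, asset.system_data.created_at)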
@@ -441,6 +507,7 @@ class Asset(Resource): 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, + 'system_data': {'readonly': True}, 'asset_id': {'readonly': True}, 'created': {'readonly': True}, 'last_modified': {'readonly': True}, @@ -451,6 +518,7 @@ class Asset(Resource): 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, + 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'asset_id': {'key': 'properties.assetId', 'type': 'str'}, 'created': {'key': 'properties.created', 'type': 'iso-8601'}, 'last_modified': {'key': 'properties.lastModified', 'type': 'iso-8601'}, @@ -466,6 +534,7 @@ def __init__( **kwargs ): super(Asset, self).__init__(**kwargs) + self.system_data = None self.asset_id = None self.created = None self.last_modified = None @@ -552,7 +621,7 @@ def __init__( self.asset_file_id = kwargs['asset_file_id'] -class AssetFilter(Resource): +class AssetFilter(ProxyResource): """An Asset Filter. Variables are only populated by the server, and will be ignored when sending a request. @@ -565,6 +634,8 @@ class AssetFilter(Resource): :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts". :vartype type: str + :ivar system_data: The system metadata relating to this resource. + :vartype system_data: ~azure.mgmt.media.models.SystemData :param presentation_time_range: The presentation time range. :type presentation_time_range: ~azure.mgmt.media.models.PresentationTimeRange :param first_quality: The first quality. @@ -577,12 +648,14 @@ class AssetFilter(Resource): 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, + 'system_data': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, + 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'presentation_time_range': {'key': 'properties.presentationTimeRange', 'type': 'PresentationTimeRange'}, 'first_quality': {'key': 'properties.firstQuality', 'type': 'FirstQuality'}, 'tracks': {'key': 'properties.tracks', 'type': '[FilterTrackSelection]'}, @@ -593,6 +666,7 @@ def __init__( **kwargs ): super(AssetFilter, self).__init__(**kwargs) + self.system_data = None self.presentation_time_range = kwargs.get('presentation_time_range', None) self.first_quality = kwargs.get('first_quality', None) self.tracks = kwargs.get('tracks', None) @@ -733,8 +807,8 @@ class AudioAnalyzerPreset(Preset): language detected and process with the selected language for the duration of the file. It does not currently support dynamically switching between languages after the first language is detected. The automatic detection works best with audio recordings with clearly discernable - speech. If automatic detection fails to find the language, transcription would fallback to 'en- - US'." The list of supported languages is available here: + speech. If automatic detection fails to find the language, transcription would fallback to + 'en-US'." The list of supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463. :type audio_language: str :param mode: Determines the set of audio analysis operations to be performed. If unspecified, @@ -782,8 +856,8 @@ class Overlay(msrest.serialization.Model): :param odata_type: Required. The discriminator for derived types.Constant filled by server. :type odata_type: str :param input_label: Required. 
The label of the job input which is to be used as an overlay. The - Input must specify exactly one file. You can specify an image file in JPG or PNG formats, or an - audio file (such as a WAV, MP3, WMA or M4A file), or a video file. See + Input must specify exactly one file. You can specify an image file in JPG, PNG, GIF or BMP + format, or an audio file (such as a WAV, MP3, WMA or M4A file), or a video file. See https://aka.ms/mesformats for the complete list of supported audio and video file formats. :type input_label: str :param start: The start position, with reference to the input video, at which the overlay @@ -852,8 +926,8 @@ class AudioOverlay(Overlay): :param odata_type: Required. The discriminator for derived types.Constant filled by server. :type odata_type: str :param input_label: Required. The label of the job input which is to be used as an overlay. The - Input must specify exactly one file. You can specify an image file in JPG or PNG formats, or an - audio file (such as a WAV, MP3, WMA or M4A file), or a video file. See + Input must specify exactly one file. You can specify an image file in JPG, PNG, GIF or BMP + format, or an audio file (such as a WAV, MP3, WMA or M4A file), or a video file. See https://aka.ms/mesformats for the complete list of supported audio and video file formats. :type input_label: str :param start: The start position, with reference to the input video, at which the overlay @@ -904,6 +978,77 @@ def __init__( self.odata_type = '#Microsoft.Media.AudioOverlay' # type: str +class TrackDescriptor(msrest.serialization.Model): + """Base type for all TrackDescriptor types, which define the metadata and selection for tracks that should be processed by a Job. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: AudioTrackDescriptor, VideoTrackDescriptor. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. The discriminator for derived types.Constant filled by server. + :type odata_type: str + """ + + _validation = { + 'odata_type': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + } + + _subtype_map = { + 'odata_type': {'#Microsoft.Media.AudioTrackDescriptor': 'AudioTrackDescriptor', '#Microsoft.Media.VideoTrackDescriptor': 'VideoTrackDescriptor'} + } + + def __init__( + self, + **kwargs + ): + super(TrackDescriptor, self).__init__(**kwargs) + self.odata_type = None # type: Optional[str] + + +class AudioTrackDescriptor(TrackDescriptor): + """A TrackSelection to select audio tracks. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: SelectAudioTrackByAttribute, SelectAudioTrackById. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. The discriminator for derived types.Constant filled by server. + :type odata_type: str + :param channel_mapping: Optional designation for single channel audio tracks. Can be used to + combine the tracks into stereo or multi-channel audio tracks. Possible values include: + "FrontLeft", "FrontRight", "Center", "LowFrequencyEffects", "BackLeft", "BackRight", + "StereoLeft", "StereoRight". 
+ :type channel_mapping: str or ~azure.mgmt.media.models.ChannelMapping + """ + + _validation = { + 'odata_type': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'channel_mapping': {'key': 'channelMapping', 'type': 'str'}, + } + + _subtype_map = { + 'odata_type': {'#Microsoft.Media.SelectAudioTrackByAttribute': 'SelectAudioTrackByAttribute', '#Microsoft.Media.SelectAudioTrackById': 'SelectAudioTrackById'} + } + + def __init__( + self, + **kwargs + ): + super(AudioTrackDescriptor, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Media.AudioTrackDescriptor' # type: str + self.channel_mapping = kwargs.get('channel_mapping', None) + + class BuiltInStandardEncoderPreset(Preset): """Describes a built-in preset for encoding the input video with the Standard Encoder. @@ -915,7 +1060,9 @@ class BuiltInStandardEncoderPreset(Preset): values include: "H264SingleBitrateSD", "H264SingleBitrate720p", "H264SingleBitrate1080p", "AdaptiveStreaming", "AACGoodQualityAudio", "ContentAwareEncodingExperimental", "ContentAwareEncoding", "CopyAllBitrateNonInterleaved", "H264MultipleBitrate1080p", - "H264MultipleBitrate720p", "H264MultipleBitrateSD". + "H264MultipleBitrate720p", "H264MultipleBitrateSD", "H265ContentAwareEncoding", + "H265AdaptiveStreaming", "H265SingleBitrate720p", "H265SingleBitrate1080p", + "H265SingleBitrate4K". :type preset_name: str or ~azure.mgmt.media.models.EncoderNamedPreset """ @@ -1075,7 +1222,7 @@ def __init__( self.drm = kwargs.get('drm', None) -class ContentKeyPolicy(Resource): +class ContentKeyPolicy(ProxyResource): """A Content Key Policy resource. Variables are only populated by the server, and will be ignored when sending a request. @@ -1088,6 +1235,8 @@ class ContentKeyPolicy(Resource): :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts". :vartype type: str + :ivar system_data: The system metadata relating to this resource. + :vartype system_data: ~azure.mgmt.media.models.SystemData :ivar policy_id: The legacy Policy ID. :vartype policy_id: str :ivar created: The creation date of the Policy. @@ -1104,6 +1253,7 @@ class ContentKeyPolicy(Resource): 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, + 'system_data': {'readonly': True}, 'policy_id': {'readonly': True}, 'created': {'readonly': True}, 'last_modified': {'readonly': True}, @@ -1113,6 +1263,7 @@ class ContentKeyPolicy(Resource): 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, + 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'policy_id': {'key': 'properties.policyId', 'type': 'str'}, 'created': {'key': 'properties.created', 'type': 'iso-8601'}, 'last_modified': {'key': 'properties.lastModified', 'type': 'iso-8601'}, @@ -1125,6 +1276,7 @@ def __init__( **kwargs ): super(ContentKeyPolicy, self).__init__(**kwargs) + self.system_data = None self.policy_id = None self.created = None self.last_modified = None @@ -2371,7 +2523,7 @@ def __init__( class FaceDetectorPreset(Preset): - """Describes all the settings to be used when analyzing a video in order to detect all the faces present. + """Describes all the settings to be used when analyzing a video in order to detect (and optionally redact) all the faces present. All required parameters must be populated in order to send to Azure. 
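Note: with the H.265 values added to EncoderNamedPreset earlier in this patch, a BuiltInStandardEncoderPreset can now request H.265 output from the built-in encoder. An illustrative sketch only, under the same assumptions as the earlier snippet (the authenticated client variable and the resource-group/account/transform names are placeholders, and the create_or_update call is hedged as the usual track-2 pattern rather than quoted from this diff):

    from azure.mgmt.media.models import (
        BuiltInStandardEncoderPreset,
        EncoderNamedPreset,
        Transform,
        TransformOutput,
    )

    # Use one of the newly added H.265 built-in presets.
    h265_preset = BuiltInStandardEncoderPreset(
        preset_name=EncoderNamedPreset.H265_CONTENT_AWARE_ENCODING
    )
    transform = Transform(
        description="Content-aware H.265 encoding",
        outputs=[TransformOutput(preset=h265_preset)],
    )

    # Register the Transform on the Media Services account.
    client.transforms.create_or_update(
        "<resource-group>", "<media-account>", "<transform-name>", transform
    )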
@@ -2388,6 +2540,16 @@ class FaceDetectorPreset(Preset): However, faces that end up being too small in the resized video may not be detected. Possible values include: "SourceResolution", "StandardDefinition". :type resolution: str or ~azure.mgmt.media.models.AnalysisResolution + :param mode: This mode provides the ability to choose between the following settings: 1) + Analyze - For detection only.This mode generates a metadata JSON file marking appearances of + faces throughout the video.Where possible, appearances of the same person are assigned the same + ID. 2) Combined - Additionally redacts(blurs) detected faces. 3) Redact - This enables a 2-pass + process, allowing for selective redaction of a subset of detected faces.It takes in the + metadata file from a prior analyze pass, along with the source video, and a user-selected + subset of IDs that require redaction. Possible values include: "Analyze", "Redact", "Combined". + :type mode: str or ~azure.mgmt.media.models.FaceRedactorMode + :param blur_type: Blur type. Possible values include: "Box", "Low", "Med", "High", "Black". + :type blur_type: str or ~azure.mgmt.media.models.BlurType :param experimental_options: Dictionary containing key value pairs for parameters not exposed in the preset itself. :type experimental_options: dict[str, str] @@ -2400,6 +2562,8 @@ class FaceDetectorPreset(Preset): _attribute_map = { 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, 'resolution': {'key': 'resolution', 'type': 'str'}, + 'mode': {'key': 'mode', 'type': 'str'}, + 'blur_type': {'key': 'blurType', 'type': 'str'}, 'experimental_options': {'key': 'experimentalOptions', 'type': '{str}'}, } @@ -2410,6 +2574,8 @@ def __init__( super(FaceDetectorPreset, self).__init__(**kwargs) self.odata_type = '#Microsoft.Media.FaceDetectorPreset' # type: str self.resolution = kwargs.get('resolution', None) + self.mode = kwargs.get('mode', None) + self.blur_type = kwargs.get('blur_type', None) self.experimental_options = kwargs.get('experimental_options', None) @@ -2538,7 +2704,7 @@ class Format(msrest.serialization.Model): """Base class for output. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: ImageFormat, JpgFormat, MultiBitrateFormat, PngFormat. + sub-classes are: ImageFormat, MultiBitrateFormat. All required parameters must be populated in order to send to Azure. @@ -2552,8 +2718,9 @@ class Format(msrest.serialization.Model): the base name is truncated to the first 32 characters in total length. {Extension} - The appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index} - A unique index for thumbnails. Only applicable to thumbnails. {Bitrate} - The audio/video - bitrate. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. Any - unsubstituted macros will be collapsed and removed from the filename. + bitrate. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. + {Resolution} - The video resolution. Any unsubstituted macros will be collapsed and removed + from the filename. 
:type filename_pattern: str """ @@ -2568,7 +2735,7 @@ class Format(msrest.serialization.Model): } _subtype_map = { - 'odata_type': {'#Microsoft.Media.ImageFormat': 'ImageFormat', '#Microsoft.Media.JpgFormat': 'JpgFormat', '#Microsoft.Media.MultiBitrateFormat': 'MultiBitrateFormat', '#Microsoft.Media.PngFormat': 'PngFormat'} + 'odata_type': {'#Microsoft.Media.ImageFormat': 'ImageFormat', '#Microsoft.Media.MultiBitrateFormat': 'MultiBitrateFormat'} } def __init__( @@ -2580,11 +2747,106 @@ def __init__( self.filename_pattern = kwargs['filename_pattern'] +class InputDefinition(msrest.serialization.Model): + """Base class for defining an input. Use sub classes of this class to specify tracks selections and related metadata. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: FromAllInputFile, FromEachInputFile, InputFile. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. The discriminator for derived types.Constant filled by server. + :type odata_type: str + :param included_tracks: The list of TrackDescriptors which define the metadata and selection of + tracks in the input. + :type included_tracks: list[~azure.mgmt.media.models.TrackDescriptor] + """ + + _validation = { + 'odata_type': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'included_tracks': {'key': 'includedTracks', 'type': '[TrackDescriptor]'}, + } + + _subtype_map = { + 'odata_type': {'#Microsoft.Media.FromAllInputFile': 'FromAllInputFile', '#Microsoft.Media.FromEachInputFile': 'FromEachInputFile', '#Microsoft.Media.InputFile': 'InputFile'} + } + + def __init__( + self, + **kwargs + ): + super(InputDefinition, self).__init__(**kwargs) + self.odata_type = None # type: Optional[str] + self.included_tracks = kwargs.get('included_tracks', None) + + +class FromAllInputFile(InputDefinition): + """An InputDefinition that looks across all of the files provided to select tracks specified by the IncludedTracks property. Generally used with the AudioTrackByAttribute and VideoTrackByAttribute to allow selection of a single track across a set of input files. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. The discriminator for derived types.Constant filled by server. + :type odata_type: str + :param included_tracks: The list of TrackDescriptors which define the metadata and selection of + tracks in the input. + :type included_tracks: list[~azure.mgmt.media.models.TrackDescriptor] + """ + + _validation = { + 'odata_type': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'included_tracks': {'key': 'includedTracks', 'type': '[TrackDescriptor]'}, + } + + def __init__( + self, + **kwargs + ): + super(FromAllInputFile, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Media.FromAllInputFile' # type: str + + +class FromEachInputFile(InputDefinition): + """An InputDefinition that looks at each input file provided to select tracks specified by the IncludedTracks property. Generally used with the AudioTrackByAttribute and VideoTrackByAttribute to select tracks from each file given. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. The discriminator for derived types.Constant filled by server. 
+ :type odata_type: str + :param included_tracks: The list of TrackDescriptors which define the metadata and selection of + tracks in the input. + :type included_tracks: list[~azure.mgmt.media.models.TrackDescriptor] + """ + + _validation = { + 'odata_type': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'included_tracks': {'key': 'includedTracks', 'type': '[TrackDescriptor]'}, + } + + def __init__( + self, + **kwargs + ): + super(FromEachInputFile, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Media.FromEachInputFile' # type: str + + class Layer(msrest.serialization.Model): """The encoder can be configured to produce video and/or images (thumbnails) at different resolutions, by specifying a layer for each desired resolution. A layer represents the properties for the video or image at a resolution. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: JpgLayer, PngLayer, VideoLayer. + sub-classes are: H265VideoLayer, JpgLayer, PngLayer, VideoLayer. All required parameters must be populated in order to send to Azure. @@ -2615,7 +2877,7 @@ class Layer(msrest.serialization.Model): } _subtype_map = { - 'odata_type': {'#Microsoft.Media.JpgLayer': 'JpgLayer', '#Microsoft.Media.PngLayer': 'PngLayer', '#Microsoft.Media.VideoLayer': 'VideoLayer'} + 'odata_type': {'#Microsoft.Media.H265VideoLayer': 'H265VideoLayer', '#Microsoft.Media.JpgLayer': 'JpgLayer', '#Microsoft.Media.PngLayer': 'PngLayer', '#Microsoft.Media.VideoLayer': 'VideoLayer'} } def __init__( @@ -2810,7 +3072,7 @@ class Video(Codec): """Describes the basic properties for encoding the input video. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: H264Video, Image. + sub-classes are: H264Video, H265Video, Image. All required parameters must be populated in order to send to Azure. @@ -2846,7 +3108,7 @@ class Video(Codec): } _subtype_map = { - 'odata_type': {'#Microsoft.Media.H264Video': 'H264Video', '#Microsoft.Media.Image': 'Image'} + 'odata_type': {'#Microsoft.Media.H264Video': 'H264Video', '#Microsoft.Media.H265Video': 'H265Video', '#Microsoft.Media.Image': 'Image'} } def __init__( @@ -2919,6 +3181,240 @@ def __init__( self.layers = kwargs.get('layers', None) +class H265VideoLayer(Layer): + """Describes the settings to be used when encoding the input video into a desired output bitrate layer. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: H265Layer. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. The discriminator for derived types.Constant filled by server. + :type odata_type: str + :param width: The width of the output video for this layer. The value can be absolute (in + pixels) or relative (in percentage). For example 50% means the output video has half as many + pixels in width as the input. + :type width: str + :param height: The height of the output video for this layer. The value can be absolute (in + pixels) or relative (in percentage). For example 50% means the output video has half as many + pixels in height as the input. + :type height: str + :param label: The alphanumeric label for this layer, which can be used in multiplexing + different video and audio layers, or in naming the output file. + :type label: str + :param bitrate: Required. The average bitrate in bits per second at which to encode the input + video when generating this layer. 
For example: a target bitrate of 3000Kbps or 3Mbps means this + value should be 3000000 This is a required field. + :type bitrate: int + :param max_bitrate: The maximum bitrate (in bits per second), at which the VBV buffer should be + assumed to refill. If not specified, defaults to the same value as bitrate. + :type max_bitrate: int + :param b_frames: The number of B-frames to be used when encoding this layer. If not specified, + the encoder chooses an appropriate number based on the video profile and level. + :type b_frames: int + :param frame_rate: The frame rate (in frames per second) at which to encode this layer. The + value can be in the form of M/N where M and N are integers (For example, 30000/1001), or in the + form of a number (For example, 30, or 29.97). The encoder enforces constraints on allowed frame + rates based on the profile and level. If it is not specified, the encoder will use the same + frame rate as the input video. + :type frame_rate: str + :param slices: The number of slices to be used when encoding this layer. If not specified, + default is zero, which means that encoder will use a single slice for each frame. + :type slices: int + :param adaptive_b_frame: Specifies whether or not adaptive B-frames are to be used when + encoding this layer. If not specified, the encoder will turn it on whenever the video profile + permits its use. + :type adaptive_b_frame: bool + """ + + _validation = { + 'odata_type': {'required': True}, + 'bitrate': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'width': {'key': 'width', 'type': 'str'}, + 'height': {'key': 'height', 'type': 'str'}, + 'label': {'key': 'label', 'type': 'str'}, + 'bitrate': {'key': 'bitrate', 'type': 'int'}, + 'max_bitrate': {'key': 'maxBitrate', 'type': 'int'}, + 'b_frames': {'key': 'bFrames', 'type': 'int'}, + 'frame_rate': {'key': 'frameRate', 'type': 'str'}, + 'slices': {'key': 'slices', 'type': 'int'}, + 'adaptive_b_frame': {'key': 'adaptiveBFrame', 'type': 'bool'}, + } + + _subtype_map = { + 'odata_type': {'#Microsoft.Media.H265Layer': 'H265Layer'} + } + + def __init__( + self, + **kwargs + ): + super(H265VideoLayer, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Media.H265VideoLayer' # type: str + self.bitrate = kwargs['bitrate'] + self.max_bitrate = kwargs.get('max_bitrate', None) + self.b_frames = kwargs.get('b_frames', None) + self.frame_rate = kwargs.get('frame_rate', None) + self.slices = kwargs.get('slices', None) + self.adaptive_b_frame = kwargs.get('adaptive_b_frame', None) + + +class H265Layer(H265VideoLayer): + """Describes the settings to be used when encoding the input video into a desired output bitrate layer with the H.265 video codec. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. The discriminator for derived types.Constant filled by server. + :type odata_type: str + :param width: The width of the output video for this layer. The value can be absolute (in + pixels) or relative (in percentage). For example 50% means the output video has half as many + pixels in width as the input. + :type width: str + :param height: The height of the output video for this layer. The value can be absolute (in + pixels) or relative (in percentage). For example 50% means the output video has half as many + pixels in height as the input. 
+ :type height: str + :param label: The alphanumeric label for this layer, which can be used in multiplexing + different video and audio layers, or in naming the output file. + :type label: str + :param bitrate: Required. The average bitrate in bits per second at which to encode the input + video when generating this layer. For example: a target bitrate of 3000Kbps or 3Mbps means this + value should be 3000000 This is a required field. + :type bitrate: int + :param max_bitrate: The maximum bitrate (in bits per second), at which the VBV buffer should be + assumed to refill. If not specified, defaults to the same value as bitrate. + :type max_bitrate: int + :param b_frames: The number of B-frames to be used when encoding this layer. If not specified, + the encoder chooses an appropriate number based on the video profile and level. + :type b_frames: int + :param frame_rate: The frame rate (in frames per second) at which to encode this layer. The + value can be in the form of M/N where M and N are integers (For example, 30000/1001), or in the + form of a number (For example, 30, or 29.97). The encoder enforces constraints on allowed frame + rates based on the profile and level. If it is not specified, the encoder will use the same + frame rate as the input video. + :type frame_rate: str + :param slices: The number of slices to be used when encoding this layer. If not specified, + default is zero, which means that encoder will use a single slice for each frame. + :type slices: int + :param adaptive_b_frame: Specifies whether or not adaptive B-frames are to be used when + encoding this layer. If not specified, the encoder will turn it on whenever the video profile + permits its use. + :type adaptive_b_frame: bool + :param profile: We currently support Main. Default is Auto. Possible values include: "Auto", + "Main". + :type profile: str or ~azure.mgmt.media.models.H265VideoProfile + :param level: We currently support Level up to 6.2. The value can be Auto, or a number that + matches the H.265 profile. If not specified, the default is Auto, which lets the encoder choose + the Level that is appropriate for this layer. + :type level: str + :param buffer_window: The VBV buffer window length. The value should be in ISO 8601 format. The + value should be in the range [0.1-100] seconds. The default is 5 seconds (for example, PT5S). + :type buffer_window: ~datetime.timedelta + :param reference_frames: The number of reference frames to be used when encoding this layer. If + not specified, the encoder determines an appropriate number based on the encoder complexity + setting. 
+ :type reference_frames: int + """ + + _validation = { + 'odata_type': {'required': True}, + 'bitrate': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'width': {'key': 'width', 'type': 'str'}, + 'height': {'key': 'height', 'type': 'str'}, + 'label': {'key': 'label', 'type': 'str'}, + 'bitrate': {'key': 'bitrate', 'type': 'int'}, + 'max_bitrate': {'key': 'maxBitrate', 'type': 'int'}, + 'b_frames': {'key': 'bFrames', 'type': 'int'}, + 'frame_rate': {'key': 'frameRate', 'type': 'str'}, + 'slices': {'key': 'slices', 'type': 'int'}, + 'adaptive_b_frame': {'key': 'adaptiveBFrame', 'type': 'bool'}, + 'profile': {'key': 'profile', 'type': 'str'}, + 'level': {'key': 'level', 'type': 'str'}, + 'buffer_window': {'key': 'bufferWindow', 'type': 'duration'}, + 'reference_frames': {'key': 'referenceFrames', 'type': 'int'}, + } + + def __init__( + self, + **kwargs + ): + super(H265Layer, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Media.H265Layer' # type: str + self.profile = kwargs.get('profile', None) + self.level = kwargs.get('level', None) + self.buffer_window = kwargs.get('buffer_window', None) + self.reference_frames = kwargs.get('reference_frames', None) + + +class H265Video(Video): + """Describes all the properties for encoding a video with the H.265 codec. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. The discriminator for derived types.Constant filled by server. + :type odata_type: str + :param label: An optional label for the codec. The label can be used to control muxing + behavior. + :type label: str + :param key_frame_interval: The distance between two key frames. The value should be non-zero in + the range [0.5, 20] seconds, specified in ISO 8601 format. The default is 2 seconds(PT2S). Note + that this setting is ignored if VideoSyncMode.Passthrough is set, where the KeyFrameInterval + value will follow the input source setting. + :type key_frame_interval: ~datetime.timedelta + :param stretch_mode: The resizing mode - how the input video will be resized to fit the desired + output resolution(s). Default is AutoSize. Possible values include: "None", "AutoSize", + "AutoFit". + :type stretch_mode: str or ~azure.mgmt.media.models.StretchMode + :param sync_mode: The Video Sync Mode. Possible values include: "Auto", "Passthrough", "Cfr", + "Vfr". + :type sync_mode: str or ~azure.mgmt.media.models.VideoSyncMode + :param scene_change_detection: Specifies whether or not the encoder should insert key frames at + scene changes. If not specified, the default is false. This flag should be set to true only + when the encoder is being configured to produce a single output video. + :type scene_change_detection: bool + :param complexity: Tells the encoder how to choose its encoding settings. Quality will provide + for a higher compression ratio but at a higher cost and longer compute time. Speed will + produce a relatively larger file but is faster and more economical. The default value is + Balanced. Possible values include: "Speed", "Balanced", "Quality". + :type complexity: str or ~azure.mgmt.media.models.H265Complexity + :param layers: The collection of output H.265 layers to be produced by the encoder. 
+ :type layers: list[~azure.mgmt.media.models.H265Layer] + """ + + _validation = { + 'odata_type': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'label': {'key': 'label', 'type': 'str'}, + 'key_frame_interval': {'key': 'keyFrameInterval', 'type': 'duration'}, + 'stretch_mode': {'key': 'stretchMode', 'type': 'str'}, + 'sync_mode': {'key': 'syncMode', 'type': 'str'}, + 'scene_change_detection': {'key': 'sceneChangeDetection', 'type': 'bool'}, + 'complexity': {'key': 'complexity', 'type': 'str'}, + 'layers': {'key': 'layers', 'type': '[H265Layer]'}, + } + + def __init__( + self, + **kwargs + ): + super(H265Video, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Media.H265Video' # type: str + self.scene_change_detection = kwargs.get('scene_change_detection', None) + self.complexity = kwargs.get('complexity', None) + self.layers = kwargs.get('layers', None) + + class Hls(msrest.serialization.Model): """HTTP Live Streaming (HLS) packing setting for the live output. @@ -3025,42 +3521,83 @@ def __init__( self.range = kwargs.get('range', None) -class ImageFormat(Format): - """Describes the properties for an output image file. +class ImageFormat(Format): + """Describes the properties for an output image file. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: JpgFormat, PngFormat. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. The discriminator for derived types.Constant filled by server. + :type odata_type: str + :param filename_pattern: Required. The pattern of the file names for the generated output + files. The following macros are supported in the file name: {Basename} - An expansion macro + that will use the name of the input video file. If the base name(the file suffix is not + included) of the input video file is less than 32 characters long, the base name of input video + files will be used. If the length of base name of the input video file exceeds 32 characters, + the base name is truncated to the first 32 characters in total length. {Extension} - The + appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index} + - A unique index for thumbnails. Only applicable to thumbnails. {Bitrate} - The audio/video + bitrate. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. + {Resolution} - The video resolution. Any unsubstituted macros will be collapsed and removed + from the filename. + :type filename_pattern: str + """ + + _validation = { + 'odata_type': {'required': True}, + 'filename_pattern': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'filename_pattern': {'key': 'filenamePattern', 'type': 'str'}, + } + + _subtype_map = { + 'odata_type': {'#Microsoft.Media.JpgFormat': 'JpgFormat', '#Microsoft.Media.PngFormat': 'PngFormat'} + } + + def __init__( + self, + **kwargs + ): + super(ImageFormat, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Media.ImageFormat' # type: str + + +class InputFile(InputDefinition): + """An InputDefinition for a single file. TrackSelections are scoped to the file specified. All required parameters must be populated in order to send to Azure. :param odata_type: Required. The discriminator for derived types.Constant filled by server. :type odata_type: str - :param filename_pattern: Required. The pattern of the file names for the generated output - files. 
The following macros are supported in the file name: {Basename} - An expansion macro - that will use the name of the input video file. If the base name(the file suffix is not - included) of the input video file is less than 32 characters long, the base name of input video - files will be used. If the length of base name of the input video file exceeds 32 characters, - the base name is truncated to the first 32 characters in total length. {Extension} - The - appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index} - - A unique index for thumbnails. Only applicable to thumbnails. {Bitrate} - The audio/video - bitrate. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. Any - unsubstituted macros will be collapsed and removed from the filename. - :type filename_pattern: str + :param included_tracks: The list of TrackDescriptors which define the metadata and selection of + tracks in the input. + :type included_tracks: list[~azure.mgmt.media.models.TrackDescriptor] + :param filename: Name of the file that this input definition applies to. + :type filename: str """ _validation = { 'odata_type': {'required': True}, - 'filename_pattern': {'required': True}, } _attribute_map = { 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, - 'filename_pattern': {'key': 'filenamePattern', 'type': 'str'}, + 'included_tracks': {'key': 'includedTracks', 'type': '[TrackDescriptor]'}, + 'filename': {'key': 'filename', 'type': 'str'}, } def __init__( self, **kwargs ): - super(ImageFormat, self).__init__(**kwargs) - self.odata_type = '#Microsoft.Media.ImageFormat' # type: str + super(InputFile, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Media.InputFile' # type: str + self.filename = kwargs.get('filename', None) class IPAccessControl(msrest.serialization.Model): @@ -3109,7 +3646,7 @@ def __init__( self.subnet_prefix_length = kwargs.get('subnet_prefix_length', None) -class Job(Resource): +class Job(ProxyResource): """A Job resource type. The progress and state can be obtained by polling a Job or subscribing to events using EventGrid. Variables are only populated by the server, and will be ignored when sending a request. @@ -3122,8 +3659,10 @@ class Job(Resource): :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts". :vartype type: str - :ivar created: The UTC date and time when the customer has created the Job, in 'YYYY-MM- - DDThh:mm:ssZ' format. + :ivar system_data: The system metadata relating to this resource. + :vartype system_data: ~azure.mgmt.media.models.SystemData + :ivar created: The UTC date and time when the customer has created the Job, in + 'YYYY-MM-DDThh:mm:ssZ' format. :vartype created: ~datetime.datetime :ivar state: The current state of the job. Possible values include: "Canceled", "Canceling", "Error", "Finished", "Processing", "Queued", "Scheduled". 
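The H.265 model classes added earlier in this file (H265VideoLayer, H265Layer, H265Video) follow the same construction pattern as the other codec models. A minimal sketch of composing them, with illustrative values only, is:

from datetime import timedelta

from azure.mgmt.media.models import H265Layer, H265Video

# A single 1080p H.265 output layer; 'bitrate' is the only required field.
hevc_layer = H265Layer(
    bitrate=3500000,          # average bitrate, in bits per second
    width="1920",
    height="1080",
    profile="Main",           # H265VideoProfile: "Auto" or "Main"
    buffer_window=timedelta(seconds=5),
    adaptive_b_frame=True,
)

# The H.265 codec entry that a custom encoding preset would reference.
hevc_codec = H265Video(
    key_frame_interval=timedelta(seconds=2),
    complexity="Balanced",    # H265Complexity: "Speed", "Balanced" or "Quality"
    scene_change_detection=False,
    layers=[hevc_layer],
)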
@@ -3154,6 +3693,7 @@ class Job(Resource): 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, + 'system_data': {'readonly': True}, 'created': {'readonly': True}, 'state': {'readonly': True}, 'last_modified': {'readonly': True}, @@ -3165,6 +3705,7 @@ class Job(Resource): 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, + 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'created': {'key': 'properties.created', 'type': 'iso-8601'}, 'state': {'key': 'properties.state', 'type': 'str'}, 'description': {'key': 'properties.description', 'type': 'str'}, @@ -3182,6 +3723,7 @@ def __init__( **kwargs ): super(Job, self).__init__(**kwargs) + self.system_data = None self.created = None self.state = None self.description = kwargs.get('description', None) @@ -3303,7 +3845,7 @@ class JobInput(msrest.serialization.Model): """Base class for inputs to a Job. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: JobInputClip, JobInputs. + sub-classes are: JobInputClip, JobInputSequence, JobInputs. All required parameters must be populated in order to send to Azure. @@ -3320,7 +3862,7 @@ class JobInput(msrest.serialization.Model): } _subtype_map = { - 'odata_type': {'#Microsoft.Media.JobInputClip': 'JobInputClip', '#Microsoft.Media.JobInputs': 'JobInputs'} + 'odata_type': {'#Microsoft.Media.JobInputClip': 'JobInputClip', '#Microsoft.Media.JobInputSequence': 'JobInputSequence', '#Microsoft.Media.JobInputs': 'JobInputs'} } def __init__( @@ -3355,6 +3897,9 @@ class JobInputClip(JobInput): submitting a Job, exactly one of the JobInputs should be the image file, and it should have the label 'xyz'. :type label: str + :param input_definitions: Defines a list of InputDefinitions. For each InputDefinition, it + defines a list of track selections and related metadata. + :type input_definitions: list[~azure.mgmt.media.models.InputDefinition] """ _validation = { @@ -3367,6 +3912,7 @@ class JobInputClip(JobInput): 'start': {'key': 'start', 'type': 'ClipTime'}, 'end': {'key': 'end', 'type': 'ClipTime'}, 'label': {'key': 'label', 'type': 'str'}, + 'input_definitions': {'key': 'inputDefinitions', 'type': '[InputDefinition]'}, } _subtype_map = { @@ -3383,6 +3929,7 @@ def __init__( self.start = kwargs.get('start', None) self.end = kwargs.get('end', None) self.label = kwargs.get('label', None) + self.input_definitions = kwargs.get('input_definitions', None) class JobInputAsset(JobInputClip): @@ -3406,6 +3953,9 @@ class JobInputAsset(JobInputClip): submitting a Job, exactly one of the JobInputs should be the image file, and it should have the label 'xyz'. :type label: str + :param input_definitions: Defines a list of InputDefinitions. For each InputDefinition, it + defines a list of track selections and related metadata. + :type input_definitions: list[~azure.mgmt.media.models.InputDefinition] :param asset_name: Required. The name of the input Asset. :type asset_name: str """ @@ -3421,6 +3971,7 @@ class JobInputAsset(JobInputClip): 'start': {'key': 'start', 'type': 'ClipTime'}, 'end': {'key': 'end', 'type': 'ClipTime'}, 'label': {'key': 'label', 'type': 'str'}, + 'input_definitions': {'key': 'inputDefinitions', 'type': '[InputDefinition]'}, 'asset_name': {'key': 'assetName', 'type': 'str'}, } @@ -3454,6 +4005,9 @@ class JobInputHttp(JobInputClip): submitting a Job, exactly one of the JobInputs should be the image file, and it should have the label 'xyz'. 
:type label: str + :param input_definitions: Defines a list of InputDefinitions. For each InputDefinition, it + defines a list of track selections and related metadata. + :type input_definitions: list[~azure.mgmt.media.models.InputDefinition] :param base_uri: Base URI for HTTPS job input. It will be concatenated with provided file names. If no base uri is given, then the provided file list is assumed to be fully qualified uris. Maximum length of 4000 characters. @@ -3470,6 +4024,7 @@ class JobInputHttp(JobInputClip): 'start': {'key': 'start', 'type': 'ClipTime'}, 'end': {'key': 'end', 'type': 'ClipTime'}, 'label': {'key': 'label', 'type': 'str'}, + 'input_definitions': {'key': 'inputDefinitions', 'type': '[InputDefinition]'}, 'base_uri': {'key': 'baseUri', 'type': 'str'}, } @@ -3511,6 +4066,35 @@ def __init__( self.inputs = kwargs.get('inputs', None) +class JobInputSequence(JobInput): + """A Sequence contains an ordered list of Clips where each clip is a JobInput. The Sequence will be treated as a single input. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. The discriminator for derived types.Constant filled by server. + :type odata_type: str + :param inputs: JobInputs that make up the timeline. + :type inputs: list[~azure.mgmt.media.models.JobInputClip] + """ + + _validation = { + 'odata_type': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[JobInputClip]'}, + } + + def __init__( + self, + **kwargs + ): + super(JobInputSequence, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Media.JobInputSequence' # type: str + self.inputs = kwargs.get('inputs', None) + + class JobOutput(msrest.serialization.Model): """Describes all the properties of a JobOutput. @@ -3653,7 +4237,7 @@ def __init__( self.asset_name = kwargs['asset_name'] -class JpgFormat(Format): +class JpgFormat(ImageFormat): """Describes the settings for producing JPEG thumbnails. All required parameters must be populated in order to send to Azure. @@ -3668,8 +4252,9 @@ class JpgFormat(Format): the base name is truncated to the first 32 characters in total length. {Extension} - The appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index} - A unique index for thumbnails. Only applicable to thumbnails. {Bitrate} - The audio/video - bitrate. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. Any - unsubstituted macros will be collapsed and removed from the filename. + bitrate. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. + {Resolution} - The video resolution. Any unsubstituted macros will be collapsed and removed + from the filename. :type filename_pattern: str """ @@ -3822,6 +4407,25 @@ def __init__( self.quality = kwargs.get('quality', None) +class KeyDelivery(msrest.serialization.Model): + """KeyDelivery. + + :param access_control: The access control properties for Key Delivery. + :type access_control: ~azure.mgmt.media.models.AccessControl + """ + + _attribute_map = { + 'access_control': {'key': 'accessControl', 'type': 'AccessControl'}, + } + + def __init__( + self, + **kwargs + ): + super(KeyDelivery, self).__init__(**kwargs) + self.access_control = kwargs.get('access_control', None) + + class KeyVaultProperties(msrest.serialization.Model): """KeyVaultProperties. @@ -4029,6 +4633,8 @@ class LiveEvent(TrackedResource): :type tags: dict[str, str] :param location: Required. 
The geo-location where the resource lives. :type location: str + :ivar system_data: The system metadata relating to this resource. + :vartype system_data: ~azure.mgmt.media.models.SystemData :param description: A description for the live event. :type description: str :param input: Live event input settings. It defines how the live event receives input from a @@ -4076,6 +4682,7 @@ class LiveEvent(TrackedResource): 'name': {'readonly': True}, 'type': {'readonly': True}, 'location': {'required': True}, + 'system_data': {'readonly': True}, 'provisioning_state': {'readonly': True}, 'resource_state': {'readonly': True}, 'created': {'readonly': True}, @@ -4088,6 +4695,7 @@ class LiveEvent(TrackedResource): 'type': {'key': 'type', 'type': 'str'}, 'tags': {'key': 'tags', 'type': '{str}'}, 'location': {'key': 'location', 'type': 'str'}, + 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'description': {'key': 'properties.description', 'type': 'str'}, 'input': {'key': 'properties.input', 'type': 'LiveEventInput'}, 'preview': {'key': 'properties.preview', 'type': 'LiveEventPreview'}, @@ -4108,6 +4716,7 @@ def __init__( **kwargs ): super(LiveEvent, self).__init__(**kwargs) + self.system_data = None self.description = kwargs.get('description', None) self.input = kwargs.get('input', None) self.preview = kwargs.get('preview', None) @@ -4458,7 +5067,7 @@ def __init__( self.output_transcription_track = kwargs.get('output_transcription_track', None) -class LiveOutput(Resource): +class LiveOutput(ProxyResource): """The Live Output. Variables are only populated by the server, and will be ignored when sending a request. @@ -4651,6 +5260,8 @@ class MediaService(TrackedResource): :type location: str :param identity: The Managed Identity for the Media Services account. :type identity: ~azure.mgmt.media.models.MediaServiceIdentity + :ivar system_data: The system metadata relating to this resource. + :vartype system_data: ~azure.mgmt.media.models.SystemData :ivar media_service_id: The Media Services account ID. :vartype media_service_id: str :param storage_accounts: The storage accounts for this resource. @@ -4659,6 +5270,8 @@ class MediaService(TrackedResource): :type storage_authentication: str or ~azure.mgmt.media.models.StorageAuthentication :param encryption: The account encryption properties. :type encryption: ~azure.mgmt.media.models.AccountEncryption + :param key_delivery: The Key Delivery properties for Media Services account. 
+ :type key_delivery: ~azure.mgmt.media.models.KeyDelivery """ _validation = { @@ -4666,6 +5279,7 @@ class MediaService(TrackedResource): 'name': {'readonly': True}, 'type': {'readonly': True}, 'location': {'required': True}, + 'system_data': {'readonly': True}, 'media_service_id': {'readonly': True}, } @@ -4676,10 +5290,12 @@ class MediaService(TrackedResource): 'tags': {'key': 'tags', 'type': '{str}'}, 'location': {'key': 'location', 'type': 'str'}, 'identity': {'key': 'identity', 'type': 'MediaServiceIdentity'}, + 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'media_service_id': {'key': 'properties.mediaServiceId', 'type': 'str'}, 'storage_accounts': {'key': 'properties.storageAccounts', 'type': '[StorageAccount]'}, 'storage_authentication': {'key': 'properties.storageAuthentication', 'type': 'str'}, 'encryption': {'key': 'properties.encryption', 'type': 'AccountEncryption'}, + 'key_delivery': {'key': 'properties.keyDelivery', 'type': 'KeyDelivery'}, } def __init__( @@ -4688,10 +5304,12 @@ def __init__( ): super(MediaService, self).__init__(**kwargs) self.identity = kwargs.get('identity', None) + self.system_data = None self.media_service_id = None self.storage_accounts = kwargs.get('storage_accounts', None) self.storage_authentication = kwargs.get('storage_authentication', None) self.encryption = kwargs.get('encryption', None) + self.key_delivery = kwargs.get('key_delivery', None) class MediaServiceCollection(msrest.serialization.Model): @@ -4755,6 +5373,55 @@ def __init__( self.tenant_id = None +class MediaServiceUpdate(msrest.serialization.Model): + """A Media Services account update. + + Variables are only populated by the server, and will be ignored when sending a request. + + :param tags: A set of tags. Resource tags. + :type tags: dict[str, str] + :param identity: The Managed Identity for the Media Services account. + :type identity: ~azure.mgmt.media.models.MediaServiceIdentity + :ivar media_service_id: The Media Services account ID. + :vartype media_service_id: str + :param storage_accounts: The storage accounts for this resource. + :type storage_accounts: list[~azure.mgmt.media.models.StorageAccount] + :param storage_authentication: Possible values include: "System", "ManagedIdentity". + :type storage_authentication: str or ~azure.mgmt.media.models.StorageAuthentication + :param encryption: The account encryption properties. + :type encryption: ~azure.mgmt.media.models.AccountEncryption + :param key_delivery: The Key Delivery properties for Media Services account. 
+ :type key_delivery: ~azure.mgmt.media.models.KeyDelivery + """ + + _validation = { + 'media_service_id': {'readonly': True}, + } + + _attribute_map = { + 'tags': {'key': 'tags', 'type': '{str}'}, + 'identity': {'key': 'identity', 'type': 'MediaServiceIdentity'}, + 'media_service_id': {'key': 'properties.mediaServiceId', 'type': 'str'}, + 'storage_accounts': {'key': 'properties.storageAccounts', 'type': '[StorageAccount]'}, + 'storage_authentication': {'key': 'properties.storageAuthentication', 'type': 'str'}, + 'encryption': {'key': 'properties.encryption', 'type': 'AccountEncryption'}, + 'key_delivery': {'key': 'properties.keyDelivery', 'type': 'KeyDelivery'}, + } + + def __init__( + self, + **kwargs + ): + super(MediaServiceUpdate, self).__init__(**kwargs) + self.tags = kwargs.get('tags', None) + self.identity = kwargs.get('identity', None) + self.media_service_id = None + self.storage_accounts = kwargs.get('storage_accounts', None) + self.storage_authentication = kwargs.get('storage_authentication', None) + self.encryption = kwargs.get('encryption', None) + self.key_delivery = kwargs.get('key_delivery', None) + + class MetricDimension(msrest.serialization.Model): """A metric dimension. @@ -4806,10 +5473,21 @@ class MetricSpecification(msrest.serialization.Model): :ivar aggregation_type: The metric aggregation type. Possible values include: "Average", "Count", "Total". :vartype aggregation_type: str or ~azure.mgmt.media.models.MetricAggregationType + :ivar lock_aggregation_type: The metric lock aggregation type. Possible values include: + "Average", "Count", "Total". + :vartype lock_aggregation_type: str or ~azure.mgmt.media.models.MetricAggregationType :param supported_aggregation_types: Supported aggregation types. :type supported_aggregation_types: list[str] :ivar dimensions: The metric dimensions. :vartype dimensions: list[~azure.mgmt.media.models.MetricDimension] + :ivar enable_regional_mdm_account: Indicates whether regional MDM account is enabled. + :vartype enable_regional_mdm_account: bool + :ivar source_mdm_account: The source MDM account. + :vartype source_mdm_account: str + :ivar source_mdm_namespace: The source MDM namespace. + :vartype source_mdm_namespace: str + :ivar supported_time_grain_types: The supported time grain types. 
+ :vartype supported_time_grain_types: list[str] """ _validation = { @@ -4818,7 +5496,12 @@ class MetricSpecification(msrest.serialization.Model): 'display_description': {'readonly': True}, 'unit': {'readonly': True}, 'aggregation_type': {'readonly': True}, + 'lock_aggregation_type': {'readonly': True}, 'dimensions': {'readonly': True}, + 'enable_regional_mdm_account': {'readonly': True}, + 'source_mdm_account': {'readonly': True}, + 'source_mdm_namespace': {'readonly': True}, + 'supported_time_grain_types': {'readonly': True}, } _attribute_map = { @@ -4827,8 +5510,13 @@ class MetricSpecification(msrest.serialization.Model): 'display_description': {'key': 'displayDescription', 'type': 'str'}, 'unit': {'key': 'unit', 'type': 'str'}, 'aggregation_type': {'key': 'aggregationType', 'type': 'str'}, + 'lock_aggregation_type': {'key': 'lockAggregationType', 'type': 'str'}, 'supported_aggregation_types': {'key': 'supportedAggregationTypes', 'type': '[str]'}, 'dimensions': {'key': 'dimensions', 'type': '[MetricDimension]'}, + 'enable_regional_mdm_account': {'key': 'enableRegionalMdmAccount', 'type': 'bool'}, + 'source_mdm_account': {'key': 'sourceMdmAccount', 'type': 'str'}, + 'source_mdm_namespace': {'key': 'sourceMdmNamespace', 'type': 'str'}, + 'supported_time_grain_types': {'key': 'supportedTimeGrainTypes', 'type': '[str]'}, } def __init__( @@ -4841,8 +5529,13 @@ def __init__( self.display_description = None self.unit = None self.aggregation_type = None + self.lock_aggregation_type = None self.supported_aggregation_types = kwargs.get('supported_aggregation_types', None) self.dimensions = None + self.enable_regional_mdm_account = None + self.source_mdm_account = None + self.source_mdm_namespace = None + self.supported_time_grain_types = None class MultiBitrateFormat(Format): @@ -4863,8 +5556,9 @@ class MultiBitrateFormat(Format): the base name is truncated to the first 32 characters in total length. {Extension} - The appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index} - A unique index for thumbnails. Only applicable to thumbnails. {Bitrate} - The audio/video - bitrate. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. Any - unsubstituted macros will be collapsed and removed from the filename. + bitrate. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. + {Resolution} - The video resolution. Any unsubstituted macros will be collapsed and removed + from the filename. :type filename_pattern: str :param output_files: The list of output files to produce. Each entry in the list is a set of audio and video layer labels to be muxed together . @@ -4910,8 +5604,9 @@ class Mp4Format(MultiBitrateFormat): the base name is truncated to the first 32 characters in total length. {Extension} - The appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index} - A unique index for thumbnails. Only applicable to thumbnails. {Bitrate} - The audio/video - bitrate. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. Any - unsubstituted macros will be collapsed and removed from the filename. + bitrate. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. + {Resolution} - The video resolution. Any unsubstituted macros will be collapsed and removed + from the filename. :type filename_pattern: str :param output_files: The list of output files to produce. Each entry in the list is a set of audio and video layer labels to be muxed together . 
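The KeyDelivery, AccessControl and MediaServiceUpdate models introduced above can be combined to restrict the Key Delivery endpoint to an IP allow list. A minimal sketch follows; the CIDR value is a placeholder, and sending the update through the mediaservices update operation is an assumption about the existing client surface, not shown in this patch:

from azure.mgmt.media.models import AccessControl, KeyDelivery, MediaServiceUpdate

# With default_action "Deny", only addresses on the allow list can reach the
# Key Delivery endpoint; with "Allow", the allow list must be empty.
update = MediaServiceUpdate(
    tags={"env": "test"},
    key_delivery=KeyDelivery(
        access_control=AccessControl(
            default_action="Deny",
            ip_allow_list=["203.0.113.0/24"],
        )
    ),
)
# The update object would then be passed to the account update operation,
# e.g. client.mediaservices.update(resource_group_name, account_name, update).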
@@ -5000,6 +5695,10 @@ class Operation(msrest.serialization.Model): :type origin: str :param properties: Operation properties format. :type properties: ~azure.mgmt.media.models.Properties + :param is_data_action: Whether the operation applies to data-plane. + :type is_data_action: bool + :param action_type: Indicates the action type. Possible values include: "Internal". + :type action_type: str or ~azure.mgmt.media.models.ActionType """ _validation = { @@ -5011,6 +5710,8 @@ class Operation(msrest.serialization.Model): 'display': {'key': 'display', 'type': 'OperationDisplay'}, 'origin': {'key': 'origin', 'type': 'str'}, 'properties': {'key': 'properties', 'type': 'Properties'}, + 'is_data_action': {'key': 'isDataAction', 'type': 'bool'}, + 'action_type': {'key': 'actionType', 'type': 'str'}, } def __init__( @@ -5022,6 +5723,8 @@ def __init__( self.display = kwargs.get('display', None) self.origin = kwargs.get('origin', None) self.properties = kwargs.get('properties', None) + self.is_data_action = kwargs.get('is_data_action', None) + self.action_type = kwargs.get('action_type', None) class OperationCollection(msrest.serialization.Model): @@ -5108,7 +5811,7 @@ def __init__( self.labels = kwargs['labels'] -class PngFormat(Format): +class PngFormat(ImageFormat): """Describes the settings for producing PNG thumbnails. All required parameters must be populated in order to send to Azure. @@ -5123,8 +5826,9 @@ class PngFormat(Format): the base name is truncated to the first 32 characters in total length. {Extension} - The appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index} - A unique index for thumbnails. Only applicable to thumbnails. {Bitrate} - The audio/video - bitrate. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. Any - unsubstituted macros will be collapsed and removed from the filename. + bitrate. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. + {Resolution} - The video resolution. Any unsubstituted macros will be collapsed and removed + from the filename. :type filename_pattern: str """ @@ -5196,7 +5900,7 @@ class PngImage(Image): stop at the end of the stream. :type range: str :param layers: A collection of output PNG image layers to be produced by the encoder. - :type layers: list[~azure.mgmt.media.models.Layer] + :type layers: list[~azure.mgmt.media.models.PngLayer] """ _validation = { @@ -5213,7 +5917,7 @@ class PngImage(Image): 'start': {'key': 'start', 'type': 'str'}, 'step': {'key': 'step', 'type': 'str'}, 'range': {'key': 'range', 'type': 'str'}, - 'layers': {'key': 'layers', 'type': '[Layer]'}, + 'layers': {'key': 'layers', 'type': '[PngLayer]'}, } def __init__( @@ -5544,40 +6248,6 @@ def __init__( self.provider_name = kwargs['provider_name'] -class ProxyResource(Resource): - """The resource model definition for a Azure Resource Manager proxy resource. It will not have tags and a location. - - Variables are only populated by the server, and will be ignored when sending a request. - - :ivar id: Fully qualified resource ID for the resource. Ex - - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. - :vartype id: str - :ivar name: The name of the resource. - :vartype name: str - :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or - "Microsoft.Storage/storageAccounts". 
- :vartype type: str - """ - - _validation = { - 'id': {'readonly': True}, - 'name': {'readonly': True}, - 'type': {'readonly': True}, - } - - _attribute_map = { - 'id': {'key': 'id', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(ProxyResource, self).__init__(**kwargs) - - class Rectangle(msrest.serialization.Model): """Describes the properties of a rectangular window applied to the input media before processing it. @@ -5613,6 +6283,196 @@ def __init__( self.height = kwargs.get('height', None) +class SelectAudioTrackByAttribute(AudioTrackDescriptor): + """Select audio tracks from the input by specifying an attribute and an attribute filter. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. The discriminator for derived types.Constant filled by server. + :type odata_type: str + :param channel_mapping: Optional designation for single channel audio tracks. Can be used to + combine the tracks into stereo or multi-channel audio tracks. Possible values include: + "FrontLeft", "FrontRight", "Center", "LowFrequencyEffects", "BackLeft", "BackRight", + "StereoLeft", "StereoRight". + :type channel_mapping: str or ~azure.mgmt.media.models.ChannelMapping + :param attribute: Required. The TrackAttribute to filter the tracks by. Possible values + include: "Bitrate", "Language". + :type attribute: str or ~azure.mgmt.media.models.TrackAttribute + :param filter: Required. The type of AttributeFilter to apply to the TrackAttribute in order to + select the tracks. Possible values include: "All", "Top", "Bottom", "ValueEquals". + :type filter: str or ~azure.mgmt.media.models.AttributeFilter + :param filter_value: The value to filter the tracks by. Only used when + AttributeFilter.ValueEquals is specified for the Filter property. + :type filter_value: str + """ + + _validation = { + 'odata_type': {'required': True}, + 'attribute': {'required': True}, + 'filter': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'channel_mapping': {'key': 'channelMapping', 'type': 'str'}, + 'attribute': {'key': 'attribute', 'type': 'str'}, + 'filter': {'key': 'filter', 'type': 'str'}, + 'filter_value': {'key': 'filterValue', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(SelectAudioTrackByAttribute, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Media.SelectAudioTrackByAttribute' # type: str + self.attribute = kwargs['attribute'] + self.filter = kwargs['filter'] + self.filter_value = kwargs.get('filter_value', None) + + +class SelectAudioTrackById(AudioTrackDescriptor): + """Select audio tracks from the input by specifying a track identifier. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. The discriminator for derived types.Constant filled by server. + :type odata_type: str + :param channel_mapping: Optional designation for single channel audio tracks. Can be used to + combine the tracks into stereo or multi-channel audio tracks. Possible values include: + "FrontLeft", "FrontRight", "Center", "LowFrequencyEffects", "BackLeft", "BackRight", + "StereoLeft", "StereoRight". + :type channel_mapping: str or ~azure.mgmt.media.models.ChannelMapping + :param track_id: Required. Track identifier to select. 
+ :type track_id: long + """ + + _validation = { + 'odata_type': {'required': True}, + 'track_id': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'channel_mapping': {'key': 'channelMapping', 'type': 'str'}, + 'track_id': {'key': 'trackId', 'type': 'long'}, + } + + def __init__( + self, + **kwargs + ): + super(SelectAudioTrackById, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Media.SelectAudioTrackById' # type: str + self.track_id = kwargs['track_id'] + + +class VideoTrackDescriptor(TrackDescriptor): + """A TrackSelection to select video tracks. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: SelectVideoTrackByAttribute, SelectVideoTrackById. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. The discriminator for derived types.Constant filled by server. + :type odata_type: str + """ + + _validation = { + 'odata_type': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + } + + _subtype_map = { + 'odata_type': {'#Microsoft.Media.SelectVideoTrackByAttribute': 'SelectVideoTrackByAttribute', '#Microsoft.Media.SelectVideoTrackById': 'SelectVideoTrackById'} + } + + def __init__( + self, + **kwargs + ): + super(VideoTrackDescriptor, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Media.VideoTrackDescriptor' # type: str + + +class SelectVideoTrackByAttribute(VideoTrackDescriptor): + """Select video tracks from the input by specifying an attribute and an attribute filter. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. The discriminator for derived types.Constant filled by server. + :type odata_type: str + :param attribute: Required. The TrackAttribute to filter the tracks by. Possible values + include: "Bitrate", "Language". + :type attribute: str or ~azure.mgmt.media.models.TrackAttribute + :param filter: Required. The type of AttributeFilter to apply to the TrackAttribute in order to + select the tracks. Possible values include: "All", "Top", "Bottom", "ValueEquals". + :type filter: str or ~azure.mgmt.media.models.AttributeFilter + :param filter_value: The value to filter the tracks by. Only used when + AttributeFilter.ValueEquals is specified for the Filter property. For TrackAttribute.Bitrate, + this should be an integer value in bits per second (e.g: '1500000'). The + TrackAttribute.Language is not supported for video tracks. + :type filter_value: str + """ + + _validation = { + 'odata_type': {'required': True}, + 'attribute': {'required': True}, + 'filter': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'attribute': {'key': 'attribute', 'type': 'str'}, + 'filter': {'key': 'filter', 'type': 'str'}, + 'filter_value': {'key': 'filterValue', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(SelectVideoTrackByAttribute, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Media.SelectVideoTrackByAttribute' # type: str + self.attribute = kwargs['attribute'] + self.filter = kwargs['filter'] + self.filter_value = kwargs.get('filter_value', None) + + +class SelectVideoTrackById(VideoTrackDescriptor): + """Select video tracks from the input by specifying a track identifier. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. 
The discriminator for derived types.Constant filled by server. + :type odata_type: str + :param track_id: Required. Track identifier to select. + :type track_id: long + """ + + _validation = { + 'odata_type': {'required': True}, + 'track_id': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'track_id': {'key': 'trackId', 'type': 'long'}, + } + + def __init__( + self, + **kwargs + ): + super(SelectVideoTrackById, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Media.SelectVideoTrackById' # type: str + self.track_id = kwargs['track_id'] + + class ServiceSpecification(msrest.serialization.Model): """The service metric specifications. @@ -5759,6 +6619,8 @@ class StreamingEndpoint(TrackedResource): :type tags: dict[str, str] :param location: Required. The geo-location where the resource lives. :type location: str + :ivar system_data: The system metadata relating to this resource. + :vartype system_data: ~azure.mgmt.media.models.SystemData :param description: The streaming endpoint description. :type description: str :param scale_units: The number of scale units. Use the Scale operation to adjust this value. @@ -5799,6 +6661,7 @@ class StreamingEndpoint(TrackedResource): 'name': {'readonly': True}, 'type': {'readonly': True}, 'location': {'required': True}, + 'system_data': {'readonly': True}, 'host_name': {'readonly': True}, 'provisioning_state': {'readonly': True}, 'resource_state': {'readonly': True}, @@ -5813,6 +6676,7 @@ class StreamingEndpoint(TrackedResource): 'type': {'key': 'type', 'type': 'str'}, 'tags': {'key': 'tags', 'type': '{str}'}, 'location': {'key': 'location', 'type': 'str'}, + 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'description': {'key': 'properties.description', 'type': 'str'}, 'scale_units': {'key': 'properties.scaleUnits', 'type': 'int'}, 'availability_set_name': {'key': 'properties.availabilitySetName', 'type': 'str'}, @@ -5836,6 +6700,7 @@ def __init__( **kwargs ): super(StreamingEndpoint, self).__init__(**kwargs) + self.system_data = None self.description = kwargs.get('description', None) self.scale_units = kwargs.get('scale_units', None) self.availability_set_name = kwargs.get('availability_set_name', None) @@ -5924,7 +6789,7 @@ def __init__( self.scale_unit = kwargs.get('scale_unit', None) -class StreamingLocator(Resource): +class StreamingLocator(ProxyResource): """A Streaming Locator resource. Variables are only populated by the server, and will be ignored when sending a request. @@ -5937,6 +6802,8 @@ class StreamingLocator(Resource): :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts". :vartype type: str + :ivar system_data: The system metadata relating to this resource. + :vartype system_data: ~azure.mgmt.media.models.SystemData :param asset_name: Asset Name. :type asset_name: str :ivar created: The creation time of the Streaming Locator. 
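The track descriptor models above plug into the new input_definitions property on JobInputClip and its subclasses. A minimal sketch that keeps only the top-bitrate audio track and one video track from a single file of an input asset (the asset and file names are placeholders):

from azure.mgmt.media.models import (
    InputFile,
    JobInputAsset,
    SelectAudioTrackByAttribute,
    SelectVideoTrackById,
)

job_input = JobInputAsset(
    asset_name="input-asset",
    input_definitions=[
        InputFile(
            filename="interview.mp4",
            included_tracks=[
                # Keep only the highest-bitrate audio track.
                SelectAudioTrackByAttribute(attribute="Bitrate", filter="Top"),
                # Keep the video track with identifier 1.
                SelectVideoTrackById(track_id=1),
            ],
        )
    ],
)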
@@ -5968,6 +6835,7 @@ class StreamingLocator(Resource): 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, + 'system_data': {'readonly': True}, 'created': {'readonly': True}, } @@ -5975,6 +6843,7 @@ class StreamingLocator(Resource): 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, + 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'asset_name': {'key': 'properties.assetName', 'type': 'str'}, 'created': {'key': 'properties.created', 'type': 'iso-8601'}, 'start_time': {'key': 'properties.startTime', 'type': 'iso-8601'}, @@ -5992,6 +6861,7 @@ def __init__( **kwargs ): super(StreamingLocator, self).__init__(**kwargs) + self.system_data = None self.asset_name = kwargs.get('asset_name', None) self.created = None self.start_time = kwargs.get('start_time', None) @@ -6116,7 +6986,7 @@ def __init__( self.paths = kwargs.get('paths', None) -class StreamingPolicy(Resource): +class StreamingPolicy(ProxyResource): """A Streaming Policy resource. Variables are only populated by the server, and will be ignored when sending a request. @@ -6129,6 +6999,8 @@ class StreamingPolicy(Resource): :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts". :vartype type: str + :ivar system_data: The system metadata relating to this resource. + :vartype system_data: ~azure.mgmt.media.models.SystemData :ivar created: Creation time of Streaming Policy. :vartype created: ~datetime.datetime :param default_content_key_policy_name: Default ContentKey used by current Streaming Policy. @@ -6147,6 +7019,7 @@ class StreamingPolicy(Resource): 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, + 'system_data': {'readonly': True}, 'created': {'readonly': True}, } @@ -6154,6 +7027,7 @@ class StreamingPolicy(Resource): 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, + 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'created': {'key': 'properties.created', 'type': 'iso-8601'}, 'default_content_key_policy_name': {'key': 'properties.defaultContentKeyPolicyName', 'type': 'str'}, 'envelope_encryption': {'key': 'properties.envelopeEncryption', 'type': 'EnvelopeEncryption'}, @@ -6167,6 +7041,7 @@ def __init__( **kwargs ): super(StreamingPolicy, self).__init__(**kwargs) + self.system_data = None self.created = None self.default_content_key_policy_name = kwargs.get('default_content_key_policy_name', None) self.envelope_encryption = kwargs.get('envelope_encryption', None) @@ -6357,6 +7232,47 @@ def __init__( self.id = kwargs.get('id', None) +class SystemData(msrest.serialization.Model): + """Metadata pertaining to creation and last modification of the resource. + + :param created_by: The identity that created the resource. + :type created_by: str + :param created_by_type: The type of identity that created the resource. Possible values + include: "User", "Application", "ManagedIdentity", "Key". + :type created_by_type: str or ~azure.mgmt.media.models.CreatedByType + :param created_at: The timestamp of resource creation (UTC). + :type created_at: ~datetime.datetime + :param last_modified_by: The identity that last modified the resource. + :type last_modified_by: str + :param last_modified_by_type: The type of identity that last modified the resource. Possible + values include: "User", "Application", "ManagedIdentity", "Key". 
+ :type last_modified_by_type: str or ~azure.mgmt.media.models.CreatedByType + :param last_modified_at: The timestamp of resource last modification (UTC). + :type last_modified_at: ~datetime.datetime + """ + + _attribute_map = { + 'created_by': {'key': 'createdBy', 'type': 'str'}, + 'created_by_type': {'key': 'createdByType', 'type': 'str'}, + 'created_at': {'key': 'createdAt', 'type': 'iso-8601'}, + 'last_modified_by': {'key': 'lastModifiedBy', 'type': 'str'}, + 'last_modified_by_type': {'key': 'lastModifiedByType', 'type': 'str'}, + 'last_modified_at': {'key': 'lastModifiedAt', 'type': 'iso-8601'}, + } + + def __init__( + self, + **kwargs + ): + super(SystemData, self).__init__(**kwargs) + self.created_by = kwargs.get('created_by', None) + self.created_by_type = kwargs.get('created_by_type', None) + self.created_at = kwargs.get('created_at', None) + self.last_modified_by = kwargs.get('last_modified_by', None) + self.last_modified_by_type = kwargs.get('last_modified_by_type', None) + self.last_modified_at = kwargs.get('last_modified_at', None) + + class TrackPropertyCondition(msrest.serialization.Model): """Class to specify one track property condition. @@ -6412,7 +7328,7 @@ def __init__( self.track_selections = kwargs.get('track_selections', None) -class Transform(Resource): +class Transform(ProxyResource): """A Transform encapsulates the rules or instructions for generating desired outputs from input media, such as by transcoding or by extracting insights. After the Transform is created, it can be applied to input media by creating Jobs. Variables are only populated by the server, and will be ignored when sending a request. @@ -6425,13 +7341,15 @@ class Transform(Resource): :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts". :vartype type: str + :ivar system_data: The system metadata relating to this resource. + :vartype system_data: ~azure.mgmt.media.models.SystemData :ivar created: The UTC date and time when the Transform was created, in 'YYYY-MM-DDThh:mm:ssZ' format. :vartype created: ~datetime.datetime :param description: An optional verbose description of the Transform. :type description: str - :ivar last_modified: The UTC date and time when the Transform was last updated, in 'YYYY-MM- - DDThh:mm:ssZ' format. + :ivar last_modified: The UTC date and time when the Transform was last updated, in + 'YYYY-MM-DDThh:mm:ssZ' format. :vartype last_modified: ~datetime.datetime :param outputs: An array of one or more TransformOutputs that the Transform should generate. 
:type outputs: list[~azure.mgmt.media.models.TransformOutput] @@ -6441,6 +7359,7 @@ class Transform(Resource): 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, + 'system_data': {'readonly': True}, 'created': {'readonly': True}, 'last_modified': {'readonly': True}, } @@ -6449,6 +7368,7 @@ class Transform(Resource): 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, + 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'created': {'key': 'properties.created', 'type': 'iso-8601'}, 'description': {'key': 'properties.description', 'type': 'str'}, 'last_modified': {'key': 'properties.lastModified', 'type': 'iso-8601'}, @@ -6460,6 +7380,7 @@ def __init__( **kwargs ): super(Transform, self).__init__(**kwargs) + self.system_data = None self.created = None self.description = kwargs.get('description', None) self.last_modified = None @@ -6545,8 +7466,9 @@ class TransportStreamFormat(MultiBitrateFormat): the base name is truncated to the first 32 characters in total length. {Extension} - The appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index} - A unique index for thumbnails. Only applicable to thumbnails. {Bitrate} - The audio/video - bitrate. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. Any - unsubstituted macros will be collapsed and removed from the filename. + bitrate. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. + {Resolution} - The video resolution. Any unsubstituted macros will be collapsed and removed + from the filename. :type filename_pattern: str :param output_files: The list of output files to produce. Each entry in the list is a set of audio and video layer labels to be muxed together . @@ -6617,8 +7539,8 @@ class VideoAnalyzerPreset(AudioAnalyzerPreset): language detected and process with the selected language for the duration of the file. It does not currently support dynamically switching between languages after the first language is detected. The automatic detection works best with audio recordings with clearly discernable - speech. If automatic detection fails to find the language, transcription would fallback to 'en- - US'." The list of supported languages is available here: + speech. If automatic detection fails to find the language, transcription would fallback to + 'en-US'." The list of supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463. :type audio_language: str :param mode: Determines the set of audio analysis operations to be performed. If unspecified, @@ -6667,8 +7589,8 @@ class VideoOverlay(Overlay): :param odata_type: Required. The discriminator for derived types.Constant filled by server. :type odata_type: str :param input_label: Required. The label of the job input which is to be used as an overlay. The - Input must specify exactly one file. You can specify an image file in JPG or PNG formats, or an - audio file (such as a WAV, MP3, WMA or M4A file), or a video file. See + Input must specify exactly one file. You can specify an image file in JPG, PNG, GIF or BMP + format, or an audio file (such as a WAV, MP3, WMA or M4A file), or a video file. See https://aka.ms/mesformats for the complete list of supported audio and video file formats. 
:type input_label: str :param start: The start position, with reference to the input video, at which the overlay diff --git a/sdk/media/azure-mgmt-media/azure/mgmt/media/models/_models_py3.py b/sdk/media/azure-mgmt-media/azure/mgmt/media/models/_models_py3.py index 95ae195c441d..90f65b7358a3 100644 --- a/sdk/media/azure-mgmt-media/azure/mgmt/media/models/_models_py3.py +++ b/sdk/media/azure-mgmt-media/azure/mgmt/media/models/_models_py3.py @@ -221,6 +221,34 @@ def __init__( self.time = time +class AccessControl(msrest.serialization.Model): + """AccessControl. + + :param default_action: The behavior for IP access control in Key Delivery. Possible values + include: "Allow", "Deny". + :type default_action: str or ~azure.mgmt.media.models.DefaultAction + :param ip_allow_list: The IP allow list for access control in Key Delivery. If the default + action is set to 'Allow', the IP allow list must be empty. + :type ip_allow_list: list[str] + """ + + _attribute_map = { + 'default_action': {'key': 'defaultAction', 'type': 'str'}, + 'ip_allow_list': {'key': 'ipAllowList', 'type': '[str]'}, + } + + def __init__( + self, + *, + default_action: Optional[Union[str, "DefaultAction"]] = None, + ip_allow_list: Optional[List[str]] = None, + **kwargs + ): + super(AccessControl, self).__init__(**kwargs) + self.default_action = default_action + self.ip_allow_list = ip_allow_list + + class AccountEncryption(msrest.serialization.Model): """AccountEncryption. @@ -291,7 +319,41 @@ def __init__( self.type = None -class AccountFilter(Resource): +class ProxyResource(Resource): + """The resource model definition for a Azure Resource Manager proxy resource. It will not have tags and a location. + + Variables are only populated by the server, and will be ignored when sending a request. + + :ivar id: Fully qualified resource ID for the resource. Ex - + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. + :vartype id: str + :ivar name: The name of the resource. + :vartype name: str + :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or + "Microsoft.Storage/storageAccounts". + :vartype type: str + """ + + _validation = { + 'id': {'readonly': True}, + 'name': {'readonly': True}, + 'type': {'readonly': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(ProxyResource, self).__init__(**kwargs) + + +class AccountFilter(ProxyResource): """An Account Filter. Variables are only populated by the server, and will be ignored when sending a request. @@ -304,6 +366,8 @@ class AccountFilter(Resource): :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts". :vartype type: str + :ivar system_data: The system metadata relating to this resource. + :vartype system_data: ~azure.mgmt.media.models.SystemData :param presentation_time_range: The presentation time range. :type presentation_time_range: ~azure.mgmt.media.models.PresentationTimeRange :param first_quality: The first quality. 
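Fields marked read-only above, such as system_data and media_service_id, are populated by the service on responses and ignored if supplied on a request. A sketch of reading them back, assuming azure-identity is installed and the mediaservices get operation keeps its current signature (neither is shown in this patch):

from azure.identity import DefaultAzureCredential
from azure.mgmt.media import AzureMediaServices

client = AzureMediaServices(DefaultAzureCredential(), "<subscription-id>")
account = client.mediaservices.get("<resource-group>", "<account-name>")

# Server-populated fields; never set these locally.
print(account.media_service_id)
if account.system_data:
    print(account.system_data.created_by, account.system_data.created_at)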
@@ -316,12 +380,14 @@ class AccountFilter(Resource): 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, + 'system_data': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, + 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'presentation_time_range': {'key': 'properties.presentationTimeRange', 'type': 'PresentationTimeRange'}, 'first_quality': {'key': 'properties.firstQuality', 'type': 'FirstQuality'}, 'tracks': {'key': 'properties.tracks', 'type': '[FilterTrackSelection]'}, @@ -336,6 +402,7 @@ def __init__( **kwargs ): super(AccountFilter, self).__init__(**kwargs) + self.system_data = None self.presentation_time_range = presentation_time_range self.first_quality = first_quality self.tracks = tracks @@ -442,7 +509,7 @@ def __init__( self.error = error -class Asset(Resource): +class Asset(ProxyResource): """An Asset. Variables are only populated by the server, and will be ignored when sending a request. @@ -455,6 +522,8 @@ class Asset(Resource): :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts". :vartype type: str + :ivar system_data: The system metadata relating to this resource. + :vartype system_data: ~azure.mgmt.media.models.SystemData :ivar asset_id: The Asset ID. :vartype asset_id: str :ivar created: The creation date of the Asset. @@ -479,6 +548,7 @@ class Asset(Resource): 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, + 'system_data': {'readonly': True}, 'asset_id': {'readonly': True}, 'created': {'readonly': True}, 'last_modified': {'readonly': True}, @@ -489,6 +559,7 @@ class Asset(Resource): 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, + 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'asset_id': {'key': 'properties.assetId', 'type': 'str'}, 'created': {'key': 'properties.created', 'type': 'iso-8601'}, 'last_modified': {'key': 'properties.lastModified', 'type': 'iso-8601'}, @@ -509,6 +580,7 @@ def __init__( **kwargs ): super(Asset, self).__init__(**kwargs) + self.system_data = None self.asset_id = None self.created = None self.last_modified = None @@ -604,7 +676,7 @@ def __init__( self.asset_file_id = asset_file_id -class AssetFilter(Resource): +class AssetFilter(ProxyResource): """An Asset Filter. Variables are only populated by the server, and will be ignored when sending a request. @@ -617,6 +689,8 @@ class AssetFilter(Resource): :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts". :vartype type: str + :ivar system_data: The system metadata relating to this resource. + :vartype system_data: ~azure.mgmt.media.models.SystemData :param presentation_time_range: The presentation time range. :type presentation_time_range: ~azure.mgmt.media.models.PresentationTimeRange :param first_quality: The first quality. 
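In _models_py3.py the filter models take typed keyword arguments rather than **kwargs. A minimal sketch of an AssetFilter that pins the first quality bitrate; it assumes FirstQuality accepts a bitrate keyword, which is referenced but not defined in this patch:

from azure.mgmt.media.models import AssetFilter, FirstQuality

# first_quality sets the first quality bitrate exposed by the filter; the
# FirstQuality(bitrate=...) constructor is assumed from the existing SDK.
asset_filter = AssetFilter(first_quality=FirstQuality(bitrate=128000))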
@@ -629,12 +703,14 @@ class AssetFilter(Resource): 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, + 'system_data': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, + 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'presentation_time_range': {'key': 'properties.presentationTimeRange', 'type': 'PresentationTimeRange'}, 'first_quality': {'key': 'properties.firstQuality', 'type': 'FirstQuality'}, 'tracks': {'key': 'properties.tracks', 'type': '[FilterTrackSelection]'}, @@ -649,6 +725,7 @@ def __init__( **kwargs ): super(AssetFilter, self).__init__(**kwargs) + self.system_data = None self.presentation_time_range = presentation_time_range self.first_quality = first_quality self.tracks = tracks @@ -792,8 +869,8 @@ class AudioAnalyzerPreset(Preset): language detected and process with the selected language for the duration of the file. It does not currently support dynamically switching between languages after the first language is detected. The automatic detection works best with audio recordings with clearly discernable - speech. If automatic detection fails to find the language, transcription would fallback to 'en- - US'." The list of supported languages is available here: + speech. If automatic detection fails to find the language, transcription would fallback to + 'en-US'." The list of supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463. :type audio_language: str :param mode: Determines the set of audio analysis operations to be performed. If unspecified, @@ -845,8 +922,8 @@ class Overlay(msrest.serialization.Model): :param odata_type: Required. The discriminator for derived types.Constant filled by server. :type odata_type: str :param input_label: Required. The label of the job input which is to be used as an overlay. The - Input must specify exactly one file. You can specify an image file in JPG or PNG formats, or an - audio file (such as a WAV, MP3, WMA or M4A file), or a video file. See + Input must specify exactly one file. You can specify an image file in JPG, PNG, GIF or BMP + format, or an audio file (such as a WAV, MP3, WMA or M4A file), or a video file. See https://aka.ms/mesformats for the complete list of supported audio and video file formats. :type input_label: str :param start: The start position, with reference to the input video, at which the overlay @@ -922,8 +999,8 @@ class AudioOverlay(Overlay): :param odata_type: Required. The discriminator for derived types.Constant filled by server. :type odata_type: str :param input_label: Required. The label of the job input which is to be used as an overlay. The - Input must specify exactly one file. You can specify an image file in JPG or PNG formats, or an - audio file (such as a WAV, MP3, WMA or M4A file), or a video file. See + Input must specify exactly one file. You can specify an image file in JPG, PNG, GIF or BMP + format, or an audio file (such as a WAV, MP3, WMA or M4A file), or a video file. See https://aka.ms/mesformats for the complete list of supported audio and video file formats. 
:type input_label: str :param start: The start position, with reference to the input video, at which the overlay @@ -981,6 +1058,79 @@ def __init__( self.odata_type = '#Microsoft.Media.AudioOverlay' # type: str +class TrackDescriptor(msrest.serialization.Model): + """Base type for all TrackDescriptor types, which define the metadata and selection for tracks that should be processed by a Job. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: AudioTrackDescriptor, VideoTrackDescriptor. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. The discriminator for derived types.Constant filled by server. + :type odata_type: str + """ + + _validation = { + 'odata_type': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + } + + _subtype_map = { + 'odata_type': {'#Microsoft.Media.AudioTrackDescriptor': 'AudioTrackDescriptor', '#Microsoft.Media.VideoTrackDescriptor': 'VideoTrackDescriptor'} + } + + def __init__( + self, + **kwargs + ): + super(TrackDescriptor, self).__init__(**kwargs) + self.odata_type = None # type: Optional[str] + + +class AudioTrackDescriptor(TrackDescriptor): + """A TrackSelection to select audio tracks. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: SelectAudioTrackByAttribute, SelectAudioTrackById. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. The discriminator for derived types.Constant filled by server. + :type odata_type: str + :param channel_mapping: Optional designation for single channel audio tracks. Can be used to + combine the tracks into stereo or multi-channel audio tracks. Possible values include: + "FrontLeft", "FrontRight", "Center", "LowFrequencyEffects", "BackLeft", "BackRight", + "StereoLeft", "StereoRight". + :type channel_mapping: str or ~azure.mgmt.media.models.ChannelMapping + """ + + _validation = { + 'odata_type': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'channel_mapping': {'key': 'channelMapping', 'type': 'str'}, + } + + _subtype_map = { + 'odata_type': {'#Microsoft.Media.SelectAudioTrackByAttribute': 'SelectAudioTrackByAttribute', '#Microsoft.Media.SelectAudioTrackById': 'SelectAudioTrackById'} + } + + def __init__( + self, + *, + channel_mapping: Optional[Union[str, "ChannelMapping"]] = None, + **kwargs + ): + super(AudioTrackDescriptor, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Media.AudioTrackDescriptor' # type: str + self.channel_mapping = channel_mapping + + class BuiltInStandardEncoderPreset(Preset): """Describes a built-in preset for encoding the input video with the Standard Encoder. @@ -992,7 +1142,9 @@ class BuiltInStandardEncoderPreset(Preset): values include: "H264SingleBitrateSD", "H264SingleBitrate720p", "H264SingleBitrate1080p", "AdaptiveStreaming", "AACGoodQualityAudio", "ContentAwareEncodingExperimental", "ContentAwareEncoding", "CopyAllBitrateNonInterleaved", "H264MultipleBitrate1080p", - "H264MultipleBitrate720p", "H264MultipleBitrateSD". + "H264MultipleBitrate720p", "H264MultipleBitrateSD", "H265ContentAwareEncoding", + "H265AdaptiveStreaming", "H265SingleBitrate720p", "H265SingleBitrate1080p", + "H265SingleBitrate4K". 
:type preset_name: str or ~azure.mgmt.media.models.EncoderNamedPreset """ @@ -1174,7 +1326,7 @@ def __init__( self.drm = drm -class ContentKeyPolicy(Resource): +class ContentKeyPolicy(ProxyResource): """A Content Key Policy resource. Variables are only populated by the server, and will be ignored when sending a request. @@ -1187,6 +1339,8 @@ class ContentKeyPolicy(Resource): :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts". :vartype type: str + :ivar system_data: The system metadata relating to this resource. + :vartype system_data: ~azure.mgmt.media.models.SystemData :ivar policy_id: The legacy Policy ID. :vartype policy_id: str :ivar created: The creation date of the Policy. @@ -1203,6 +1357,7 @@ class ContentKeyPolicy(Resource): 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, + 'system_data': {'readonly': True}, 'policy_id': {'readonly': True}, 'created': {'readonly': True}, 'last_modified': {'readonly': True}, @@ -1212,6 +1367,7 @@ class ContentKeyPolicy(Resource): 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, + 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'policy_id': {'key': 'properties.policyId', 'type': 'str'}, 'created': {'key': 'properties.created', 'type': 'iso-8601'}, 'last_modified': {'key': 'properties.lastModified', 'type': 'iso-8601'}, @@ -1227,6 +1383,7 @@ def __init__( **kwargs ): super(ContentKeyPolicy, self).__init__(**kwargs) + self.system_data = None self.policy_id = None self.created = None self.last_modified = None @@ -2584,7 +2741,7 @@ def __init__( class FaceDetectorPreset(Preset): - """Describes all the settings to be used when analyzing a video in order to detect all the faces present. + """Describes all the settings to be used when analyzing a video in order to detect (and optionally redact) all the faces present. All required parameters must be populated in order to send to Azure. @@ -2601,6 +2758,16 @@ class FaceDetectorPreset(Preset): However, faces that end up being too small in the resized video may not be detected. Possible values include: "SourceResolution", "StandardDefinition". :type resolution: str or ~azure.mgmt.media.models.AnalysisResolution + :param mode: This mode provides the ability to choose between the following settings: 1) + Analyze - For detection only.This mode generates a metadata JSON file marking appearances of + faces throughout the video.Where possible, appearances of the same person are assigned the same + ID. 2) Combined - Additionally redacts(blurs) detected faces. 3) Redact - This enables a 2-pass + process, allowing for selective redaction of a subset of detected faces.It takes in the + metadata file from a prior analyze pass, along with the source video, and a user-selected + subset of IDs that require redaction. Possible values include: "Analyze", "Redact", "Combined". + :type mode: str or ~azure.mgmt.media.models.FaceRedactorMode + :param blur_type: Blur type. Possible values include: "Box", "Low", "Med", "High", "Black". + :type blur_type: str or ~azure.mgmt.media.models.BlurType :param experimental_options: Dictionary containing key value pairs for parameters not exposed in the preset itself. 
:type experimental_options: dict[str, str] @@ -2613,6 +2780,8 @@ class FaceDetectorPreset(Preset): _attribute_map = { 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, 'resolution': {'key': 'resolution', 'type': 'str'}, + 'mode': {'key': 'mode', 'type': 'str'}, + 'blur_type': {'key': 'blurType', 'type': 'str'}, 'experimental_options': {'key': 'experimentalOptions', 'type': '{str}'}, } @@ -2620,12 +2789,16 @@ def __init__( self, *, resolution: Optional[Union[str, "AnalysisResolution"]] = None, + mode: Optional[Union[str, "FaceRedactorMode"]] = None, + blur_type: Optional[Union[str, "BlurType"]] = None, experimental_options: Optional[Dict[str, str]] = None, **kwargs ): super(FaceDetectorPreset, self).__init__(**kwargs) self.odata_type = '#Microsoft.Media.FaceDetectorPreset' # type: str self.resolution = resolution + self.mode = mode + self.blur_type = blur_type self.experimental_options = experimental_options @@ -2767,7 +2940,7 @@ class Format(msrest.serialization.Model): """Base class for output. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: ImageFormat, JpgFormat, MultiBitrateFormat, PngFormat. + sub-classes are: ImageFormat, MultiBitrateFormat. All required parameters must be populated in order to send to Azure. @@ -2781,8 +2954,9 @@ class Format(msrest.serialization.Model): the base name is truncated to the first 32 characters in total length. {Extension} - The appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index} - A unique index for thumbnails. Only applicable to thumbnails. {Bitrate} - The audio/video - bitrate. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. Any - unsubstituted macros will be collapsed and removed from the filename. + bitrate. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. + {Resolution} - The video resolution. Any unsubstituted macros will be collapsed and removed + from the filename. :type filename_pattern: str """ @@ -2797,7 +2971,7 @@ class Format(msrest.serialization.Model): } _subtype_map = { - 'odata_type': {'#Microsoft.Media.ImageFormat': 'ImageFormat', '#Microsoft.Media.JpgFormat': 'JpgFormat', '#Microsoft.Media.MultiBitrateFormat': 'MultiBitrateFormat', '#Microsoft.Media.PngFormat': 'PngFormat'} + 'odata_type': {'#Microsoft.Media.ImageFormat': 'ImageFormat', '#Microsoft.Media.MultiBitrateFormat': 'MultiBitrateFormat'} } def __init__( @@ -2811,11 +2985,112 @@ def __init__( self.filename_pattern = filename_pattern +class InputDefinition(msrest.serialization.Model): + """Base class for defining an input. Use sub classes of this class to specify tracks selections and related metadata. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: FromAllInputFile, FromEachInputFile, InputFile. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. The discriminator for derived types.Constant filled by server. + :type odata_type: str + :param included_tracks: The list of TrackDescriptors which define the metadata and selection of + tracks in the input. 
+ :type included_tracks: list[~azure.mgmt.media.models.TrackDescriptor] + """ + + _validation = { + 'odata_type': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'included_tracks': {'key': 'includedTracks', 'type': '[TrackDescriptor]'}, + } + + _subtype_map = { + 'odata_type': {'#Microsoft.Media.FromAllInputFile': 'FromAllInputFile', '#Microsoft.Media.FromEachInputFile': 'FromEachInputFile', '#Microsoft.Media.InputFile': 'InputFile'} + } + + def __init__( + self, + *, + included_tracks: Optional[List["TrackDescriptor"]] = None, + **kwargs + ): + super(InputDefinition, self).__init__(**kwargs) + self.odata_type = None # type: Optional[str] + self.included_tracks = included_tracks + + +class FromAllInputFile(InputDefinition): + """An InputDefinition that looks across all of the files provided to select tracks specified by the IncludedTracks property. Generally used with the AudioTrackByAttribute and VideoTrackByAttribute to allow selection of a single track across a set of input files. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. The discriminator for derived types.Constant filled by server. + :type odata_type: str + :param included_tracks: The list of TrackDescriptors which define the metadata and selection of + tracks in the input. + :type included_tracks: list[~azure.mgmt.media.models.TrackDescriptor] + """ + + _validation = { + 'odata_type': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'included_tracks': {'key': 'includedTracks', 'type': '[TrackDescriptor]'}, + } + + def __init__( + self, + *, + included_tracks: Optional[List["TrackDescriptor"]] = None, + **kwargs + ): + super(FromAllInputFile, self).__init__(included_tracks=included_tracks, **kwargs) + self.odata_type = '#Microsoft.Media.FromAllInputFile' # type: str + + +class FromEachInputFile(InputDefinition): + """An InputDefinition that looks at each input file provided to select tracks specified by the IncludedTracks property. Generally used with the AudioTrackByAttribute and VideoTrackByAttribute to select tracks from each file given. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. The discriminator for derived types.Constant filled by server. + :type odata_type: str + :param included_tracks: The list of TrackDescriptors which define the metadata and selection of + tracks in the input. + :type included_tracks: list[~azure.mgmt.media.models.TrackDescriptor] + """ + + _validation = { + 'odata_type': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'included_tracks': {'key': 'includedTracks', 'type': '[TrackDescriptor]'}, + } + + def __init__( + self, + *, + included_tracks: Optional[List["TrackDescriptor"]] = None, + **kwargs + ): + super(FromEachInputFile, self).__init__(included_tracks=included_tracks, **kwargs) + self.odata_type = '#Microsoft.Media.FromEachInputFile' # type: str + + class Layer(msrest.serialization.Model): """The encoder can be configured to produce video and/or images (thumbnails) at different resolutions, by specifying a layer for each desired resolution. A layer represents the properties for the video or image at a resolution. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: JpgLayer, PngLayer, VideoLayer. + sub-classes are: H265VideoLayer, JpgLayer, PngLayer, VideoLayer. 
All required parameters must be populated in order to send to Azure. @@ -2846,7 +3121,7 @@ class Layer(msrest.serialization.Model): } _subtype_map = { - 'odata_type': {'#Microsoft.Media.JpgLayer': 'JpgLayer', '#Microsoft.Media.PngLayer': 'PngLayer', '#Microsoft.Media.VideoLayer': 'VideoLayer'} + 'odata_type': {'#Microsoft.Media.H265VideoLayer': 'H265VideoLayer', '#Microsoft.Media.JpgLayer': 'JpgLayer', '#Microsoft.Media.PngLayer': 'PngLayer', '#Microsoft.Media.VideoLayer': 'VideoLayer'} } def __init__( @@ -3070,7 +3345,7 @@ class Video(Codec): """Describes the basic properties for encoding the input video. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: H264Video, Image. + sub-classes are: H264Video, H265Video, Image. All required parameters must be populated in order to send to Azure. @@ -3106,7 +3381,7 @@ class Video(Codec): } _subtype_map = { - 'odata_type': {'#Microsoft.Media.H264Video': 'H264Video', '#Microsoft.Media.Image': 'Image'} + 'odata_type': {'#Microsoft.Media.H264Video': 'H264Video', '#Microsoft.Media.H265Video': 'H265Video', '#Microsoft.Media.Image': 'Image'} } def __init__( @@ -3192,6 +3467,272 @@ def __init__( self.layers = layers +class H265VideoLayer(Layer): + """Describes the settings to be used when encoding the input video into a desired output bitrate layer. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: H265Layer. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. The discriminator for derived types.Constant filled by server. + :type odata_type: str + :param width: The width of the output video for this layer. The value can be absolute (in + pixels) or relative (in percentage). For example 50% means the output video has half as many + pixels in width as the input. + :type width: str + :param height: The height of the output video for this layer. The value can be absolute (in + pixels) or relative (in percentage). For example 50% means the output video has half as many + pixels in height as the input. + :type height: str + :param label: The alphanumeric label for this layer, which can be used in multiplexing + different video and audio layers, or in naming the output file. + :type label: str + :param bitrate: Required. The average bitrate in bits per second at which to encode the input + video when generating this layer. For example: a target bitrate of 3000Kbps or 3Mbps means this + value should be 3000000 This is a required field. + :type bitrate: int + :param max_bitrate: The maximum bitrate (in bits per second), at which the VBV buffer should be + assumed to refill. If not specified, defaults to the same value as bitrate. + :type max_bitrate: int + :param b_frames: The number of B-frames to be used when encoding this layer. If not specified, + the encoder chooses an appropriate number based on the video profile and level. + :type b_frames: int + :param frame_rate: The frame rate (in frames per second) at which to encode this layer. The + value can be in the form of M/N where M and N are integers (For example, 30000/1001), or in the + form of a number (For example, 30, or 29.97). The encoder enforces constraints on allowed frame + rates based on the profile and level. If it is not specified, the encoder will use the same + frame rate as the input video. + :type frame_rate: str + :param slices: The number of slices to be used when encoding this layer. 
If not specified, + default is zero, which means that encoder will use a single slice for each frame. + :type slices: int + :param adaptive_b_frame: Specifies whether or not adaptive B-frames are to be used when + encoding this layer. If not specified, the encoder will turn it on whenever the video profile + permits its use. + :type adaptive_b_frame: bool + """ + + _validation = { + 'odata_type': {'required': True}, + 'bitrate': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'width': {'key': 'width', 'type': 'str'}, + 'height': {'key': 'height', 'type': 'str'}, + 'label': {'key': 'label', 'type': 'str'}, + 'bitrate': {'key': 'bitrate', 'type': 'int'}, + 'max_bitrate': {'key': 'maxBitrate', 'type': 'int'}, + 'b_frames': {'key': 'bFrames', 'type': 'int'}, + 'frame_rate': {'key': 'frameRate', 'type': 'str'}, + 'slices': {'key': 'slices', 'type': 'int'}, + 'adaptive_b_frame': {'key': 'adaptiveBFrame', 'type': 'bool'}, + } + + _subtype_map = { + 'odata_type': {'#Microsoft.Media.H265Layer': 'H265Layer'} + } + + def __init__( + self, + *, + bitrate: int, + width: Optional[str] = None, + height: Optional[str] = None, + label: Optional[str] = None, + max_bitrate: Optional[int] = None, + b_frames: Optional[int] = None, + frame_rate: Optional[str] = None, + slices: Optional[int] = None, + adaptive_b_frame: Optional[bool] = None, + **kwargs + ): + super(H265VideoLayer, self).__init__(width=width, height=height, label=label, **kwargs) + self.odata_type = '#Microsoft.Media.H265VideoLayer' # type: str + self.bitrate = bitrate + self.max_bitrate = max_bitrate + self.b_frames = b_frames + self.frame_rate = frame_rate + self.slices = slices + self.adaptive_b_frame = adaptive_b_frame + + +class H265Layer(H265VideoLayer): + """Describes the settings to be used when encoding the input video into a desired output bitrate layer with the H.265 video codec. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. The discriminator for derived types.Constant filled by server. + :type odata_type: str + :param width: The width of the output video for this layer. The value can be absolute (in + pixels) or relative (in percentage). For example 50% means the output video has half as many + pixels in width as the input. + :type width: str + :param height: The height of the output video for this layer. The value can be absolute (in + pixels) or relative (in percentage). For example 50% means the output video has half as many + pixels in height as the input. + :type height: str + :param label: The alphanumeric label for this layer, which can be used in multiplexing + different video and audio layers, or in naming the output file. + :type label: str + :param bitrate: Required. The average bitrate in bits per second at which to encode the input + video when generating this layer. For example: a target bitrate of 3000Kbps or 3Mbps means this + value should be 3000000 This is a required field. + :type bitrate: int + :param max_bitrate: The maximum bitrate (in bits per second), at which the VBV buffer should be + assumed to refill. If not specified, defaults to the same value as bitrate. + :type max_bitrate: int + :param b_frames: The number of B-frames to be used when encoding this layer. If not specified, + the encoder chooses an appropriate number based on the video profile and level. + :type b_frames: int + :param frame_rate: The frame rate (in frames per second) at which to encode this layer. 
The + value can be in the form of M/N where M and N are integers (For example, 30000/1001), or in the + form of a number (For example, 30, or 29.97). The encoder enforces constraints on allowed frame + rates based on the profile and level. If it is not specified, the encoder will use the same + frame rate as the input video. + :type frame_rate: str + :param slices: The number of slices to be used when encoding this layer. If not specified, + default is zero, which means that encoder will use a single slice for each frame. + :type slices: int + :param adaptive_b_frame: Specifies whether or not adaptive B-frames are to be used when + encoding this layer. If not specified, the encoder will turn it on whenever the video profile + permits its use. + :type adaptive_b_frame: bool + :param profile: We currently support Main. Default is Auto. Possible values include: "Auto", + "Main". + :type profile: str or ~azure.mgmt.media.models.H265VideoProfile + :param level: We currently support Level up to 6.2. The value can be Auto, or a number that + matches the H.265 profile. If not specified, the default is Auto, which lets the encoder choose + the Level that is appropriate for this layer. + :type level: str + :param buffer_window: The VBV buffer window length. The value should be in ISO 8601 format. The + value should be in the range [0.1-100] seconds. The default is 5 seconds (for example, PT5S). + :type buffer_window: ~datetime.timedelta + :param reference_frames: The number of reference frames to be used when encoding this layer. If + not specified, the encoder determines an appropriate number based on the encoder complexity + setting. + :type reference_frames: int + """ + + _validation = { + 'odata_type': {'required': True}, + 'bitrate': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'width': {'key': 'width', 'type': 'str'}, + 'height': {'key': 'height', 'type': 'str'}, + 'label': {'key': 'label', 'type': 'str'}, + 'bitrate': {'key': 'bitrate', 'type': 'int'}, + 'max_bitrate': {'key': 'maxBitrate', 'type': 'int'}, + 'b_frames': {'key': 'bFrames', 'type': 'int'}, + 'frame_rate': {'key': 'frameRate', 'type': 'str'}, + 'slices': {'key': 'slices', 'type': 'int'}, + 'adaptive_b_frame': {'key': 'adaptiveBFrame', 'type': 'bool'}, + 'profile': {'key': 'profile', 'type': 'str'}, + 'level': {'key': 'level', 'type': 'str'}, + 'buffer_window': {'key': 'bufferWindow', 'type': 'duration'}, + 'reference_frames': {'key': 'referenceFrames', 'type': 'int'}, + } + + def __init__( + self, + *, + bitrate: int, + width: Optional[str] = None, + height: Optional[str] = None, + label: Optional[str] = None, + max_bitrate: Optional[int] = None, + b_frames: Optional[int] = None, + frame_rate: Optional[str] = None, + slices: Optional[int] = None, + adaptive_b_frame: Optional[bool] = None, + profile: Optional[Union[str, "H265VideoProfile"]] = None, + level: Optional[str] = None, + buffer_window: Optional[datetime.timedelta] = None, + reference_frames: Optional[int] = None, + **kwargs + ): + super(H265Layer, self).__init__(width=width, height=height, label=label, bitrate=bitrate, max_bitrate=max_bitrate, b_frames=b_frames, frame_rate=frame_rate, slices=slices, adaptive_b_frame=adaptive_b_frame, **kwargs) + self.odata_type = '#Microsoft.Media.H265Layer' # type: str + self.profile = profile + self.level = level + self.buffer_window = buffer_window + self.reference_frames = reference_frames + + +class H265Video(Video): + """Describes all the properties for encoding a video 
with the H.265 codec. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. The discriminator for derived types.Constant filled by server. + :type odata_type: str + :param label: An optional label for the codec. The label can be used to control muxing + behavior. + :type label: str + :param key_frame_interval: The distance between two key frames. The value should be non-zero in + the range [0.5, 20] seconds, specified in ISO 8601 format. The default is 2 seconds(PT2S). Note + that this setting is ignored if VideoSyncMode.Passthrough is set, where the KeyFrameInterval + value will follow the input source setting. + :type key_frame_interval: ~datetime.timedelta + :param stretch_mode: The resizing mode - how the input video will be resized to fit the desired + output resolution(s). Default is AutoSize. Possible values include: "None", "AutoSize", + "AutoFit". + :type stretch_mode: str or ~azure.mgmt.media.models.StretchMode + :param sync_mode: The Video Sync Mode. Possible values include: "Auto", "Passthrough", "Cfr", + "Vfr". + :type sync_mode: str or ~azure.mgmt.media.models.VideoSyncMode + :param scene_change_detection: Specifies whether or not the encoder should insert key frames at + scene changes. If not specified, the default is false. This flag should be set to true only + when the encoder is being configured to produce a single output video. + :type scene_change_detection: bool + :param complexity: Tells the encoder how to choose its encoding settings. Quality will provide + for a higher compression ratio but at a higher cost and longer compute time. Speed will + produce a relatively larger file but is faster and more economical. The default value is + Balanced. Possible values include: "Speed", "Balanced", "Quality". + :type complexity: str or ~azure.mgmt.media.models.H265Complexity + :param layers: The collection of output H.265 layers to be produced by the encoder. + :type layers: list[~azure.mgmt.media.models.H265Layer] + """ + + _validation = { + 'odata_type': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'label': {'key': 'label', 'type': 'str'}, + 'key_frame_interval': {'key': 'keyFrameInterval', 'type': 'duration'}, + 'stretch_mode': {'key': 'stretchMode', 'type': 'str'}, + 'sync_mode': {'key': 'syncMode', 'type': 'str'}, + 'scene_change_detection': {'key': 'sceneChangeDetection', 'type': 'bool'}, + 'complexity': {'key': 'complexity', 'type': 'str'}, + 'layers': {'key': 'layers', 'type': '[H265Layer]'}, + } + + def __init__( + self, + *, + label: Optional[str] = None, + key_frame_interval: Optional[datetime.timedelta] = None, + stretch_mode: Optional[Union[str, "StretchMode"]] = None, + sync_mode: Optional[Union[str, "VideoSyncMode"]] = None, + scene_change_detection: Optional[bool] = None, + complexity: Optional[Union[str, "H265Complexity"]] = None, + layers: Optional[List["H265Layer"]] = None, + **kwargs + ): + super(H265Video, self).__init__(label=label, key_frame_interval=key_frame_interval, stretch_mode=stretch_mode, sync_mode=sync_mode, **kwargs) + self.odata_type = '#Microsoft.Media.H265Video' # type: str + self.scene_change_detection = scene_change_detection + self.complexity = complexity + self.layers = layers + + class Hls(msrest.serialization.Model): """HTTP Live Streaming (HLS) packing setting for the live output. @@ -3311,6 +3852,9 @@ def __init__( class ImageFormat(Format): """Describes the properties for an output image file. 
+ You probably want to use the sub-classes and not this class directly. Known + sub-classes are: JpgFormat, PngFormat. + All required parameters must be populated in order to send to Azure. :param odata_type: Required. The discriminator for derived types.Constant filled by server. @@ -3323,29 +3867,70 @@ class ImageFormat(Format): the base name is truncated to the first 32 characters in total length. {Extension} - The appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index} - A unique index for thumbnails. Only applicable to thumbnails. {Bitrate} - The audio/video - bitrate. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. Any - unsubstituted macros will be collapsed and removed from the filename. + bitrate. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. + {Resolution} - The video resolution. Any unsubstituted macros will be collapsed and removed + from the filename. :type filename_pattern: str """ _validation = { 'odata_type': {'required': True}, - 'filename_pattern': {'required': True}, + 'filename_pattern': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'filename_pattern': {'key': 'filenamePattern', 'type': 'str'}, + } + + _subtype_map = { + 'odata_type': {'#Microsoft.Media.JpgFormat': 'JpgFormat', '#Microsoft.Media.PngFormat': 'PngFormat'} + } + + def __init__( + self, + *, + filename_pattern: str, + **kwargs + ): + super(ImageFormat, self).__init__(filename_pattern=filename_pattern, **kwargs) + self.odata_type = '#Microsoft.Media.ImageFormat' # type: str + + +class InputFile(InputDefinition): + """An InputDefinition for a single file. TrackSelections are scoped to the file specified. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. The discriminator for derived types.Constant filled by server. + :type odata_type: str + :param included_tracks: The list of TrackDescriptors which define the metadata and selection of + tracks in the input. + :type included_tracks: list[~azure.mgmt.media.models.TrackDescriptor] + :param filename: Name of the file that this input definition applies to. + :type filename: str + """ + + _validation = { + 'odata_type': {'required': True}, } _attribute_map = { 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, - 'filename_pattern': {'key': 'filenamePattern', 'type': 'str'}, + 'included_tracks': {'key': 'includedTracks', 'type': '[TrackDescriptor]'}, + 'filename': {'key': 'filename', 'type': 'str'}, } def __init__( self, *, - filename_pattern: str, + included_tracks: Optional[List["TrackDescriptor"]] = None, + filename: Optional[str] = None, **kwargs ): - super(ImageFormat, self).__init__(filename_pattern=filename_pattern, **kwargs) - self.odata_type = '#Microsoft.Media.ImageFormat' # type: str + super(InputFile, self).__init__(included_tracks=included_tracks, **kwargs) + self.odata_type = '#Microsoft.Media.InputFile' # type: str + self.filename = filename class IPAccessControl(msrest.serialization.Model): @@ -3400,7 +3985,7 @@ def __init__( self.subnet_prefix_length = subnet_prefix_length -class Job(Resource): +class Job(ProxyResource): """A Job resource type. The progress and state can be obtained by polling a Job or subscribing to events using EventGrid. Variables are only populated by the server, and will be ignored when sending a request. @@ -3413,8 +3998,10 @@ class Job(Resource): :ivar type: The type of the resource. E.g. 
"Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts". :vartype type: str - :ivar created: The UTC date and time when the customer has created the Job, in 'YYYY-MM- - DDThh:mm:ssZ' format. + :ivar system_data: The system metadata relating to this resource. + :vartype system_data: ~azure.mgmt.media.models.SystemData + :ivar created: The UTC date and time when the customer has created the Job, in + 'YYYY-MM-DDThh:mm:ssZ' format. :vartype created: ~datetime.datetime :ivar state: The current state of the job. Possible values include: "Canceled", "Canceling", "Error", "Finished", "Processing", "Queued", "Scheduled". @@ -3445,6 +4032,7 @@ class Job(Resource): 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, + 'system_data': {'readonly': True}, 'created': {'readonly': True}, 'state': {'readonly': True}, 'last_modified': {'readonly': True}, @@ -3456,6 +4044,7 @@ class Job(Resource): 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, + 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'created': {'key': 'properties.created', 'type': 'iso-8601'}, 'state': {'key': 'properties.state', 'type': 'str'}, 'description': {'key': 'properties.description', 'type': 'str'}, @@ -3479,6 +4068,7 @@ def __init__( **kwargs ): super(Job, self).__init__(**kwargs) + self.system_data = None self.created = None self.state = None self.description = description @@ -3603,7 +4193,7 @@ class JobInput(msrest.serialization.Model): """Base class for inputs to a Job. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: JobInputClip, JobInputs. + sub-classes are: JobInputClip, JobInputSequence, JobInputs. All required parameters must be populated in order to send to Azure. @@ -3620,7 +4210,7 @@ class JobInput(msrest.serialization.Model): } _subtype_map = { - 'odata_type': {'#Microsoft.Media.JobInputClip': 'JobInputClip', '#Microsoft.Media.JobInputs': 'JobInputs'} + 'odata_type': {'#Microsoft.Media.JobInputClip': 'JobInputClip', '#Microsoft.Media.JobInputSequence': 'JobInputSequence', '#Microsoft.Media.JobInputs': 'JobInputs'} } def __init__( @@ -3655,6 +4245,9 @@ class JobInputClip(JobInput): submitting a Job, exactly one of the JobInputs should be the image file, and it should have the label 'xyz'. :type label: str + :param input_definitions: Defines a list of InputDefinitions. For each InputDefinition, it + defines a list of track selections and related metadata. + :type input_definitions: list[~azure.mgmt.media.models.InputDefinition] """ _validation = { @@ -3667,6 +4260,7 @@ class JobInputClip(JobInput): 'start': {'key': 'start', 'type': 'ClipTime'}, 'end': {'key': 'end', 'type': 'ClipTime'}, 'label': {'key': 'label', 'type': 'str'}, + 'input_definitions': {'key': 'inputDefinitions', 'type': '[InputDefinition]'}, } _subtype_map = { @@ -3680,6 +4274,7 @@ def __init__( start: Optional["ClipTime"] = None, end: Optional["ClipTime"] = None, label: Optional[str] = None, + input_definitions: Optional[List["InputDefinition"]] = None, **kwargs ): super(JobInputClip, self).__init__(**kwargs) @@ -3688,6 +4283,7 @@ def __init__( self.start = start self.end = end self.label = label + self.input_definitions = input_definitions class JobInputAsset(JobInputClip): @@ -3711,6 +4307,9 @@ class JobInputAsset(JobInputClip): submitting a Job, exactly one of the JobInputs should be the image file, and it should have the label 'xyz'. 
:type label: str + :param input_definitions: Defines a list of InputDefinitions. For each InputDefinition, it + defines a list of track selections and related metadata. + :type input_definitions: list[~azure.mgmt.media.models.InputDefinition] :param asset_name: Required. The name of the input Asset. :type asset_name: str """ @@ -3726,6 +4325,7 @@ class JobInputAsset(JobInputClip): 'start': {'key': 'start', 'type': 'ClipTime'}, 'end': {'key': 'end', 'type': 'ClipTime'}, 'label': {'key': 'label', 'type': 'str'}, + 'input_definitions': {'key': 'inputDefinitions', 'type': '[InputDefinition]'}, 'asset_name': {'key': 'assetName', 'type': 'str'}, } @@ -3737,9 +4337,10 @@ def __init__( start: Optional["ClipTime"] = None, end: Optional["ClipTime"] = None, label: Optional[str] = None, + input_definitions: Optional[List["InputDefinition"]] = None, **kwargs ): - super(JobInputAsset, self).__init__(files=files, start=start, end=end, label=label, **kwargs) + super(JobInputAsset, self).__init__(files=files, start=start, end=end, label=label, input_definitions=input_definitions, **kwargs) self.odata_type = '#Microsoft.Media.JobInputAsset' # type: str self.asset_name = asset_name @@ -3765,6 +4366,9 @@ class JobInputHttp(JobInputClip): submitting a Job, exactly one of the JobInputs should be the image file, and it should have the label 'xyz'. :type label: str + :param input_definitions: Defines a list of InputDefinitions. For each InputDefinition, it + defines a list of track selections and related metadata. + :type input_definitions: list[~azure.mgmt.media.models.InputDefinition] :param base_uri: Base URI for HTTPS job input. It will be concatenated with provided file names. If no base uri is given, then the provided file list is assumed to be fully qualified uris. Maximum length of 4000 characters. @@ -3781,6 +4385,7 @@ class JobInputHttp(JobInputClip): 'start': {'key': 'start', 'type': 'ClipTime'}, 'end': {'key': 'end', 'type': 'ClipTime'}, 'label': {'key': 'label', 'type': 'str'}, + 'input_definitions': {'key': 'inputDefinitions', 'type': '[InputDefinition]'}, 'base_uri': {'key': 'baseUri', 'type': 'str'}, } @@ -3791,10 +4396,11 @@ def __init__( start: Optional["ClipTime"] = None, end: Optional["ClipTime"] = None, label: Optional[str] = None, + input_definitions: Optional[List["InputDefinition"]] = None, base_uri: Optional[str] = None, **kwargs ): - super(JobInputHttp, self).__init__(files=files, start=start, end=end, label=label, **kwargs) + super(JobInputHttp, self).__init__(files=files, start=start, end=end, label=label, input_definitions=input_definitions, **kwargs) self.odata_type = '#Microsoft.Media.JobInputHttp' # type: str self.base_uri = base_uri @@ -3830,6 +4436,37 @@ def __init__( self.inputs = inputs +class JobInputSequence(JobInput): + """A Sequence contains an ordered list of Clips where each clip is a JobInput. The Sequence will be treated as a single input. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. The discriminator for derived types.Constant filled by server. + :type odata_type: str + :param inputs: JobInputs that make up the timeline. 
+ :type inputs: list[~azure.mgmt.media.models.JobInputClip] + """ + + _validation = { + 'odata_type': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'inputs': {'key': 'inputs', 'type': '[JobInputClip]'}, + } + + def __init__( + self, + *, + inputs: Optional[List["JobInputClip"]] = None, + **kwargs + ): + super(JobInputSequence, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Media.JobInputSequence' # type: str + self.inputs = inputs + + class JobOutput(msrest.serialization.Model): """Describes all the properties of a JobOutput. @@ -3977,7 +4614,7 @@ def __init__( self.asset_name = asset_name -class JpgFormat(Format): +class JpgFormat(ImageFormat): """Describes the settings for producing JPEG thumbnails. All required parameters must be populated in order to send to Azure. @@ -3992,8 +4629,9 @@ class JpgFormat(Format): the base name is truncated to the first 32 characters in total length. {Extension} - The appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index} - A unique index for thumbnails. Only applicable to thumbnails. {Bitrate} - The audio/video - bitrate. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. Any - unsubstituted macros will be collapsed and removed from the filename. + bitrate. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. + {Resolution} - The video resolution. Any unsubstituted macros will be collapsed and removed + from the filename. :type filename_pattern: str """ @@ -4163,6 +4801,27 @@ def __init__( self.quality = quality +class KeyDelivery(msrest.serialization.Model): + """KeyDelivery. + + :param access_control: The access control properties for Key Delivery. + :type access_control: ~azure.mgmt.media.models.AccessControl + """ + + _attribute_map = { + 'access_control': {'key': 'accessControl', 'type': 'AccessControl'}, + } + + def __init__( + self, + *, + access_control: Optional["AccessControl"] = None, + **kwargs + ): + super(KeyDelivery, self).__init__(**kwargs) + self.access_control = access_control + + class KeyVaultProperties(msrest.serialization.Model): """KeyVaultProperties. @@ -4385,6 +5044,8 @@ class LiveEvent(TrackedResource): :type tags: dict[str, str] :param location: Required. The geo-location where the resource lives. :type location: str + :ivar system_data: The system metadata relating to this resource. + :vartype system_data: ~azure.mgmt.media.models.SystemData :param description: A description for the live event. :type description: str :param input: Live event input settings. 
It defines how the live event receives input from a @@ -4432,6 +5093,7 @@ class LiveEvent(TrackedResource): 'name': {'readonly': True}, 'type': {'readonly': True}, 'location': {'required': True}, + 'system_data': {'readonly': True}, 'provisioning_state': {'readonly': True}, 'resource_state': {'readonly': True}, 'created': {'readonly': True}, @@ -4444,6 +5106,7 @@ class LiveEvent(TrackedResource): 'type': {'key': 'type', 'type': 'str'}, 'tags': {'key': 'tags', 'type': '{str}'}, 'location': {'key': 'location', 'type': 'str'}, + 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'description': {'key': 'properties.description', 'type': 'str'}, 'input': {'key': 'properties.input', 'type': 'LiveEventInput'}, 'preview': {'key': 'properties.preview', 'type': 'LiveEventPreview'}, @@ -4476,6 +5139,7 @@ def __init__( **kwargs ): super(LiveEvent, self).__init__(tags=tags, location=location, **kwargs) + self.system_data = None self.description = description self.input = input self.preview = preview @@ -4866,7 +5530,7 @@ def __init__( self.output_transcription_track = output_transcription_track -class LiveOutput(Resource): +class LiveOutput(ProxyResource): """The Live Output. Variables are only populated by the server, and will be ignored when sending a request. @@ -5072,6 +5736,8 @@ class MediaService(TrackedResource): :type location: str :param identity: The Managed Identity for the Media Services account. :type identity: ~azure.mgmt.media.models.MediaServiceIdentity + :ivar system_data: The system metadata relating to this resource. + :vartype system_data: ~azure.mgmt.media.models.SystemData :ivar media_service_id: The Media Services account ID. :vartype media_service_id: str :param storage_accounts: The storage accounts for this resource. @@ -5080,6 +5746,8 @@ class MediaService(TrackedResource): :type storage_authentication: str or ~azure.mgmt.media.models.StorageAuthentication :param encryption: The account encryption properties. :type encryption: ~azure.mgmt.media.models.AccountEncryption + :param key_delivery: The Key Delivery properties for Media Services account. 
+ :type key_delivery: ~azure.mgmt.media.models.KeyDelivery """ _validation = { @@ -5087,6 +5755,7 @@ class MediaService(TrackedResource): 'name': {'readonly': True}, 'type': {'readonly': True}, 'location': {'required': True}, + 'system_data': {'readonly': True}, 'media_service_id': {'readonly': True}, } @@ -5097,10 +5766,12 @@ class MediaService(TrackedResource): 'tags': {'key': 'tags', 'type': '{str}'}, 'location': {'key': 'location', 'type': 'str'}, 'identity': {'key': 'identity', 'type': 'MediaServiceIdentity'}, + 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'media_service_id': {'key': 'properties.mediaServiceId', 'type': 'str'}, 'storage_accounts': {'key': 'properties.storageAccounts', 'type': '[StorageAccount]'}, 'storage_authentication': {'key': 'properties.storageAuthentication', 'type': 'str'}, 'encryption': {'key': 'properties.encryption', 'type': 'AccountEncryption'}, + 'key_delivery': {'key': 'properties.keyDelivery', 'type': 'KeyDelivery'}, } def __init__( @@ -5112,14 +5783,17 @@ def __init__( storage_accounts: Optional[List["StorageAccount"]] = None, storage_authentication: Optional[Union[str, "StorageAuthentication"]] = None, encryption: Optional["AccountEncryption"] = None, + key_delivery: Optional["KeyDelivery"] = None, **kwargs ): super(MediaService, self).__init__(tags=tags, location=location, **kwargs) self.identity = identity + self.system_data = None self.media_service_id = None self.storage_accounts = storage_accounts self.storage_authentication = storage_authentication self.encryption = encryption + self.key_delivery = key_delivery class MediaServiceCollection(msrest.serialization.Model): @@ -5188,6 +5862,62 @@ def __init__( self.tenant_id = None +class MediaServiceUpdate(msrest.serialization.Model): + """A Media Services account update. + + Variables are only populated by the server, and will be ignored when sending a request. + + :param tags: A set of tags. Resource tags. + :type tags: dict[str, str] + :param identity: The Managed Identity for the Media Services account. + :type identity: ~azure.mgmt.media.models.MediaServiceIdentity + :ivar media_service_id: The Media Services account ID. + :vartype media_service_id: str + :param storage_accounts: The storage accounts for this resource. + :type storage_accounts: list[~azure.mgmt.media.models.StorageAccount] + :param storage_authentication: Possible values include: "System", "ManagedIdentity". + :type storage_authentication: str or ~azure.mgmt.media.models.StorageAuthentication + :param encryption: The account encryption properties. + :type encryption: ~azure.mgmt.media.models.AccountEncryption + :param key_delivery: The Key Delivery properties for Media Services account. 
+ :type key_delivery: ~azure.mgmt.media.models.KeyDelivery + """ + + _validation = { + 'media_service_id': {'readonly': True}, + } + + _attribute_map = { + 'tags': {'key': 'tags', 'type': '{str}'}, + 'identity': {'key': 'identity', 'type': 'MediaServiceIdentity'}, + 'media_service_id': {'key': 'properties.mediaServiceId', 'type': 'str'}, + 'storage_accounts': {'key': 'properties.storageAccounts', 'type': '[StorageAccount]'}, + 'storage_authentication': {'key': 'properties.storageAuthentication', 'type': 'str'}, + 'encryption': {'key': 'properties.encryption', 'type': 'AccountEncryption'}, + 'key_delivery': {'key': 'properties.keyDelivery', 'type': 'KeyDelivery'}, + } + + def __init__( + self, + *, + tags: Optional[Dict[str, str]] = None, + identity: Optional["MediaServiceIdentity"] = None, + storage_accounts: Optional[List["StorageAccount"]] = None, + storage_authentication: Optional[Union[str, "StorageAuthentication"]] = None, + encryption: Optional["AccountEncryption"] = None, + key_delivery: Optional["KeyDelivery"] = None, + **kwargs + ): + super(MediaServiceUpdate, self).__init__(**kwargs) + self.tags = tags + self.identity = identity + self.media_service_id = None + self.storage_accounts = storage_accounts + self.storage_authentication = storage_authentication + self.encryption = encryption + self.key_delivery = key_delivery + + class MetricDimension(msrest.serialization.Model): """A metric dimension. @@ -5239,10 +5969,21 @@ class MetricSpecification(msrest.serialization.Model): :ivar aggregation_type: The metric aggregation type. Possible values include: "Average", "Count", "Total". :vartype aggregation_type: str or ~azure.mgmt.media.models.MetricAggregationType + :ivar lock_aggregation_type: The metric lock aggregation type. Possible values include: + "Average", "Count", "Total". + :vartype lock_aggregation_type: str or ~azure.mgmt.media.models.MetricAggregationType :param supported_aggregation_types: Supported aggregation types. :type supported_aggregation_types: list[str] :ivar dimensions: The metric dimensions. :vartype dimensions: list[~azure.mgmt.media.models.MetricDimension] + :ivar enable_regional_mdm_account: Indicates whether regional MDM account is enabled. + :vartype enable_regional_mdm_account: bool + :ivar source_mdm_account: The source MDM account. + :vartype source_mdm_account: str + :ivar source_mdm_namespace: The source MDM namespace. + :vartype source_mdm_namespace: str + :ivar supported_time_grain_types: The supported time grain types. 
+ :vartype supported_time_grain_types: list[str] """ _validation = { @@ -5251,7 +5992,12 @@ class MetricSpecification(msrest.serialization.Model): 'display_description': {'readonly': True}, 'unit': {'readonly': True}, 'aggregation_type': {'readonly': True}, + 'lock_aggregation_type': {'readonly': True}, 'dimensions': {'readonly': True}, + 'enable_regional_mdm_account': {'readonly': True}, + 'source_mdm_account': {'readonly': True}, + 'source_mdm_namespace': {'readonly': True}, + 'supported_time_grain_types': {'readonly': True}, } _attribute_map = { @@ -5260,8 +6006,13 @@ class MetricSpecification(msrest.serialization.Model): 'display_description': {'key': 'displayDescription', 'type': 'str'}, 'unit': {'key': 'unit', 'type': 'str'}, 'aggregation_type': {'key': 'aggregationType', 'type': 'str'}, + 'lock_aggregation_type': {'key': 'lockAggregationType', 'type': 'str'}, 'supported_aggregation_types': {'key': 'supportedAggregationTypes', 'type': '[str]'}, 'dimensions': {'key': 'dimensions', 'type': '[MetricDimension]'}, + 'enable_regional_mdm_account': {'key': 'enableRegionalMdmAccount', 'type': 'bool'}, + 'source_mdm_account': {'key': 'sourceMdmAccount', 'type': 'str'}, + 'source_mdm_namespace': {'key': 'sourceMdmNamespace', 'type': 'str'}, + 'supported_time_grain_types': {'key': 'supportedTimeGrainTypes', 'type': '[str]'}, } def __init__( @@ -5276,8 +6027,13 @@ def __init__( self.display_description = None self.unit = None self.aggregation_type = None + self.lock_aggregation_type = None self.supported_aggregation_types = supported_aggregation_types self.dimensions = None + self.enable_regional_mdm_account = None + self.source_mdm_account = None + self.source_mdm_namespace = None + self.supported_time_grain_types = None class MultiBitrateFormat(Format): @@ -5298,8 +6054,9 @@ class MultiBitrateFormat(Format): the base name is truncated to the first 32 characters in total length. {Extension} - The appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index} - A unique index for thumbnails. Only applicable to thumbnails. {Bitrate} - The audio/video - bitrate. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. Any - unsubstituted macros will be collapsed and removed from the filename. + bitrate. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. + {Resolution} - The video resolution. Any unsubstituted macros will be collapsed and removed + from the filename. :type filename_pattern: str :param output_files: The list of output files to produce. Each entry in the list is a set of audio and video layer labels to be muxed together . @@ -5348,8 +6105,9 @@ class Mp4Format(MultiBitrateFormat): the base name is truncated to the first 32 characters in total length. {Extension} - The appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index} - A unique index for thumbnails. Only applicable to thumbnails. {Bitrate} - The audio/video - bitrate. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. Any - unsubstituted macros will be collapsed and removed from the filename. + bitrate. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. + {Resolution} - The video resolution. Any unsubstituted macros will be collapsed and removed + from the filename. :type filename_pattern: str :param output_files: The list of output files to produce. Each entry in the list is a set of audio and video layer labels to be muxed together . 
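To tie the new H.265 models together, a minimal sketch built only from classes defined in this file: an H265Video codec with a single H265Layer, written out as MP4 via Mp4Format. Wrapping these in an encoder preset and Transform is assumed and not shown here; the bitrate, resolution, and naming values are illustrative.

from azure.mgmt.media.models import H265Layer, H265Video, Mp4Format

# One 1080p H.265 layer; bitrate is required and is given in bits per second.
hevc_layer = H265Layer(
    bitrate=3000000,
    width="1920",
    height="1080",
    profile="Main",
    label="HD",
)

# Codec definition using the new H265Video type; complexity defaults to Balanced.
hevc_codec = H265Video(
    complexity="Speed",
    layers=[hevc_layer],
)

# Output container; the {Label} and {Bitrate} macros are substituted per layer
# as described in the Format/Mp4Format docstrings above.
mp4_output = Mp4Format(filename_pattern="{Basename}_{Label}_{Bitrate}.mp4")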
@@ -5448,6 +6206,10 @@ class Operation(msrest.serialization.Model): :type origin: str :param properties: Operation properties format. :type properties: ~azure.mgmt.media.models.Properties + :param is_data_action: Whether the operation applies to data-plane. + :type is_data_action: bool + :param action_type: Indicates the action type. Possible values include: "Internal". + :type action_type: str or ~azure.mgmt.media.models.ActionType """ _validation = { @@ -5459,6 +6221,8 @@ class Operation(msrest.serialization.Model): 'display': {'key': 'display', 'type': 'OperationDisplay'}, 'origin': {'key': 'origin', 'type': 'str'}, 'properties': {'key': 'properties', 'type': 'Properties'}, + 'is_data_action': {'key': 'isDataAction', 'type': 'bool'}, + 'action_type': {'key': 'actionType', 'type': 'str'}, } def __init__( @@ -5468,6 +6232,8 @@ def __init__( display: Optional["OperationDisplay"] = None, origin: Optional[str] = None, properties: Optional["Properties"] = None, + is_data_action: Optional[bool] = None, + action_type: Optional[Union[str, "ActionType"]] = None, **kwargs ): super(Operation, self).__init__(**kwargs) @@ -5475,6 +6241,8 @@ def __init__( self.display = display self.origin = origin self.properties = properties + self.is_data_action = is_data_action + self.action_type = action_type class OperationCollection(msrest.serialization.Model): @@ -5571,7 +6339,7 @@ def __init__( self.labels = labels -class PngFormat(Format): +class PngFormat(ImageFormat): """Describes the settings for producing PNG thumbnails. All required parameters must be populated in order to send to Azure. @@ -5586,8 +6354,9 @@ class PngFormat(Format): the base name is truncated to the first 32 characters in total length. {Extension} - The appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index} - A unique index for thumbnails. Only applicable to thumbnails. {Bitrate} - The audio/video - bitrate. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. Any - unsubstituted macros will be collapsed and removed from the filename. + bitrate. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. + {Resolution} - The video resolution. Any unsubstituted macros will be collapsed and removed + from the filename. :type filename_pattern: str """ @@ -5661,7 +6430,7 @@ class PngImage(Image): stop at the end of the stream. :type range: str :param layers: A collection of output PNG image layers to be produced by the encoder. - :type layers: list[~azure.mgmt.media.models.Layer] + :type layers: list[~azure.mgmt.media.models.PngLayer] """ _validation = { @@ -5678,7 +6447,7 @@ class PngImage(Image): 'start': {'key': 'start', 'type': 'str'}, 'step': {'key': 'step', 'type': 'str'}, 'range': {'key': 'range', 'type': 'str'}, - 'layers': {'key': 'layers', 'type': '[Layer]'}, + 'layers': {'key': 'layers', 'type': '[PngLayer]'}, } def __init__( @@ -5691,7 +6460,7 @@ def __init__( sync_mode: Optional[Union[str, "VideoSyncMode"]] = None, step: Optional[str] = None, range: Optional[str] = None, - layers: Optional[List["Layer"]] = None, + layers: Optional[List["PngLayer"]] = None, **kwargs ): super(PngImage, self).__init__(label=label, key_frame_interval=key_frame_interval, stretch_mode=stretch_mode, sync_mode=sync_mode, start=start, step=step, range=range, **kwargs) @@ -6044,40 +6813,6 @@ def __init__( self.provider_name = provider_name -class ProxyResource(Resource): - """The resource model definition for a Azure Resource Manager proxy resource. 
It will not have tags and a location. - - Variables are only populated by the server, and will be ignored when sending a request. - - :ivar id: Fully qualified resource ID for the resource. Ex - - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. - :vartype id: str - :ivar name: The name of the resource. - :vartype name: str - :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or - "Microsoft.Storage/storageAccounts". - :vartype type: str - """ - - _validation = { - 'id': {'readonly': True}, - 'name': {'readonly': True}, - 'type': {'readonly': True}, - } - - _attribute_map = { - 'id': {'key': 'id', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(ProxyResource, self).__init__(**kwargs) - - class Rectangle(msrest.serialization.Model): """Describes the properties of a rectangular window applied to the input media before processing it. @@ -6118,6 +6853,210 @@ def __init__( self.height = height +class SelectAudioTrackByAttribute(AudioTrackDescriptor): + """Select audio tracks from the input by specifying an attribute and an attribute filter. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. The discriminator for derived types.Constant filled by server. + :type odata_type: str + :param channel_mapping: Optional designation for single channel audio tracks. Can be used to + combine the tracks into stereo or multi-channel audio tracks. Possible values include: + "FrontLeft", "FrontRight", "Center", "LowFrequencyEffects", "BackLeft", "BackRight", + "StereoLeft", "StereoRight". + :type channel_mapping: str or ~azure.mgmt.media.models.ChannelMapping + :param attribute: Required. The TrackAttribute to filter the tracks by. Possible values + include: "Bitrate", "Language". + :type attribute: str or ~azure.mgmt.media.models.TrackAttribute + :param filter: Required. The type of AttributeFilter to apply to the TrackAttribute in order to + select the tracks. Possible values include: "All", "Top", "Bottom", "ValueEquals". + :type filter: str or ~azure.mgmt.media.models.AttributeFilter + :param filter_value: The value to filter the tracks by. Only used when + AttributeFilter.ValueEquals is specified for the Filter property. + :type filter_value: str + """ + + _validation = { + 'odata_type': {'required': True}, + 'attribute': {'required': True}, + 'filter': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'channel_mapping': {'key': 'channelMapping', 'type': 'str'}, + 'attribute': {'key': 'attribute', 'type': 'str'}, + 'filter': {'key': 'filter', 'type': 'str'}, + 'filter_value': {'key': 'filterValue', 'type': 'str'}, + } + + def __init__( + self, + *, + attribute: Union[str, "TrackAttribute"], + filter: Union[str, "AttributeFilter"], + channel_mapping: Optional[Union[str, "ChannelMapping"]] = None, + filter_value: Optional[str] = None, + **kwargs + ): + super(SelectAudioTrackByAttribute, self).__init__(channel_mapping=channel_mapping, **kwargs) + self.odata_type = '#Microsoft.Media.SelectAudioTrackByAttribute' # type: str + self.attribute = attribute + self.filter = filter + self.filter_value = filter_value + + +class SelectAudioTrackById(AudioTrackDescriptor): + """Select audio tracks from the input by specifying a track identifier. 
+ + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. The discriminator for derived types.Constant filled by server. + :type odata_type: str + :param channel_mapping: Optional designation for single channel audio tracks. Can be used to + combine the tracks into stereo or multi-channel audio tracks. Possible values include: + "FrontLeft", "FrontRight", "Center", "LowFrequencyEffects", "BackLeft", "BackRight", + "StereoLeft", "StereoRight". + :type channel_mapping: str or ~azure.mgmt.media.models.ChannelMapping + :param track_id: Required. Track identifier to select. + :type track_id: long + """ + + _validation = { + 'odata_type': {'required': True}, + 'track_id': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'channel_mapping': {'key': 'channelMapping', 'type': 'str'}, + 'track_id': {'key': 'trackId', 'type': 'long'}, + } + + def __init__( + self, + *, + track_id: int, + channel_mapping: Optional[Union[str, "ChannelMapping"]] = None, + **kwargs + ): + super(SelectAudioTrackById, self).__init__(channel_mapping=channel_mapping, **kwargs) + self.odata_type = '#Microsoft.Media.SelectAudioTrackById' # type: str + self.track_id = track_id + + +class VideoTrackDescriptor(TrackDescriptor): + """A TrackSelection to select video tracks. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: SelectVideoTrackByAttribute, SelectVideoTrackById. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. The discriminator for derived types.Constant filled by server. + :type odata_type: str + """ + + _validation = { + 'odata_type': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + } + + _subtype_map = { + 'odata_type': {'#Microsoft.Media.SelectVideoTrackByAttribute': 'SelectVideoTrackByAttribute', '#Microsoft.Media.SelectVideoTrackById': 'SelectVideoTrackById'} + } + + def __init__( + self, + **kwargs + ): + super(VideoTrackDescriptor, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Media.VideoTrackDescriptor' # type: str + + +class SelectVideoTrackByAttribute(VideoTrackDescriptor): + """Select video tracks from the input by specifying an attribute and an attribute filter. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. The discriminator for derived types.Constant filled by server. + :type odata_type: str + :param attribute: Required. The TrackAttribute to filter the tracks by. Possible values + include: "Bitrate", "Language". + :type attribute: str or ~azure.mgmt.media.models.TrackAttribute + :param filter: Required. The type of AttributeFilter to apply to the TrackAttribute in order to + select the tracks. Possible values include: "All", "Top", "Bottom", "ValueEquals". + :type filter: str or ~azure.mgmt.media.models.AttributeFilter + :param filter_value: The value to filter the tracks by. Only used when + AttributeFilter.ValueEquals is specified for the Filter property. For TrackAttribute.Bitrate, + this should be an integer value in bits per second (e.g: '1500000'). The + TrackAttribute.Language is not supported for video tracks. 
+ :type filter_value: str + """ + + _validation = { + 'odata_type': {'required': True}, + 'attribute': {'required': True}, + 'filter': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'attribute': {'key': 'attribute', 'type': 'str'}, + 'filter': {'key': 'filter', 'type': 'str'}, + 'filter_value': {'key': 'filterValue', 'type': 'str'}, + } + + def __init__( + self, + *, + attribute: Union[str, "TrackAttribute"], + filter: Union[str, "AttributeFilter"], + filter_value: Optional[str] = None, + **kwargs + ): + super(SelectVideoTrackByAttribute, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Media.SelectVideoTrackByAttribute' # type: str + self.attribute = attribute + self.filter = filter + self.filter_value = filter_value + + +class SelectVideoTrackById(VideoTrackDescriptor): + """Select video tracks from the input by specifying a track identifier. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. The discriminator for derived types.Constant filled by server. + :type odata_type: str + :param track_id: Required. Track identifier to select. + :type track_id: long + """ + + _validation = { + 'odata_type': {'required': True}, + 'track_id': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'track_id': {'key': 'trackId', 'type': 'long'}, + } + + def __init__( + self, + *, + track_id: int, + **kwargs + ): + super(SelectVideoTrackById, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Media.SelectVideoTrackById' # type: str + self.track_id = track_id + + class ServiceSpecification(msrest.serialization.Model): """The service metric specifications. @@ -6274,6 +7213,8 @@ class StreamingEndpoint(TrackedResource): :type tags: dict[str, str] :param location: Required. The geo-location where the resource lives. :type location: str + :ivar system_data: The system metadata relating to this resource. + :vartype system_data: ~azure.mgmt.media.models.SystemData :param description: The streaming endpoint description. :type description: str :param scale_units: The number of scale units. Use the Scale operation to adjust this value. @@ -6314,6 +7255,7 @@ class StreamingEndpoint(TrackedResource): 'name': {'readonly': True}, 'type': {'readonly': True}, 'location': {'required': True}, + 'system_data': {'readonly': True}, 'host_name': {'readonly': True}, 'provisioning_state': {'readonly': True}, 'resource_state': {'readonly': True}, @@ -6328,6 +7270,7 @@ class StreamingEndpoint(TrackedResource): 'type': {'key': 'type', 'type': 'str'}, 'tags': {'key': 'tags', 'type': '{str}'}, 'location': {'key': 'location', 'type': 'str'}, + 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'description': {'key': 'properties.description', 'type': 'str'}, 'scale_units': {'key': 'properties.scaleUnits', 'type': 'int'}, 'availability_set_name': {'key': 'properties.availabilitySetName', 'type': 'str'}, @@ -6364,6 +7307,7 @@ def __init__( **kwargs ): super(StreamingEndpoint, self).__init__(tags=tags, location=location, **kwargs) + self.system_data = None self.description = description self.scale_units = scale_units self.availability_set_name = availability_set_name @@ -6461,7 +7405,7 @@ def __init__( self.scale_unit = scale_unit -class StreamingLocator(Resource): +class StreamingLocator(ProxyResource): """A Streaming Locator resource. Variables are only populated by the server, and will be ignored when sending a request. 
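[Editor's illustrative sketch, not part of the generated diff: the hunks above add a read-only system_data (SystemData) field to StreamingEndpoint and rebase StreamingLocator onto ProxyResource. A minimal sketch of reading that metadata, assuming an already-constructed AzureMediaServices client named `client` and placeholder resource names.]

    # Hypothetical resource names; `client` is assumed to be an AzureMediaServices instance.
    endpoint = client.streaming_endpoints.get(
        resource_group_name="contoso-rg",
        account_name="contosomedia",
        streaming_endpoint_name="default",
    )
    # system_data is populated by the server and ignored when sending a request.
    if endpoint.system_data is not None:
        print(endpoint.system_data.created_by, endpoint.system_data.created_at)
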
@@ -6474,6 +7418,8 @@ class StreamingLocator(Resource): :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts". :vartype type: str + :ivar system_data: The system metadata relating to this resource. + :vartype system_data: ~azure.mgmt.media.models.SystemData :param asset_name: Asset Name. :type asset_name: str :ivar created: The creation time of the Streaming Locator. @@ -6505,6 +7451,7 @@ class StreamingLocator(Resource): 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, + 'system_data': {'readonly': True}, 'created': {'readonly': True}, } @@ -6512,6 +7459,7 @@ class StreamingLocator(Resource): 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, + 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'asset_name': {'key': 'properties.assetName', 'type': 'str'}, 'created': {'key': 'properties.created', 'type': 'iso-8601'}, 'start_time': {'key': 'properties.startTime', 'type': 'iso-8601'}, @@ -6539,6 +7487,7 @@ def __init__( **kwargs ): super(StreamingLocator, self).__init__(**kwargs) + self.system_data = None self.asset_name = asset_name self.created = None self.start_time = start_time @@ -6674,7 +7623,7 @@ def __init__( self.paths = paths -class StreamingPolicy(Resource): +class StreamingPolicy(ProxyResource): """A Streaming Policy resource. Variables are only populated by the server, and will be ignored when sending a request. @@ -6687,6 +7636,8 @@ class StreamingPolicy(Resource): :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts". :vartype type: str + :ivar system_data: The system metadata relating to this resource. + :vartype system_data: ~azure.mgmt.media.models.SystemData :ivar created: Creation time of Streaming Policy. :vartype created: ~datetime.datetime :param default_content_key_policy_name: Default ContentKey used by current Streaming Policy. @@ -6705,6 +7656,7 @@ class StreamingPolicy(Resource): 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, + 'system_data': {'readonly': True}, 'created': {'readonly': True}, } @@ -6712,6 +7664,7 @@ class StreamingPolicy(Resource): 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, + 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'created': {'key': 'properties.created', 'type': 'iso-8601'}, 'default_content_key_policy_name': {'key': 'properties.defaultContentKeyPolicyName', 'type': 'str'}, 'envelope_encryption': {'key': 'properties.envelopeEncryption', 'type': 'EnvelopeEncryption'}, @@ -6731,6 +7684,7 @@ def __init__( **kwargs ): super(StreamingPolicy, self).__init__(**kwargs) + self.system_data = None self.created = None self.default_content_key_policy_name = default_content_key_policy_name self.envelope_encryption = envelope_encryption @@ -6941,6 +7895,54 @@ def __init__( self.id = id +class SystemData(msrest.serialization.Model): + """Metadata pertaining to creation and last modification of the resource. + + :param created_by: The identity that created the resource. + :type created_by: str + :param created_by_type: The type of identity that created the resource. Possible values + include: "User", "Application", "ManagedIdentity", "Key". + :type created_by_type: str or ~azure.mgmt.media.models.CreatedByType + :param created_at: The timestamp of resource creation (UTC). 
+ :type created_at: ~datetime.datetime + :param last_modified_by: The identity that last modified the resource. + :type last_modified_by: str + :param last_modified_by_type: The type of identity that last modified the resource. Possible + values include: "User", "Application", "ManagedIdentity", "Key". + :type last_modified_by_type: str or ~azure.mgmt.media.models.CreatedByType + :param last_modified_at: The timestamp of resource last modification (UTC). + :type last_modified_at: ~datetime.datetime + """ + + _attribute_map = { + 'created_by': {'key': 'createdBy', 'type': 'str'}, + 'created_by_type': {'key': 'createdByType', 'type': 'str'}, + 'created_at': {'key': 'createdAt', 'type': 'iso-8601'}, + 'last_modified_by': {'key': 'lastModifiedBy', 'type': 'str'}, + 'last_modified_by_type': {'key': 'lastModifiedByType', 'type': 'str'}, + 'last_modified_at': {'key': 'lastModifiedAt', 'type': 'iso-8601'}, + } + + def __init__( + self, + *, + created_by: Optional[str] = None, + created_by_type: Optional[Union[str, "CreatedByType"]] = None, + created_at: Optional[datetime.datetime] = None, + last_modified_by: Optional[str] = None, + last_modified_by_type: Optional[Union[str, "CreatedByType"]] = None, + last_modified_at: Optional[datetime.datetime] = None, + **kwargs + ): + super(SystemData, self).__init__(**kwargs) + self.created_by = created_by + self.created_by_type = created_by_type + self.created_at = created_at + self.last_modified_by = last_modified_by + self.last_modified_by_type = last_modified_by_type + self.last_modified_at = last_modified_at + + class TrackPropertyCondition(msrest.serialization.Model): """Class to specify one track property condition. @@ -7002,7 +8004,7 @@ def __init__( self.track_selections = track_selections -class Transform(Resource): +class Transform(ProxyResource): """A Transform encapsulates the rules or instructions for generating desired outputs from input media, such as by transcoding or by extracting insights. After the Transform is created, it can be applied to input media by creating Jobs. Variables are only populated by the server, and will be ignored when sending a request. @@ -7015,13 +8017,15 @@ class Transform(Resource): :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts". :vartype type: str + :ivar system_data: The system metadata relating to this resource. + :vartype system_data: ~azure.mgmt.media.models.SystemData :ivar created: The UTC date and time when the Transform was created, in 'YYYY-MM-DDThh:mm:ssZ' format. :vartype created: ~datetime.datetime :param description: An optional verbose description of the Transform. :type description: str - :ivar last_modified: The UTC date and time when the Transform was last updated, in 'YYYY-MM- - DDThh:mm:ssZ' format. + :ivar last_modified: The UTC date and time when the Transform was last updated, in + 'YYYY-MM-DDThh:mm:ssZ' format. :vartype last_modified: ~datetime.datetime :param outputs: An array of one or more TransformOutputs that the Transform should generate. 
:type outputs: list[~azure.mgmt.media.models.TransformOutput] @@ -7031,6 +8035,7 @@ class Transform(Resource): 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, + 'system_data': {'readonly': True}, 'created': {'readonly': True}, 'last_modified': {'readonly': True}, } @@ -7039,6 +8044,7 @@ class Transform(Resource): 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, + 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'created': {'key': 'properties.created', 'type': 'iso-8601'}, 'description': {'key': 'properties.description', 'type': 'str'}, 'last_modified': {'key': 'properties.lastModified', 'type': 'iso-8601'}, @@ -7053,6 +8059,7 @@ def __init__( **kwargs ): super(Transform, self).__init__(**kwargs) + self.system_data = None self.created = None self.description = description self.last_modified = None @@ -7145,8 +8152,9 @@ class TransportStreamFormat(MultiBitrateFormat): the base name is truncated to the first 32 characters in total length. {Extension} - The appropriate extension for this format. {Label} - The label assigned to the codec/layer. {Index} - A unique index for thumbnails. Only applicable to thumbnails. {Bitrate} - The audio/video - bitrate. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. Any - unsubstituted macros will be collapsed and removed from the filename. + bitrate. Not applicable to thumbnails. {Codec} - The type of the audio/video codec. + {Resolution} - The video resolution. Any unsubstituted macros will be collapsed and removed + from the filename. :type filename_pattern: str :param output_files: The list of output files to produce. Each entry in the list is a set of audio and video layer labels to be muxed together . @@ -7222,8 +8230,8 @@ class VideoAnalyzerPreset(AudioAnalyzerPreset): language detected and process with the selected language for the duration of the file. It does not currently support dynamically switching between languages after the first language is detected. The automatic detection works best with audio recordings with clearly discernable - speech. If automatic detection fails to find the language, transcription would fallback to 'en- - US'." The list of supported languages is available here: + speech. If automatic detection fails to find the language, transcription would fallback to + 'en-US'." The list of supported languages is available here: https://go.microsoft.com/fwlink/?linkid=2109463. :type audio_language: str :param mode: Determines the set of audio analysis operations to be performed. If unspecified, @@ -7277,8 +8285,8 @@ class VideoOverlay(Overlay): :param odata_type: Required. The discriminator for derived types.Constant filled by server. :type odata_type: str :param input_label: Required. The label of the job input which is to be used as an overlay. The - Input must specify exactly one file. You can specify an image file in JPG or PNG formats, or an - audio file (such as a WAV, MP3, WMA or M4A file), or a video file. See + Input must specify exactly one file. You can specify an image file in JPG, PNG, GIF or BMP + format, or an audio file (such as a WAV, MP3, WMA or M4A file), or a video file. See https://aka.ms/mesformats for the complete list of supported audio and video file formats. 
:type input_label: str :param start: The start position, with reference to the input video, at which the overlay diff --git a/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_account_filters_operations.py b/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_account_filters_operations.py index 1a57ef87a8b4..3dd75bc20ead 100644 --- a/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_account_filters_operations.py +++ b/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_account_filters_operations.py @@ -112,7 +112,7 @@ def get_next(next_link=None): response = pipeline_response.http_response if response.status_code not in [200]: - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) @@ -178,7 +178,7 @@ def get( if response.status_code not in [200, 404]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = None @@ -254,7 +254,7 @@ def create_or_update( if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if response.status_code == 200: @@ -324,7 +324,7 @@ def delete( if response.status_code not in [200, 204]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: @@ -395,7 +395,7 @@ def update( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('AccountFilter', pipeline_response) diff --git a/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_asset_filters_operations.py b/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_asset_filters_operations.py index 574575db6c32..be11b037d0f3 100644 --- a/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_asset_filters_operations.py +++ b/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_asset_filters_operations.py @@ -116,7 +116,7 @@ def get_next(next_link=None): response = pipeline_response.http_response if response.status_code not in [200]: - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) @@ -186,7 +186,7 @@ def get( if response.status_code not in [200, 404]: map_error(status_code=response.status_code, 
response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = None @@ -266,7 +266,7 @@ def create_or_update( if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if response.status_code == 200: @@ -340,7 +340,7 @@ def delete( if response.status_code not in [200, 204]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: @@ -415,7 +415,7 @@ def update( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('AssetFilter', pipeline_response) diff --git a/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_assets_operations.py b/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_assets_operations.py index 8f60c1f59810..cfc302f54b4c 100644 --- a/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_assets_operations.py +++ b/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_assets_operations.py @@ -129,7 +129,7 @@ def get_next(next_link=None): response = pipeline_response.http_response if response.status_code not in [200]: - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) @@ -195,7 +195,7 @@ def get( if response.status_code not in [200, 404]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = None @@ -271,7 +271,7 @@ def create_or_update( if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if response.status_code == 200: @@ -341,7 +341,7 @@ def delete( if response.status_code not in [200, 204]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: @@ -412,7 +412,7 @@ def 
update( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('Asset', pipeline_response) @@ -487,7 +487,7 @@ def list_container_sas( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('AssetContainerSas', pipeline_response) @@ -554,7 +554,7 @@ def get_encryption_key( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('StorageEncryptedAssetDecryptionData', pipeline_response) @@ -620,7 +620,7 @@ def list_streaming_locators( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('ListStreamingLocatorsResponse', pipeline_response) diff --git a/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_content_key_policies_operations.py b/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_content_key_policies_operations.py index 4ea66ed93208..56c283947af5 100644 --- a/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_content_key_policies_operations.py +++ b/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_content_key_policies_operations.py @@ -129,7 +129,7 @@ def get_next(next_link=None): response = pipeline_response.http_response if response.status_code not in [200]: - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) @@ -195,7 +195,7 @@ def get( if response.status_code not in [200, 404]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = None @@ -271,7 +271,7 @@ def create_or_update( if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if response.status_code == 200: @@ -341,7 +341,7 @@ def delete( if response.status_code not in 
[200, 204]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: @@ -412,7 +412,7 @@ def update( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('ContentKeyPolicy', pipeline_response) @@ -478,7 +478,7 @@ def get_policy_properties_with_secrets( if response.status_code not in [200, 404]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = None diff --git a/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_jobs_operations.py b/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_jobs_operations.py index 53e6ba0280f8..2b1d4e6c2da1 100644 --- a/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_jobs_operations.py +++ b/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_jobs_operations.py @@ -126,7 +126,7 @@ def get_next(next_link=None): response = pipeline_response.http_response if response.status_code not in [200]: - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) @@ -196,7 +196,7 @@ def get( if response.status_code not in [200, 404]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = None @@ -276,7 +276,7 @@ def create( if response.status_code not in [201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('Job', pipeline_response) @@ -346,7 +346,7 @@ def delete( if response.status_code not in [200, 204]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: @@ -423,7 +423,7 @@ def update( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, 
error_format=ARMErrorFormat) deserialized = self._deserialize('Job', pipeline_response) @@ -493,7 +493,7 @@ def cancel_job( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: diff --git a/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_live_events_operations.py b/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_live_events_operations.py index 0366bf5a3dfd..4be8b2e64b96 100644 --- a/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_live_events_operations.py +++ b/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_live_events_operations.py @@ -114,7 +114,7 @@ def get_next(next_link=None): response = pipeline_response.http_response if response.status_code not in [200]: - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) @@ -180,7 +180,7 @@ def get( if response.status_code not in [200, 404]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = None @@ -242,7 +242,7 @@ def _create_initial( if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if response.status_code == 200: @@ -284,8 +284,8 @@ def begin_create( :type auto_start: bool :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. - :keyword polling: True for ARMPolling, False for no polling, or a - polling object for personal polling strategy + :keyword polling: Pass in True if you'd like the ARMPolling polling method, + False for no polling, or your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.PollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
:return: An instance of LROPoller that returns either LiveEvent or the result of cls(response) @@ -387,7 +387,7 @@ def _update_initial( if response.status_code not in [200, 202]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if response.status_code == 200: @@ -423,8 +423,8 @@ def begin_update( :type parameters: ~azure.mgmt.media.models.LiveEvent :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. - :keyword polling: True for ARMPolling, False for no polling, or a - polling object for personal polling strategy + :keyword polling: Pass in True if you'd like the ARMPolling polling method, + False for no polling, or your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.PollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of LROPoller that returns either LiveEvent or the result of cls(response) @@ -519,7 +519,7 @@ def _delete_initial( if response.status_code not in [200, 202, 204]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: @@ -547,8 +547,8 @@ def begin_delete( :type live_event_name: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. - :keyword polling: True for ARMPolling, False for no polling, or a - polling object for personal polling strategy + :keyword polling: Pass in True if you'd like the ARMPolling polling method, + False for no polling, or your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.PollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of LROPoller that returns either None or the result of cls(response) @@ -639,7 +639,7 @@ def _allocate_initial( if response.status_code not in [200, 202]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: @@ -667,8 +667,8 @@ def begin_allocate( :type live_event_name: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. - :keyword polling: True for ARMPolling, False for no polling, or a - polling object for personal polling strategy + :keyword polling: Pass in True if you'd like the ARMPolling polling method, + False for no polling, or your own initialized polling object for a personal polling strategy. 
:paramtype polling: bool or ~azure.core.polling.PollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of LROPoller that returns either None or the result of cls(response) @@ -759,7 +759,7 @@ def _start_initial( if response.status_code not in [200, 202]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: @@ -788,8 +788,8 @@ def begin_start( :type live_event_name: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. - :keyword polling: True for ARMPolling, False for no polling, or a - polling object for personal polling strategy + :keyword polling: Pass in True if you'd like the ARMPolling polling method, + False for no polling, or your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.PollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of LROPoller that returns either None or the result of cls(response) @@ -886,7 +886,7 @@ def _stop_initial( if response.status_code not in [200, 202]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: @@ -917,8 +917,8 @@ def begin_stop( :type parameters: ~azure.mgmt.media.models.LiveEventActionInput :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. - :keyword polling: True for ARMPolling, False for no polling, or a - polling object for personal polling strategy + :keyword polling: Pass in True if you'd like the ARMPolling polling method, + False for no polling, or your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.PollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of LROPoller that returns either None or the result of cls(response) @@ -1010,7 +1010,7 @@ def _reset_initial( if response.status_code not in [200, 202]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: @@ -1040,8 +1040,8 @@ def begin_reset( :type live_event_name: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. 
- :keyword polling: True for ARMPolling, False for no polling, or a - polling object for personal polling strategy + :keyword polling: Pass in True if you'd like the ARMPolling polling method, + False for no polling, or your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.PollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of LROPoller that returns either None or the result of cls(response) diff --git a/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_live_outputs_operations.py b/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_live_outputs_operations.py index 2fa7656a3777..821b06a0fac9 100644 --- a/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_live_outputs_operations.py +++ b/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_live_outputs_operations.py @@ -118,7 +118,7 @@ def get_next(next_link=None): response = pipeline_response.http_response if response.status_code not in [200]: - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) @@ -188,7 +188,7 @@ def get( if response.status_code not in [200, 404]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = None @@ -249,7 +249,7 @@ def _create_initial( if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if response.status_code == 200: @@ -290,8 +290,8 @@ def begin_create( :type parameters: ~azure.mgmt.media.models.LiveOutput :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. - :keyword polling: True for ARMPolling, False for no polling, or a - polling object for personal polling strategy + :keyword polling: Pass in True if you'd like the ARMPolling polling method, + False for no polling, or your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.PollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
:return: An instance of LROPoller that returns either LiveOutput or the result of cls(response) @@ -390,7 +390,7 @@ def _delete_initial( if response.status_code not in [200, 202, 204]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: @@ -422,8 +422,8 @@ def begin_delete( :type live_output_name: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. - :keyword polling: True for ARMPolling, False for no polling, or a - polling object for personal polling strategy + :keyword polling: Pass in True if you'd like the ARMPolling polling method, + False for no polling, or your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.PollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of LROPoller that returns either None or the result of cls(response) diff --git a/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_locations_operations.py b/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_locations_operations.py index cb328a32ae74..6d887d5f4281 100644 --- a/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_locations_operations.py +++ b/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_locations_operations.py @@ -69,7 +69,7 @@ def check_name_availability( 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) - api_version = "2020-05-01" + api_version = "2021-05-01" content_type = kwargs.pop("content_type", "application/json") accept = "application/json" @@ -99,7 +99,7 @@ def check_name_availability( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('EntityNameAvailabilityCheckOutput', pipeline_response) diff --git a/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_mediaservices_operations.py b/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_mediaservices_operations.py index 1c0e617f50ec..8282848c691b 100644 --- a/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_mediaservices_operations.py +++ b/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_mediaservices_operations.py @@ -67,7 +67,7 @@ def list( 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) - api_version = "2020-05-01" + api_version = "2021-05-01" accept = "application/json" def prepare_request(next_link=None): @@ -108,7 +108,7 @@ def get_next(next_link=None): response = pipeline_response.http_response if response.status_code not in [200]: - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) map_error(status_code=response.status_code, response=response, error_map=error_map) raise 
HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) @@ -144,7 +144,7 @@ def get( 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) - api_version = "2020-05-01" + api_version = "2021-05-01" accept = "application/json" # Construct URL @@ -170,7 +170,7 @@ def get( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('MediaService', pipeline_response) @@ -209,7 +209,7 @@ def create_or_update( 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) - api_version = "2020-05-01" + api_version = "2021-05-01" content_type = kwargs.pop("content_type", "application/json") accept = "application/json" @@ -240,7 +240,7 @@ def create_or_update( if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if response.status_code == 200: @@ -280,7 +280,7 @@ def delete( 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) - api_version = "2020-05-01" + api_version = "2021-05-01" accept = "application/json" # Construct URL @@ -306,7 +306,7 @@ def delete( if response.status_code not in [200, 204]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: @@ -318,7 +318,7 @@ def update( self, resource_group_name, # type: str account_name, # type: str - parameters, # type: "_models.MediaService" + parameters, # type: "_models.MediaServiceUpdate" **kwargs # type: Any ): # type: (...) -> "_models.MediaService" @@ -331,7 +331,7 @@ def update( :param account_name: The Media Services account name. :type account_name: str :param parameters: The request parameters. 
- :type parameters: ~azure.mgmt.media.models.MediaService + :type parameters: ~azure.mgmt.media.models.MediaServiceUpdate :keyword callable cls: A custom type or function that will be passed the direct response :return: MediaService, or the result of cls(response) :rtype: ~azure.mgmt.media.models.MediaService @@ -342,7 +342,7 @@ def update( 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) - api_version = "2020-05-01" + api_version = "2021-05-01" content_type = kwargs.pop("content_type", "application/json") accept = "application/json" @@ -365,7 +365,7 @@ def update( header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(parameters, 'MediaService') + body_content = self._serialize.body(parameters, 'MediaServiceUpdate') body_content_kwargs['content'] = body_content request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) @@ -373,7 +373,7 @@ def update( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('MediaService', pipeline_response) @@ -413,7 +413,7 @@ def sync_storage_keys( 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) - api_version = "2020-05-01" + api_version = "2021-05-01" content_type = kwargs.pop("content_type", "application/json") accept = "application/json" @@ -444,7 +444,7 @@ def sync_storage_keys( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: @@ -480,7 +480,7 @@ def list_edge_policies( 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) - api_version = "2020-05-01" + api_version = "2021-05-01" content_type = kwargs.pop("content_type", "application/json") accept = "application/json" @@ -511,7 +511,7 @@ def list_edge_policies( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('EdgePolicies', pipeline_response) @@ -541,7 +541,7 @@ def list_by_subscription( 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) - api_version = "2020-05-01" + api_version = "2021-05-01" accept = "application/json" def prepare_request(next_link=None): @@ -581,7 +581,7 @@ def get_next(next_link=None): response = pipeline_response.http_response if response.status_code not in [200]: - error = self._deserialize(_models.ApiError, 
response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) @@ -591,61 +591,3 @@ def get_next(next_link=None): get_next, extract_data ) list_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Media/mediaservices'} # type: ignore - - def get_by_subscription( - self, - account_name, # type: str - **kwargs # type: Any - ): - # type: (...) -> "_models.MediaService" - """Get a Media Services account. - - Get the details of a Media Services account. - - :param account_name: The Media Services account name. - :type account_name: str - :keyword callable cls: A custom type or function that will be passed the direct response - :return: MediaService, or the result of cls(response) - :rtype: ~azure.mgmt.media.models.MediaService - :raises: ~azure.core.exceptions.HttpResponseError - """ - cls = kwargs.pop('cls', None) # type: ClsType["_models.MediaService"] - error_map = { - 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError - } - error_map.update(kwargs.pop('error_map', {})) - api_version = "2020-05-01" - accept = "application/json" - - # Construct URL - url = self.get_by_subscription.metadata['url'] # type: ignore - path_format_arguments = { - 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), - 'accountName': self._serialize.url("account_name", account_name, 'str'), - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - - # Construct headers - header_parameters = {} # type: Dict[str, Any] - header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') - - request = self._client.get(url, query_parameters, header_parameters) - pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) - response = pipeline_response.http_response - - if response.status_code not in [200]: - map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) - raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) - - deserialized = self._deserialize('MediaService', pipeline_response) - - if cls: - return cls(pipeline_response, deserialized, {}) - - return deserialized - get_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Media/mediaservices/{accountName}'} # type: ignore diff --git a/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_operations.py b/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_operations.py index 3454bbb9a7cb..8ec9da80fcc3 100644 --- a/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_operations.py +++ b/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_operations.py @@ -64,7 +64,7 @@ def list( 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) - api_version = "2020-05-01" + api_version = "2021-05-01" accept = "application/json" def prepare_request(next_link=None): @@ -100,7 +100,7 @@ def get_next(next_link=None): response = pipeline_response.http_response if response.status_code not in [200]: - error = 
self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) diff --git a/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_private_endpoint_connections_operations.py b/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_private_endpoint_connections_operations.py index c70a58def0a7..ec9330ad1028 100644 --- a/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_private_endpoint_connections_operations.py +++ b/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_private_endpoint_connections_operations.py @@ -69,7 +69,7 @@ def list( 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) - api_version = "2020-05-01" + api_version = "2021-05-01" accept = "application/json" # Construct URL @@ -95,7 +95,7 @@ def list( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('PrivateEndpointConnectionListResult', pipeline_response) @@ -134,7 +134,7 @@ def get( 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) - api_version = "2020-05-01" + api_version = "2021-05-01" accept = "application/json" # Construct URL @@ -161,7 +161,7 @@ def get( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response) @@ -203,7 +203,7 @@ def create_or_update( 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) - api_version = "2020-05-01" + api_version = "2021-05-01" content_type = kwargs.pop("content_type", "application/json") accept = "application/json" @@ -235,7 +235,7 @@ def create_or_update( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response) @@ -274,7 +274,7 @@ def delete( 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) - api_version = "2020-05-01" + api_version = "2021-05-01" accept = "application/json" # Construct URL @@ -301,7 +301,7 @@ def delete( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise 
HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: diff --git a/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_private_link_resources_operations.py b/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_private_link_resources_operations.py index c8e683efb25b..8669e2971e98 100644 --- a/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_private_link_resources_operations.py +++ b/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_private_link_resources_operations.py @@ -69,7 +69,7 @@ def list( 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) - api_version = "2020-05-01" + api_version = "2021-05-01" accept = "application/json" # Construct URL @@ -95,7 +95,7 @@ def list( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('PrivateLinkResourceListResult', pipeline_response) @@ -134,7 +134,7 @@ def get( 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) - api_version = "2020-05-01" + api_version = "2021-05-01" accept = "application/json" # Construct URL @@ -161,7 +161,7 @@ def get( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('PrivateLinkResource', pipeline_response) diff --git a/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_streaming_endpoints_operations.py b/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_streaming_endpoints_operations.py index 9b369800a00c..609086c507b9 100644 --- a/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_streaming_endpoints_operations.py +++ b/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_streaming_endpoints_operations.py @@ -114,7 +114,7 @@ def get_next(next_link=None): response = pipeline_response.http_response if response.status_code not in [200]: - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) @@ -180,7 +180,7 @@ def get( if response.status_code not in [200, 404]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = None @@ -242,7 +242,7 @@ def _create_initial( if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, 
model=error, error_format=ARMErrorFormat) if response.status_code == 200: @@ -284,8 +284,8 @@ def begin_create( :type auto_start: bool :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. - :keyword polling: True for ARMPolling, False for no polling, or a - polling object for personal polling strategy + :keyword polling: Pass in True if you'd like the ARMPolling polling method, + False for no polling, or your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.PollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of LROPoller that returns either StreamingEndpoint or the result of cls(response) @@ -387,7 +387,7 @@ def _update_initial( if response.status_code not in [200, 202]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if response.status_code == 200: @@ -425,8 +425,8 @@ def begin_update( :type parameters: ~azure.mgmt.media.models.StreamingEndpoint :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. - :keyword polling: True for ARMPolling, False for no polling, or a - polling object for personal polling strategy + :keyword polling: Pass in True if you'd like the ARMPolling polling method, + False for no polling, or your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.PollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of LROPoller that returns either StreamingEndpoint or the result of cls(response) @@ -521,7 +521,7 @@ def _delete_initial( if response.status_code not in [200, 202, 204]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: @@ -549,8 +549,8 @@ def begin_delete( :type streaming_endpoint_name: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. - :keyword polling: True for ARMPolling, False for no polling, or a - polling object for personal polling strategy + :keyword polling: Pass in True if you'd like the ARMPolling polling method, + False for no polling, or your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.PollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
:return: An instance of LROPoller that returns either None or the result of cls(response) @@ -641,7 +641,7 @@ def _start_initial( if response.status_code not in [200, 202]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: @@ -669,8 +669,8 @@ def begin_start( :type streaming_endpoint_name: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. - :keyword polling: True for ARMPolling, False for no polling, or a - polling object for personal polling strategy + :keyword polling: Pass in True if you'd like the ARMPolling polling method, + False for no polling, or your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.PollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of LROPoller that returns either None or the result of cls(response) @@ -761,7 +761,7 @@ def _stop_initial( if response.status_code not in [200, 202]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: @@ -789,8 +789,8 @@ def begin_stop( :type streaming_endpoint_name: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. - :keyword polling: True for ARMPolling, False for no polling, or a - polling object for personal polling strategy + :keyword polling: Pass in True if you'd like the ARMPolling polling method, + False for no polling, or your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.PollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of LROPoller that returns either None or the result of cls(response) @@ -887,7 +887,7 @@ def _scale_initial( if response.status_code not in [200, 202]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: @@ -918,8 +918,8 @@ def begin_scale( :type parameters: ~azure.mgmt.media.models.StreamingEntityScaleUnit :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. - :keyword polling: True for ARMPolling, False for no polling, or a - polling object for personal polling strategy + :keyword polling: Pass in True if you'd like the ARMPolling polling method, + False for no polling, or your own initialized polling object for a personal polling strategy. 
:paramtype polling: bool or ~azure.core.polling.PollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of LROPoller that returns either None or the result of cls(response) diff --git a/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_streaming_locators_operations.py b/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_streaming_locators_operations.py index 2ebb93a94f9a..bad3f104c951 100644 --- a/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_streaming_locators_operations.py +++ b/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_streaming_locators_operations.py @@ -129,7 +129,7 @@ def get_next(next_link=None): response = pipeline_response.http_response if response.status_code not in [200]: - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) @@ -195,7 +195,7 @@ def get( if response.status_code not in [200, 404]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = None @@ -271,7 +271,7 @@ def create( if response.status_code not in [201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('StreamingLocator', pipeline_response) @@ -337,7 +337,7 @@ def delete( if response.status_code not in [200, 204]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: @@ -400,7 +400,7 @@ def list_content_keys( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('ListContentKeysResponse', pipeline_response) @@ -466,7 +466,7 @@ def list_paths( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('ListPathsResponse', pipeline_response) diff --git a/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_streaming_policies_operations.py b/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_streaming_policies_operations.py index a15787a9b1c7..10553bbe29d3 100644 --- 
a/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_streaming_policies_operations.py +++ b/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_streaming_policies_operations.py @@ -129,7 +129,7 @@ def get_next(next_link=None): response = pipeline_response.http_response if response.status_code not in [200]: - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) @@ -195,7 +195,7 @@ def get( if response.status_code not in [200, 404]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = None @@ -271,7 +271,7 @@ def create( if response.status_code not in [201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('StreamingPolicy', pipeline_response) @@ -337,7 +337,7 @@ def delete( if response.status_code not in [200, 204]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: diff --git a/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_transforms_operations.py b/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_transforms_operations.py index b3cf7cf1a7f9..365c14892355 100644 --- a/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_transforms_operations.py +++ b/sdk/media/azure-mgmt-media/azure/mgmt/media/operations/_transforms_operations.py @@ -122,7 +122,7 @@ def get_next(next_link=None): response = pipeline_response.http_response if response.status_code not in [200]: - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) @@ -188,7 +188,7 @@ def get( if response.status_code not in [200, 404]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = None @@ -264,7 +264,7 @@ def create_or_update( if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if response.status_code == 200: @@ -334,7 +334,7 @@ def delete( if response.status_code not in [200, 204]: 
map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) if cls: @@ -405,7 +405,7 @@ def update( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - error = self._deserialize(_models.ApiError, response) + error = self._deserialize.failsafe_deserialize(_models.ApiError, response) raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat) deserialized = self._deserialize('Transform', pipeline_response)
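
The hunks above move the regenerated operations to api-version 2021-05-01, have the mediaservices PATCH accept a MediaServiceUpdate body, and deserialize error payloads with failsafe_deserialize before raising HttpResponseError. As a rough illustration of how a caller might drive the regenerated client (this sketch is not part of the patch; the subscription, resource group, and account names are placeholders, and it assumes the Media Services account already exists):

    from azure.identity import DefaultAzureCredential
    from azure.core.exceptions import HttpResponseError
    from azure.mgmt.media import AzureMediaServices
    from azure.mgmt.media.models import MediaServiceUpdate

    client = AzureMediaServices(DefaultAzureCredential(), "<subscription-id>")

    try:
        # update() now serializes the body as MediaServiceUpdate rather than the full MediaService
        updated = client.mediaservices.update(
            "my-resource-group",    # placeholder resource group name
            "myamsaccount",         # placeholder Media Services account name
            MediaServiceUpdate(tags={"env": "test"}),
        )
        print(updated.name)
    except HttpResponseError as err:
        # failsafe_deserialize keeps the ApiError model attached to the raised exception
        if err.model and err.model.error:
            print(err.model.error.code)
        else:
            print(err)
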