From 146bc864c8917f62ebe8325a1098d0b179a5069c Mon Sep 17 00:00:00 2001 From: Krista Pratico Date: Mon, 18 May 2020 09:26:51 -0700 Subject: [PATCH 01/28] [formrecognizer] consistency on handling LRO's with failed status (#11445) * samples handle invalid models from training * update to throw exception on training methods that return invalid model * update tests now that we treat invalid status differently * pass response to error --- .../azure-ai-formrecognizer/CHANGELOG.md | 1 + .../formrecognizer/_form_training_client.py | 2 ++ .../azure/ai/formrecognizer/_polling.py | 21 +++++++++++++------ .../aio/_form_training_client_async.py | 2 ++ .../sample_get_bounding_boxes_async.py | 4 +--- .../sample_train_model_with_labels_async.py | 5 ++--- .../samples/sample_get_bounding_boxes.py | 2 -- .../samples/sample_train_model_with_labels.py | 2 -- .../tests/test_training.py | 8 +++---- .../tests/test_training_async.py | 6 +++--- 10 files changed, 30 insertions(+), 23 deletions(-) diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/CHANGELOG.md b/sdk/formrecognizer/azure-ai-formrecognizer/CHANGELOG.md index b0cbb94555ab..49d598ad0cbd 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/CHANGELOG.md +++ b/sdk/formrecognizer/azure-ai-formrecognizer/CHANGELOG.md @@ -9,6 +9,7 @@ - `list_model_infos` method has been renamed to `list_custom_models` - Removed `get_form_training_client` from `FormRecognizerClient` - Added `get_form_recognizer_client` to `FormTrainingClient` +- A `HttpResponseError` is now raised if a model with `status=="invalid"` is returned from the `begin_train_model()` or `train_model()` methods **New features** diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_form_training_client.py b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_form_training_client.py index 638fc6a72c83..ffca7b40c457 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_form_training_client.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_form_training_client.py @@ -101,6 +101,8 @@ def begin_train_model(self, training_files_url, use_training_labels=False, **kwa object to return a :class:`~azure.ai.formrecognizer.CustomFormModel`. :rtype: ~azure.core.polling.LROPoller[~azure.ai.formrecognizer.CustomFormModel] :raises ~azure.core.exceptions.HttpResponseError: + Note that if the training fails, the exception is raised, but a model with an + "invalid" status is still created. You can delete this model by calling :func:`~delete_model()` .. admonition:: Example: diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_polling.py b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_polling.py index 15785d0aae18..07717e108c8e 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_polling.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_polling.py @@ -17,6 +17,12 @@ from azure.core.pipeline import PipelineResponse +def raise_error(response, errors, message): + for err in errors: + message += "({}) {}\n".format(err["code"], err["message"]) + raise HttpResponseError(message=message, response=response) + + class TrainingPolling(LocationPolling): """Polling method overrides for training endpoints. 
""" @@ -40,6 +46,14 @@ def get_status(self, pipeline_response): # pylint: disable=no-self-use status = body['modelInfo']['status'] if not status: raise BadResponse("No status found in body") + if status.lower() == "invalid": + train_result = body.get('trainResult') + if train_result: + errors = train_result.get("errors") + if errors: + message = "Invalid model created with ID={}\n".format(body["modelInfo"]["modelId"]) + raise_error(response, errors, message) + return "Failed" if status.lower() != "creating": return "Succeeded" @@ -50,8 +64,6 @@ def get_status(self, pipeline_response): # pylint: disable=no-self-use class AnalyzePolling(OperationResourcePolling): """Polling method overrides for custom analyze endpoints. - - :param str operation_location_header: Name of the header to return operation format (default 'operation-location') """ def get_status(self, pipeline_response): # pylint: disable=no-self-use @@ -78,8 +90,5 @@ def get_status(self, pipeline_response): # pylint: disable=no-self-use if analyze_result: errors = analyze_result.get("errors") if errors: - message = "" - for err in errors: - message += "({}) {}\n".format(err.get("code"), err.get("message")) - raise HttpResponseError(message) + raise_error(response, errors, message="") return status diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/aio/_form_training_client_async.py b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/aio/_form_training_client_async.py index fb93c382a61f..54ef2982ffdc 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/aio/_form_training_client_async.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/aio/_form_training_client_async.py @@ -109,6 +109,8 @@ async def train_model( :return: CustomFormModel :rtype: ~azure.ai.formrecognizer.CustomFormModel :raises ~azure.core.exceptions.HttpResponseError: + Note that if the training fails, the exception is raised, but a model with an + "invalid" status is still created. You can delete this model by calling :func:`~delete_model()` .. admonition:: Example: diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/async_samples/sample_get_bounding_boxes_async.py b/sdk/formrecognizer/azure-ai-formrecognizer/samples/async_samples/sample_get_bounding_boxes_async.py index e627b169b639..5c7b80d05f1b 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/samples/async_samples/sample_get_bounding_boxes_async.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/async_samples/sample_get_bounding_boxes_async.py @@ -41,16 +41,14 @@ async def get_bounding_boxes(self): # the sample forms are located in this file's parent's parent's files. 
path_to_sample_forms = Path(__file__).parent.parent.absolute() / Path("sample_forms/forms/Form_1.jpg") from azure.ai.formrecognizer import FormWord, FormLine - # [START create_form_recognizer_client_async] from azure.core.credentials import AzureKeyCredential from azure.ai.formrecognizer.aio import FormRecognizerClient form_recognizer_client = FormRecognizerClient( endpoint=self.endpoint, credential=AzureKeyCredential(self.key) ) - # [END create_form_recognizer_client_async] - async with form_recognizer_client: + async with form_recognizer_client: # Make sure your form's type is included in the list of form types the custom model can recognize with open(path_to_sample_forms, "rb") as f: forms = await form_recognizer_client.recognize_custom_forms( diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/async_samples/sample_train_model_with_labels_async.py b/sdk/formrecognizer/azure-ai-formrecognizer/samples/async_samples/sample_train_model_with_labels_async.py index 75bf11aff14c..9205260da05b 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/samples/async_samples/sample_train_model_with_labels_async.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/async_samples/sample_train_model_with_labels_async.py @@ -36,17 +36,16 @@ class TrainModelWithLabelsSampleAsync(object): container_sas_url = os.environ["CONTAINER_SAS_URL"] async def train_model_with_labels(self): - # [START create_form_training_client_async] from azure.ai.formrecognizer.aio import FormTrainingClient from azure.core.credentials import AzureKeyCredential form_training_client = FormTrainingClient( endpoint=self.endpoint, credential=AzureKeyCredential(self.key) ) - # [END create_form_training_client_async] - async with form_training_client: + async with form_training_client: model = await form_training_client.train_model(self.container_sas_url, use_training_labels=True) + # Custom model information print("Model ID: {}".format(model.model_id)) print("Status: {}".format(model.status)) diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/sample_get_bounding_boxes.py b/sdk/formrecognizer/azure-ai-formrecognizer/samples/sample_get_bounding_boxes.py index 070a27aecb45..0924014c7bdc 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/samples/sample_get_bounding_boxes.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/sample_get_bounding_boxes.py @@ -37,13 +37,11 @@ class GetBoundingBoxesSample(object): def get_bounding_boxes(self): from azure.ai.formrecognizer import FormWord, FormLine - # [START create_form_recognizer_client] from azure.core.credentials import AzureKeyCredential from azure.ai.formrecognizer import FormRecognizerClient form_recognizer_client = FormRecognizerClient( endpoint=self.endpoint, credential=AzureKeyCredential(self.key) ) - # [END create_form_recognizer_client] # Make sure your form's type is included in the list of form types the custom model can recognize with open("sample_forms/forms/Form_1.jpg", "rb") as f: diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/sample_train_model_with_labels.py b/sdk/formrecognizer/azure-ai-formrecognizer/samples/sample_train_model_with_labels.py index 3ffd0789c088..d550d8588ccc 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/samples/sample_train_model_with_labels.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/sample_train_model_with_labels.py @@ -35,12 +35,10 @@ class TrainModelWithLabelsSample(object): container_sas_url = os.environ["CONTAINER_SAS_URL"] def train_model_with_labels(self): - # [START 
create_form_training_client] from azure.ai.formrecognizer import FormTrainingClient from azure.core.credentials import AzureKeyCredential form_training_client = FormTrainingClient(self.endpoint, AzureKeyCredential(self.key)) - # [END create_form_training_client] poller = form_training_client.begin_train_model(self.container_sas_url, use_training_labels=True) model = poller.result() diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_training.py b/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_training.py index a68a70513dda..87a273367f53 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_training.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_training.py @@ -6,7 +6,7 @@ import functools from azure.core.credentials import AzureKeyCredential -from azure.core.exceptions import ClientAuthenticationError +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError from azure.ai.formrecognizer._generated.models import Model from azure.ai.formrecognizer._models import CustomFormModel from azure.ai.formrecognizer import FormTrainingClient @@ -209,6 +209,6 @@ def test_training_with_files_filter(self, client, container_sas_url): self.assertEqual(len(model.training_documents), 1) self.assertEqual(model.training_documents[0].document_name, "subfolder/Form_6.jpg") # we filtered for only subfolders - poller = client.begin_train_model(training_files_url=container_sas_url, prefix="xxx") - model = poller.result() - self.assertEqual(model.status, "invalid") # prefix doesn't include any files so training fails + with self.assertRaises(HttpResponseError): + poller = client.begin_train_model(training_files_url=container_sas_url, prefix="xxx") + model = poller.result() diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_training_async.py b/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_training_async.py index 32f3af6b28e7..22c4dc09ee48 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_training_async.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_training_async.py @@ -6,7 +6,7 @@ import functools from azure.core.credentials import AzureKeyCredential -from azure.core.exceptions import ClientAuthenticationError +from azure.core.exceptions import ClientAuthenticationError, HttpResponseError from azure.ai.formrecognizer._generated.models import Model from azure.ai.formrecognizer._models import CustomFormModel from azure.ai.formrecognizer.aio import FormTrainingClient @@ -197,5 +197,5 @@ async def test_training_with_files_filter(self, client, container_sas_url): self.assertEqual(len(model.training_documents), 1) self.assertEqual(model.training_documents[0].document_name, "subfolder/Form_6.jpg") # we filtered for only subfolders - model = await client.train_model(training_files_url=container_sas_url, prefix="xxx") - self.assertEqual(model.status, "invalid") # prefix doesn't include any files so training fails + with self.assertRaises(HttpResponseError): + model = await client.train_model(training_files_url=container_sas_url, prefix="xxx") From 74f4fd3b44c1e39edb61d65debdcd1d7d28b3deb Mon Sep 17 00:00:00 2001 From: Charles Lowell Date: Mon, 18 May 2020 10:08:59 -0700 Subject: [PATCH 02/28] Separate modules for client credential types (#11496) --- .../azure/identity/_credentials/__init__.py | 3 +- .../identity/_credentials/certificate.py | 52 ++++++++++++++++ ...{client_credential.py => client_secret.py} | 44 +------------- .../identity/_credentials/environment.py | 3 +- 
.../identity/aio/_credentials/__init__.py | 3 +- .../{client_credential.py => certificate.py} | 50 +--------------- .../aio/_credentials/client_secret.py | 59 +++++++++++++++++++ .../identity/aio/_credentials/environment.py | 3 +- 8 files changed, 123 insertions(+), 94 deletions(-) create mode 100644 sdk/identity/azure-identity/azure/identity/_credentials/certificate.py rename sdk/identity/azure-identity/azure/identity/_credentials/{client_credential.py => client_secret.py} (52%) rename sdk/identity/azure-identity/azure/identity/aio/_credentials/{client_credential.py => certificate.py} (53%) create mode 100644 sdk/identity/azure-identity/azure/identity/aio/_credentials/client_secret.py diff --git a/sdk/identity/azure-identity/azure/identity/_credentials/__init__.py b/sdk/identity/azure-identity/azure/identity/_credentials/__init__.py index 88c94d65bd60..cd0068cc3cd8 100644 --- a/sdk/identity/azure-identity/azure/identity/_credentials/__init__.py +++ b/sdk/identity/azure-identity/azure/identity/_credentials/__init__.py @@ -4,8 +4,9 @@ # ------------------------------------ from .authorization_code import AuthorizationCodeCredential from .browser import InteractiveBrowserCredential +from .certificate import CertificateCredential from .chained import ChainedTokenCredential -from .client_credential import CertificateCredential, ClientSecretCredential +from .client_secret import ClientSecretCredential from .default import DefaultAzureCredential from .environment import EnvironmentCredential from .managed_identity import ManagedIdentityCredential diff --git a/sdk/identity/azure-identity/azure/identity/_credentials/certificate.py b/sdk/identity/azure-identity/azure/identity/_credentials/certificate.py new file mode 100644 index 000000000000..3ed84cd335a0 --- /dev/null +++ b/sdk/identity/azure-identity/azure/identity/_credentials/certificate.py @@ -0,0 +1,52 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +from typing import TYPE_CHECKING + +from .._authn_client import AuthnClient +from .._base import CertificateCredentialBase + +if TYPE_CHECKING: + from azure.core.credentials import AccessToken + from typing import Any + + +class CertificateCredential(CertificateCredentialBase): + """Authenticates as a service principal using a certificate. + + :param str tenant_id: ID of the service principal's tenant. Also called its 'directory' ID. + :param str client_id: the service principal's client ID + :param str certificate_path: path to a PEM-encoded certificate file including the private key. + + :keyword str authority: Authority of an Azure Active Directory endpoint, for example 'login.microsoftonline.com', + the authority for Azure Public Cloud (which is the default). :class:`~azure.identity.KnownAuthorities` + defines authorities for other clouds. + :keyword password: The certificate's password. If a unicode string, it will be encoded as UTF-8. If the certificate + requires a different encoding, pass appropriately encoded bytes instead. + :paramtype password: str or bytes + """ + + def get_token(self, *scopes, **kwargs): # pylint:disable=unused-argument + # type: (*str, **Any) -> AccessToken + """Request an access token for `scopes`. + + .. note:: This method is called by Azure SDK clients. It isn't intended for use in application code. + + :param str scopes: desired scopes for the access token. This method requires at least one scope. 
+ :rtype: :class:`azure.core.credentials.AccessToken` + :raises ~azure.core.exceptions.ClientAuthenticationError: authentication failed. The error's ``message`` + attribute gives a reason. Any error response from Azure Active Directory is available as the error's + ``response`` attribute. + """ + if not scopes: + raise ValueError("'get_token' requires at least one scope") + + token = self._client.get_cached_token(scopes) + if not token: + data = self._get_request_data(*scopes) + token = self._client.request_token(scopes, form_data=data) + return token + + def _get_auth_client(self, tenant_id, **kwargs): + return AuthnClient(tenant=tenant_id, **kwargs) diff --git a/sdk/identity/azure-identity/azure/identity/_credentials/client_credential.py b/sdk/identity/azure-identity/azure/identity/_credentials/client_secret.py similarity index 52% rename from sdk/identity/azure-identity/azure/identity/_credentials/client_credential.py rename to sdk/identity/azure-identity/azure/identity/_credentials/client_secret.py index 1ee646d80d1c..f8dbd45bce9c 100644 --- a/sdk/identity/azure-identity/azure/identity/_credentials/client_credential.py +++ b/sdk/identity/azure-identity/azure/identity/_credentials/client_secret.py @@ -3,7 +3,7 @@ # Licensed under the MIT License. # ------------------------------------ from .._authn_client import AuthnClient -from .._base import ClientSecretCredentialBase, CertificateCredentialBase +from .._base import ClientSecretCredentialBase try: from typing import TYPE_CHECKING @@ -12,7 +12,7 @@ if TYPE_CHECKING: # pylint:disable=unused-import,ungrouped-imports - from typing import Any, Mapping + from typing import Any from azure.core.credentials import AccessToken @@ -53,43 +53,3 @@ def get_token(self, *scopes, **kwargs): # pylint:disable=unused-argument data = dict(self._form_data, scope=" ".join(scopes)) token = self._client.request_token(scopes, form_data=data) return token - - -class CertificateCredential(CertificateCredentialBase): - """Authenticates as a service principal using a certificate. - - :param str tenant_id: ID of the service principal's tenant. Also called its 'directory' ID. - :param str client_id: the service principal's client ID - :param str certificate_path: path to a PEM-encoded certificate file including the private key. - - :keyword str authority: Authority of an Azure Active Directory endpoint, for example 'login.microsoftonline.com', - the authority for Azure Public Cloud (which is the default). :class:`~azure.identity.KnownAuthorities` - defines authorities for other clouds. - :keyword password: The certificate's password. If a unicode string, it will be encoded as UTF-8. If the certificate - requires a different encoding, pass appropriately encoded bytes instead. - :paramtype password: str or bytes - """ - - def get_token(self, *scopes, **kwargs): # pylint:disable=unused-argument - # type: (*str, **Any) -> AccessToken - """Request an access token for `scopes`. - - .. note:: This method is called by Azure SDK clients. It isn't intended for use in application code. - - :param str scopes: desired scopes for the access token. This method requires at least one scope. - :rtype: :class:`azure.core.credentials.AccessToken` - :raises ~azure.core.exceptions.ClientAuthenticationError: authentication failed. The error's ``message`` - attribute gives a reason. Any error response from Azure Active Directory is available as the error's - ``response`` attribute. 
- """ - if not scopes: - raise ValueError("'get_token' requires at least one scope") - - token = self._client.get_cached_token(scopes) - if not token: - data = self._get_request_data(*scopes) - token = self._client.request_token(scopes, form_data=data) - return token - - def _get_auth_client(self, tenant_id, **kwargs): - return AuthnClient(tenant=tenant_id, **kwargs) diff --git a/sdk/identity/azure-identity/azure/identity/_credentials/environment.py b/sdk/identity/azure-identity/azure/identity/_credentials/environment.py index 3cea7422907f..dc37abca83c1 100644 --- a/sdk/identity/azure-identity/azure/identity/_credentials/environment.py +++ b/sdk/identity/azure-identity/azure/identity/_credentials/environment.py @@ -6,7 +6,8 @@ from .. import CredentialUnavailableError from .._constants import EnvironmentVariables -from .client_credential import CertificateCredential, ClientSecretCredential +from .certificate import CertificateCredential +from .client_secret import ClientSecretCredential from .user_password import UsernamePasswordCredential diff --git a/sdk/identity/azure-identity/azure/identity/aio/_credentials/__init__.py b/sdk/identity/azure-identity/azure/identity/aio/_credentials/__init__.py index 5b06446c4775..cf63acbd84d6 100644 --- a/sdk/identity/azure-identity/azure/identity/aio/_credentials/__init__.py +++ b/sdk/identity/azure-identity/azure/identity/aio/_credentials/__init__.py @@ -7,7 +7,8 @@ from .default import DefaultAzureCredential from .environment import EnvironmentCredential from .managed_identity import ManagedIdentityCredential -from .client_credential import CertificateCredential, ClientSecretCredential +from .certificate import CertificateCredential +from .client_secret import ClientSecretCredential from .shared_cache import SharedTokenCacheCredential from .azure_cli import AzureCliCredential diff --git a/sdk/identity/azure-identity/azure/identity/aio/_credentials/client_credential.py b/sdk/identity/azure-identity/azure/identity/aio/_credentials/certificate.py similarity index 53% rename from sdk/identity/azure-identity/azure/identity/aio/_credentials/client_credential.py rename to sdk/identity/azure-identity/azure/identity/aio/_credentials/certificate.py index 8801347f3c31..300728f221f1 100644 --- a/sdk/identity/azure-identity/azure/identity/aio/_credentials/client_credential.py +++ b/sdk/identity/azure-identity/azure/identity/aio/_credentials/certificate.py @@ -6,59 +6,13 @@ from .base import AsyncCredentialBase from .._authn_client import AsyncAuthnClient -from ..._base import ClientSecretCredentialBase, CertificateCredentialBase +from ..._base import CertificateCredentialBase if TYPE_CHECKING: - from typing import Any, Mapping + from typing import Any from azure.core.credentials import AccessToken -class ClientSecretCredential(ClientSecretCredentialBase, AsyncCredentialBase): - """Authenticates as a service principal using a client ID and client secret. - - :param str tenant_id: ID of the service principal's tenant. Also called its 'directory' ID. - :param str client_id: the service principal's client ID - :param str client_secret: one of the service principal's client secrets - - :keyword str authority: Authority of an Azure Active Directory endpoint, for example 'login.microsoftonline.com', - the authority for Azure Public Cloud (which is the default). :class:`~azure.identity.KnownAuthorities` - defines authorities for other clouds. 
- """ - - def __init__(self, tenant_id: str, client_id: str, client_secret: str, **kwargs: "Any") -> None: - super(ClientSecretCredential, self).__init__(tenant_id, client_id, client_secret, **kwargs) - self._client = AsyncAuthnClient(tenant=tenant_id, **kwargs) - - async def __aenter__(self): - await self._client.__aenter__() - return self - - async def close(self): - """Close the credential's transport session.""" - - await self._client.__aexit__() - - async def get_token(self, *scopes: str, **kwargs: "Any") -> "AccessToken": # pylint:disable=unused-argument - """Asynchronously request an access token for `scopes`. - - .. note:: This method is called by Azure SDK clients. It isn't intended for use in application code. - - :param str scopes: desired scopes for the access token. This method requires at least one scope. - :rtype: :class:`azure.core.credentials.AccessToken` - :raises ~azure.core.exceptions.ClientAuthenticationError: authentication failed. The error's ``message`` - attribute gives a reason. Any error response from Azure Active Directory is available as the error's - ``response`` attribute. - """ - if not scopes: - raise ValueError("'get_token' requires at least one scope") - - token = self._client.get_cached_token(scopes) - if not token: - data = dict(self._form_data, scope=" ".join(scopes)) - token = await self._client.request_token(scopes, form_data=data) - return token # type: ignore - - class CertificateCredential(CertificateCredentialBase, AsyncCredentialBase): """Authenticates as a service principal using a certificate. diff --git a/sdk/identity/azure-identity/azure/identity/aio/_credentials/client_secret.py b/sdk/identity/azure-identity/azure/identity/aio/_credentials/client_secret.py new file mode 100644 index 000000000000..60a51bb037e5 --- /dev/null +++ b/sdk/identity/azure-identity/azure/identity/aio/_credentials/client_secret.py @@ -0,0 +1,59 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ +from typing import TYPE_CHECKING + +from .base import AsyncCredentialBase +from .._authn_client import AsyncAuthnClient +from ..._base import ClientSecretCredentialBase + +if TYPE_CHECKING: + from typing import Any + from azure.core.credentials import AccessToken + + +class ClientSecretCredential(ClientSecretCredentialBase, AsyncCredentialBase): + """Authenticates as a service principal using a client ID and client secret. + + :param str tenant_id: ID of the service principal's tenant. Also called its 'directory' ID. + :param str client_id: the service principal's client ID + :param str client_secret: one of the service principal's client secrets + + :keyword str authority: Authority of an Azure Active Directory endpoint, for example 'login.microsoftonline.com', + the authority for Azure Public Cloud (which is the default). :class:`~azure.identity.KnownAuthorities` + defines authorities for other clouds. 
+ """ + + def __init__(self, tenant_id: str, client_id: str, client_secret: str, **kwargs: "Any") -> None: + super(ClientSecretCredential, self).__init__(tenant_id, client_id, client_secret, **kwargs) + self._client = AsyncAuthnClient(tenant=tenant_id, **kwargs) + + async def __aenter__(self): + await self._client.__aenter__() + return self + + async def close(self): + """Close the credential's transport session.""" + + await self._client.__aexit__() + + async def get_token(self, *scopes: str, **kwargs: "Any") -> "AccessToken": # pylint:disable=unused-argument + """Asynchronously request an access token for `scopes`. + + .. note:: This method is called by Azure SDK clients. It isn't intended for use in application code. + + :param str scopes: desired scopes for the access token. This method requires at least one scope. + :rtype: :class:`azure.core.credentials.AccessToken` + :raises ~azure.core.exceptions.ClientAuthenticationError: authentication failed. The error's ``message`` + attribute gives a reason. Any error response from Azure Active Directory is available as the error's + ``response`` attribute. + """ + if not scopes: + raise ValueError("'get_token' requires at least one scope") + + token = self._client.get_cached_token(scopes) + if not token: + data = dict(self._form_data, scope=" ".join(scopes)) + token = await self._client.request_token(scopes, form_data=data) + return token # type: ignore diff --git a/sdk/identity/azure-identity/azure/identity/aio/_credentials/environment.py b/sdk/identity/azure-identity/azure/identity/aio/_credentials/environment.py index 84c3a7e45d63..7e1197d702c2 100644 --- a/sdk/identity/azure-identity/azure/identity/aio/_credentials/environment.py +++ b/sdk/identity/azure-identity/azure/identity/aio/_credentials/environment.py @@ -7,7 +7,8 @@ from ... 
import CredentialUnavailableError from ..._constants import EnvironmentVariables -from .client_credential import CertificateCredential, ClientSecretCredential +from .certificate import CertificateCredential +from .client_secret import ClientSecretCredential from .base import AsyncCredentialBase if TYPE_CHECKING: From 99668db644fe606c24cc7cb553135b6614c10ffd Mon Sep 17 00:00:00 2001 From: "openapi-sdkautomation[bot]" <37845953+openapi-sdkautomation[bot]@users.noreply.github.com> Date: Mon, 18 May 2020 11:00:59 -0700 Subject: [PATCH 03/28] [ReleasePR azure-cognitiveservices-vision-computervision] [Cognitive Service Computer Vision] Create CV v3.0 API version swagger (#11464) * Generated from 6c2e36f271e8bd30f4ec2ba3c79890bd441feed2 Run Prettier script on new examples * ChangeLog * Udpate Readme Co-authored-by: SDK Automation Co-authored-by: Laurent Mazuel --- .../CHANGELOG.md | 23 ++ .../README.md | 23 +- .../computervision/_computer_vision_client.py | 2 +- .../vision/computervision/_configuration.py | 2 +- .../vision/computervision/models/__init__.py | 22 +- .../models/_computer_vision_client_enums.py | 27 +- .../vision/computervision/models/_models.py | 199 ++++++++------ .../computervision/models/_models_py3.py | 207 +++++++++------ .../_computer_vision_client_operations.py | 246 ++++-------------- .../vision/computervision/version.py | 2 +- .../sdk_packaging.toml | 1 + .../setup.py | 5 +- 12 files changed, 349 insertions(+), 410 deletions(-) diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-computervision/CHANGELOG.md b/sdk/cognitiveservices/azure-cognitiveservices-vision-computervision/CHANGELOG.md index 45c9901dbbd4..830deb5428e4 100644 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-computervision/CHANGELOG.md +++ b/sdk/cognitiveservices/azure-cognitiveservices-vision-computervision/CHANGELOG.md @@ -1,5 +1,28 @@ # Release History +## 0.6.0 (2020-05-18) + +**Features** + + - Model Line has a new parameter language + - Added operation ComputerVisionClientOperationsMixin.read + - Added operation ComputerVisionClientOperationsMixin.get_read_result + - Added operation ComputerVisionClientOperationsMixin.read_in_stream + +**Breaking changes** + + - Parameter words of model Line is now required + - Parameter bounding_box of model Line is now required + - Parameter text of model Line is now required + - Parameter confidence of model Word is now required + - Removed operation ComputerVisionClientOperationsMixin.get_text_operation_result + - Removed operation ComputerVisionClientOperationsMixin.get_read_operation_result + - Removed operation ComputerVisionClientOperationsMixin.recognize_text_in_stream + - Removed operation ComputerVisionClientOperationsMixin.recognize_text + - Removed operation ComputerVisionClientOperationsMixin.batch_read_file + - Removed operation ComputerVisionClientOperationsMixin.batch_read_file_in_stream + - Model ReadOperationResult has a new signature + ## 0.5.0 (2019-10-01) **Features** diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-computervision/README.md b/sdk/cognitiveservices/azure-cognitiveservices-vision-computervision/README.md index 0956f6eb55b0..8d793d5cf878 100644 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-computervision/README.md +++ b/sdk/cognitiveservices/azure-cognitiveservices-vision-computervision/README.md @@ -183,21 +183,18 @@ for caption in analysis.captions: ### Get text from image -You can get any handwritten or printed text from an image. 
This requires two calls to the SDK: [`recognize_text`][ref_computervisionclient_recognize_text] and [`get_text_operation_result`][ref_computervisionclient_get_text_operation_result]. The call to recognize_text is asynchronous. In the results of the get_text_operation_result call, you need to check if the first call completed with [`TextOperationStatusCodes`][ref_computervision_model_textoperationstatuscodes] before extracting the text data. The results include the text as well as the bounding box coordinates for the text. +You can get any handwritten or printed text from an image. This requires two calls to the SDK: [`read`][ref_computervisionclient_read] and [`get_read_result`][ref_computervisionclient_get_read_result]. The call to read is asynchronous. In the results of the get_read_result call, you need to check if the first call completed with [`OperationStatusCodes`][ref_computervision_model_operationstatuscodes] before extracting the text data. The results include the text as well as the bounding box coordinates for the text. ```Python # import models -from azure.cognitiveservices.vision.computervision.models import TextRecognitionMode -from azure.cognitiveservices.vision.computervision.models import TextOperationStatusCodes +from azure.cognitiveservices.vision.computervision.models import OperationStatusCodes url = "https://github.com/Azure-Samples/cognitive-services-python-sdk-samples/raw/master/samples/vision/images/make_things_happen.jpg" -mode = TextRecognitionMode.printed raw = True -custom_headers = None numberOfCharsInOperationId = 36 # SDK call -rawHttpResponse = client.recognize_text(url, mode, custom_headers, raw) +rawHttpResponse = client.read(url, language="en", raw=True) # Get ID from returned headers operationLocation = rawHttpResponse.headers["Operation-Location"] @@ -205,12 +202,12 @@ idLocation = len(operationLocation) - numberOfCharsInOperationId operationId = operationLocation[idLocation:] # SDK call -result = client.get_text_operation_result(operationId) +result = client.get_read_result(operationId) # Get data -if result.status == TextOperationStatusCodes.succeeded: +if result.status == OperationStatusCodes.succeeded: - for line in result.recognition_result.lines: + for line in result.analyze_result.read_results[0].lines: print(line.text) print(line.bounding_box) ``` @@ -275,7 +272,7 @@ While working with the [ComputerVisionClient][ref_computervisionclient] client, Several Computer Vision Python SDK samples are available to you in the SDK's GitHub repository. 
These samples provide example code for additional scenarios commonly encountered while working with Computer Vision: -* [recognize_text][recognize-text] +* [See sample repo][recognize-text] ### Additional documentation @@ -317,14 +314,14 @@ For more extensive documentation on the Computer Vision service, see the [Azure [ref_computervisionclient_list_models]:https://docs.microsoft.com/python/api/azure-cognitiveservices-vision-computervision/azure.cognitiveservices.vision.computervision.computervisionclient?view=azure-python#list-models-custom-headers-none--raw-false----operation-config- [ref_computervisionclient_analyze_image_by_domain]:https://docs.microsoft.com/python/api/azure-cognitiveservices-vision-computervision/azure.cognitiveservices.vision.computervision.computervisionclient?view=azure-python#analyze-image-by-domain-model--url--language--en---custom-headers-none--raw-false----operation-config- [ref_computervisionclient_describe_image]:https://docs.microsoft.com/python/api/azure-cognitiveservices-vision-computervision/azure.cognitiveservices.vision.computervision.computervisionclient?view=azure-python#describe-image-url--max-candidates--1---language--en---custom-headers-none--raw-false----operation-config- -[ref_computervisionclient_recognize_text]:https://docs.microsoft.com/python/api/azure-cognitiveservices-vision-computervision/azure.cognitiveservices.vision.computervision.computervisionclient?view=azure-python#recognize-text-url--mode--custom-headers-none--raw-false----operation-config- -[ref_computervisionclient_get_text_operation_result]:https://docs.microsoft.com/python/api/azure-cognitiveservices-vision-computervision/azure.cognitiveservices.vision.computervision.computervisionclient?view=azure-python#get-text-operation-result-operation-id--custom-headers-none--raw-false----operation-config- +[ref_computervisionclient_read]:https://docs.microsoft.com/python/api/azure-cognitiveservices-vision-computervision/azure.cognitiveservices.vision.computervision.computervisionclient?view=azure-python#read-url--mode--custom-headers-none--raw-false----operation-config- +[ref_computervisionclient_get_read_result]:https://docs.microsoft.com/python/api/azure-cognitiveservices-vision-computervision/azure.cognitiveservices.vision.computervision.computervisionclient?view=azure-python#get-read-result-operation-id--custom-headers-none--raw-false----operation-config- [ref_computervisionclient_generate_thumbnail]:https://docs.microsoft.com/python/api/azure-cognitiveservices-vision-computervision/azure.cognitiveservices.vision.computervision.computervisionclient?view=azure-python#generate-thumbnail-width--height--url--smart-cropping-false--custom-headers-none--raw-false--callback-none----operation-config- [ref_computervision_model_visualfeatures]:https://docs.microsoft.com/python/api/azure-cognitiveservices-vision-computervision/azure.cognitiveservices.vision.computervision.models.visualfeaturetypes?view=azure-python -[ref_computervision_model_textoperationstatuscodes]:https://docs.microsoft.com/python/api/azure-cognitiveservices-vision-computervision/azure.cognitiveservices.vision.computervision.models.textoperationstatuscodes?view=azure-python +[ref_computervision_model_operationstatuscodes]:https://docs.microsoft.com/python/api/azure-cognitiveservices-vision-computervision/azure.cognitiveservices.vision.computervision.models.operationstatuscodes?view=azure-python [computervision_request_units]:https://azure.microsoft.com/pricing/details/cognitive-services/computer-vision/ diff --git 
a/sdk/cognitiveservices/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/_computer_vision_client.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/_computer_vision_client.py index 66db7e8147cd..e2d380c69638 100644 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/_computer_vision_client.py +++ b/sdk/cognitiveservices/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/_computer_vision_client.py @@ -38,7 +38,7 @@ def __init__( super(ComputerVisionClient, self).__init__(self.config.credentials, self.config) client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} - self.api_version = '2.1' + self.api_version = '3.0' self._serialize = Serializer(client_models) self._deserialize = Deserializer(client_models) diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/_configuration.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/_configuration.py index 4231ac56a0a4..7a5c5c553bc9 100644 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/_configuration.py +++ b/sdk/cognitiveservices/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/_configuration.py @@ -33,7 +33,7 @@ def __init__( raise ValueError("Parameter 'endpoint' must not be None.") if credentials is None: raise ValueError("Parameter 'credentials' must not be None.") - base_url = '{Endpoint}/vision/v2.1' + base_url = '{Endpoint}/vision/v3.0' super(ComputerVisionClientConfiguration, self).__init__(base_url) diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/__init__.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/__init__.py index 98057e314d77..5633061bc93b 100644 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/__init__.py +++ b/sdk/cognitiveservices/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/__init__.py @@ -11,6 +11,7 @@ try: from ._models_py3 import AdultInfo + from ._models_py3 import AnalyzeResults from ._models_py3 import AreaOfInterestResult from ._models_py3 import BoundingRect from ._models_py3 import Category @@ -44,12 +45,12 @@ from ._models_py3 import OcrResult from ._models_py3 import OcrWord from ._models_py3 import ReadOperationResult + from ._models_py3 import ReadResult from ._models_py3 import TagResult - from ._models_py3 import TextOperationResult - from ._models_py3 import TextRecognitionResult from ._models_py3 import Word except (SyntaxError, ImportError): from ._models import AdultInfo + from ._models import AnalyzeResults from ._models import AreaOfInterestResult from ._models import BoundingRect from ._models import Category @@ -83,24 +84,23 @@ from ._models import OcrResult from ._models import OcrWord from ._models import ReadOperationResult + from ._models import ReadResult from ._models import TagResult - from ._models import TextOperationResult - from ._models import TextRecognitionResult from ._models import Word from 
._computer_vision_client_enums import ( DescriptionExclude, Details, Gender, + OcrDetectionLanguage, OcrLanguages, - TextOperationStatusCodes, - TextRecognitionMode, - TextRecognitionResultConfidenceClass, + OperationStatusCodes, TextRecognitionResultDimensionUnit, VisualFeatureTypes, ) __all__ = [ 'AdultInfo', + 'AnalyzeResults', 'AreaOfInterestResult', 'BoundingRect', 'Category', @@ -134,17 +134,15 @@ 'OcrResult', 'OcrWord', 'ReadOperationResult', + 'ReadResult', 'TagResult', - 'TextOperationResult', - 'TextRecognitionResult', 'Word', 'Gender', - 'TextOperationStatusCodes', + 'OperationStatusCodes', 'TextRecognitionResultDimensionUnit', - 'TextRecognitionResultConfidenceClass', 'DescriptionExclude', 'OcrLanguages', 'VisualFeatureTypes', - 'TextRecognitionMode', + 'OcrDetectionLanguage', 'Details', ] diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/_computer_vision_client_enums.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/_computer_vision_client_enums.py index 2c319134cff0..d8cb9260286e 100644 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/_computer_vision_client_enums.py +++ b/sdk/cognitiveservices/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/_computer_vision_client_enums.py @@ -18,12 +18,12 @@ class Gender(str, Enum): female = "Female" -class TextOperationStatusCodes(str, Enum): +class OperationStatusCodes(str, Enum): - not_started = "NotStarted" - running = "Running" - failed = "Failed" - succeeded = "Succeeded" + not_started = "notStarted" + running = "running" + failed = "failed" + succeeded = "succeeded" class TextRecognitionResultDimensionUnit(str, Enum): @@ -32,12 +32,6 @@ class TextRecognitionResultDimensionUnit(str, Enum): inch = "inch" -class TextRecognitionResultConfidenceClass(str, Enum): - - high = "High" - low = "Low" - - class DescriptionExclude(str, Enum): celebrities = "Celebrities" @@ -88,10 +82,15 @@ class VisualFeatureTypes(str, Enum): brands = "Brands" -class TextRecognitionMode(str, Enum): +class OcrDetectionLanguage(str, Enum): - handwritten = "Handwritten" - printed = "Printed" + en = "en" + es = "es" + fr = "fr" + de = "de" + it = "it" + nl = "nl" + pt = "pt" class Details(str, Enum): diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/_models.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/_models.py index 1954cb533892..f0ef37445759 100644 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/_models.py +++ b/sdk/cognitiveservices/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/_models.py @@ -53,6 +53,34 @@ def __init__(self, **kwargs): self.gore_score = kwargs.get('gore_score', None) +class AnalyzeResults(Model): + """Analyze batch operation result. + + All required parameters must be populated in order to send to Azure. + + :param version: Required. Version of schema used for this result. + :type version: str + :param read_results: Required. Text extracted from the input. 
+ :type read_results: + list[~azure.cognitiveservices.vision.computervision.models.ReadResult] + """ + + _validation = { + 'version': {'required': True}, + 'read_results': {'required': True}, + } + + _attribute_map = { + 'version': {'key': 'version', 'type': 'str'}, + 'read_results': {'key': 'readResults', 'type': '[ReadResult]'}, + } + + def __init__(self, **kwargs): + super(AnalyzeResults, self).__init__(**kwargs) + self.version = kwargs.get('version', None) + self.read_results = kwargs.get('read_results', None) + + class AreaOfInterestResult(Model): """Result of AreaOfInterest operation. @@ -767,16 +795,28 @@ def __init__(self, **kwargs): class Line(Model): """An object representing a recognized text line. - :param bounding_box: Bounding box of a recognized line. + All required parameters must be populated in order to send to Azure. + + :param language: The BCP-47 language code of the recognized text line. + Only provided where the language of the line differs from the page's. + :type language: str + :param bounding_box: Required. Bounding box of a recognized line. :type bounding_box: list[float] - :param text: The text content of the line. + :param text: Required. The text content of the line. :type text: str - :param words: List of words in the text line. + :param words: Required. List of words in the text line. :type words: list[~azure.cognitiveservices.vision.computervision.models.Word] """ + _validation = { + 'bounding_box': {'required': True}, + 'text': {'required': True}, + 'words': {'required': True}, + } + _attribute_map = { + 'language': {'key': 'language', 'type': 'str'}, 'bounding_box': {'key': 'boundingBox', 'type': '[float]'}, 'text': {'key': 'text', 'type': 'str'}, 'words': {'key': 'words', 'type': '[Word]'}, @@ -784,6 +824,7 @@ class Line(Model): def __init__(self, **kwargs): super(Line, self).__init__(**kwargs) + self.language = kwargs.get('language', None) self.bounding_box = kwargs.get('bounding_box', None) self.text = kwargs.get('text', None) self.words = kwargs.get('words', None) @@ -989,93 +1030,56 @@ class ReadOperationResult(Model): """OCR result of the read operation. :param status: Status of the read operation. Possible values include: - 'NotStarted', 'Running', 'Failed', 'Succeeded' + 'notStarted', 'running', 'failed', 'succeeded' :type status: str or - ~azure.cognitiveservices.vision.computervision.models.TextOperationStatusCodes - :param recognition_results: An array of text recognition result of the - read operation. - :type recognition_results: - list[~azure.cognitiveservices.vision.computervision.models.TextRecognitionResult] + ~azure.cognitiveservices.vision.computervision.models.OperationStatusCodes + :param created_date_time: Get UTC date time the batch operation was + submitted. + :type created_date_time: str + :param last_updated_date_time: Get last updated UTC date time of this + batch operation. + :type last_updated_date_time: str + :param analyze_result: Analyze batch operation result. 
+ :type analyze_result: + ~azure.cognitiveservices.vision.computervision.models.AnalyzeResults """ _attribute_map = { - 'status': {'key': 'status', 'type': 'TextOperationStatusCodes'}, - 'recognition_results': {'key': 'recognitionResults', 'type': '[TextRecognitionResult]'}, + 'status': {'key': 'status', 'type': 'OperationStatusCodes'}, + 'created_date_time': {'key': 'createdDateTime', 'type': 'str'}, + 'last_updated_date_time': {'key': 'lastUpdatedDateTime', 'type': 'str'}, + 'analyze_result': {'key': 'analyzeResult', 'type': 'AnalyzeResults'}, } def __init__(self, **kwargs): super(ReadOperationResult, self).__init__(**kwargs) self.status = kwargs.get('status', None) - self.recognition_results = kwargs.get('recognition_results', None) - - -class TagResult(Model): - """The results of a image tag operation, including any tags and image - metadata. - - :param tags: A list of tags with confidence level. - :type tags: - list[~azure.cognitiveservices.vision.computervision.models.ImageTag] - :param request_id: Id of the REST API request. - :type request_id: str - :param metadata: - :type metadata: - ~azure.cognitiveservices.vision.computervision.models.ImageMetadata - """ - - _attribute_map = { - 'tags': {'key': 'tags', 'type': '[ImageTag]'}, - 'request_id': {'key': 'requestId', 'type': 'str'}, - 'metadata': {'key': 'metadata', 'type': 'ImageMetadata'}, - } - - def __init__(self, **kwargs): - super(TagResult, self).__init__(**kwargs) - self.tags = kwargs.get('tags', None) - self.request_id = kwargs.get('request_id', None) - self.metadata = kwargs.get('metadata', None) - - -class TextOperationResult(Model): - """Result of recognition text operation. - - :param status: Status of the text operation. Possible values include: - 'NotStarted', 'Running', 'Failed', 'Succeeded' - :type status: str or - ~azure.cognitiveservices.vision.computervision.models.TextOperationStatusCodes - :param recognition_result: Text recognition result of the text operation. - :type recognition_result: - ~azure.cognitiveservices.vision.computervision.models.TextRecognitionResult - """ - - _attribute_map = { - 'status': {'key': 'status', 'type': 'TextOperationStatusCodes'}, - 'recognition_result': {'key': 'recognitionResult', 'type': 'TextRecognitionResult'}, - } - - def __init__(self, **kwargs): - super(TextOperationResult, self).__init__(**kwargs) - self.status = kwargs.get('status', None) - self.recognition_result = kwargs.get('recognition_result', None) + self.created_date_time = kwargs.get('created_date_time', None) + self.last_updated_date_time = kwargs.get('last_updated_date_time', None) + self.analyze_result = kwargs.get('analyze_result', None) -class TextRecognitionResult(Model): - """An object representing a recognized text region. +class ReadResult(Model): + """Text extracted from a page in the input document. All required parameters must be populated in order to send to Azure. - :param page: The 1-based page number of the recognition result. + :param page: Required. The 1-based page number of the recognition result. :type page: int - :param clockwise_orientation: The orientation of the image in degrees in - the clockwise direction. Range between [0, 360). - :type clockwise_orientation: float - :param width: The width of the image in pixels or the PDF in inches. + :param language: The BCP-47 language code of the recognized text page. + :type language: str + :param angle: Required. The orientation of the image in degrees in the + clockwise direction. Range between [-180, 180). 
+ :type angle: float + :param width: Required. The width of the image in pixels or the PDF in + inches. :type width: float - :param height: The height of the image in pixels or the PDF in inches. + :param height: Required. The height of the image in pixels or the PDF in + inches. :type height: float - :param unit: The unit used in the Width, Height and BoundingBox. For - images, the unit is 'pixel'. For PDF, the unit is 'inch'. Possible values - include: 'pixel', 'inch' + :param unit: Required. The unit used in the Width, Height and BoundingBox. + For images, the unit is 'pixel'. For PDF, the unit is 'inch'. Possible + values include: 'pixel', 'inch' :type unit: str or ~azure.cognitiveservices.vision.computervision.models.TextRecognitionResultDimensionUnit :param lines: Required. A list of recognized text lines. @@ -1084,12 +1088,18 @@ class TextRecognitionResult(Model): """ _validation = { + 'page': {'required': True}, + 'angle': {'required': True}, + 'width': {'required': True}, + 'height': {'required': True}, + 'unit': {'required': True}, 'lines': {'required': True}, } _attribute_map = { 'page': {'key': 'page', 'type': 'int'}, - 'clockwise_orientation': {'key': 'clockwiseOrientation', 'type': 'float'}, + 'language': {'key': 'language', 'type': 'str'}, + 'angle': {'key': 'angle', 'type': 'float'}, 'width': {'key': 'width', 'type': 'float'}, 'height': {'key': 'height', 'type': 'float'}, 'unit': {'key': 'unit', 'type': 'TextRecognitionResultDimensionUnit'}, @@ -1097,15 +1107,43 @@ class TextRecognitionResult(Model): } def __init__(self, **kwargs): - super(TextRecognitionResult, self).__init__(**kwargs) + super(ReadResult, self).__init__(**kwargs) self.page = kwargs.get('page', None) - self.clockwise_orientation = kwargs.get('clockwise_orientation', None) + self.language = kwargs.get('language', None) + self.angle = kwargs.get('angle', None) self.width = kwargs.get('width', None) self.height = kwargs.get('height', None) self.unit = kwargs.get('unit', None) self.lines = kwargs.get('lines', None) +class TagResult(Model): + """The results of a image tag operation, including any tags and image + metadata. + + :param tags: A list of tags with confidence level. + :type tags: + list[~azure.cognitiveservices.vision.computervision.models.ImageTag] + :param request_id: Id of the REST API request. + :type request_id: str + :param metadata: + :type metadata: + ~azure.cognitiveservices.vision.computervision.models.ImageMetadata + """ + + _attribute_map = { + 'tags': {'key': 'tags', 'type': '[ImageTag]'}, + 'request_id': {'key': 'requestId', 'type': 'str'}, + 'metadata': {'key': 'metadata', 'type': 'ImageMetadata'}, + } + + def __init__(self, **kwargs): + super(TagResult, self).__init__(**kwargs) + self.tags = kwargs.get('tags', None) + self.request_id = kwargs.get('request_id', None) + self.metadata = kwargs.get('metadata', None) + + class Word(Model): """An object representing a recognized word. @@ -1115,21 +1153,20 @@ class Word(Model): :type bounding_box: list[float] :param text: Required. The text content of the word. :type text: str - :param confidence: Qualitative confidence measure. Possible values - include: 'High', 'Low' - :type confidence: str or - ~azure.cognitiveservices.vision.computervision.models.TextRecognitionResultConfidenceClass + :param confidence: Required. Qualitative confidence measure. 
+ :type confidence: float """ _validation = { 'bounding_box': {'required': True}, 'text': {'required': True}, + 'confidence': {'required': True}, } _attribute_map = { 'bounding_box': {'key': 'boundingBox', 'type': '[float]'}, 'text': {'key': 'text', 'type': 'str'}, - 'confidence': {'key': 'confidence', 'type': 'TextRecognitionResultConfidenceClass'}, + 'confidence': {'key': 'confidence', 'type': 'float'}, } def __init__(self, **kwargs): diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/_models_py3.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/_models_py3.py index 0df15fb9e295..09951eeef9a3 100644 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/_models_py3.py +++ b/sdk/cognitiveservices/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/models/_models_py3.py @@ -53,6 +53,34 @@ def __init__(self, *, is_adult_content: bool=None, is_racy_content: bool=None, i self.gore_score = gore_score +class AnalyzeResults(Model): + """Analyze batch operation result. + + All required parameters must be populated in order to send to Azure. + + :param version: Required. Version of schema used for this result. + :type version: str + :param read_results: Required. Text extracted from the input. + :type read_results: + list[~azure.cognitiveservices.vision.computervision.models.ReadResult] + """ + + _validation = { + 'version': {'required': True}, + 'read_results': {'required': True}, + } + + _attribute_map = { + 'version': {'key': 'version', 'type': 'str'}, + 'read_results': {'key': 'readResults', 'type': '[ReadResult]'}, + } + + def __init__(self, *, version: str, read_results, **kwargs) -> None: + super(AnalyzeResults, self).__init__(**kwargs) + self.version = version + self.read_results = read_results + + class AreaOfInterestResult(Model): """Result of AreaOfInterest operation. @@ -767,23 +795,36 @@ def __init__(self, *, name: str=None, confidence: float=None, **kwargs) -> None: class Line(Model): """An object representing a recognized text line. - :param bounding_box: Bounding box of a recognized line. + All required parameters must be populated in order to send to Azure. + + :param language: The BCP-47 language code of the recognized text line. + Only provided where the language of the line differs from the page's. + :type language: str + :param bounding_box: Required. Bounding box of a recognized line. :type bounding_box: list[float] - :param text: The text content of the line. + :param text: Required. The text content of the line. :type text: str - :param words: List of words in the text line. + :param words: Required. List of words in the text line. 
:type words: list[~azure.cognitiveservices.vision.computervision.models.Word] """ + _validation = { + 'bounding_box': {'required': True}, + 'text': {'required': True}, + 'words': {'required': True}, + } + _attribute_map = { + 'language': {'key': 'language', 'type': 'str'}, 'bounding_box': {'key': 'boundingBox', 'type': '[float]'}, 'text': {'key': 'text', 'type': 'str'}, 'words': {'key': 'words', 'type': '[Word]'}, } - def __init__(self, *, bounding_box=None, text: str=None, words=None, **kwargs) -> None: + def __init__(self, *, bounding_box, text: str, words, language: str=None, **kwargs) -> None: super(Line, self).__init__(**kwargs) + self.language = language self.bounding_box = bounding_box self.text = text self.words = words @@ -989,93 +1030,56 @@ class ReadOperationResult(Model): """OCR result of the read operation. :param status: Status of the read operation. Possible values include: - 'NotStarted', 'Running', 'Failed', 'Succeeded' + 'notStarted', 'running', 'failed', 'succeeded' :type status: str or - ~azure.cognitiveservices.vision.computervision.models.TextOperationStatusCodes - :param recognition_results: An array of text recognition result of the - read operation. - :type recognition_results: - list[~azure.cognitiveservices.vision.computervision.models.TextRecognitionResult] + ~azure.cognitiveservices.vision.computervision.models.OperationStatusCodes + :param created_date_time: Get UTC date time the batch operation was + submitted. + :type created_date_time: str + :param last_updated_date_time: Get last updated UTC date time of this + batch operation. + :type last_updated_date_time: str + :param analyze_result: Analyze batch operation result. + :type analyze_result: + ~azure.cognitiveservices.vision.computervision.models.AnalyzeResults """ _attribute_map = { - 'status': {'key': 'status', 'type': 'TextOperationStatusCodes'}, - 'recognition_results': {'key': 'recognitionResults', 'type': '[TextRecognitionResult]'}, + 'status': {'key': 'status', 'type': 'OperationStatusCodes'}, + 'created_date_time': {'key': 'createdDateTime', 'type': 'str'}, + 'last_updated_date_time': {'key': 'lastUpdatedDateTime', 'type': 'str'}, + 'analyze_result': {'key': 'analyzeResult', 'type': 'AnalyzeResults'}, } - def __init__(self, *, status=None, recognition_results=None, **kwargs) -> None: + def __init__(self, *, status=None, created_date_time: str=None, last_updated_date_time: str=None, analyze_result=None, **kwargs) -> None: super(ReadOperationResult, self).__init__(**kwargs) self.status = status - self.recognition_results = recognition_results - - -class TagResult(Model): - """The results of a image tag operation, including any tags and image - metadata. - - :param tags: A list of tags with confidence level. - :type tags: - list[~azure.cognitiveservices.vision.computervision.models.ImageTag] - :param request_id: Id of the REST API request. - :type request_id: str - :param metadata: - :type metadata: - ~azure.cognitiveservices.vision.computervision.models.ImageMetadata - """ - - _attribute_map = { - 'tags': {'key': 'tags', 'type': '[ImageTag]'}, - 'request_id': {'key': 'requestId', 'type': 'str'}, - 'metadata': {'key': 'metadata', 'type': 'ImageMetadata'}, - } - - def __init__(self, *, tags=None, request_id: str=None, metadata=None, **kwargs) -> None: - super(TagResult, self).__init__(**kwargs) - self.tags = tags - self.request_id = request_id - self.metadata = metadata - - -class TextOperationResult(Model): - """Result of recognition text operation. - - :param status: Status of the text operation. 
Possible values include: - 'NotStarted', 'Running', 'Failed', 'Succeeded' - :type status: str or - ~azure.cognitiveservices.vision.computervision.models.TextOperationStatusCodes - :param recognition_result: Text recognition result of the text operation. - :type recognition_result: - ~azure.cognitiveservices.vision.computervision.models.TextRecognitionResult - """ - - _attribute_map = { - 'status': {'key': 'status', 'type': 'TextOperationStatusCodes'}, - 'recognition_result': {'key': 'recognitionResult', 'type': 'TextRecognitionResult'}, - } - - def __init__(self, *, status=None, recognition_result=None, **kwargs) -> None: - super(TextOperationResult, self).__init__(**kwargs) - self.status = status - self.recognition_result = recognition_result + self.created_date_time = created_date_time + self.last_updated_date_time = last_updated_date_time + self.analyze_result = analyze_result -class TextRecognitionResult(Model): - """An object representing a recognized text region. +class ReadResult(Model): + """Text extracted from a page in the input document. All required parameters must be populated in order to send to Azure. - :param page: The 1-based page number of the recognition result. + :param page: Required. The 1-based page number of the recognition result. :type page: int - :param clockwise_orientation: The orientation of the image in degrees in - the clockwise direction. Range between [0, 360). - :type clockwise_orientation: float - :param width: The width of the image in pixels or the PDF in inches. + :param language: The BCP-47 language code of the recognized text page. + :type language: str + :param angle: Required. The orientation of the image in degrees in the + clockwise direction. Range between [-180, 180). + :type angle: float + :param width: Required. The width of the image in pixels or the PDF in + inches. :type width: float - :param height: The height of the image in pixels or the PDF in inches. + :param height: Required. The height of the image in pixels or the PDF in + inches. :type height: float - :param unit: The unit used in the Width, Height and BoundingBox. For - images, the unit is 'pixel'. For PDF, the unit is 'inch'. Possible values - include: 'pixel', 'inch' + :param unit: Required. The unit used in the Width, Height and BoundingBox. + For images, the unit is 'pixel'. For PDF, the unit is 'inch'. Possible + values include: 'pixel', 'inch' :type unit: str or ~azure.cognitiveservices.vision.computervision.models.TextRecognitionResultDimensionUnit :param lines: Required. A list of recognized text lines. 
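The hunks above and below replace the old text-operation models with the Read pipeline: `ReadOperationResult` now carries an `OperationStatusCodes` status plus an `AnalyzeResults`, whose `read_results` list holds one `ReadResult` per page, each with its `lines` and `words`. A minimal sketch of driving these models with the regenerated 0.6.0 client follows; the endpoint, key, image URL, and the parsing of the operation id out of the `Operation-Location` header are illustrative assumptions, not part of this patch.

# Minimal sketch: submit an image to the Read API and walk the result models.
# Assumes azure-cognitiveservices-vision-computervision 0.6.0 as regenerated in
# this patch; ENDPOINT, KEY, and IMAGE_URL are placeholders.
import time

from azure.cognitiveservices.vision.computervision import ComputerVisionClient
from msrest.authentication import CognitiveServicesCredentials

ENDPOINT = "https://<resource-name>.cognitiveservices.azure.com/"
KEY = "<subscription-key>"
IMAGE_URL = "https://<host>/document.pdf"

client = ComputerVisionClient(ENDPOINT, CognitiveServicesCredentials(KEY))

# read() answers 202 with an Operation-Location header; raw=True exposes it.
raw = client.read(IMAGE_URL, language="en", raw=True)
operation_location = raw.headers["Operation-Location"]
operation_id = operation_location.split("/")[-1]  # assumed id extraction

# Poll get_read_result() until the ReadOperationResult leaves the running states.
result = client.get_read_result(operation_id)
while result.status.lower() in ("notstarted", "running"):
    time.sleep(1)
    result = client.get_read_result(operation_id)

if result.status.lower() == "succeeded":
    # AnalyzeResults.read_results holds one ReadResult per page.
    for page in result.analyze_result.read_results:
        print("page {} ({} x {} {})".format(page.page, page.width, page.height, page.unit))
        for line in page.lines:
            print(line.text)

Because `read()` returns nothing but the 202 response, `raw=True` is the only way shown in this generated client to reach the `Operation-Location` header that `get_read_result()` needs.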
@@ -1084,28 +1088,62 @@ class TextRecognitionResult(Model):
     """

     _validation = {
+        'page': {'required': True},
+        'angle': {'required': True},
+        'width': {'required': True},
+        'height': {'required': True},
+        'unit': {'required': True},
         'lines': {'required': True},
     }

     _attribute_map = {
         'page': {'key': 'page', 'type': 'int'},
-        'clockwise_orientation': {'key': 'clockwiseOrientation', 'type': 'float'},
+        'language': {'key': 'language', 'type': 'str'},
+        'angle': {'key': 'angle', 'type': 'float'},
         'width': {'key': 'width', 'type': 'float'},
         'height': {'key': 'height', 'type': 'float'},
         'unit': {'key': 'unit', 'type': 'TextRecognitionResultDimensionUnit'},
         'lines': {'key': 'lines', 'type': '[Line]'},
     }

-    def __init__(self, *, lines, page: int=None, clockwise_orientation: float=None, width: float=None, height: float=None, unit=None, **kwargs) -> None:
-        super(TextRecognitionResult, self).__init__(**kwargs)
+    def __init__(self, *, page: int, angle: float, width: float, height: float, unit, lines, language: str=None, **kwargs) -> None:
+        super(ReadResult, self).__init__(**kwargs)
         self.page = page
-        self.clockwise_orientation = clockwise_orientation
+        self.language = language
+        self.angle = angle
         self.width = width
         self.height = height
         self.unit = unit
         self.lines = lines


+class TagResult(Model):
+    """The results of an image tag operation, including any tags and image
+    metadata.
+
+    :param tags: A list of tags with confidence level.
+    :type tags:
+     list[~azure.cognitiveservices.vision.computervision.models.ImageTag]
+    :param request_id: Id of the REST API request.
+    :type request_id: str
+    :param metadata:
+    :type metadata:
+     ~azure.cognitiveservices.vision.computervision.models.ImageMetadata
+    """
+
+    _attribute_map = {
+        'tags': {'key': 'tags', 'type': '[ImageTag]'},
+        'request_id': {'key': 'requestId', 'type': 'str'},
+        'metadata': {'key': 'metadata', 'type': 'ImageMetadata'},
+    }
+
+    def __init__(self, *, tags=None, request_id: str=None, metadata=None, **kwargs) -> None:
+        super(TagResult, self).__init__(**kwargs)
+        self.tags = tags
+        self.request_id = request_id
+        self.metadata = metadata
+
+
 class Word(Model):
     """An object representing a recognized word.

@@ -1115,24 +1153,23 @@ class Word(Model):
     :type bounding_box: list[float]
     :param text: Required. The text content of the word.
     :type text: str
-    :param confidence: Qualitative confidence measure. Possible values
-     include: 'High', 'Low'
-    :type confidence: str or
-     ~azure.cognitiveservices.vision.computervision.models.TextRecognitionResultConfidenceClass
+    :param confidence: Required. Qualitative confidence measure.
+ :type confidence: float """ _validation = { 'bounding_box': {'required': True}, 'text': {'required': True}, + 'confidence': {'required': True}, } _attribute_map = { 'bounding_box': {'key': 'boundingBox', 'type': '[float]'}, 'text': {'key': 'text', 'type': 'str'}, - 'confidence': {'key': 'confidence', 'type': 'TextRecognitionResultConfidenceClass'}, + 'confidence': {'key': 'confidence', 'type': 'float'}, } - def __init__(self, *, bounding_box, text: str, confidence=None, **kwargs) -> None: + def __init__(self, *, bounding_box, text: str, confidence: float, **kwargs) -> None: super(Word, self).__init__(**kwargs) self.bounding_box = bounding_box self.text = text diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/operations/_computer_vision_client_operations.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/operations/_computer_vision_client_operations.py index 3690d7aa9499..a2417588988b 100644 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/operations/_computer_vision_client_operations.py +++ b/sdk/cognitiveservices/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/operations/_computer_vision_client_operations.py @@ -715,133 +715,27 @@ def get_area_of_interest( return deserialized get_area_of_interest.metadata = {'url': '/areaOfInterest'} - def recognize_text( - self, url, mode, custom_headers=None, raw=False, **operation_config): - """Recognize Text operation. When you use the Recognize Text interface, - the response contains a field called 'Operation-Location'. The - 'Operation-Location' field contains the URL that you must use for your - Get Recognize Text Operation Result operation. - - :param mode: Type of text to recognize. Possible values include: - 'Handwritten', 'Printed' - :type mode: str or - ~azure.cognitiveservices.vision.computervision.models.TextRecognitionMode - :param url: Publicly reachable URL of an image. - :type url: str - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`ComputerVisionErrorException` - """ - image_url = models.ImageUrl(url=url) - - # Construct URL - url = self.recognize_text.metadata['url'] - path_format_arguments = { - 'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['mode'] = self._serialize.query("mode", mode, 'TextRecognitionMode') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct body - body_content = self._serialize.body(image_url, 'ImageUrl') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [202]: - raise models.ComputerVisionErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - client_raw_response.add_headers({ - 'Operation-Location': 'str', - }) - return client_raw_response - recognize_text.metadata = {'url': '/recognizeText'} - - def get_text_operation_result( - self, operation_id, custom_headers=None, raw=False, **operation_config): - """This interface is used for getting text operation result. The URL to - this interface should be retrieved from 'Operation-Location' field - returned from Recognize Text interface. - - :param operation_id: Id of the text operation returned in the response - of the 'Recognize Text' - :type operation_id: str - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: TextOperationResult or ClientRawResponse if raw=true - :rtype: - ~azure.cognitiveservices.vision.computervision.models.TextOperationResult - or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`ComputerVisionErrorException` - """ - # Construct URL - url = self.get_text_operation_result.metadata['url'] - path_format_arguments = { - 'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True), - 'operationId': self._serialize.url("operation_id", operation_id, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.ComputerVisionErrorException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('TextOperationResult', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get_text_operation_result.metadata = {'url': '/textOperations/{operationId}'} - - def batch_read_file( - self, url, custom_headers=None, raw=False, **operation_config): + def read( + self, url, language="en", custom_headers=None, raw=False, **operation_config): """Use this interface to get the result of a Read operation, employing the state-of-the-art Optical Character Recognition (OCR) algorithms - optimized for text-heavy documents. When you use the Read File - interface, the response contains a field called 'Operation-Location'. - The 'Operation-Location' field contains the URL that you must use for - your 'GetReadOperationResult' operation to access OCR results.​. + optimized for text-heavy documents. When you use the Read interface, + the response contains a field called 'Operation-Location'. The + 'Operation-Location' field contains the URL that you must use for your + 'GetReadResult' operation to access OCR results.​. :param url: Publicly reachable URL of an image. :type url: str + :param language: The BCP-47 language code of the text to be detected + in the image. In future versions, when language parameter is not + passed, language detection will be used to determine the language. + However, in the current version, missing language parameter will cause + English to be used. To ensure that your document is always parsed in + English without the use of language detection in the future, pass “en” + in the language parameter. 
Possible values include: 'en', 'es', 'fr', + 'de', 'it', 'nl', 'pt' + :type language: str or + ~azure.cognitiveservices.vision.computervision.models.OcrDetectionLanguage :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response @@ -855,7 +749,7 @@ def batch_read_file( image_url = models.ImageUrl(url=url) # Construct URL - url = self.batch_read_file.metadata['url'] + url = self.read.metadata['url'] path_format_arguments = { 'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True) } @@ -863,6 +757,8 @@ def batch_read_file( # Construct parameters query_parameters = {} + if language is not None: + query_parameters['language'] = self._serialize.query("language", language, 'str') # Construct headers header_parameters = {} @@ -886,16 +782,16 @@ def batch_read_file( 'Operation-Location': 'str', }) return client_raw_response - batch_read_file.metadata = {'url': '/read/core/asyncBatchAnalyze'} + read.metadata = {'url': '/read/analyze'} - def get_read_operation_result( + def get_read_result( self, operation_id, custom_headers=None, raw=False, **operation_config): """This interface is used for getting OCR results of Read operation. The URL to this interface should be retrieved from 'Operation-Location' - field returned from Batch Read File interface. + field returned from Read interface. :param operation_id: Id of read operation returned in the response of - the 'Batch Read File' interface. + the 'Read' interface. :type operation_id: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -910,7 +806,7 @@ def get_read_operation_result( :class:`ComputerVisionErrorException` """ # Construct URL - url = self.get_read_operation_result.metadata['url'] + url = self.get_read_result.metadata['url'] path_format_arguments = { 'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True), 'operationId': self._serialize.url("operation_id", operation_id, 'str') @@ -942,7 +838,7 @@ def get_read_operation_result( return client_raw_response return deserialized - get_read_operation_result.metadata = {'url': '/read/operations/{operationId}'} + get_read_result.metadata = {'url': '/read/analyzeResults/{operationId}'} def analyze_image_in_stream( self, image, visual_features=None, details=None, language="en", description_exclude=None, custom_headers=None, raw=False, callback=None, **operation_config): @@ -1606,81 +1502,27 @@ def tag_image_in_stream( return deserialized tag_image_in_stream.metadata = {'url': '/tag'} - def recognize_text_in_stream( - self, image, mode, custom_headers=None, raw=False, callback=None, **operation_config): - """Recognize Text operation. When you use the Recognize Text interface, + def read_in_stream( + self, image, language="en", custom_headers=None, raw=False, callback=None, **operation_config): + """Use this interface to get the result of a Read operation, employing the + state-of-the-art Optical Character Recognition (OCR) algorithms + optimized for text-heavy documents. When you use the Read interface, the response contains a field called 'Operation-Location'. The 'Operation-Location' field contains the URL that you must use for your - Get Recognize Text Operation Result operation. - - :param image: An image stream. - :type image: Generator - :param mode: Type of text to recognize. 
Possible values include: - 'Handwritten', 'Printed' - :type mode: str or - ~azure.cognitiveservices.vision.computervision.models.TextRecognitionMode - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param callback: When specified, will be called with each chunk of - data that is streamed. The callback should take two arguments, the - bytes of the current chunk of data and the response object. If the - data is uploading, response will be None. - :type callback: Callable[Bytes, response=None] - :param operation_config: :ref:`Operation configuration - overrides`. - :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`ComputerVisionErrorException` - """ - # Construct URL - url = self.recognize_text_in_stream.metadata['url'] - path_format_arguments = { - 'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['mode'] = self._serialize.query("mode", mode, 'TextRecognitionMode') - - # Construct headers - header_parameters = {} - header_parameters['Content-Type'] = 'application/octet-stream' - if custom_headers: - header_parameters.update(custom_headers) - - # Construct body - body_content = self._client.stream_upload(image, callback) - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [202]: - raise models.ComputerVisionErrorException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - client_raw_response.add_headers({ - 'Operation-Location': 'str', - }) - return client_raw_response - recognize_text_in_stream.metadata = {'url': '/recognizeText'} - - def batch_read_file_in_stream( - self, image, custom_headers=None, raw=False, callback=None, **operation_config): - """Use this interface to get the result of a Read Document operation, - employing the state-of-the-art Optical Character Recognition (OCR) - algorithms optimized for text-heavy documents. When you use the Read - Document interface, the response contains a field called - 'Operation-Location'. The 'Operation-Location' field contains the URL - that you must use for your 'Get Read Result operation' to access OCR - results.​. + 'GetReadResult' operation to access OCR results.​. :param image: An image stream. :type image: Generator + :param language: The BCP-47 language code of the text to be detected + in the image. In future versions, when language parameter is not + passed, language detection will be used to determine the language. + However, in the current version, missing language parameter will cause + English to be used. To ensure that your document is always parsed in + English without the use of language detection in the future, pass “en” + in the language parameter. 
Possible values include: 'en', 'es', 'fr', + 'de', 'it', 'nl', 'pt' + :type language: str or + ~azure.cognitiveservices.vision.computervision.models.OcrDetectionLanguage :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response @@ -1697,7 +1539,7 @@ def batch_read_file_in_stream( :class:`ComputerVisionErrorException` """ # Construct URL - url = self.batch_read_file_in_stream.metadata['url'] + url = self.read_in_stream.metadata['url'] path_format_arguments = { 'Endpoint': self._serialize.url("self.config.endpoint", self.config.endpoint, 'str', skip_quote=True) } @@ -1705,6 +1547,8 @@ def batch_read_file_in_stream( # Construct parameters query_parameters = {} + if language is not None: + query_parameters['language'] = self._serialize.query("language", language, 'str') # Construct headers header_parameters = {} @@ -1728,4 +1572,4 @@ def batch_read_file_in_stream( 'Operation-Location': 'str', }) return client_raw_response - batch_read_file_in_stream.metadata = {'url': '/read/core/asyncBatchAnalyze'} + read_in_stream.metadata = {'url': '/read/analyze'} diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/version.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/version.py index 266f5a486d79..5a7feab42d26 100644 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/version.py +++ b/sdk/cognitiveservices/azure-cognitiveservices-vision-computervision/azure/cognitiveservices/vision/computervision/version.py @@ -9,5 +9,5 @@ # regenerated. # -------------------------------------------------------------------------- -VERSION = "0.5.0" +VERSION = "0.6.0" diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-computervision/sdk_packaging.toml b/sdk/cognitiveservices/azure-cognitiveservices-vision-computervision/sdk_packaging.toml index 164ed3e891e0..287064235678 100644 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-computervision/sdk_packaging.toml +++ b/sdk/cognitiveservices/azure-cognitiveservices-vision-computervision/sdk_packaging.toml @@ -5,3 +5,4 @@ package_doc_id = "cognitive-services" is_stable = false is_arm = false need_msrestazure = false +auto_update = false \ No newline at end of file diff --git a/sdk/cognitiveservices/azure-cognitiveservices-vision-computervision/setup.py b/sdk/cognitiveservices/azure-cognitiveservices-vision-computervision/setup.py index e60acd4d7a62..485791c6943d 100644 --- a/sdk/cognitiveservices/azure-cognitiveservices-vision-computervision/setup.py +++ b/sdk/cognitiveservices/azure-cognitiveservices-vision-computervision/setup.py @@ -36,7 +36,9 @@ pass # Version extraction inspired from 'requests' -with open(os.path.join(package_folder_path, 'version.py'), 'r') as fd: +with open(os.path.join(package_folder_path, 'version.py') + if os.path.exists(os.path.join(package_folder_path, 'version.py')) + else os.path.join(package_folder_path, '_version.py'), 'r') as fd: version = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]', fd.read(), re.MULTILINE).group(1) @@ -67,6 +69,7 @@ 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', 'License :: OSI Approved :: MIT License', ], zip_safe=False, From 2f2ed373b82680c54d1a00a0d554af0ab42e94d7 Mon Sep 17 
00:00:00 2001 From: Daniel Jurek Date: Mon, 18 May 2020 12:07:05 -0700 Subject: [PATCH 04/28] update parameters to use SubscriptionConfiguration (#11425) --- common/smoketest/smoke-test.yml | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/common/smoketest/smoke-test.yml b/common/smoketest/smoke-test.yml index 0df4c50d35f6..ad7c8ebe6a4c 100644 --- a/common/smoketest/smoke-test.yml +++ b/common/smoketest/smoke-test.yml @@ -9,58 +9,58 @@ jobs: PythonVersion: '2.7' InstallAsyncRequirements: false OSVmImage: ubuntu-18.04 - CloudType: AzureCloud + SubscriptionConfiguration: $(sub-config-azure-cloud-test-resources) ArmTemplateParameters: $(azureCloudArmParameters) Python_37_Linux (AzureCloud): PythonVersion: '3.7' OSVmImage: ubuntu-18.04 - CloudType: AzureCloud + SubscriptionConfiguration: $(sub-config-azure-cloud-test-resources) ArmTemplateParameters: $(azureCloudArmParameters) Python_38_Linux (AzureCloud): PythonVersion: '3.8' OSVmImage: ubuntu-18.04 - CloudType: AzureCloud + SubscriptionConfiguration: $(sub-config-azure-cloud-test-resources) ArmTemplateParameters: $(azureCloudArmParameters) Python_37_Windows (AzureCloud): PythonVersion: '3.7' OSVmImage: windows-2019 - CloudType: AzureCloud + SubscriptionConfiguration: $(sub-config-azure-cloud-test-resources) ArmTemplateParameters: $(azureCloudArmParameters) Python_38_Windows (AzureCloud): PythonVersion: '3.8' OSVmImage: windows-2019 - CloudType: AzureCloud + SubscriptionConfiguration: $(sub-config-azure-cloud-test-resources) ArmTemplateParameters: $(azureCloudArmParameters) Python_37_Mac (AzureCloud): PythonVersion: '3.7' OSVmImage: macOS-10.15 - CloudType: AzureCloud + SubscriptionConfiguration: $(sub-config-azure-cloud-test-resources) ArmTemplateParameters: $(azureCloudArmParameters) Python_38_Mac (AzureCloud): PythonVersion: '3.8' OSVmImage: macOS-10.15 - CloudType: AzureCloud + SubscriptionConfiguration: $(sub-config-azure-cloud-test-resources) ArmTemplateParameters: $(azureCloudArmParameters) Python_38_Linux (AzureUSGovernment): PythonVersion: '3.8' OSVmImage: ubuntu-18.04 - CloudType: AzureUSGovernment + SubscriptionConfiguration: $(sub-config-gov-test-resources) ArmTemplateParameters: $(azureUSGovernmentArmParameters) Python_37_Windows (AzureUSGovernment): PythonVersion: '3.7' OSVmImage: windows-2019 - CloudType: AzureUSGovernment + SubscriptionConfiguration: $(sub-config-gov-test-resources) ArmTemplateParameters: $(azureUSGovernmentArmParameters) Python_38_Linux (AzureChinaCloud): PythonVersion: '3.8' OSVmImage: ubuntu-18.04 - CloudType: AzureChinaCloud + SubscriptionConfiguration: $(sub-config-cn-test-resources) Location: 'chinanorth' ArmTemplateParameters: $(azureChinaCloudArmParameters) Python_37_Windows (AzureChinaCloud): PythonVersion: '3.7' OSVmImage: windows-2019 - CloudType: AzureChinaCloud + SubscriptionConfiguration: $(sub-config-cn-test-resources) Location: 'chinanorth' ArmTemplateParameters: $(azureChinaCloudArmParameters) @@ -103,7 +103,7 @@ jobs: - template: /eng/common/TestResources/deploy-test-resources.yml parameters: ServiceDirectory: '$(Build.SourcesDirectory)/common/smoketest/' - CloudType: $(CloudType) + SubscriptionConfiguration: $(SubscriptionConfiguration) Location: $(Location) ArmTemplateParameters: $(ArmTemplateParameters) @@ -113,3 +113,5 @@ jobs: - template: /eng/common/TestResources/remove-test-resources.yml parameters: ServiceDirectory: '$(Build.SourcesDirectory)/common/smoketest/' + SubscriptionConfiguration: $(SubscriptionConfiguration) + From 
6e12533eb012eb65e24bbe9702bdb8829a804bf5 Mon Sep 17 00:00:00 2001 From: Scott Beddall <45376673+scbedd@users.noreply.github.com> Date: Mon, 18 May 2020 12:21:09 -0700 Subject: [PATCH 05/28] update artifactname to ensure that wheel get's picked up properly (#11502) --- sdk/eventhub/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/eventhub/ci.yml b/sdk/eventhub/ci.yml index 245947591855..e6ecca083d8b 100644 --- a/sdk/eventhub/ci.yml +++ b/sdk/eventhub/ci.yml @@ -51,5 +51,5 @@ stages: safeName: azureeventhubcheckpointstoreblobaio - name: azure_eventhub_checkpointstoreblob safeName: azureeventhubcheckpointstoreblob - - name: azure-mgmt-eventhub + - name: azure_mgmt_eventhub safeName: azuremgmteventhub From 00779346c3b910dd419207d8918148ec43623f2b Mon Sep 17 00:00:00 2001 From: annatisch Date: Mon, 18 May 2020 12:51:11 -0700 Subject: [PATCH 06/28] [Cosmos] GA release prep (#11468) * Version bump * Update classifier * Update readme URLs * Update samples readme * One more URL * Changelog feedback --- sdk/cosmos/azure-cosmos/CHANGELOG.md | 5 ++-- sdk/cosmos/azure-cosmos/README.md | 30 ++++++++----------- .../azure-cosmos/azure/cosmos/_version.py | 2 +- sdk/cosmos/azure-cosmos/samples/README.md | 4 +-- .../access_cosmos_with_resource_token.py | 2 +- sdk/cosmos/azure-cosmos/setup.py | 2 +- 6 files changed, 21 insertions(+), 24 deletions(-) diff --git a/sdk/cosmos/azure-cosmos/CHANGELOG.md b/sdk/cosmos/azure-cosmos/CHANGELOG.md index 7b5f48d50fd3..b64254da1684 100644 --- a/sdk/cosmos/azure-cosmos/CHANGELOG.md +++ b/sdk/cosmos/azure-cosmos/CHANGELOG.md @@ -1,7 +1,8 @@ # Release History -## 4.0.0 (unreleased) +## 4.0.0 (2020-05-18) +- Stable release. - Added HttpLoggingPolicy to pipeline to enable passing in a custom logger for request and response headers. @@ -99,7 +100,7 @@ Version 4.0.0b1 is the first preview of our efforts to create a user-friendly an - `Container`: This client handles operations for a particular container. This includes querying and inserting items and managing properties. - `User`: This client handles operations for a particular user. This includes adding and deleting permissions and managing user properties. - These clients can be accessed by navigating down the client hierarchy using the `get__client` method. For full details on the new API, please see the [reference documentation](http://azure.github.io/azure-sdk-for-python/ref/azure.cosmos.html). + These clients can be accessed by navigating down the client hierarchy using the `get__client` method. For full details on the new API, please see the [reference documentation](https://aka.ms/azsdk-python-cosmos-ref). - Clients are accessed by name rather than by Id. No need to concatenate strings to create links. - No more need to import types and methods from individual modules. The public API surface area is available directly in the `azure.cosmos` package. - Individual request properties can be provided as keyword arguments rather than constructing a separate `RequestOptions` instance. diff --git a/sdk/cosmos/azure-cosmos/README.md b/sdk/cosmos/azure-cosmos/README.md index 816eb3736556..064191219d8b 100644 --- a/sdk/cosmos/azure-cosmos/README.md +++ b/sdk/cosmos/azure-cosmos/README.md @@ -9,10 +9,8 @@ Use the Azure Cosmos DB SQL API SDK for Python to manage databases and the JSON * Create, read, update, and delete the **items** (JSON documents) in your containers * Query the documents in your database using **SQL-like syntax** -Looking for source code or API reference? 
+[SDK source code][source_code] | [Package (PyPI)][cosmos_pypi] | [API reference documentation][ref_cosmos_sdk] | [Product documentation][cosmos_docs] | [Samples][cosmos_samples] -* [SDK source code][source_code] -* [SDK reference documentation][ref_cosmos_sdk] ## Getting started @@ -30,7 +28,7 @@ az cosmosdb create --resource-group --name Date: Mon, 18 May 2020 13:22:49 -0700 Subject: [PATCH 07/28] [formrecognizer] update docs to specify encoded url input (#11471) * update docs to specify encoded url input * fix receipt * add back missing types --- .../formrecognizer/_form_recognizer_client.py | 19 ++++++++++--------- .../aio/_form_recognizer_client_async.py | 19 ++++++++++--------- .../sample_recognize_content_async.py | 2 +- .../samples/sample_recognize_content.py | 2 +- 4 files changed, 22 insertions(+), 20 deletions(-) diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_form_recognizer_client.py b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_form_recognizer_client.py index 61dfa284cd07..60364b21a328 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_form_recognizer_client.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_form_recognizer_client.py @@ -83,7 +83,7 @@ def begin_recognize_receipts(self, stream, **kwargs): The input document must be of one of the supported content types - 'application/pdf', 'image/jpeg', 'image/png' or 'image/tiff'. - :param stream: .pdf, .jpg, .png or .tiff type file stream. + :param stream: JPEG, PNG, PDF and TIFF type file stream or bytes. Currently only supports US sales receipts. :type stream: bytes or IO[bytes] :keyword bool include_text_content: @@ -134,8 +134,9 @@ def begin_recognize_receipts_from_url(self, url, **kwargs): """Extract field text and semantic values from a given US sales receipt. The input document must be the location (Url) of the receipt to be analyzed. - :param url: The url of the receipt. Currently only supports US sales receipts. - :type url: str + :param str url: The url of the receipt to analyze. The input must be a valid, encoded url + of one of the supported formats: JPEG, PNG, PDF and TIFF. Currently only supports + US sales receipts. :keyword bool include_text_content: Whether or not to include text elements such as lines and words in addition to form fields. :keyword int polling_interval: Waiting time between two polls for LRO operations @@ -178,7 +179,7 @@ def begin_recognize_content(self, stream, **kwargs): The input document must be of one of the supported content types - 'application/pdf', 'image/jpeg', 'image/png' or 'image/tiff'. - :param stream: .pdf, .jpg, .png or .tiff type file stream. + :param stream: JPEG, PNG, PDF and TIFF type file stream or bytes. :type stream: bytes or IO[bytes] :keyword str content_type: Media type of the body sent to the API. Content-type is auto-detected, but can be overridden by passing this keyword argument. For options, @@ -223,8 +224,8 @@ def begin_recognize_content_from_url(self, url, **kwargs): """Extract text and layout information from a given document. The input document must be the location (Url) of the document to be analyzed. - :param url: The url of the document. - :type url: str + :param str url: The url of the form to analyze. The input must be a valid, encoded url + of one of the supported formats: JPEG, PNG, PDF and TIFF. :keyword int polling_interval: Waiting time between two polls for LRO operations if no Retry-After header is present. Defaults to 5 seconds. 
:return: An instance of an LROPoller. Call `result()` on the poller @@ -252,7 +253,7 @@ def begin_recognize_custom_forms(self, model_id, stream, **kwargs): 'image/jpeg', 'image/png' or 'image/tiff'. :param str model_id: Custom model identifier. - :param stream: .pdf, .jpg, .png or .tiff type file stream. + :param stream: JPEG, PNG, PDF and TIFF type file stream or bytes. :type stream: bytes or IO[bytes] :keyword bool include_text_content: Whether or not to include text elements such as lines and words in addition to form fields. @@ -310,8 +311,8 @@ def begin_recognize_custom_forms_from_url(self, model_id, url, **kwargs): The input document must be the location (Url) of the document to be analyzed. :param str model_id: Custom model identifier. - :param url: The url of the document. - :type url: str + :param str url: The url of the form to analyze. The input must be a valid, encoded url + of one of the supported formats: JPEG, PNG, PDF and TIFF. :keyword bool include_text_content: Whether or not to include text elements such as lines and words in addition to form fields. :keyword int polling_interval: Waiting time between two polls for LRO operations diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/aio/_form_recognizer_client_async.py b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/aio/_form_recognizer_client_async.py index 81d996a05e33..f802dd2c982a 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/aio/_form_recognizer_client_async.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/aio/_form_recognizer_client_async.py @@ -96,7 +96,7 @@ async def recognize_receipts( The input document must be of one of the supported content types - 'application/pdf', 'image/jpeg', 'image/png' or 'image/tiff'. - :param stream: .pdf, .jpg, .png or .tiff type file stream. + :param stream: JPEG, PNG, PDF and TIFF type file stream or bytes. Currently only supports US sales receipts. :type stream: bytes or IO[bytes] :keyword bool include_text_content: @@ -149,8 +149,9 @@ async def recognize_receipts_from_url( """Extract field text and semantic values from a given US sales receipt. The input document must be the location (Url) of the receipt to be analyzed. - :param url: The url of the receipt. Currently only supports US sales receipts. - :type url: str + :param str url: The url of the receipt to analyze. The input must be a valid, encoded url + of one of the supported formats: JPEG, PNG, PDF and TIFF. Currently only supports + US sales receipts. :keyword bool include_text_content: Whether or not to include text elements such as lines and words in addition to form fields. :keyword int polling_interval: Waiting time between two polls for LRO operations @@ -191,7 +192,7 @@ async def recognize_content(self, stream: Union[bytes, IO[bytes]], **kwargs: Any The input document must be of one of the supported content types - 'application/pdf', 'image/jpeg', 'image/png' or 'image/tiff'. - :param stream: .pdf, .jpg, .png or .tiff type file stream. + :param stream: JPEG, PNG, PDF and TIFF type file stream or bytes. :type stream: bytes or IO[bytes] :keyword str content_type: Media type of the body sent to the API. Content-type is auto-detected, but can be overridden by passing this keyword argument. For options, @@ -234,8 +235,8 @@ async def recognize_content_from_url(self, url: str, **kwargs: Any) -> List["For """Extract text and layout information from a given document. 
The input document must be the location (Url) of the document to be analyzed. - :param url: The url of the document. - :type url: str + :param str url: The url of the form to analyze. The input must be a valid, encoded url + of one of the supported formats: JPEG, PNG, PDF and TIFF. :keyword int polling_interval: Waiting time between two polls for LRO operations if no Retry-After header is present. Defaults to 5 seconds. :return: A list of FormPage. @@ -265,7 +266,7 @@ async def recognize_custom_forms( 'image/jpeg', 'image/png' or 'image/tiff'. :param str model_id: Custom model identifier. - :param stream: .pdf, .jpg, .png or .tiff type file stream. + :param stream: JPEG, PNG, PDF and TIFF type file stream or bytes. :type stream: bytes or IO[bytes] :keyword bool include_text_content: Whether or not to include text elements such as lines and words in addition to form fields. @@ -327,8 +328,8 @@ async def recognize_custom_forms_from_url( The input document must be the location (Url) of the document to be analyzed. :param str model_id: Custom model identifier. - :param url: The url of the document. - :type url: str + :param str url: The url of the form to analyze. The input must be a valid, encoded url + of one of the supported formats: JPEG, PNG, PDF and TIFF. :keyword bool include_text_content: Whether or not to include text elements such as lines and words in addition to form fields. :keyword int polling_interval: Waiting time between two polls for LRO operations diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/async_samples/sample_recognize_content_async.py b/sdk/formrecognizer/azure-ai-formrecognizer/samples/async_samples/sample_recognize_content_async.py index 01c56bdd1c15..2f51830f7aae 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/samples/async_samples/sample_recognize_content_async.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/async_samples/sample_recognize_content_async.py @@ -10,7 +10,7 @@ FILE: sample_recognize_content_async.py DESCRIPTION: - This sample demonstrates how to extact text and content information from a document + This sample demonstrates how to extract text and content information from a document given through a file. USAGE: diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/sample_recognize_content.py b/sdk/formrecognizer/azure-ai-formrecognizer/samples/sample_recognize_content.py index a70c8f6b8381..cffed4bb3135 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/samples/sample_recognize_content.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/sample_recognize_content.py @@ -10,7 +10,7 @@ FILE: sample_recognize_content.py DESCRIPTION: - This sample demonstrates how to extact text and content information from a document + This sample demonstrates how to extract text and content information from a document given through a file. 
USAGE: python sample_recognize_content.py From 83915faf3e50b10a3e199ae1eac27b43c171f3c6 Mon Sep 17 00:00:00 2001 From: Rakshith Bhyravabhotla Date: Mon, 18 May 2020 16:38:27 -0700 Subject: [PATCH 08/28] Consistency related changes in form recognizer (#11467) * page range + form field page number * us receipt * RecognizedReceipt * Update sdk/formrecognizer/azure-ai-formrecognizer/CHANGELOG.md * async * comments --- .../azure-ai-formrecognizer/CHANGELOG.md | 5 +- .../azure-ai-formrecognizer/README.md | 2 +- .../azure/ai/formrecognizer/__init__.py | 8 +-- .../formrecognizer/_form_recognizer_client.py | 8 +-- .../azure/ai/formrecognizer/_models.py | 70 ++++++++++++------- .../ai/formrecognizer/_response_handlers.py | 14 ++-- .../aio/_form_recognizer_client_async.py | 14 ++-- .../tests/test_custom_forms.py | 4 -- .../tests/test_custom_forms_async.py | 4 -- .../tests/test_custom_forms_from_url.py | 4 -- .../tests/test_custom_forms_from_url_async.py | 4 -- .../tests/test_repr.py | 25 ++++--- .../azure-ai-formrecognizer/tests/testcase.py | 2 - 13 files changed, 88 insertions(+), 76 deletions(-) diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/CHANGELOG.md b/sdk/formrecognizer/azure-ai-formrecognizer/CHANGELOG.md index 49d598ad0cbd..584b5c4032b4 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/CHANGELOG.md +++ b/sdk/formrecognizer/azure-ai-formrecognizer/CHANGELOG.md @@ -10,13 +10,16 @@ - Removed `get_form_training_client` from `FormRecognizerClient` - Added `get_form_recognizer_client` to `FormTrainingClient` - A `HttpResponseError` is now raised if a model with `status=="invalid"` is returned from the `begin_train_model()` or `train_model()` methods +- `PageRange` is renamed to `FormPageRange` +- `FormField` does not have a page_number. +- `begin_recognize_receipts` APIs now return `RecognizedReceipt` instead of `USReceipt` +- `USReceiptType` is renamed to `ReceiptType` **New features** - Authentication using `azure-identity` credentials now supported - see the [Azure Identity documentation](https://github.com/Azure/azure-sdk-for-python/blob/master/sdk/identity/azure-identity/README.md) for more information - ## 1.0.0b2 (2020-05-06) **Fixes and improvements** diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/README.md b/sdk/formrecognizer/azure-ai-formrecognizer/README.md index f3421f5e524e..9e5545fe0e1f 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/README.md +++ b/sdk/formrecognizer/azure-ai-formrecognizer/README.md @@ -123,7 +123,7 @@ See the full details regarding [authentication][cognitive_authentication] of cog - Recognizing form fields and content using custom models trained to recognize your custom forms. These values are returned in a collection of `RecognizedForm` objects. - Recognizing form content, including tables, lines and words, without the need to train a model. Form content is returned in a collection of `FormPage` objects. - - Recognizing common fields from US receipts, using a pre-trained receipt model on the Form Recognizer service. These fields and meta-data are returned in a collection of `USReceipt` objects. + - Recognizing common fields from US receipts, using a pre-trained receipt model on the Form Recognizer service. These fields and meta-data are returned in a collection of `RecognizedReceipt` objects. 
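Under the renamed surface described above, receipt calls now hand back `RecognizedReceipt` objects whose `receipt_type` is a `ReceiptType` and whose `page_range` is a `FormPageRange`, with `FormField` entries keyed by name in the `fields` dictionary. A minimal sketch against that renamed API follows; the endpoint, key, and receipt URL are placeholders, and the printed attributes assume the models as declared later in this patch.

# Minimal sketch of the renamed receipt API; assumes the post-rename
# azure-ai-formrecognizer surface described in the changelog above.
# ENDPOINT, KEY, and RECEIPT_URL are placeholders.
from azure.core.credentials import AzureKeyCredential
from azure.ai.formrecognizer import FormRecognizerClient

ENDPOINT = "https://<region>.api.cognitive.microsoft.com/"
KEY = "<api-key>"
RECEIPT_URL = "https://<host>/receipt.png"

client = FormRecognizerClient(ENDPOINT, AzureKeyCredential(KEY))

poller = client.begin_recognize_receipts_from_url(RECEIPT_URL)
for receipt in poller.result():  # list[RecognizedReceipt]
    # ReceiptType replaces USReceiptType; FormPageRange replaces PageRange.
    print(receipt.receipt_type.type, receipt.receipt_type.confidence)
    print("pages {}-{}".format(receipt.page_range.first_page, receipt.page_range.last_page))
    # FormField no longer carries page_number; fields are keyed by name.
    for name, field in receipt.fields.items():
        print(name, field.value, field.confidence)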
### FormTrainingClient `FormTrainingClient` provides operations for: diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/__init__.py b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/__init__.py index b8225d383dbe..ef3321151b18 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/__init__.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/__init__.py @@ -15,7 +15,7 @@ CustomFormModelStatus, FormContentType, USReceipt, - USReceiptType, + ReceiptType, USReceiptItem, FormTable, FormTableCell, @@ -24,7 +24,7 @@ CustomFormModelInfo, AccountProperties, Point, - PageRange, + FormPageRange, RecognizedForm, FormField, FieldText, @@ -46,7 +46,7 @@ 'FormContentType', 'FormContent', 'USReceipt', - 'USReceiptType', + 'ReceiptType', 'USReceiptItem', 'FormTable', 'FormTableCell', @@ -55,7 +55,7 @@ 'CustomFormModelInfo', 'AccountProperties', 'Point', - 'PageRange', + 'FormPageRange', 'RecognizedForm', 'FormField', 'FieldText', diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_form_recognizer_client.py b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_form_recognizer_client.py index 60364b21a328..38f5aaf25fd0 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_form_recognizer_client.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_form_recognizer_client.py @@ -94,8 +94,8 @@ def begin_recognize_receipts(self, stream, **kwargs): :keyword int polling_interval: Waiting time between two polls for LRO operations if no Retry-After header is present. Defaults to 5 seconds. :return: An instance of an LROPoller. Call `result()` on the poller - object to return a list[:class:`~azure.ai.formrecognizer.USReceipt`]. - :rtype: ~azure.core.polling.LROPoller[list[~azure.ai.formrecognizer.USReceipt]] + object to return a list[:class:`~azure.ai.formrecognizer.RecognizedReceipt`]. + :rtype: ~azure.core.polling.LROPoller[list[~azure.ai.formrecognizer.RecognizedReceipt]] :raises ~azure.core.exceptions.HttpResponseError: .. admonition:: Example: @@ -142,8 +142,8 @@ def begin_recognize_receipts_from_url(self, url, **kwargs): :keyword int polling_interval: Waiting time between two polls for LRO operations if no Retry-After header is present. Defaults to 5 seconds. :return: An instance of an LROPoller. Call `result()` on the poller - object to return a list[:class:`~azure.ai.formrecognizer.USReceipt`]. - :rtype: ~azure.core.polling.LROPoller[list[~azure.ai.formrecognizer.USReceipt]] + object to return a list[:class:`~azure.ai.formrecognizer.RecognizedReceipt`]. + :rtype: ~azure.core.polling.LROPoller[list[~azure.ai.formrecognizer.RecognizedReceipt]] :raises ~azure.core.exceptions.HttpResponseError: .. admonition:: Example: diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_models.py b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_models.py index ae13e57a4967..59e8f2e97c30 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_models.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_models.py @@ -120,17 +120,17 @@ def __new__(cls, x, y): return super(Point, cls).__new__(cls, x, y) -class PageRange(namedtuple("PageRange", "first_page last_page")): - """The 1-based page range of the document. +class FormPageRange(namedtuple("FormPageRange", "first_page last_page")): + """The 1-based page range of the form. 
-    :ivar int first_page: The first page number of the document.
-    :ivar int last_page: The last page number of the document.
+    :ivar int first_page: The first page number of the form.
+    :ivar int last_page: The last page number of the form.
     """

     __slots__ = ()

     def __new__(cls, first_page, last_page):
-        return super(PageRange, cls).__new__(cls, first_page, last_page)
+        return super(FormPageRange, cls).__new__(cls, first_page, last_page)


 class FormContent(object):
@@ -162,7 +162,7 @@ class RecognizedForm(object):
         this is the training-time label of the field. For models trained
         without labels, a unique name is generated for each field.
     :vartype fields: dict[str, ~azure.ai.formrecognizer.FormField]
-    :ivar ~azure.ai.formrecognizer.PageRange page_range:
+    :ivar ~azure.ai.formrecognizer.FormPageRange page_range:
         The first and last page of the input form.
     :ivar list[~azure.ai.formrecognizer.FormPage] pages:
         A list of pages recognized from the input document. Contains lines,
@@ -179,8 +179,39 @@ def __repr__(self):
             self.form_type, repr(self.fields), repr(self.page_range), repr(self.pages)
         )[:1024]

+class RecognizedReceipt(RecognizedForm):
+    """Represents a receipt that has been recognized by a trained model.

-class USReceipt(object):  # pylint: disable=too-many-instance-attributes
+    :ivar str form_type:
+        The type of form the model identified the submitted form to be.
+    :ivar fields:
+        A dictionary of the fields found on the form. The fields dictionary
+        keys are the `name` of the field. For models trained with labels,
+        this is the training-time label of the field. For models trained
+        without labels, a unique name is generated for each field.
+    :vartype fields: dict[str, ~azure.ai.formrecognizer.FormField]
+    :ivar ~azure.ai.formrecognizer.FormPageRange page_range:
+        The first and last page of the input form.
+    :ivar list[~azure.ai.formrecognizer.FormPage] pages:
+        A list of pages recognized from the input document. Contains lines,
+        words, tables and page metadata.
+    :ivar ~azure.ai.formrecognizer.ReceiptType receipt_type:
+        The receipt type and confidence.
+    :ivar str receipt_locale: Defaults to "en-US".
+    """
+    def __init__(self, **kwargs):
+        super(RecognizedReceipt, self).__init__(**kwargs)
+        self.receipt_type = kwargs.get("receipt_type", None)
+        self.receipt_locale = kwargs.get("receipt_locale", "en-US")
+
+    def __repr__(self):
+        return "RecognizedReceipt(form_type={}, fields={}, page_range={}, pages={}, " \
+            "receipt_type={}, receipt_locale={})".format(
+                self.form_type, repr(self.fields), repr(self.page_range), repr(self.pages),
+                repr(self.receipt_type), self.receipt_locale
+            )[:1024]

+class USReceipt(RecognizedReceipt):  # pylint: disable=too-many-instance-attributes
     """Extracted fields found on the US sales receipt. Provides attributes
     for accessing common fields present in US sales receipts.
@@ -190,8 +221,6 @@ class USReceipt(object):  # pylint: disable=too-many-instance-attributes
         The name of the merchant.
     :ivar ~azure.ai.formrecognizer.FormField merchant_phone_number:
         The phone number associated with the merchant.
-    :ivar ~azure.ai.formrecognizer.USReceiptType receipt_type:
-        The reciept type and confidence.
     :ivar list[~azure.ai.formrecognizer.USReceiptItem] receipt_items:
         The purchased items found on the receipt.
     :ivar ~azure.ai.formrecognizer.FormField subtotal:
@@ -209,21 +238,20 @@ class USReceipt(object):  # pylint: disable=too-many-instance-attributes
     :ivar fields:
        A dictionary of the fields found on the receipt.
:vartype fields: dict[str, ~azure.ai.formrecognizer.FormField] - :ivar ~azure.ai.formrecognizer.PageRange page_range: + :ivar ~azure.ai.formrecognizer.FormPageRange page_range: The first and last page of the input receipt. :ivar list[~azure.ai.formrecognizer.FormPage] pages: Contains page metadata such as page width, length, text angle, unit. If `include_text_content=True` is passed, contains a list of extracted text lines for each page in the input document. :ivar str form_type: The type of form. - :ivar str receipt_locale: Defaults to "en-US". """ def __init__(self, **kwargs): + super(USReceipt, self).__init__(**kwargs) self.merchant_address = kwargs.get("merchant_address", None) self.merchant_name = kwargs.get("merchant_name", None) self.merchant_phone_number = kwargs.get("merchant_phone_number", None) - self.receipt_type = kwargs.get("receipt_type", None) self.receipt_items = kwargs.get("receipt_items", None) self.subtotal = kwargs.get("subtotal", None) self.tax = kwargs.get("tax", None) @@ -231,11 +259,6 @@ def __init__(self, **kwargs): self.total = kwargs.get("total", None) self.transaction_date = kwargs.get("transaction_date", None) self.transaction_time = kwargs.get("transaction_time", None) - self.fields = kwargs.get("fields", None) - self.page_range = kwargs.get("page_range", None) - self.pages = kwargs.get("pages", None) - self.form_type = kwargs.get("form_type", None) - self.receipt_locale = kwargs.get("receipt_locale", "en-US") def __repr__(self): return "USReceipt(merchant_address={}, merchant_name={}, merchant_phone_number={}, " \ @@ -264,8 +287,6 @@ class FormField(object): :class:`~azure.ai.formrecognizer.FormField`, or list[:class:`~azure.ai.formrecognizer.FormField`] :ivar float confidence: Measures the degree of certainty of the recognition result. Value is between [0.0, 1.0]. - :ivar int page_number: - The 1-based number of the page in which this content is present. """ def __init__(self, **kwargs): @@ -274,7 +295,6 @@ def __init__(self, **kwargs): self.name = kwargs.get("name", None) self.value = kwargs.get("value", None) self.confidence = kwargs.get("confidence", None) - self.page_number = kwargs.get("page_number", None) @classmethod def _from_generated(cls, field, value, read_result): @@ -284,7 +304,6 @@ def _from_generated(cls, field, value, read_result): value=get_field_value(field, value, read_result), name=field, confidence=adjust_confidence(value.confidence) if value else None, - page_number=value.page if value else None, ) @@ -296,12 +315,11 @@ def _from_generated_unlabeled(cls, field, idx, page, read_result): value=field.value.text, name="field-" + str(idx), confidence=adjust_confidence(field.confidence), - page_number=page, ) def __repr__(self): - return "FormField(label_data={}, value_data={}, name={}, value={}, confidence={}, page_number={})".format( - repr(self.label_data), repr(self.value_data), self.name, repr(self.value), self.confidence, self.page_number + return "FormField(label_data={}, value_data={}, name={}, value={}, confidence={})".format( + repr(self.label_data), repr(self.value_data), self.name, repr(self.value), self.confidence )[:1024] @@ -495,7 +513,7 @@ def __repr__(self): )[:1024] -class USReceiptType(object): +class ReceiptType(object): """The type of the analyzed US receipt and the confidence value of that type. 
@@ -516,7 +534,7 @@ def _from_generated(cls, item): confidence=adjust_confidence(item.confidence)) if item else None def __repr__(self): - return "USReceiptType(type={}, confidence={})".format(self.type, self.confidence)[:1024] + return "ReceiptType(type={}, confidence={})".format(self.type, self.confidence)[:1024] class USReceiptItem(object): diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_response_handlers.py b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_response_handlers.py index 23d3839bca29..a7ae21eb1938 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_response_handlers.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_response_handlers.py @@ -8,14 +8,14 @@ from ._models import ( USReceipt, - USReceiptType, + ReceiptType, FormField, USReceiptItem, FormPage, FormLine, FormTable, FormTableCell, - PageRange, + FormPageRange, RecognizedForm ) @@ -29,7 +29,7 @@ def prepare_us_receipt(response): for page in document_result: if page.fields is None: receipt = USReceipt( - page_range=PageRange(first_page=page.page_range[0], last_page=page.page_range[1]), + page_range=FormPageRange(first_page=page.page_range[0], last_page=page.page_range[1]), pages=form_page[page.page_range[0]-1:page.page_range[1]], form_type=page.doc_type, ) @@ -47,7 +47,7 @@ def prepare_us_receipt(response): page.fields.get("MerchantPhoneNumber"), read_result, ), - receipt_type=USReceiptType._from_generated(page.fields.get("ReceiptType")), + receipt_type=ReceiptType._from_generated(page.fields.get("ReceiptType")), receipt_items=USReceiptItem._from_generated( page.fields.get("Items"), read_result ), @@ -65,7 +65,7 @@ def prepare_us_receipt(response): transaction_time=FormField._from_generated( "TransactionTime", page.fields.get("TransactionTime"), read_result ), - page_range=PageRange( + page_range=FormPageRange( first_page=page.page_range[0], last_page=page.page_range[1] ), pages=form_page[page.page_range[0]-1:page.page_range[1]], @@ -132,7 +132,7 @@ def prepare_unlabeled_result(response): if unlabeled_fields: unlabeled_fields = {field.name: field for field in unlabeled_fields} form = RecognizedForm( - page_range=PageRange( + page_range=FormPageRange( first_page=page.page, last_page=page.page ), @@ -152,7 +152,7 @@ def prepare_labeled_result(response, model_id): result = [] for doc in response.analyze_result.document_results: form = RecognizedForm( - page_range=PageRange( + page_range=FormPageRange( first_page=doc.page_range[0], last_page=doc.page_range[1] ), diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/aio/_form_recognizer_client_async.py b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/aio/_form_recognizer_client_async.py index f802dd2c982a..215ce186a275 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/aio/_form_recognizer_client_async.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/aio/_form_recognizer_client_async.py @@ -29,7 +29,7 @@ from azure.core.credentials import AzureKeyCredential from azure.core.credentials_async import AsyncTokenCredential from .._models import ( - USReceipt, + RecognizedReceipt, FormPage, RecognizedForm ) @@ -91,7 +91,7 @@ async def recognize_receipts( self, stream: Union[bytes, IO[bytes]], **kwargs: Any - ) -> List["USReceipt"]: + ) -> List["RecognizedReceipt"]: """Extract field text and semantic values from a given US sales receipt. 
The input document must be of one of the supported content types - 'application/pdf', 'image/jpeg', 'image/png' or 'image/tiff'. @@ -106,8 +106,8 @@ async def recognize_receipts( see :class:`~azure.ai.formrecognizer.FormContentType`. :keyword int polling_interval: Waiting time between two polls for LRO operations if no Retry-After header is present. Defaults to 5 seconds. - :return: A list of USReceipt. - :rtype: list[~azure.ai.formrecognizer.USReceipt] + :return: A list of RecognizedReceipt. + :rtype: list[~azure.ai.formrecognizer.RecognizedReceipt] :raises ~azure.core.exceptions.HttpResponseError: .. admonition:: Example: @@ -145,7 +145,7 @@ async def recognize_receipts_from_url( self, url: str, **kwargs: Any - ) -> List["USReceipt"]: + ) -> List["RecognizedReceipt"]: """Extract field text and semantic values from a given US sales receipt. The input document must be the location (Url) of the receipt to be analyzed. @@ -156,8 +156,8 @@ async def recognize_receipts_from_url( Whether or not to include text elements such as lines and words in addition to form fields. :keyword int polling_interval: Waiting time between two polls for LRO operations if no Retry-After header is present. Defaults to 5 seconds. - :return: A list of USReceipt. - :rtype: list[~azure.ai.formrecognizer.USReceipt] + :return: A list of RecognizedReceipt. + :rtype: list[~azure.ai.formrecognizer.RecognizedReceipt] :raises ~azure.core.exceptions.HttpResponseError: .. admonition:: Example: diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_custom_forms.py b/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_custom_forms.py index 50fa9f13e89a..e21805a9549f 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_custom_forms.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_custom_forms.py @@ -89,7 +89,6 @@ def test_custom_form_unlabeled(self, client, container_sas_url): for label, field in form[0].fields.items(): self.assertIsNotNone(field.confidence) self.assertIsNotNone(field.name) - self.assertIsNotNone(field.page_number) self.assertIsNotNone(field.value) self.assertIsNotNone(field.value_data.text) self.assertIsNotNone(field.label_data.text) @@ -116,7 +115,6 @@ def test_custom_form_multipage_unlabeled(self, client, container_sas_url): for label, field in form.fields.items(): self.assertIsNotNone(field.confidence) self.assertIsNotNone(field.name) - self.assertIsNotNone(field.page_number) self.assertIsNotNone(field.value) self.assertIsNotNone(field.value_data.text) self.assertIsNotNone(field.label_data.text) @@ -143,7 +141,6 @@ def test_custom_form_labeled(self, client, container_sas_url): for label, field in form[0].fields.items(): self.assertIsNotNone(field.confidence) self.assertIsNotNone(field.name) - self.assertIsNotNone(field.page_number) self.assertIsNotNone(field.value_data.text) self.assertIsNotNone(field.value_data.bounding_box) @@ -174,7 +171,6 @@ def test_custom_form_multipage_labeled(self, client, container_sas_url): for label, field in form.fields.items(): self.assertIsNotNone(field.confidence) self.assertIsNotNone(field.name) - self.assertIsNotNone(field.page_number) self.assertIsNotNone(field.value_data.text) self.assertIsNotNone(field.value_data.bounding_box) diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_custom_forms_async.py b/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_custom_forms_async.py index 96590739aeaa..6c0fe5b94b7e 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_custom_forms_async.py +++ 
b/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_custom_forms_async.py @@ -85,7 +85,6 @@ async def test_custom_form_unlabeled(self, client, container_sas_url): for label, field in form[0].fields.items(): self.assertIsNotNone(field.confidence) self.assertIsNotNone(field.name) - self.assertIsNotNone(field.page_number) self.assertIsNotNone(field.value) self.assertIsNotNone(field.value_data.text) self.assertIsNotNone(field.label_data.text) @@ -111,7 +110,6 @@ async def test_custom_form_multipage_unlabeled(self, client, container_sas_url): for label, field in form.fields.items(): self.assertIsNotNone(field.confidence) self.assertIsNotNone(field.name) - self.assertIsNotNone(field.page_number) self.assertIsNotNone(field.value) self.assertIsNotNone(field.value_data.text) self.assertIsNotNone(field.label_data.text) @@ -133,7 +131,6 @@ async def test_custom_form_labeled(self, client, container_sas_url): for label, field in form[0].fields.items(): self.assertIsNotNone(field.confidence) self.assertIsNotNone(field.name) - self.assertIsNotNone(field.page_number) self.assertIsNotNone(field.value_data.text) self.assertIsNotNone(field.value_data.bounding_box) @@ -162,7 +159,6 @@ async def test_custom_form_multipage_labeled(self, client, container_sas_url): for label, field in form.fields.items(): self.assertIsNotNone(field.confidence) self.assertIsNotNone(field.name) - self.assertIsNotNone(field.page_number) self.assertIsNotNone(field.value_data.text) self.assertIsNotNone(field.value_data.bounding_box) diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_custom_forms_from_url.py b/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_custom_forms_from_url.py index 30055d8136ba..4be0b5913b5c 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_custom_forms_from_url.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_custom_forms_from_url.py @@ -80,7 +80,6 @@ def test_custom_form_unlabeled(self, client, container_sas_url): for label, field in form[0].fields.items(): self.assertIsNotNone(field.confidence) self.assertIsNotNone(field.name) - self.assertIsNotNone(field.page_number) self.assertIsNotNone(field.value) self.assertIsNotNone(field.value_data.text) self.assertIsNotNone(field.label_data.text) @@ -105,7 +104,6 @@ def test_form_multipage_unlabeled(self, client, container_sas_url, blob_sas_url) for label, field in form.fields.items(): self.assertIsNotNone(field.confidence) self.assertIsNotNone(field.name) - self.assertIsNotNone(field.page_number) self.assertIsNotNone(field.value) self.assertIsNotNone(field.value_data.text) self.assertIsNotNone(field.label_data.text) @@ -126,7 +124,6 @@ def test_custom_form_labeled(self, client, container_sas_url): for label, field in form[0].fields.items(): self.assertIsNotNone(field.confidence) self.assertIsNotNone(field.name) - self.assertIsNotNone(field.page_number) self.assertIsNotNone(field.value_data.text) self.assertIsNotNone(field.value_data.bounding_box) @@ -153,7 +150,6 @@ def test_form_multipage_labeled(self, client, container_sas_url, blob_sas_url): for label, field in form.fields.items(): self.assertIsNotNone(field.confidence) self.assertIsNotNone(field.name) - self.assertIsNotNone(field.page_number) self.assertIsNotNone(field.value_data.text) self.assertIsNotNone(field.value_data.bounding_box) diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_custom_forms_from_url_async.py b/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_custom_forms_from_url_async.py index 5b9d62340272..3c196abf0b2e 100644 
--- a/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_custom_forms_from_url_async.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_custom_forms_from_url_async.py @@ -76,7 +76,6 @@ async def test_form_unlabeled(self, client, container_sas_url): for label, field in form[0].fields.items(): self.assertIsNotNone(field.confidence) self.assertIsNotNone(field.name) - self.assertIsNotNone(field.page_number) self.assertIsNotNone(field.value) self.assertIsNotNone(field.value_data.text) self.assertIsNotNone(field.label_data.text) @@ -99,7 +98,6 @@ async def test_custom_form_multipage_unlabeled(self, client, container_sas_url, for label, field in form.fields.items(): self.assertIsNotNone(field.confidence) self.assertIsNotNone(field.name) - self.assertIsNotNone(field.page_number) self.assertIsNotNone(field.value) self.assertIsNotNone(field.value_data.text) self.assertIsNotNone(field.label_data.text) @@ -118,7 +116,6 @@ async def test_form_labeled(self, client, container_sas_url): for label, field in form[0].fields.items(): self.assertIsNotNone(field.confidence) self.assertIsNotNone(field.name) - self.assertIsNotNone(field.page_number) self.assertIsNotNone(field.value_data.text) self.assertIsNotNone(field.value_data.bounding_box) @@ -143,7 +140,6 @@ async def test_form_multipage_labeled(self, client, container_sas_url, blob_sas_ for label, field in form.fields.items(): self.assertIsNotNone(field.confidence) self.assertIsNotNone(field.name) - self.assertIsNotNone(field.page_number) self.assertIsNotNone(field.value_data.text) self.assertIsNotNone(field.value_data.bounding_box) diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_repr.py b/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_repr.py index 5cf69159bf9b..fb0f88fb81ea 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_repr.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_repr.py @@ -67,22 +67,22 @@ def field_text(bounding_box, form_word, form_line): @pytest.fixture def form_field_two(field_text): - model = _models.FormField(label_data=field_text[0], value_data=field_text[0], name="form_field_two", value="value", confidence=0, page_number=1) - model_repr = "FormField(label_data={}, value_data={}, name=form_field_two, value='value', confidence=0, page_number=1)".format(field_text[1], field_text[1])[:1024] + model = _models.FormField(label_data=field_text[0], value_data=field_text[0], name="form_field_two", value="value", confidence=0) + model_repr = "FormField(label_data={}, value_data={}, name=form_field_two, value='value', confidence=0)".format(field_text[1], field_text[1])[:1024] assert repr(model) == model_repr return model, model_repr @pytest.fixture def form_field_one(field_text, form_field_two): - model = _models.FormField(label_data=field_text[0], value_data=field_text[0], name="form_field_one", value=form_field_two[0], confidence=1.0, page_number=5) - model_repr = "FormField(label_data={}, value_data={}, name=form_field_one, value={}, confidence=1.0, page_number=5)".format(field_text[1], field_text[1], form_field_two[1])[:1024] + model = _models.FormField(label_data=field_text[0], value_data=field_text[0], name="form_field_one", value=form_field_two[0], confidence=1.0) + model_repr = "FormField(label_data={}, value_data={}, name=form_field_one, value={}, confidence=1.0)".format(field_text[1], field_text[1], form_field_two[1])[:1024] assert repr(model) == model_repr return model, model_repr @pytest.fixture def page_range(): - model = _models.PageRange(first_page=1, 
last_page=100) - model_repr = "PageRange(first_page=1, last_page=100)" + model = _models.FormPageRange(first_page=1, last_page=100) + model_repr = "FormPageRange(first_page=1, last_page=100)" assert repr(model) == model_repr return model, model_repr @@ -97,8 +97,8 @@ def form_page(form_table, form_line): @pytest.fixture def us_receipt_type(): - model = _models.USReceiptType(type="Itemized", confidence=1.0) - model_repr = "USReceiptType(type=Itemized, confidence=1.0)" + model = _models.ReceiptType(type="Itemized", confidence=1.0) + model_repr = "ReceiptType(type=Itemized, confidence=1.0)" assert repr(model) == model_repr return model, model_repr @@ -148,6 +148,15 @@ def test_recognized_form(self, form_field_one, page_range, form_page, us_receipt )[:1024] assert repr(model) == model_repr + def test_recognized_receipt(self, form_field_one, page_range, form_page, us_receipt_type): + model = _models.RecognizedReceipt( + form_type="receipt", fields={"one": form_field_one[0]}, page_range=page_range[0], pages=[form_page[0]], + receipt_type=us_receipt_type[0], receipt_locale="en-US") + model_repr = "RecognizedReceipt(form_type=receipt, fields={{'one': {}}}, page_range={}, pages=[{}])".format( + form_field_one[1], page_range[1], form_page[1], us_receipt_type[0], "en-US" + )[:1024] + assert repr(model) == model_repr + def test_us_receipt(self, form_field_one, form_field_two, us_receipt_type, us_receipt_item, page_range, form_page): model = _models.USReceipt( merchant_address=form_field_one[0], diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/tests/testcase.py b/sdk/formrecognizer/azure-ai-formrecognizer/tests/testcase.py index 3e6aa4ae3e60..fdcfbb69d363 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/tests/testcase.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/tests/testcase.py @@ -155,7 +155,6 @@ def assertLabeledFormFieldDictTransformCorrect(self, form_fields, actual_fields, b = form_fields for label, a in actual_fields.items(): self.assertEqual(label, b[label].name) - self.assertEqual(a.page, b[label].page_number) self.assertEqual(a.confidence, b[label].confidence if a.confidence is not None else 1.0) self.assertBoundingBoxTransformCorrect(b[label].value_data.bounding_box, a.bounding_box) self.assertEqual(a.text, b[label].value_data.text) @@ -221,7 +220,6 @@ def assertFormFieldTransformCorrect(self, receipt_field, actual_field, read_resu self.assertBoundingBoxTransformCorrect(receipt_field.value_data.bounding_box, actual_field.bounding_box) self.assertEqual(receipt_field.value_data.text, actual_field.text) self.assertEqual(receipt_field.confidence, actual_field.confidence if actual_field.confidence is not None else 1.0) - self.assertEqual(receipt_field.page_number, actual_field.page) if read_results: self.assertTextContentTransformCorrect( receipt_field.value_data.text_content, From 72a895d077157b77b183805fab9358ecd50aa96c Mon Sep 17 00:00:00 2001 From: Qiaoqiao Zhang <55688292+qiaozha@users.noreply.github.com> Date: Tue, 19 May 2020 09:27:16 +0800 Subject: [PATCH 09/28] release-for-hanaonazure-mgmt (#11441) --- .../azure-mgmt-hanaonazure/CHANGELOG.md | 23 + .../azure-mgmt-hanaonazure/README.md | 2 +- .../hanaonazure/_hana_management_client.py | 12 +- .../azure/mgmt/hanaonazure/models/__init__.py | 43 +- .../models/_hana_management_client_enums.py | 63 -- .../azure/mgmt/hanaonazure/models/_models.py | 463 +++-------- .../mgmt/hanaonazure/models/_models_py3.py | 475 +++-------- .../mgmt/hanaonazure/models/_paged_models.py | 16 +- .../mgmt/hanaonazure/operations/__init__.py | 4 
+- .../operations/_hana_instances_operations.py | 738 ------------------ .../hanaonazure/operations/_operations.py | 4 +- .../_provider_instances_operations.py | 376 +++++++++ .../operations/_sap_monitors_operations.py | 6 +- .../azure/mgmt/hanaonazure/version.py | 2 +- .../azure-mgmt-hanaonazure/setup.py | 4 +- .../tests/test_azure_mgmt_hanaonazure.py | 42 +- 16 files changed, 707 insertions(+), 1566 deletions(-) delete mode 100644 sdk/hanaonazure/azure-mgmt-hanaonazure/azure/mgmt/hanaonazure/operations/_hana_instances_operations.py create mode 100644 sdk/hanaonazure/azure-mgmt-hanaonazure/azure/mgmt/hanaonazure/operations/_provider_instances_operations.py diff --git a/sdk/hanaonazure/azure-mgmt-hanaonazure/CHANGELOG.md b/sdk/hanaonazure/azure-mgmt-hanaonazure/CHANGELOG.md index d1d9f3211aa1..291003e4e762 100644 --- a/sdk/hanaonazure/azure-mgmt-hanaonazure/CHANGELOG.md +++ b/sdk/hanaonazure/azure-mgmt-hanaonazure/CHANGELOG.md @@ -1,5 +1,28 @@ # Release History +## 0.14.0 (2020-05-14) + +**Features** + + - Model SapMonitor has a new parameter sap_monitor_collector_version + - Model SapMonitor has a new parameter monitor_subnet + - Added operation group ProviderInstancesOperations + +**Breaking changes** + + - Model Resource no longer has parameter tags + - Model Resource no longer has parameter location + - Model SapMonitor no longer has parameter key_vault_id + - Model SapMonitor no longer has parameter hana_db_password_key_vault_url + - Model SapMonitor no longer has parameter hana_db_name + - Model SapMonitor no longer has parameter hana_db_credentials_msi_id + - Model SapMonitor no longer has parameter hana_hostname + - Model SapMonitor no longer has parameter hana_db_username + - Model SapMonitor no longer has parameter hana_db_password + - Model SapMonitor no longer has parameter hana_subnet + - Model SapMonitor no longer has parameter hana_db_sql_port + - Removed operation group HanaInstancesOperations + ## 0.13.0 (2020-02-13) **Features** diff --git a/sdk/hanaonazure/azure-mgmt-hanaonazure/README.md b/sdk/hanaonazure/azure-mgmt-hanaonazure/README.md index 0520341083ff..768547abc09f 100644 --- a/sdk/hanaonazure/azure-mgmt-hanaonazure/README.md +++ b/sdk/hanaonazure/azure-mgmt-hanaonazure/README.md @@ -2,7 +2,7 @@ This is the Microsoft Azure SAP Hana on Azure Management Client Library. This package has been tested with Python 2.7, 3.5, 3.6, 3.7 and 3.8. -For a more complete view of Azure libraries, see the [Github repo](https://github.com/Azure/azure-sdk-for-python/sdk) +For a more complete view of Azure libraries, see the [Github repo](https://github.com/Azure/azure-sdk-for-python/) # Usage diff --git a/sdk/hanaonazure/azure-mgmt-hanaonazure/azure/mgmt/hanaonazure/_hana_management_client.py b/sdk/hanaonazure/azure-mgmt-hanaonazure/azure/mgmt/hanaonazure/_hana_management_client.py index 14a418e6c608..f33f93a665be 100644 --- a/sdk/hanaonazure/azure-mgmt-hanaonazure/azure/mgmt/hanaonazure/_hana_management_client.py +++ b/sdk/hanaonazure/azure-mgmt-hanaonazure/azure/mgmt/hanaonazure/_hana_management_client.py @@ -14,8 +14,8 @@ from ._configuration import HanaManagementClientConfiguration from .operations import Operations -from .operations import HanaInstancesOperations from .operations import SapMonitorsOperations +from .operations import ProviderInstancesOperations from . 
import models @@ -27,10 +27,10 @@ class HanaManagementClient(SDKClient): :ivar operations: Operations operations :vartype operations: azure.mgmt.hanaonazure.operations.Operations - :ivar hana_instances: HanaInstances operations - :vartype hana_instances: azure.mgmt.hanaonazure.operations.HanaInstancesOperations :ivar sap_monitors: SapMonitors operations :vartype sap_monitors: azure.mgmt.hanaonazure.operations.SapMonitorsOperations + :ivar provider_instances: ProviderInstances operations + :vartype provider_instances: azure.mgmt.hanaonazure.operations.ProviderInstancesOperations :param credentials: Credentials needed for the client to connect to Azure. :type credentials: :mod:`A msrestazure Credentials @@ -49,13 +49,13 @@ def __init__( super(HanaManagementClient, self).__init__(self.config.credentials, self.config) client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} - self.api_version = '2017-11-03-preview' + self.api_version = '2020-02-07-preview' self._serialize = Serializer(client_models) self._deserialize = Deserializer(client_models) self.operations = Operations( self._client, self.config, self._serialize, self._deserialize) - self.hana_instances = HanaInstancesOperations( - self._client, self.config, self._serialize, self._deserialize) self.sap_monitors = SapMonitorsOperations( self._client, self.config, self._serialize, self._deserialize) + self.provider_instances = ProviderInstancesOperations( + self._client, self.config, self._serialize, self._deserialize) diff --git a/sdk/hanaonazure/azure-mgmt-hanaonazure/azure/mgmt/hanaonazure/models/__init__.py b/sdk/hanaonazure/azure-mgmt-hanaonazure/azure/mgmt/hanaonazure/models/__init__.py index 2d5850e75153..edf9a1ac8042 100644 --- a/sdk/hanaonazure/azure-mgmt-hanaonazure/azure/mgmt/hanaonazure/models/__init__.py +++ b/sdk/hanaonazure/azure-mgmt-hanaonazure/azure/mgmt/hanaonazure/models/__init__.py @@ -10,65 +10,44 @@ # -------------------------------------------------------------------------- try: - from ._models_py3 import Disk from ._models_py3 import Display from ._models_py3 import ErrorResponse, ErrorResponseException - from ._models_py3 import HanaInstance - from ._models_py3 import HardwareProfile - from ._models_py3 import IpAddress - from ._models_py3 import MonitoringDetails - from ._models_py3 import NetworkProfile from ._models_py3 import Operation - from ._models_py3 import OSProfile + from ._models_py3 import ProviderInstance + from ._models_py3 import ProxyResource from ._models_py3 import Resource from ._models_py3 import SapMonitor - from ._models_py3 import StorageProfile from ._models_py3 import Tags + from ._models_py3 import TrackedResource except (SyntaxError, ImportError): - from ._models import Disk from ._models import Display from ._models import ErrorResponse, ErrorResponseException - from ._models import HanaInstance - from ._models import HardwareProfile - from ._models import IpAddress - from ._models import MonitoringDetails - from ._models import NetworkProfile from ._models import Operation - from ._models import OSProfile + from ._models import ProviderInstance + from ._models import ProxyResource from ._models import Resource from ._models import SapMonitor - from ._models import StorageProfile from ._models import Tags -from ._paged_models import HanaInstancePaged + from ._models import TrackedResource from ._paged_models import OperationPaged +from ._paged_models import ProviderInstancePaged from ._paged_models import SapMonitorPaged from ._hana_management_client_enums 
import ( - HanaHardwareTypeNamesEnum, - HanaInstanceSizeNamesEnum, - HanaInstancePowerStateEnum, HanaProvisioningStatesEnum, ) __all__ = [ - 'Disk', 'Display', 'ErrorResponse', 'ErrorResponseException', - 'HanaInstance', - 'HardwareProfile', - 'IpAddress', - 'MonitoringDetails', - 'NetworkProfile', 'Operation', - 'OSProfile', + 'ProviderInstance', + 'ProxyResource', 'Resource', 'SapMonitor', - 'StorageProfile', 'Tags', + 'TrackedResource', 'OperationPaged', - 'HanaInstancePaged', 'SapMonitorPaged', - 'HanaHardwareTypeNamesEnum', - 'HanaInstanceSizeNamesEnum', - 'HanaInstancePowerStateEnum', + 'ProviderInstancePaged', 'HanaProvisioningStatesEnum', ] diff --git a/sdk/hanaonazure/azure-mgmt-hanaonazure/azure/mgmt/hanaonazure/models/_hana_management_client_enums.py b/sdk/hanaonazure/azure-mgmt-hanaonazure/azure/mgmt/hanaonazure/models/_hana_management_client_enums.py index 3dd378fc347e..1b1fb05524c9 100644 --- a/sdk/hanaonazure/azure-mgmt-hanaonazure/azure/mgmt/hanaonazure/models/_hana_management_client_enums.py +++ b/sdk/hanaonazure/azure-mgmt-hanaonazure/azure/mgmt/hanaonazure/models/_hana_management_client_enums.py @@ -12,69 +12,6 @@ from enum import Enum -class HanaHardwareTypeNamesEnum(str, Enum): - - cisco_ucs = "Cisco_UCS" - hpe = "HPE" - - -class HanaInstanceSizeNamesEnum(str, Enum): - - s72m = "S72m" - s144m = "S144m" - s72 = "S72" - s144 = "S144" - s192 = "S192" - s192m = "S192m" - s192xm = "S192xm" - s96 = "S96" - s112 = "S112" - s224 = "S224" - s224m = "S224m" - s224om = "S224om" - s224oo = "S224oo" - s224oom = "S224oom" - s224ooo = "S224ooo" - s384 = "S384" - s384m = "S384m" - s384xm = "S384xm" - s384xxm = "S384xxm" - s448 = "S448" - s448m = "S448m" - s448om = "S448om" - s448oo = "S448oo" - s448oom = "S448oom" - s448ooo = "S448ooo" - s576m = "S576m" - s576xm = "S576xm" - s672 = "S672" - s672m = "S672m" - s672om = "S672om" - s672oo = "S672oo" - s672oom = "S672oom" - s672ooo = "S672ooo" - s768 = "S768" - s768m = "S768m" - s768xm = "S768xm" - s896 = "S896" - s896m = "S896m" - s896om = "S896om" - s896oo = "S896oo" - s896oom = "S896oom" - s896ooo = "S896ooo" - s960m = "S960m" - - -class HanaInstancePowerStateEnum(str, Enum): - - starting = "starting" - started = "started" - stopping = "stopping" - stopped = "stopped" - restarting = "restarting" - unknown = "unknown" - - class HanaProvisioningStatesEnum(str, Enum): accepted = "Accepted" diff --git a/sdk/hanaonazure/azure-mgmt-hanaonazure/azure/mgmt/hanaonazure/models/_models.py b/sdk/hanaonazure/azure-mgmt-hanaonazure/azure/mgmt/hanaonazure/models/_models.py index c9ada9c8225f..f707289710cf 100644 --- a/sdk/hanaonazure/azure-mgmt-hanaonazure/azure/mgmt/hanaonazure/models/_models.py +++ b/sdk/hanaonazure/azure-mgmt-hanaonazure/azure/mgmt/hanaonazure/models/_models.py @@ -21,40 +21,6 @@ class CloudError(Model): } -class Disk(Model): - """Specifies the disk information fo the HANA instance. - - Variables are only populated by the server, and will be ignored when - sending a request. - - :param name: The disk name. - :type name: str - :param disk_size_gb: Specifies the size of an empty data disk in - gigabytes. - :type disk_size_gb: int - :ivar lun: Specifies the logical unit number of the data disk. This value - is used to identify data disks within the VM and therefore must be unique - for each data disk attached to a VM. 
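With the bare-metal HANA instance surface removed, HanaProvisioningStatesEnum is the only enum the package still exports, as the rewritten __all__ above shows. A one-line sanity check of the remaining enum (sketch):

    from azure.mgmt.hanaonazure.models import HanaProvisioningStatesEnum

    assert HanaProvisioningStatesEnum.accepted.value == "Accepted"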
- :vartype lun: int - """ - - _validation = { - 'lun': {'readonly': True}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'disk_size_gb': {'key': 'diskSizeGB', 'type': 'int'}, - 'lun': {'key': 'lun', 'type': 'int'}, - } - - def __init__(self, **kwargs): - super(Disk, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.disk_size_gb = kwargs.get('disk_size_gb', None) - self.lun = None - - class Display(Model): """Detailed HANA operation information. @@ -142,364 +108,215 @@ def __init__(self, deserialize, response, *args): super(ErrorResponseException, self).__init__(deserialize, response, 'ErrorResponse', *args) -class Resource(Model): - """The resource model definition. +class Operation(Model): + """HANA operation information. Variables are only populated by the server, and will be ignored when sending a request. - :ivar id: Resource ID - :vartype id: str - :ivar name: Resource name + :ivar name: The name of the operation being performed on this particular + object. This name should match the action name that appears in RBAC / the + event service. :vartype name: str - :ivar type: Resource type - :vartype type: str - :param location: Resource location - :type location: str - :ivar tags: Resource tags - :vartype tags: dict[str, str] + :param display: Displayed HANA operation information + :type display: ~azure.mgmt.hanaonazure.models.Display """ _validation = { - 'id': {'readonly': True}, 'name': {'readonly': True}, - 'type': {'readonly': True}, - 'tags': {'readonly': True}, } _attribute_map = { - 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'str'}, - 'location': {'key': 'location', 'type': 'str'}, - 'tags': {'key': 'tags', 'type': '{str}'}, + 'display': {'key': 'display', 'type': 'Display'}, } def __init__(self, **kwargs): - super(Resource, self).__init__(**kwargs) - self.id = None + super(Operation, self).__init__(**kwargs) self.name = None - self.type = None - self.location = kwargs.get('location', None) - self.tags = None + self.display = kwargs.get('display', None) -class HanaInstance(Resource): - """HANA instance info on Azure (ARM properties and HANA properties). +class Resource(Model): + """The core properties of ARM resources. Variables are only populated by the server, and will be ignored when sending a request. - :ivar id: Resource ID + :ivar id: Fully qualified resource Id for the resource. Ex - + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/trafficManagerProfiles/{resourceName} :vartype id: str - :ivar name: Resource name + :ivar name: The name of the resource :vartype name: str - :ivar type: Resource type + :ivar type: The type of the resource. Ex- + Microsoft.Network/trafficManagerProfiles. :vartype type: str - :param location: Resource location - :type location: str - :ivar tags: Resource tags - :vartype tags: dict[str, str] - :param hardware_profile: Specifies the hardware settings for the HANA - instance. - :type hardware_profile: ~azure.mgmt.hanaonazure.models.HardwareProfile - :param storage_profile: Specifies the storage settings for the HANA - instance disks. - :type storage_profile: ~azure.mgmt.hanaonazure.models.StorageProfile - :param os_profile: Specifies the operating system settings for the HANA - instance. - :type os_profile: ~azure.mgmt.hanaonazure.models.OSProfile - :param network_profile: Specifies the network settings for the HANA - instance. 
- :type network_profile: ~azure.mgmt.hanaonazure.models.NetworkProfile - :ivar hana_instance_id: Specifies the HANA instance unique ID. - :vartype hana_instance_id: str - :ivar power_state: Resource power state. Possible values include: - 'starting', 'started', 'stopping', 'stopped', 'restarting', 'unknown' - :vartype power_state: str or - ~azure.mgmt.hanaonazure.models.HanaInstancePowerStateEnum - :ivar proximity_placement_group: Resource proximity placement group - :vartype proximity_placement_group: str - :ivar hw_revision: Hardware revision of a HANA instance - :vartype hw_revision: str - :param partner_node_id: ARM ID of another HanaInstance that will share a - network with this HanaInstance - :type partner_node_id: str - :ivar provisioning_state: State of provisioning of the HanaInstance. - Possible values include: 'Accepted', 'Creating', 'Updating', 'Failed', - 'Succeeded', 'Deleting', 'Migrating' - :vartype provisioning_state: str or - ~azure.mgmt.hanaonazure.models.HanaProvisioningStatesEnum """ _validation = { 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, - 'tags': {'readonly': True}, - 'hana_instance_id': {'readonly': True}, - 'power_state': {'readonly': True}, - 'proximity_placement_group': {'readonly': True}, - 'hw_revision': {'readonly': True}, - 'provisioning_state': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, - 'location': {'key': 'location', 'type': 'str'}, - 'tags': {'key': 'tags', 'type': '{str}'}, - 'hardware_profile': {'key': 'properties.hardwareProfile', 'type': 'HardwareProfile'}, - 'storage_profile': {'key': 'properties.storageProfile', 'type': 'StorageProfile'}, - 'os_profile': {'key': 'properties.osProfile', 'type': 'OSProfile'}, - 'network_profile': {'key': 'properties.networkProfile', 'type': 'NetworkProfile'}, - 'hana_instance_id': {'key': 'properties.hanaInstanceId', 'type': 'str'}, - 'power_state': {'key': 'properties.powerState', 'type': 'str'}, - 'proximity_placement_group': {'key': 'properties.proximityPlacementGroup', 'type': 'str'}, - 'hw_revision': {'key': 'properties.hwRevision', 'type': 'str'}, - 'partner_node_id': {'key': 'properties.partnerNodeId', 'type': 'str'}, - 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, } def __init__(self, **kwargs): - super(HanaInstance, self).__init__(**kwargs) - self.hardware_profile = kwargs.get('hardware_profile', None) - self.storage_profile = kwargs.get('storage_profile', None) - self.os_profile = kwargs.get('os_profile', None) - self.network_profile = kwargs.get('network_profile', None) - self.hana_instance_id = None - self.power_state = None - self.proximity_placement_group = None - self.hw_revision = None - self.partner_node_id = kwargs.get('partner_node_id', None) - self.provisioning_state = None + super(Resource, self).__init__(**kwargs) + self.id = None + self.name = None + self.type = None -class HardwareProfile(Model): - """Specifies the hardware settings for the HANA instance. +class ProxyResource(Resource): + """The resource model definition for a ARM proxy resource. It will have + everything other than required location and tags. Variables are only populated by the server, and will be ignored when sending a request. - :ivar hardware_type: Name of the hardware type (vendor and/or their - product name). 
Possible values include: 'Cisco_UCS', 'HPE' - :vartype hardware_type: str or - ~azure.mgmt.hanaonazure.models.HanaHardwareTypeNamesEnum - :ivar hana_instance_size: Specifies the HANA instance SKU. Possible values - include: 'S72m', 'S144m', 'S72', 'S144', 'S192', 'S192m', 'S192xm', 'S96', - 'S112', 'S224', 'S224m', 'S224om', 'S224oo', 'S224oom', 'S224ooo', 'S384', - 'S384m', 'S384xm', 'S384xxm', 'S448', 'S448m', 'S448om', 'S448oo', - 'S448oom', 'S448ooo', 'S576m', 'S576xm', 'S672', 'S672m', 'S672om', - 'S672oo', 'S672oom', 'S672ooo', 'S768', 'S768m', 'S768xm', 'S896', - 'S896m', 'S896om', 'S896oo', 'S896oom', 'S896ooo', 'S960m' - :vartype hana_instance_size: str or - ~azure.mgmt.hanaonazure.models.HanaInstanceSizeNamesEnum + :ivar id: Fully qualified resource Id for the resource. Ex - + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/trafficManagerProfiles/{resourceName} + :vartype id: str + :ivar name: The name of the resource + :vartype name: str + :ivar type: The type of the resource. Ex- + Microsoft.Network/trafficManagerProfiles. + :vartype type: str """ _validation = { - 'hardware_type': {'readonly': True}, - 'hana_instance_size': {'readonly': True}, - } - - _attribute_map = { - 'hardware_type': {'key': 'hardwareType', 'type': 'str'}, - 'hana_instance_size': {'key': 'hanaInstanceSize', 'type': 'str'}, - } - - def __init__(self, **kwargs): - super(HardwareProfile, self).__init__(**kwargs) - self.hardware_type = None - self.hana_instance_size = None - - -class IpAddress(Model): - """Specifies the IP address of the network interface. - - :param ip_address: Specifies the IP address of the network interface. - :type ip_address: str - """ - - _attribute_map = { - 'ip_address': {'key': 'ipAddress', 'type': 'str'}, + 'id': {'readonly': True}, + 'name': {'readonly': True}, + 'type': {'readonly': True}, } - def __init__(self, **kwargs): - super(IpAddress, self).__init__(**kwargs) - self.ip_address = kwargs.get('ip_address', None) - - -class MonitoringDetails(Model): - """Details needed to monitor a Hana Instance. - - :param hana_subnet: ARM ID of an Azure Subnet with access to the HANA - instance. - :type hana_subnet: str - :param hana_hostname: Hostname of the HANA Instance blade. - :type hana_hostname: str - :param hana_db_name: Name of the database itself. - :type hana_db_name: str - :param hana_db_sql_port: The port number of the tenant DB. Used to connect - to the DB. 
- :type hana_db_sql_port: int - :param hana_db_username: Username for the HANA database to login to for - monitoring - :type hana_db_username: str - :param hana_db_password: Password for the HANA database to login for - monitoring - :type hana_db_password: str - """ - _attribute_map = { - 'hana_subnet': {'key': 'hanaSubnet', 'type': 'str'}, - 'hana_hostname': {'key': 'hanaHostname', 'type': 'str'}, - 'hana_db_name': {'key': 'hanaDbName', 'type': 'str'}, - 'hana_db_sql_port': {'key': 'hanaDbSqlPort', 'type': 'int'}, - 'hana_db_username': {'key': 'hanaDbUsername', 'type': 'str'}, - 'hana_db_password': {'key': 'hanaDbPassword', 'type': 'str'}, + 'id': {'key': 'id', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, } def __init__(self, **kwargs): - super(MonitoringDetails, self).__init__(**kwargs) - self.hana_subnet = kwargs.get('hana_subnet', None) - self.hana_hostname = kwargs.get('hana_hostname', None) - self.hana_db_name = kwargs.get('hana_db_name', None) - self.hana_db_sql_port = kwargs.get('hana_db_sql_port', None) - self.hana_db_username = kwargs.get('hana_db_username', None) - self.hana_db_password = kwargs.get('hana_db_password', None) + super(ProxyResource, self).__init__(**kwargs) -class NetworkProfile(Model): - """Specifies the network settings for the HANA instance disks. +class ProviderInstance(ProxyResource): + """A provider instance associated with a SAP monitor. Variables are only populated by the server, and will be ignored when sending a request. - :param network_interfaces: Specifies the network interfaces for the HANA + :ivar id: Fully qualified resource Id for the resource. Ex - + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/trafficManagerProfiles/{resourceName} + :vartype id: str + :ivar name: The name of the resource + :vartype name: str + :ivar type: The type of the resource. Ex- + Microsoft.Network/trafficManagerProfiles. + :vartype type: str + :param provider_instance_type: The type of provider instance. + :type provider_instance_type: str + :param properties: A JSON string containing the properties of the provider instance. - :type network_interfaces: list[~azure.mgmt.hanaonazure.models.IpAddress] - :ivar circuit_id: Specifies the circuit id for connecting to express - route. - :vartype circuit_id: str + :type properties: str + :param metadata: A JSON string containing metadata of the provider + instance. + :type metadata: str + :ivar provisioning_state: State of provisioning of the provider instance. 
+ Possible values include: 'Accepted', 'Creating', 'Updating', 'Failed', + 'Succeeded', 'Deleting', 'Migrating' + :vartype provisioning_state: str or + ~azure.mgmt.hanaonazure.models.HanaProvisioningStatesEnum """ _validation = { - 'circuit_id': {'readonly': True}, + 'id': {'readonly': True}, + 'name': {'readonly': True}, + 'type': {'readonly': True}, + 'provisioning_state': {'readonly': True}, } _attribute_map = { - 'network_interfaces': {'key': 'networkInterfaces', 'type': '[IpAddress]'}, - 'circuit_id': {'key': 'circuitId', 'type': 'str'}, + 'id': {'key': 'id', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'provider_instance_type': {'key': 'properties.type', 'type': 'str'}, + 'properties': {'key': 'properties.properties', 'type': 'str'}, + 'metadata': {'key': 'properties.metadata', 'type': 'str'}, + 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, } def __init__(self, **kwargs): - super(NetworkProfile, self).__init__(**kwargs) - self.network_interfaces = kwargs.get('network_interfaces', None) - self.circuit_id = None + super(ProviderInstance, self).__init__(**kwargs) + self.provider_instance_type = kwargs.get('provider_instance_type', None) + self.properties = kwargs.get('properties', None) + self.metadata = kwargs.get('metadata', None) + self.provisioning_state = None -class Operation(Model): - """HANA operation information. +class TrackedResource(Resource): + """The resource model definition for a ARM tracked top level resource. Variables are only populated by the server, and will be ignored when sending a request. - :ivar name: The name of the operation being performed on this particular - object. This name should match the action name that appears in RBAC / the - event service. + :ivar id: Fully qualified resource Id for the resource. Ex - + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/trafficManagerProfiles/{resourceName} + :vartype id: str + :ivar name: The name of the resource :vartype name: str - :param display: Displayed HANA operation information - :type display: ~azure.mgmt.hanaonazure.models.Display + :ivar type: The type of the resource. Ex- + Microsoft.Network/trafficManagerProfiles. + :vartype type: str + :param tags: Resource tags. + :type tags: dict[str, str] + :param location: The Azure Region where the resource lives + :type location: str """ _validation = { + 'id': {'readonly': True}, 'name': {'readonly': True}, + 'type': {'readonly': True}, } _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, - 'display': {'key': 'display', 'type': 'Display'}, - } - - def __init__(self, **kwargs): - super(Operation, self).__init__(**kwargs) - self.name = None - self.display = kwargs.get('display', None) - - -class OSProfile(Model): - """Specifies the operating system settings for the HANA instance. - - Variables are only populated by the server, and will be ignored when - sending a request. - - :param computer_name: Specifies the host OS name of the HANA instance. - :type computer_name: str - :ivar os_type: This property allows you to specify the type of the OS. - :vartype os_type: str - :ivar version: Specifies version of operating system. - :vartype version: str - :param ssh_public_key: Specifies the SSH public key used to access the - operating system. 
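ProviderInstance is the new unit that attaches a monitored system to a SAP monitor: its configuration travels as opaque JSON strings (properties, metadata) rather than typed fields. A construction sketch; the JSON keys are illustrative assumptions, and the create call is only hinted at because the methods of the new _provider_instances_operations.py are not shown in this hunk:

    import json
    from azure.mgmt.hanaonazure.models import ProviderInstance

    instance = ProviderInstance(
        provider_instance_type="SapHana",  # kind of system to monitor
        properties=json.dumps({"hanaHostname": "10.0.0.6", "hanaDbSqlPort": 30015}),
        metadata=json.dumps({"sid": "HN1"}),
    )
    # hypothetically, through the operations group newly wired onto the client:
    # client.provider_instances.create("<resource-group>", "<sap-monitor>",
    #                                  "<provider-instance-name>", instance)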
- :type ssh_public_key: str - """ - - _validation = { - 'os_type': {'readonly': True}, - 'version': {'readonly': True}, - } - - _attribute_map = { - 'computer_name': {'key': 'computerName', 'type': 'str'}, - 'os_type': {'key': 'osType', 'type': 'str'}, - 'version': {'key': 'version', 'type': 'str'}, - 'ssh_public_key': {'key': 'sshPublicKey', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'tags': {'key': 'tags', 'type': '{str}'}, + 'location': {'key': 'location', 'type': 'str'}, } def __init__(self, **kwargs): - super(OSProfile, self).__init__(**kwargs) - self.computer_name = kwargs.get('computer_name', None) - self.os_type = None - self.version = None - self.ssh_public_key = kwargs.get('ssh_public_key', None) + super(TrackedResource, self).__init__(**kwargs) + self.tags = kwargs.get('tags', None) + self.location = kwargs.get('location', None) -class SapMonitor(Resource): +class SapMonitor(TrackedResource): """SAP monitor info on Azure (ARM properties and SAP monitor properties). Variables are only populated by the server, and will be ignored when sending a request. - :ivar id: Resource ID + :ivar id: Fully qualified resource Id for the resource. Ex - + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/trafficManagerProfiles/{resourceName} :vartype id: str - :ivar name: Resource name + :ivar name: The name of the resource :vartype name: str - :ivar type: Resource type + :ivar type: The type of the resource. Ex- + Microsoft.Network/trafficManagerProfiles. :vartype type: str - :param location: Resource location + :param tags: Resource tags. + :type tags: dict[str, str] + :param location: The Azure Region where the resource lives :type location: str - :ivar tags: Resource tags - :vartype tags: dict[str, str] - :param hana_subnet: Specifies the SAP monitor unique ID. - :type hana_subnet: str - :param hana_hostname: Hostname of the HANA instance. - :type hana_hostname: str - :param hana_db_name: Database name of the HANA instance. - :type hana_db_name: str - :param hana_db_sql_port: Database port of the HANA instance. - :type hana_db_sql_port: int - :param hana_db_username: Database username of the HANA instance. - :type hana_db_username: str - :param hana_db_password: Database password of the HANA instance. - :type hana_db_password: str - :param hana_db_password_key_vault_url: KeyVault URL link to the password - for the HANA database. - :type hana_db_password_key_vault_url: str - :param hana_db_credentials_msi_id: MSI ID passed by customer which has - access to customer's KeyVault and to be assigned to the Collector VM. - :type hana_db_credentials_msi_id: str - :param key_vault_id: Key Vault ID containing customer's HANA credentials. - :type key_vault_id: str :ivar provisioning_state: State of provisioning of the HanaInstance. 
Possible values include: 'Accepted', 'Creating', 'Updating', 'Failed', 'Succeeded', 'Deleting', 'Migrating' @@ -520,91 +337,55 @@ class SapMonitor(Resource): :param log_analytics_workspace_shared_key: The shared key of the log analytics workspace that is used for monitoring :type log_analytics_workspace_shared_key: str + :ivar sap_monitor_collector_version: The version of the payload running in + the Collector VM + :vartype sap_monitor_collector_version: str + :param monitor_subnet: The subnet which the SAP monitor will be deployed + in + :type monitor_subnet: str """ _validation = { 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, - 'tags': {'readonly': True}, 'provisioning_state': {'readonly': True}, 'managed_resource_group_name': {'readonly': True}, + 'sap_monitor_collector_version': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, - 'location': {'key': 'location', 'type': 'str'}, 'tags': {'key': 'tags', 'type': '{str}'}, - 'hana_subnet': {'key': 'properties.hanaSubnet', 'type': 'str'}, - 'hana_hostname': {'key': 'properties.hanaHostname', 'type': 'str'}, - 'hana_db_name': {'key': 'properties.hanaDbName', 'type': 'str'}, - 'hana_db_sql_port': {'key': 'properties.hanaDbSqlPort', 'type': 'int'}, - 'hana_db_username': {'key': 'properties.hanaDbUsername', 'type': 'str'}, - 'hana_db_password': {'key': 'properties.hanaDbPassword', 'type': 'str'}, - 'hana_db_password_key_vault_url': {'key': 'properties.hanaDbPasswordKeyVaultUrl', 'type': 'str'}, - 'hana_db_credentials_msi_id': {'key': 'properties.hanaDbCredentialsMsiId', 'type': 'str'}, - 'key_vault_id': {'key': 'properties.keyVaultId', 'type': 'str'}, + 'location': {'key': 'location', 'type': 'str'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, 'managed_resource_group_name': {'key': 'properties.managedResourceGroupName', 'type': 'str'}, 'log_analytics_workspace_arm_id': {'key': 'properties.logAnalyticsWorkspaceArmId', 'type': 'str'}, 'enable_customer_analytics': {'key': 'properties.enableCustomerAnalytics', 'type': 'bool'}, 'log_analytics_workspace_id': {'key': 'properties.logAnalyticsWorkspaceId', 'type': 'str'}, 'log_analytics_workspace_shared_key': {'key': 'properties.logAnalyticsWorkspaceSharedKey', 'type': 'str'}, + 'sap_monitor_collector_version': {'key': 'properties.sapMonitorCollectorVersion', 'type': 'str'}, + 'monitor_subnet': {'key': 'properties.monitorSubnet', 'type': 'str'}, } def __init__(self, **kwargs): super(SapMonitor, self).__init__(**kwargs) - self.hana_subnet = kwargs.get('hana_subnet', None) - self.hana_hostname = kwargs.get('hana_hostname', None) - self.hana_db_name = kwargs.get('hana_db_name', None) - self.hana_db_sql_port = kwargs.get('hana_db_sql_port', None) - self.hana_db_username = kwargs.get('hana_db_username', None) - self.hana_db_password = kwargs.get('hana_db_password', None) - self.hana_db_password_key_vault_url = kwargs.get('hana_db_password_key_vault_url', None) - self.hana_db_credentials_msi_id = kwargs.get('hana_db_credentials_msi_id', None) - self.key_vault_id = kwargs.get('key_vault_id', None) self.provisioning_state = None self.managed_resource_group_name = None self.log_analytics_workspace_arm_id = kwargs.get('log_analytics_workspace_arm_id', None) self.enable_customer_analytics = kwargs.get('enable_customer_analytics', None) self.log_analytics_workspace_id = kwargs.get('log_analytics_workspace_id', None) 
self.log_analytics_workspace_shared_key = kwargs.get('log_analytics_workspace_shared_key', None) - - -class StorageProfile(Model): - """Specifies the storage settings for the HANA instance disks. - - Variables are only populated by the server, and will be ignored when - sending a request. - - :ivar nfs_ip_address: IP Address to connect to storage. - :vartype nfs_ip_address: str - :param os_disks: Specifies information about the operating system disk - used by the hana instance. - :type os_disks: list[~azure.mgmt.hanaonazure.models.Disk] - """ - - _validation = { - 'nfs_ip_address': {'readonly': True}, - } - - _attribute_map = { - 'nfs_ip_address': {'key': 'nfsIpAddress', 'type': 'str'}, - 'os_disks': {'key': 'osDisks', 'type': '[Disk]'}, - } - - def __init__(self, **kwargs): - super(StorageProfile, self).__init__(**kwargs) - self.nfs_ip_address = None - self.os_disks = kwargs.get('os_disks', None) + self.sap_monitor_collector_version = None + self.monitor_subnet = kwargs.get('monitor_subnet', None) class Tags(Model): - """Tags field of the HANA instance. + """Tags field of the resource. - :param tags: Tags field of the HANA instance. + :param tags: Tags field of the resource. :type tags: dict[str, str] """ diff --git a/sdk/hanaonazure/azure-mgmt-hanaonazure/azure/mgmt/hanaonazure/models/_models_py3.py b/sdk/hanaonazure/azure-mgmt-hanaonazure/azure/mgmt/hanaonazure/models/_models_py3.py index 71c6f2d05e96..a69a56494bd2 100644 --- a/sdk/hanaonazure/azure-mgmt-hanaonazure/azure/mgmt/hanaonazure/models/_models_py3.py +++ b/sdk/hanaonazure/azure-mgmt-hanaonazure/azure/mgmt/hanaonazure/models/_models_py3.py @@ -21,40 +21,6 @@ class CloudError(Model): } -class Disk(Model): - """Specifies the disk information fo the HANA instance. - - Variables are only populated by the server, and will be ignored when - sending a request. - - :param name: The disk name. - :type name: str - :param disk_size_gb: Specifies the size of an empty data disk in - gigabytes. - :type disk_size_gb: int - :ivar lun: Specifies the logical unit number of the data disk. This value - is used to identify data disks within the VM and therefore must be unique - for each data disk attached to a VM. - :vartype lun: int - """ - - _validation = { - 'lun': {'readonly': True}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'disk_size_gb': {'key': 'diskSizeGB', 'type': 'int'}, - 'lun': {'key': 'lun', 'type': 'int'}, - } - - def __init__(self, *, name: str=None, disk_size_gb: int=None, **kwargs) -> None: - super(Disk, self).__init__(**kwargs) - self.name = name - self.disk_size_gb = disk_size_gb - self.lun = None - - class Display(Model): """Detailed HANA operation information. @@ -142,364 +108,215 @@ def __init__(self, deserialize, response, *args): super(ErrorResponseException, self).__init__(deserialize, response, 'ErrorResponse', *args) -class Resource(Model): - """The resource model definition. +class Operation(Model): + """HANA operation information. Variables are only populated by the server, and will be ignored when sending a request. - :ivar id: Resource ID - :vartype id: str - :ivar name: Resource name + :ivar name: The name of the operation being performed on this particular + object. This name should match the action name that appears in RBAC / the + event service. 
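SapMonitor now derives from TrackedResource, so tags and location become settable parameters, and it points at the monitored network through the single monitor_subnet field; the per-database hana_* settings move out to provider instances. Minimal construction sketch with placeholder values:

    from azure.mgmt.hanaonazure.models import SapMonitor

    monitor = SapMonitor(
        location="westus2",
        monitor_subnet=("/subscriptions/<sub-id>/resourceGroups/<rg>/providers/"
                        "Microsoft.Network/virtualNetworks/<vnet>/subnets/<subnet>"),
        enable_customer_analytics=False,
    )
    # sap_monitor_collector_version is read-only and populated by the service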
:vartype name: str - :ivar type: Resource type - :vartype type: str - :param location: Resource location - :type location: str - :ivar tags: Resource tags - :vartype tags: dict[str, str] + :param display: Displayed HANA operation information + :type display: ~azure.mgmt.hanaonazure.models.Display """ _validation = { - 'id': {'readonly': True}, 'name': {'readonly': True}, - 'type': {'readonly': True}, - 'tags': {'readonly': True}, } _attribute_map = { - 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'str'}, - 'location': {'key': 'location', 'type': 'str'}, - 'tags': {'key': 'tags', 'type': '{str}'}, + 'display': {'key': 'display', 'type': 'Display'}, } - def __init__(self, *, location: str=None, **kwargs) -> None: - super(Resource, self).__init__(**kwargs) - self.id = None + def __init__(self, *, display=None, **kwargs) -> None: + super(Operation, self).__init__(**kwargs) self.name = None - self.type = None - self.location = location - self.tags = None + self.display = display -class HanaInstance(Resource): - """HANA instance info on Azure (ARM properties and HANA properties). +class Resource(Model): + """The core properties of ARM resources. Variables are only populated by the server, and will be ignored when sending a request. - :ivar id: Resource ID + :ivar id: Fully qualified resource Id for the resource. Ex - + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/trafficManagerProfiles/{resourceName} :vartype id: str - :ivar name: Resource name + :ivar name: The name of the resource :vartype name: str - :ivar type: Resource type + :ivar type: The type of the resource. Ex- + Microsoft.Network/trafficManagerProfiles. :vartype type: str - :param location: Resource location - :type location: str - :ivar tags: Resource tags - :vartype tags: dict[str, str] - :param hardware_profile: Specifies the hardware settings for the HANA - instance. - :type hardware_profile: ~azure.mgmt.hanaonazure.models.HardwareProfile - :param storage_profile: Specifies the storage settings for the HANA - instance disks. - :type storage_profile: ~azure.mgmt.hanaonazure.models.StorageProfile - :param os_profile: Specifies the operating system settings for the HANA - instance. - :type os_profile: ~azure.mgmt.hanaonazure.models.OSProfile - :param network_profile: Specifies the network settings for the HANA - instance. - :type network_profile: ~azure.mgmt.hanaonazure.models.NetworkProfile - :ivar hana_instance_id: Specifies the HANA instance unique ID. - :vartype hana_instance_id: str - :ivar power_state: Resource power state. Possible values include: - 'starting', 'started', 'stopping', 'stopped', 'restarting', 'unknown' - :vartype power_state: str or - ~azure.mgmt.hanaonazure.models.HanaInstancePowerStateEnum - :ivar proximity_placement_group: Resource proximity placement group - :vartype proximity_placement_group: str - :ivar hw_revision: Hardware revision of a HANA instance - :vartype hw_revision: str - :param partner_node_id: ARM ID of another HanaInstance that will share a - network with this HanaInstance - :type partner_node_id: str - :ivar provisioning_state: State of provisioning of the HanaInstance. 
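The _models_py3.py variants define the same shapes as _models.py but with keyword-only, type-annotated constructors (note the bare * in each __init__ signature), so Python 3 callers get an early TypeError on positional misuse. For example, with the py3 ProviderInstance initializer further below:

    from azure.mgmt.hanaonazure.models import ProviderInstance

    p = ProviderInstance(provider_instance_type="SapHana", properties="{}")  # fine
    # ProviderInstance("SapHana")  # TypeError: positional arguments are rejected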
- Possible values include: 'Accepted', 'Creating', 'Updating', 'Failed', - 'Succeeded', 'Deleting', 'Migrating' - :vartype provisioning_state: str or - ~azure.mgmt.hanaonazure.models.HanaProvisioningStatesEnum """ _validation = { 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, - 'tags': {'readonly': True}, - 'hana_instance_id': {'readonly': True}, - 'power_state': {'readonly': True}, - 'proximity_placement_group': {'readonly': True}, - 'hw_revision': {'readonly': True}, - 'provisioning_state': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, - 'location': {'key': 'location', 'type': 'str'}, - 'tags': {'key': 'tags', 'type': '{str}'}, - 'hardware_profile': {'key': 'properties.hardwareProfile', 'type': 'HardwareProfile'}, - 'storage_profile': {'key': 'properties.storageProfile', 'type': 'StorageProfile'}, - 'os_profile': {'key': 'properties.osProfile', 'type': 'OSProfile'}, - 'network_profile': {'key': 'properties.networkProfile', 'type': 'NetworkProfile'}, - 'hana_instance_id': {'key': 'properties.hanaInstanceId', 'type': 'str'}, - 'power_state': {'key': 'properties.powerState', 'type': 'str'}, - 'proximity_placement_group': {'key': 'properties.proximityPlacementGroup', 'type': 'str'}, - 'hw_revision': {'key': 'properties.hwRevision', 'type': 'str'}, - 'partner_node_id': {'key': 'properties.partnerNodeId', 'type': 'str'}, - 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, } - def __init__(self, *, location: str=None, hardware_profile=None, storage_profile=None, os_profile=None, network_profile=None, partner_node_id: str=None, **kwargs) -> None: - super(HanaInstance, self).__init__(location=location, **kwargs) - self.hardware_profile = hardware_profile - self.storage_profile = storage_profile - self.os_profile = os_profile - self.network_profile = network_profile - self.hana_instance_id = None - self.power_state = None - self.proximity_placement_group = None - self.hw_revision = None - self.partner_node_id = partner_node_id - self.provisioning_state = None + def __init__(self, **kwargs) -> None: + super(Resource, self).__init__(**kwargs) + self.id = None + self.name = None + self.type = None -class HardwareProfile(Model): - """Specifies the hardware settings for the HANA instance. +class ProxyResource(Resource): + """The resource model definition for a ARM proxy resource. It will have + everything other than required location and tags. Variables are only populated by the server, and will be ignored when sending a request. - :ivar hardware_type: Name of the hardware type (vendor and/or their - product name). Possible values include: 'Cisco_UCS', 'HPE' - :vartype hardware_type: str or - ~azure.mgmt.hanaonazure.models.HanaHardwareTypeNamesEnum - :ivar hana_instance_size: Specifies the HANA instance SKU. Possible values - include: 'S72m', 'S144m', 'S72', 'S144', 'S192', 'S192m', 'S192xm', 'S96', - 'S112', 'S224', 'S224m', 'S224om', 'S224oo', 'S224oom', 'S224ooo', 'S384', - 'S384m', 'S384xm', 'S384xxm', 'S448', 'S448m', 'S448om', 'S448oo', - 'S448oom', 'S448ooo', 'S576m', 'S576xm', 'S672', 'S672m', 'S672om', - 'S672oo', 'S672oom', 'S672ooo', 'S768', 'S768m', 'S768xm', 'S896', - 'S896m', 'S896om', 'S896oo', 'S896oom', 'S896ooo', 'S960m' - :vartype hana_instance_size: str or - ~azure.mgmt.hanaonazure.models.HanaInstanceSizeNamesEnum + :ivar id: Fully qualified resource Id for the resource. 
Ex - + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/trafficManagerProfiles/{resourceName} + :vartype id: str + :ivar name: The name of the resource + :vartype name: str + :ivar type: The type of the resource. Ex- + Microsoft.Network/trafficManagerProfiles. + :vartype type: str """ _validation = { - 'hardware_type': {'readonly': True}, - 'hana_instance_size': {'readonly': True}, + 'id': {'readonly': True}, + 'name': {'readonly': True}, + 'type': {'readonly': True}, } _attribute_map = { - 'hardware_type': {'key': 'hardwareType', 'type': 'str'}, - 'hana_instance_size': {'key': 'hanaInstanceSize', 'type': 'str'}, + 'id': {'key': 'id', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, } def __init__(self, **kwargs) -> None: - super(HardwareProfile, self).__init__(**kwargs) - self.hardware_type = None - self.hana_instance_size = None - - -class IpAddress(Model): - """Specifies the IP address of the network interface. - - :param ip_address: Specifies the IP address of the network interface. - :type ip_address: str - """ - - _attribute_map = { - 'ip_address': {'key': 'ipAddress', 'type': 'str'}, - } - - def __init__(self, *, ip_address: str=None, **kwargs) -> None: - super(IpAddress, self).__init__(**kwargs) - self.ip_address = ip_address - - -class MonitoringDetails(Model): - """Details needed to monitor a Hana Instance. + super(ProxyResource, self).__init__(**kwargs) - :param hana_subnet: ARM ID of an Azure Subnet with access to the HANA - instance. - :type hana_subnet: str - :param hana_hostname: Hostname of the HANA Instance blade. - :type hana_hostname: str - :param hana_db_name: Name of the database itself. - :type hana_db_name: str - :param hana_db_sql_port: The port number of the tenant DB. Used to connect - to the DB. - :type hana_db_sql_port: int - :param hana_db_username: Username for the HANA database to login to for - monitoring - :type hana_db_username: str - :param hana_db_password: Password for the HANA database to login for - monitoring - :type hana_db_password: str - """ - - _attribute_map = { - 'hana_subnet': {'key': 'hanaSubnet', 'type': 'str'}, - 'hana_hostname': {'key': 'hanaHostname', 'type': 'str'}, - 'hana_db_name': {'key': 'hanaDbName', 'type': 'str'}, - 'hana_db_sql_port': {'key': 'hanaDbSqlPort', 'type': 'int'}, - 'hana_db_username': {'key': 'hanaDbUsername', 'type': 'str'}, - 'hana_db_password': {'key': 'hanaDbPassword', 'type': 'str'}, - } - def __init__(self, *, hana_subnet: str=None, hana_hostname: str=None, hana_db_name: str=None, hana_db_sql_port: int=None, hana_db_username: str=None, hana_db_password: str=None, **kwargs) -> None: - super(MonitoringDetails, self).__init__(**kwargs) - self.hana_subnet = hana_subnet - self.hana_hostname = hana_hostname - self.hana_db_name = hana_db_name - self.hana_db_sql_port = hana_db_sql_port - self.hana_db_username = hana_db_username - self.hana_db_password = hana_db_password - - -class NetworkProfile(Model): - """Specifies the network settings for the HANA instance disks. +class ProviderInstance(ProxyResource): + """A provider instance associated with a SAP monitor. Variables are only populated by the server, and will be ignored when sending a request. - :param network_interfaces: Specifies the network interfaces for the HANA + :ivar id: Fully qualified resource Id for the resource. 
Ex - + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/trafficManagerProfiles/{resourceName} + :vartype id: str + :ivar name: The name of the resource + :vartype name: str + :ivar type: The type of the resource. Ex- + Microsoft.Network/trafficManagerProfiles. + :vartype type: str + :param provider_instance_type: The type of provider instance. + :type provider_instance_type: str + :param properties: A JSON string containing the properties of the provider instance. - :type network_interfaces: list[~azure.mgmt.hanaonazure.models.IpAddress] - :ivar circuit_id: Specifies the circuit id for connecting to express - route. - :vartype circuit_id: str + :type properties: str + :param metadata: A JSON string containing metadata of the provider + instance. + :type metadata: str + :ivar provisioning_state: State of provisioning of the provider instance. + Possible values include: 'Accepted', 'Creating', 'Updating', 'Failed', + 'Succeeded', 'Deleting', 'Migrating' + :vartype provisioning_state: str or + ~azure.mgmt.hanaonazure.models.HanaProvisioningStatesEnum """ _validation = { - 'circuit_id': {'readonly': True}, + 'id': {'readonly': True}, + 'name': {'readonly': True}, + 'type': {'readonly': True}, + 'provisioning_state': {'readonly': True}, } _attribute_map = { - 'network_interfaces': {'key': 'networkInterfaces', 'type': '[IpAddress]'}, - 'circuit_id': {'key': 'circuitId', 'type': 'str'}, + 'id': {'key': 'id', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'provider_instance_type': {'key': 'properties.type', 'type': 'str'}, + 'properties': {'key': 'properties.properties', 'type': 'str'}, + 'metadata': {'key': 'properties.metadata', 'type': 'str'}, + 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, } - def __init__(self, *, network_interfaces=None, **kwargs) -> None: - super(NetworkProfile, self).__init__(**kwargs) - self.network_interfaces = network_interfaces - self.circuit_id = None + def __init__(self, *, provider_instance_type: str=None, properties: str=None, metadata: str=None, **kwargs) -> None: + super(ProviderInstance, self).__init__(**kwargs) + self.provider_instance_type = provider_instance_type + self.properties = properties + self.metadata = metadata + self.provisioning_state = None -class Operation(Model): - """HANA operation information. +class TrackedResource(Resource): + """The resource model definition for a ARM tracked top level resource. Variables are only populated by the server, and will be ignored when sending a request. - :ivar name: The name of the operation being performed on this particular - object. This name should match the action name that appears in RBAC / the - event service. + :ivar id: Fully qualified resource Id for the resource. Ex - + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/trafficManagerProfiles/{resourceName} + :vartype id: str + :ivar name: The name of the resource :vartype name: str - :param display: Displayed HANA operation information - :type display: ~azure.mgmt.hanaonazure.models.Display + :ivar type: The type of the resource. Ex- + Microsoft.Network/trafficManagerProfiles. + :vartype type: str + :param tags: Resource tags. 
+ :type tags: dict[str, str] + :param location: The Azure Region where the resource lives + :type location: str """ _validation = { + 'id': {'readonly': True}, 'name': {'readonly': True}, + 'type': {'readonly': True}, } _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, - 'display': {'key': 'display', 'type': 'Display'}, - } - - def __init__(self, *, display=None, **kwargs) -> None: - super(Operation, self).__init__(**kwargs) - self.name = None - self.display = display - - -class OSProfile(Model): - """Specifies the operating system settings for the HANA instance. - - Variables are only populated by the server, and will be ignored when - sending a request. - - :param computer_name: Specifies the host OS name of the HANA instance. - :type computer_name: str - :ivar os_type: This property allows you to specify the type of the OS. - :vartype os_type: str - :ivar version: Specifies version of operating system. - :vartype version: str - :param ssh_public_key: Specifies the SSH public key used to access the - operating system. - :type ssh_public_key: str - """ - - _validation = { - 'os_type': {'readonly': True}, - 'version': {'readonly': True}, - } - - _attribute_map = { - 'computer_name': {'key': 'computerName', 'type': 'str'}, - 'os_type': {'key': 'osType', 'type': 'str'}, - 'version': {'key': 'version', 'type': 'str'}, - 'ssh_public_key': {'key': 'sshPublicKey', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'tags': {'key': 'tags', 'type': '{str}'}, + 'location': {'key': 'location', 'type': 'str'}, } - def __init__(self, *, computer_name: str=None, ssh_public_key: str=None, **kwargs) -> None: - super(OSProfile, self).__init__(**kwargs) - self.computer_name = computer_name - self.os_type = None - self.version = None - self.ssh_public_key = ssh_public_key + def __init__(self, *, tags=None, location: str=None, **kwargs) -> None: + super(TrackedResource, self).__init__(**kwargs) + self.tags = tags + self.location = location -class SapMonitor(Resource): +class SapMonitor(TrackedResource): """SAP monitor info on Azure (ARM properties and SAP monitor properties). Variables are only populated by the server, and will be ignored when sending a request. - :ivar id: Resource ID + :ivar id: Fully qualified resource Id for the resource. Ex - + /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/trafficManagerProfiles/{resourceName} :vartype id: str - :ivar name: Resource name + :ivar name: The name of the resource :vartype name: str - :ivar type: Resource type + :ivar type: The type of the resource. Ex- + Microsoft.Network/trafficManagerProfiles. :vartype type: str - :param location: Resource location + :param tags: Resource tags. + :type tags: dict[str, str] + :param location: The Azure Region where the resource lives :type location: str - :ivar tags: Resource tags - :vartype tags: dict[str, str] - :param hana_subnet: Specifies the SAP monitor unique ID. - :type hana_subnet: str - :param hana_hostname: Hostname of the HANA instance. - :type hana_hostname: str - :param hana_db_name: Database name of the HANA instance. - :type hana_db_name: str - :param hana_db_sql_port: Database port of the HANA instance. - :type hana_db_sql_port: int - :param hana_db_username: Database username of the HANA instance. - :type hana_db_username: str - :param hana_db_password: Database password of the HANA instance. 
- :type hana_db_password: str - :param hana_db_password_key_vault_url: KeyVault URL link to the password - for the HANA database. - :type hana_db_password_key_vault_url: str - :param hana_db_credentials_msi_id: MSI ID passed by customer which has - access to customer's KeyVault and to be assigned to the Collector VM. - :type hana_db_credentials_msi_id: str - :param key_vault_id: Key Vault ID containing customer's HANA credentials. - :type key_vault_id: str :ivar provisioning_state: State of provisioning of the HanaInstance. Possible values include: 'Accepted', 'Creating', 'Updating', 'Failed', 'Succeeded', 'Deleting', 'Migrating' @@ -520,91 +337,55 @@ class SapMonitor(Resource): :param log_analytics_workspace_shared_key: The shared key of the log analytics workspace that is used for monitoring :type log_analytics_workspace_shared_key: str + :ivar sap_monitor_collector_version: The version of the payload running in + the Collector VM + :vartype sap_monitor_collector_version: str + :param monitor_subnet: The subnet which the SAP monitor will be deployed + in + :type monitor_subnet: str """ _validation = { 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, - 'tags': {'readonly': True}, 'provisioning_state': {'readonly': True}, 'managed_resource_group_name': {'readonly': True}, + 'sap_monitor_collector_version': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, - 'location': {'key': 'location', 'type': 'str'}, 'tags': {'key': 'tags', 'type': '{str}'}, - 'hana_subnet': {'key': 'properties.hanaSubnet', 'type': 'str'}, - 'hana_hostname': {'key': 'properties.hanaHostname', 'type': 'str'}, - 'hana_db_name': {'key': 'properties.hanaDbName', 'type': 'str'}, - 'hana_db_sql_port': {'key': 'properties.hanaDbSqlPort', 'type': 'int'}, - 'hana_db_username': {'key': 'properties.hanaDbUsername', 'type': 'str'}, - 'hana_db_password': {'key': 'properties.hanaDbPassword', 'type': 'str'}, - 'hana_db_password_key_vault_url': {'key': 'properties.hanaDbPasswordKeyVaultUrl', 'type': 'str'}, - 'hana_db_credentials_msi_id': {'key': 'properties.hanaDbCredentialsMsiId', 'type': 'str'}, - 'key_vault_id': {'key': 'properties.keyVaultId', 'type': 'str'}, + 'location': {'key': 'location', 'type': 'str'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, 'managed_resource_group_name': {'key': 'properties.managedResourceGroupName', 'type': 'str'}, 'log_analytics_workspace_arm_id': {'key': 'properties.logAnalyticsWorkspaceArmId', 'type': 'str'}, 'enable_customer_analytics': {'key': 'properties.enableCustomerAnalytics', 'type': 'bool'}, 'log_analytics_workspace_id': {'key': 'properties.logAnalyticsWorkspaceId', 'type': 'str'}, 'log_analytics_workspace_shared_key': {'key': 'properties.logAnalyticsWorkspaceSharedKey', 'type': 'str'}, + 'sap_monitor_collector_version': {'key': 'properties.sapMonitorCollectorVersion', 'type': 'str'}, + 'monitor_subnet': {'key': 'properties.monitorSubnet', 'type': 'str'}, } - def __init__(self, *, location: str=None, hana_subnet: str=None, hana_hostname: str=None, hana_db_name: str=None, hana_db_sql_port: int=None, hana_db_username: str=None, hana_db_password: str=None, hana_db_password_key_vault_url: str=None, hana_db_credentials_msi_id: str=None, key_vault_id: str=None, log_analytics_workspace_arm_id: str=None, enable_customer_analytics: bool=None, log_analytics_workspace_id: str=None, log_analytics_workspace_shared_key: str=None, 
**kwargs) -> None: - super(SapMonitor, self).__init__(location=location, **kwargs) - self.hana_subnet = hana_subnet - self.hana_hostname = hana_hostname - self.hana_db_name = hana_db_name - self.hana_db_sql_port = hana_db_sql_port - self.hana_db_username = hana_db_username - self.hana_db_password = hana_db_password - self.hana_db_password_key_vault_url = hana_db_password_key_vault_url - self.hana_db_credentials_msi_id = hana_db_credentials_msi_id - self.key_vault_id = key_vault_id + def __init__(self, *, tags=None, location: str=None, log_analytics_workspace_arm_id: str=None, enable_customer_analytics: bool=None, log_analytics_workspace_id: str=None, log_analytics_workspace_shared_key: str=None, monitor_subnet: str=None, **kwargs) -> None: + super(SapMonitor, self).__init__(tags=tags, location=location, **kwargs) self.provisioning_state = None self.managed_resource_group_name = None self.log_analytics_workspace_arm_id = log_analytics_workspace_arm_id self.enable_customer_analytics = enable_customer_analytics self.log_analytics_workspace_id = log_analytics_workspace_id self.log_analytics_workspace_shared_key = log_analytics_workspace_shared_key - - -class StorageProfile(Model): - """Specifies the storage settings for the HANA instance disks. - - Variables are only populated by the server, and will be ignored when - sending a request. - - :ivar nfs_ip_address: IP Address to connect to storage. - :vartype nfs_ip_address: str - :param os_disks: Specifies information about the operating system disk - used by the hana instance. - :type os_disks: list[~azure.mgmt.hanaonazure.models.Disk] - """ - - _validation = { - 'nfs_ip_address': {'readonly': True}, - } - - _attribute_map = { - 'nfs_ip_address': {'key': 'nfsIpAddress', 'type': 'str'}, - 'os_disks': {'key': 'osDisks', 'type': '[Disk]'}, - } - - def __init__(self, *, os_disks=None, **kwargs) -> None: - super(StorageProfile, self).__init__(**kwargs) - self.nfs_ip_address = None - self.os_disks = os_disks + self.sap_monitor_collector_version = None + self.monitor_subnet = monitor_subnet class Tags(Model): - """Tags field of the HANA instance. + """Tags field of the resource. - :param tags: Tags field of the HANA instance. + :param tags: Tags field of the resource. 
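A minimal sketch of building the reshaped SapMonitor model under the same assumptions; the region, subnet ID, and tag values are hypothetical placeholders:

    from azure.mgmt.hanaonazure.models import SapMonitor

    monitor = SapMonitor(
        location="westus2",                     # hypothetical region
        tags={"env": "test"},
        monitor_subnet="<subnet-resource-id>",  # hypothetical ARM ID
        enable_customer_analytics=False,
    )
    # provisioning_state, managed_resource_group_name and
    # sap_monitor_collector_version are read-only and set by the service.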
:type tags: dict[str, str] """ diff --git a/sdk/hanaonazure/azure-mgmt-hanaonazure/azure/mgmt/hanaonazure/models/_paged_models.py b/sdk/hanaonazure/azure-mgmt-hanaonazure/azure/mgmt/hanaonazure/models/_paged_models.py index 874542d2dcaf..3d6078986be8 100644 --- a/sdk/hanaonazure/azure-mgmt-hanaonazure/azure/mgmt/hanaonazure/models/_paged_models.py +++ b/sdk/hanaonazure/azure-mgmt-hanaonazure/azure/mgmt/hanaonazure/models/_paged_models.py @@ -25,29 +25,29 @@ class OperationPaged(Paged): def __init__(self, *args, **kwargs): super(OperationPaged, self).__init__(*args, **kwargs) -class HanaInstancePaged(Paged): +class SapMonitorPaged(Paged): """ - A paging container for iterating over a list of :class:`HanaInstance ` object + A paging container for iterating over a list of :class:`SapMonitor ` object """ _attribute_map = { 'next_link': {'key': 'nextLink', 'type': 'str'}, - 'current_page': {'key': 'value', 'type': '[HanaInstance]'} + 'current_page': {'key': 'value', 'type': '[SapMonitor]'} } def __init__(self, *args, **kwargs): - super(HanaInstancePaged, self).__init__(*args, **kwargs) -class SapMonitorPaged(Paged): + super(SapMonitorPaged, self).__init__(*args, **kwargs) +class ProviderInstancePaged(Paged): """ - A paging container for iterating over a list of :class:`SapMonitor ` object + A paging container for iterating over a list of :class:`ProviderInstance ` object """ _attribute_map = { 'next_link': {'key': 'nextLink', 'type': 'str'}, - 'current_page': {'key': 'value', 'type': '[SapMonitor]'} + 'current_page': {'key': 'value', 'type': '[ProviderInstance]'} } def __init__(self, *args, **kwargs): - super(SapMonitorPaged, self).__init__(*args, **kwargs) + super(ProviderInstancePaged, self).__init__(*args, **kwargs) diff --git a/sdk/hanaonazure/azure-mgmt-hanaonazure/azure/mgmt/hanaonazure/operations/__init__.py b/sdk/hanaonazure/azure-mgmt-hanaonazure/azure/mgmt/hanaonazure/operations/__init__.py index 03aed444efe9..729ae15a1600 100644 --- a/sdk/hanaonazure/azure-mgmt-hanaonazure/azure/mgmt/hanaonazure/operations/__init__.py +++ b/sdk/hanaonazure/azure-mgmt-hanaonazure/azure/mgmt/hanaonazure/operations/__init__.py @@ -10,11 +10,11 @@ # -------------------------------------------------------------------------- from ._operations import Operations -from ._hana_instances_operations import HanaInstancesOperations from ._sap_monitors_operations import SapMonitorsOperations +from ._provider_instances_operations import ProviderInstancesOperations __all__ = [ 'Operations', - 'HanaInstancesOperations', 'SapMonitorsOperations', + 'ProviderInstancesOperations', ] diff --git a/sdk/hanaonazure/azure-mgmt-hanaonazure/azure/mgmt/hanaonazure/operations/_hana_instances_operations.py b/sdk/hanaonazure/azure-mgmt-hanaonazure/azure/mgmt/hanaonazure/operations/_hana_instances_operations.py deleted file mode 100644 index 2165693784cd..000000000000 --- a/sdk/hanaonazure/azure-mgmt-hanaonazure/azure/mgmt/hanaonazure/operations/_hana_instances_operations.py +++ /dev/null @@ -1,738 +0,0 @@ -# coding=utf-8 -# -------------------------------------------------------------------------- -# Copyright (c) Microsoft Corporation. All rights reserved. -# Licensed under the MIT License. See License.txt in the project root for -# license information. -# -# Code generated by Microsoft (R) AutoRest Code Generator. -# Changes may cause incorrect behavior and will be lost if the code is -# regenerated. 
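The paged containers above are consumed transparently when iterating the matching operations group; a minimal sketch, assuming an authenticated HanaManagementClient named client and hypothetical resource names:

    for provider in client.provider_instances.list("myResourceGroup", "mySapMonitor"):
        print(provider.name, provider.provider_instance_type)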
-# -------------------------------------------------------------------------- - -import uuid -from msrest.pipeline import ClientRawResponse -from msrest.polling import LROPoller, NoPolling -from msrestazure.polling.arm_polling import ARMPolling - -from .. import models - - -class HanaInstancesOperations(object): - """HanaInstancesOperations operations. - - You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. - - :param client: Client for service requests. - :param config: Configuration of service client. - :param serializer: An object model serializer. - :param deserializer: An object model deserializer. - :ivar api_version: Client API version. Constant value: "2017-11-03-preview". - """ - - models = models - - def __init__(self, client, config, serializer, deserializer): - - self._client = client - self._serialize = serializer - self._deserialize = deserializer - self.api_version = "2017-11-03-preview" - - self.config = config - - def list( - self, custom_headers=None, raw=False, **operation_config): - """Gets a list of SAP HANA instances in the specified subscription. - - Gets a list of SAP HANA instances in the specified subscription. The - operations returns various properties of each SAP HANA on Azure - instance. - - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: An iterator like instance of HanaInstance - :rtype: - ~azure.mgmt.hanaonazure.models.HanaInstancePaged[~azure.mgmt.hanaonazure.models.HanaInstance] - :raises: - :class:`ErrorResponseException` - """ - def prepare_request(next_link=None): - if not next_link: - # Construct URL - url = self.list.metadata['url'] - path_format_arguments = { - 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - - else: - url = next_link - query_parameters = {} - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if self.config.generate_client_request_id: - header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - return request - - def internal_paging(next_link=None): - request = prepare_request(next_link) - - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.ErrorResponseException(self._deserialize, response) - - return response - - # Deserialize response - header_dict = None - if raw: - header_dict = {} - deserialized = models.HanaInstancePaged(internal_paging, self._deserialize.dependencies, header_dict) - - return deserialized - list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.HanaOnAzure/hanaInstances'} - - def list_by_resource_group( - self, resource_group_name, custom_headers=None, raw=False, 
**operation_config): - """Gets a list of SAP HANA instances in the specified subscription and the - resource group. - - Gets a list of SAP HANA instances in the specified subscription and the - resource group. The operations returns various properties of each SAP - HANA on Azure instance. - - :param resource_group_name: Name of the resource group. - :type resource_group_name: str - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: An iterator like instance of HanaInstance - :rtype: - ~azure.mgmt.hanaonazure.models.HanaInstancePaged[~azure.mgmt.hanaonazure.models.HanaInstance] - :raises: - :class:`ErrorResponseException` - """ - def prepare_request(next_link=None): - if not next_link: - # Construct URL - url = self.list_by_resource_group.metadata['url'] - path_format_arguments = { - 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'), - 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - - else: - url = next_link - query_parameters = {} - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if self.config.generate_client_request_id: - header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - return request - - def internal_paging(next_link=None): - request = prepare_request(next_link) - - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.ErrorResponseException(self._deserialize, response) - - return response - - # Deserialize response - header_dict = None - if raw: - header_dict = {} - deserialized = models.HanaInstancePaged(internal_paging, self._deserialize.dependencies, header_dict) - - return deserialized - list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HanaOnAzure/hanaInstances'} - - def get( - self, resource_group_name, hana_instance_name, custom_headers=None, raw=False, **operation_config): - """Gets properties of a SAP HANA instance. - - Gets properties of a SAP HANA instance for the specified subscription, - resource group, and instance name. - - :param resource_group_name: Name of the resource group. - :type resource_group_name: str - :param hana_instance_name: Name of the SAP HANA on Azure instance. - :type hana_instance_name: str - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: HanaInstance or ClientRawResponse if raw=true - :rtype: ~azure.mgmt.hanaonazure.models.HanaInstance or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`ErrorResponseException` - """ - # Construct URL - url = self.get.metadata['url'] - path_format_arguments = { - 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'), - 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), - 'hanaInstanceName': self._serialize.url("hana_instance_name", hana_instance_name, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if self.config.generate_client_request_id: - header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.ErrorResponseException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('HanaInstance', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HanaOnAzure/hanaInstances/{hanaInstanceName}'} - - - def _create_initial( - self, resource_group_name, hana_instance_name, hana_instance_parameter, custom_headers=None, raw=False, **operation_config): - # Construct URL - url = self.create.metadata['url'] - path_format_arguments = { - 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'), - 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), - 'hanaInstanceName': self._serialize.url("hana_instance_name", hana_instance_name, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if self.config.generate_client_request_id: - header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - - # Construct body - body_content = self._serialize.body(hana_instance_parameter, 'HanaInstance') - - # Construct and send request - request = self._client.put(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200, 
201]: - raise models.ErrorResponseException(self._deserialize, response) - - deserialized = None - - if response.status_code == 200: - deserialized = self._deserialize('HanaInstance', response) - if response.status_code == 201: - deserialized = self._deserialize('HanaInstance', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - - def create( - self, resource_group_name, hana_instance_name, hana_instance_parameter, custom_headers=None, raw=False, polling=True, **operation_config): - """Creates a SAP HANA instance. - - Creates a SAP HANA instance for the specified subscription, resource - group, and instance name. - - :param resource_group_name: Name of the resource group. - :type resource_group_name: str - :param hana_instance_name: Name of the SAP HANA on Azure instance. - :type hana_instance_name: str - :param hana_instance_parameter: Request body representing a - HanaInstance - :type hana_instance_parameter: - ~azure.mgmt.hanaonazure.models.HanaInstance - :param dict custom_headers: headers that will be added to the request - :param bool raw: The poller return type is ClientRawResponse, the - direct response alongside the deserialized response - :param polling: True for ARMPolling, False for no polling, or a - polling object for personal polling strategy - :return: An instance of LROPoller that returns HanaInstance or - ClientRawResponse if raw==True - :rtype: - ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.hanaonazure.models.HanaInstance] - or - ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.hanaonazure.models.HanaInstance]] - :raises: - :class:`ErrorResponseException` - """ - raw_result = self._create_initial( - resource_group_name=resource_group_name, - hana_instance_name=hana_instance_name, - hana_instance_parameter=hana_instance_parameter, - custom_headers=custom_headers, - raw=True, - **operation_config - ) - - def get_long_running_output(response): - deserialized = self._deserialize('HanaInstance', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - - lro_delay = operation_config.get( - 'long_running_operation_timeout', - self.config.long_running_operation_timeout) - if polling is True: polling_method = ARMPolling(lro_delay, **operation_config) - elif polling is False: polling_method = NoPolling() - else: polling_method = polling - return LROPoller(self._client, raw_result, get_long_running_output, polling_method) - create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HanaOnAzure/hanaInstances/{hanaInstanceName}'} - - - def _delete_initial( - self, resource_group_name, hana_instance_name, custom_headers=None, raw=False, **operation_config): - # Construct URL - url = self.delete.metadata['url'] - path_format_arguments = { - 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'), - 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), - 'hanaInstanceName': self._serialize.url("hana_instance_name", hana_instance_name, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - - # Construct headers - header_parameters = {} - 
if self.config.generate_client_request_id: - header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - - # Construct and send request - request = self._client.delete(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200, 202, 204]: - raise models.ErrorResponseException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - - def delete( - self, resource_group_name, hana_instance_name, custom_headers=None, raw=False, polling=True, **operation_config): - """Deletes a SAP HANA instance. - - Deletes a SAP HANA instance with the specified subscription, resource - group, and instance name. - - :param resource_group_name: Name of the resource group. - :type resource_group_name: str - :param hana_instance_name: Name of the SAP HANA on Azure instance. - :type hana_instance_name: str - :param dict custom_headers: headers that will be added to the request - :param bool raw: The poller return type is ClientRawResponse, the - direct response alongside the deserialized response - :param polling: True for ARMPolling, False for no polling, or a - polling object for personal polling strategy - :return: An instance of LROPoller that returns None or - ClientRawResponse if raw==True - :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or - ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]] - :raises: - :class:`ErrorResponseException` - """ - raw_result = self._delete_initial( - resource_group_name=resource_group_name, - hana_instance_name=hana_instance_name, - custom_headers=custom_headers, - raw=True, - **operation_config - ) - - def get_long_running_output(response): - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - - lro_delay = operation_config.get( - 'long_running_operation_timeout', - self.config.long_running_operation_timeout) - if polling is True: polling_method = ARMPolling(lro_delay, **operation_config) - elif polling is False: polling_method = NoPolling() - else: polling_method = polling - return LROPoller(self._client, raw_result, get_long_running_output, polling_method) - delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HanaOnAzure/hanaInstances/{hanaInstanceName}'} - - def update( - self, resource_group_name, hana_instance_name, tags=None, custom_headers=None, raw=False, **operation_config): - """Patches the Tags field of a SAP HANA instance. - - Patches the Tags field of a SAP HANA instance for the specified - subscription, resource group, and instance name. - - :param resource_group_name: Name of the resource group. - :type resource_group_name: str - :param hana_instance_name: Name of the SAP HANA on Azure instance. - :type hana_instance_name: str - :param tags: Tags field of the HANA instance. - :type tags: dict[str, str] - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. 
- :return: HanaInstance or ClientRawResponse if raw=true - :rtype: ~azure.mgmt.hanaonazure.models.HanaInstance or - ~msrest.pipeline.ClientRawResponse - :raises: - :class:`ErrorResponseException` - """ - tags_parameter = models.Tags(tags=tags) - - # Construct URL - url = self.update.metadata['url'] - path_format_arguments = { - 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'), - 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), - 'hanaInstanceName': self._serialize.url("hana_instance_name", hana_instance_name, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - header_parameters['Content-Type'] = 'application/json; charset=utf-8' - if self.config.generate_client_request_id: - header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - - # Construct body - body_content = self._serialize.body(tags_parameter, 'Tags') - - # Construct and send request - request = self._client.patch(url, query_parameters, header_parameters, body_content) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.ErrorResponseException(self._deserialize, response) - - deserialized = None - if response.status_code == 200: - deserialized = self._deserialize('HanaInstance', response) - - if raw: - client_raw_response = ClientRawResponse(deserialized, response) - return client_raw_response - - return deserialized - update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HanaOnAzure/hanaInstances/{hanaInstanceName}'} - - - def _restart_initial( - self, resource_group_name, hana_instance_name, custom_headers=None, raw=False, **operation_config): - # Construct URL - url = self.restart.metadata['url'] - path_format_arguments = { - 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'), - 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), - 'hanaInstanceName': self._serialize.url("hana_instance_name", hana_instance_name, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - - # Construct headers - header_parameters = {} - if self.config.generate_client_request_id: - header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200, 202]: - raise 
models.ErrorResponseException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - - def restart( - self, resource_group_name, hana_instance_name, custom_headers=None, raw=False, polling=True, **operation_config): - """The operation to restart a SAP HANA instance. - - :param resource_group_name: Name of the resource group. - :type resource_group_name: str - :param hana_instance_name: Name of the SAP HANA on Azure instance. - :type hana_instance_name: str - :param dict custom_headers: headers that will be added to the request - :param bool raw: The poller return type is ClientRawResponse, the - direct response alongside the deserialized response - :param polling: True for ARMPolling, False for no polling, or a - polling object for personal polling strategy - :return: An instance of LROPoller that returns None or - ClientRawResponse if raw==True - :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or - ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]] - :raises: - :class:`ErrorResponseException` - """ - raw_result = self._restart_initial( - resource_group_name=resource_group_name, - hana_instance_name=hana_instance_name, - custom_headers=custom_headers, - raw=True, - **operation_config - ) - - def get_long_running_output(response): - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - - lro_delay = operation_config.get( - 'long_running_operation_timeout', - self.config.long_running_operation_timeout) - if polling is True: polling_method = ARMPolling(lro_delay, **operation_config) - elif polling is False: polling_method = NoPolling() - else: polling_method = polling - return LROPoller(self._client, raw_result, get_long_running_output, polling_method) - restart.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HanaOnAzure/hanaInstances/{hanaInstanceName}/restart'} - - - def _start_initial( - self, resource_group_name, hana_instance_name, custom_headers=None, raw=False, **operation_config): - # Construct URL - url = self.start.metadata['url'] - path_format_arguments = { - 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'), - 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), - 'hanaInstanceName': self._serialize.url("hana_instance_name", hana_instance_name, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - - # Construct headers - header_parameters = {} - if self.config.generate_client_request_id: - header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200, 202]: - raise models.ErrorResponseException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - - def 
start( - self, resource_group_name, hana_instance_name, custom_headers=None, raw=False, polling=True, **operation_config): - """The operation to start a SAP HANA instance. - - :param resource_group_name: Name of the resource group. - :type resource_group_name: str - :param hana_instance_name: Name of the SAP HANA on Azure instance. - :type hana_instance_name: str - :param dict custom_headers: headers that will be added to the request - :param bool raw: The poller return type is ClientRawResponse, the - direct response alongside the deserialized response - :param polling: True for ARMPolling, False for no polling, or a - polling object for personal polling strategy - :return: An instance of LROPoller that returns None or - ClientRawResponse if raw==True - :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or - ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]] - :raises: - :class:`ErrorResponseException` - """ - raw_result = self._start_initial( - resource_group_name=resource_group_name, - hana_instance_name=hana_instance_name, - custom_headers=custom_headers, - raw=True, - **operation_config - ) - - def get_long_running_output(response): - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - - lro_delay = operation_config.get( - 'long_running_operation_timeout', - self.config.long_running_operation_timeout) - if polling is True: polling_method = ARMPolling(lro_delay, **operation_config) - elif polling is False: polling_method = NoPolling() - else: polling_method = polling - return LROPoller(self._client, raw_result, get_long_running_output, polling_method) - start.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HanaOnAzure/hanaInstances/{hanaInstanceName}/start'} - - - def _shutdown_initial( - self, resource_group_name, hana_instance_name, custom_headers=None, raw=False, **operation_config): - # Construct URL - url = self.shutdown.metadata['url'] - path_format_arguments = { - 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'), - 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), - 'hanaInstanceName': self._serialize.url("hana_instance_name", hana_instance_name, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - - # Construct headers - header_parameters = {} - if self.config.generate_client_request_id: - header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - - # Construct and send request - request = self._client.post(url, query_parameters, header_parameters) - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200, 202]: - raise models.ErrorResponseException(self._deserialize, response) - - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - - def shutdown( - self, resource_group_name, hana_instance_name, custom_headers=None, raw=False, polling=True, **operation_config): - """The operation to shutdown a SAP HANA 
instance. - - :param resource_group_name: Name of the resource group. - :type resource_group_name: str - :param hana_instance_name: Name of the SAP HANA on Azure instance. - :type hana_instance_name: str - :param dict custom_headers: headers that will be added to the request - :param bool raw: The poller return type is ClientRawResponse, the - direct response alongside the deserialized response - :param polling: True for ARMPolling, False for no polling, or a - polling object for personal polling strategy - :return: An instance of LROPoller that returns None or - ClientRawResponse if raw==True - :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or - ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]] - :raises: - :class:`ErrorResponseException` - """ - raw_result = self._shutdown_initial( - resource_group_name=resource_group_name, - hana_instance_name=hana_instance_name, - custom_headers=custom_headers, - raw=True, - **operation_config - ) - - def get_long_running_output(response): - if raw: - client_raw_response = ClientRawResponse(None, response) - return client_raw_response - - lro_delay = operation_config.get( - 'long_running_operation_timeout', - self.config.long_running_operation_timeout) - if polling is True: polling_method = ARMPolling(lro_delay, **operation_config) - elif polling is False: polling_method = NoPolling() - else: polling_method = polling - return LROPoller(self._client, raw_result, get_long_running_output, polling_method) - shutdown.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HanaOnAzure/hanaInstances/{hanaInstanceName}/shutdown'} diff --git a/sdk/hanaonazure/azure-mgmt-hanaonazure/azure/mgmt/hanaonazure/operations/_operations.py b/sdk/hanaonazure/azure-mgmt-hanaonazure/azure/mgmt/hanaonazure/operations/_operations.py index 75c81a3d2b75..3bce38bb487c 100644 --- a/sdk/hanaonazure/azure-mgmt-hanaonazure/azure/mgmt/hanaonazure/operations/_operations.py +++ b/sdk/hanaonazure/azure-mgmt-hanaonazure/azure/mgmt/hanaonazure/operations/_operations.py @@ -24,7 +24,7 @@ class Operations(object): :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. - :ivar api_version: Client API version. Constant value: "2017-11-03-preview". + :ivar api_version: Client API version. Constant value: "2020-02-07-preview". """ models = models @@ -34,7 +34,7 @@ def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer - self.api_version = "2017-11-03-preview" + self.api_version = "2020-02-07-preview" self.config = config diff --git a/sdk/hanaonazure/azure-mgmt-hanaonazure/azure/mgmt/hanaonazure/operations/_provider_instances_operations.py b/sdk/hanaonazure/azure-mgmt-hanaonazure/azure/mgmt/hanaonazure/operations/_provider_instances_operations.py new file mode 100644 index 000000000000..e1cdebe48280 --- /dev/null +++ b/sdk/hanaonazure/azure-mgmt-hanaonazure/azure/mgmt/hanaonazure/operations/_provider_instances_operations.py @@ -0,0 +1,376 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
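Both remaining operations groups now pin the 2020-02-07-preview API version; a quick sanity check, under the same client assumption as above:

    assert client.operations.api_version == "2020-02-07-preview"
    assert client.sap_monitors.api_version == "2020-02-07-preview"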
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +import uuid +from msrest.pipeline import ClientRawResponse +from msrest.polling import LROPoller, NoPolling +from msrestazure.polling.arm_polling import ARMPolling + +from .. import models + + +class ProviderInstancesOperations(object): + """ProviderInstancesOperations operations. + + You should not instantiate directly this class, but create a Client instance that will create it for you and attach it as attribute. + + :param client: Client for service requests. + :param config: Configuration of service client. + :param serializer: An object model serializer. + :param deserializer: An object model deserializer. + :ivar api_version: Client API version. Constant value: "2020-02-07-preview". + """ + + models = models + + def __init__(self, client, config, serializer, deserializer): + + self._client = client + self._serialize = serializer + self._deserialize = deserializer + self.api_version = "2020-02-07-preview" + + self.config = config + + def list( + self, resource_group_name, sap_monitor_name, custom_headers=None, raw=False, **operation_config): + """Gets a list of provider instances in the specified SAP monitor. + + Gets a list of provider instances in the specified SAP monitor. The + operations returns various properties of each provider instances. + + :param resource_group_name: Name of the resource group. + :type resource_group_name: str + :param sap_monitor_name: Name of the SAP monitor resource. + :type sap_monitor_name: str + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: An iterator like instance of ProviderInstance + :rtype: + ~azure.mgmt.hanaonazure.models.ProviderInstancePaged[~azure.mgmt.hanaonazure.models.ProviderInstance] + :raises: + :class:`ErrorResponseException` + """ + def prepare_request(next_link=None): + if not next_link: + # Construct URL + url = self.list.metadata['url'] + path_format_arguments = { + 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'), + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'sapMonitorName': self._serialize.url("sap_monitor_name", sap_monitor_name, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + + else: + url = next_link + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + return request + + def internal_paging(next_link=None): + request = prepare_request(next_link) + + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.ErrorResponseException(self._deserialize, response) + + return response + + # Deserialize response + header_dict = None + if raw: + header_dict = {} + deserialized = models.ProviderInstancePaged(internal_paging, self._deserialize.dependencies, header_dict) + + return deserialized + list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HanaOnAzure/sapMonitors/{sapMonitorName}/providerInstances'} + + def get( + self, resource_group_name, sap_monitor_name, provider_instance_name, custom_headers=None, raw=False, **operation_config): + """Gets properties of a provider instance. + + Gets properties of a provider instance for the specified subscription, + resource group, SapMonitor name, and resource name. + + :param resource_group_name: Name of the resource group. + :type resource_group_name: str + :param sap_monitor_name: Name of the SAP monitor resource. + :type sap_monitor_name: str + :param provider_instance_name: Name of the provider instance. + :type provider_instance_name: str + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
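A minimal sketch of fetching a single provider instance, reusing the hypothetical client and resource names from the earlier examples:

    provider = client.provider_instances.get(
        "myResourceGroup", "mySapMonitor", "myProviderInstance")
    print(provider.provisioning_state)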
+ :return: ProviderInstance or ClientRawResponse if raw=true + :rtype: ~azure.mgmt.hanaonazure.models.ProviderInstance or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`ErrorResponseException` + """ + # Construct URL + url = self.get.metadata['url'] + path_format_arguments = { + 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'), + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'sapMonitorName': self._serialize.url("sap_monitor_name", sap_monitor_name, 'str'), + 'providerInstanceName': self._serialize.url("provider_instance_name", provider_instance_name, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.ErrorResponseException(self._deserialize, response) + + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('ProviderInstance', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HanaOnAzure/sapMonitors/{sapMonitorName}/providerInstances/{providerInstanceName}'} + + + def _create_initial( + self, resource_group_name, sap_monitor_name, provider_instance_name, provider_instance_parameter, custom_headers=None, raw=False, **operation_config): + # Construct URL + url = self.create.metadata['url'] + path_format_arguments = { + 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'), + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'sapMonitorName': self._serialize.url("sap_monitor_name", sap_monitor_name, 'str'), + 'providerInstanceName': self._serialize.url("provider_instance_name", provider_instance_name, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if self.config.generate_client_request_id: + header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + + # Construct body + body_content = 
self._serialize.body(provider_instance_parameter, 'ProviderInstance') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters, body_content) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200, 201]: + raise models.ErrorResponseException(self._deserialize, response) + + deserialized = None + + if response.status_code == 200: + deserialized = self._deserialize('ProviderInstance', response) + if response.status_code == 201: + deserialized = self._deserialize('ProviderInstance', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + + def create( + self, resource_group_name, sap_monitor_name, provider_instance_name, provider_instance_parameter, custom_headers=None, raw=False, polling=True, **operation_config): + """Creates a provider instance. + + Creates a provider instance for the specified subscription, resource + group, SapMonitor name, and resource name. + + :param resource_group_name: Name of the resource group. + :type resource_group_name: str + :param sap_monitor_name: Name of the SAP monitor resource. + :type sap_monitor_name: str + :param provider_instance_name: Name of the provider instance. + :type provider_instance_name: str + :param provider_instance_parameter: Request body representing a + provider instance + :type provider_instance_parameter: + ~azure.mgmt.hanaonazure.models.ProviderInstance + :param dict custom_headers: headers that will be added to the request + :param bool raw: The poller return type is ClientRawResponse, the + direct response alongside the deserialized response + :param polling: True for ARMPolling, False for no polling, or a + polling object for personal polling strategy + :return: An instance of LROPoller that returns ProviderInstance or + ClientRawResponse if raw==True + :rtype: + ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.hanaonazure.models.ProviderInstance] + or + ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.hanaonazure.models.ProviderInstance]] + :raises: + :class:`ErrorResponseException` + """ + raw_result = self._create_initial( + resource_group_name=resource_group_name, + sap_monitor_name=sap_monitor_name, + provider_instance_name=provider_instance_name, + provider_instance_parameter=provider_instance_parameter, + custom_headers=custom_headers, + raw=True, + **operation_config + ) + + def get_long_running_output(response): + deserialized = self._deserialize('ProviderInstance', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + + lro_delay = operation_config.get( + 'long_running_operation_timeout', + self.config.long_running_operation_timeout) + if polling is True: polling_method = ARMPolling(lro_delay, **operation_config) + elif polling is False: polling_method = NoPolling() + else: polling_method = polling + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) + create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HanaOnAzure/sapMonitors/{sapMonitorName}/providerInstances/{providerInstanceName}'} + + + def _delete_initial( + self, resource_group_name, sap_monitor_name, provider_instance_name, custom_headers=None, raw=False, **operation_config): + # Construct URL + url = self.delete.metadata['url'] + 
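By default (polling=True) create returns an LROPoller; a minimal sketch, reusing the hypothetical instance model built in the earlier ProviderInstance example:

    poller = client.provider_instances.create(
        "myResourceGroup", "mySapMonitor", "myProviderInstance", instance)
    created = poller.result()  # blocks until the ARM operation completes
    print(created.provisioning_state)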
path_format_arguments = { + 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'), + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'sapMonitorName': self._serialize.url("sap_monitor_name", sap_monitor_name, 'str'), + 'providerInstanceName': self._serialize.url("provider_instance_name", provider_instance_name, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + + # Construct headers + header_parameters = {} + if self.config.generate_client_request_id: + header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + + # Construct and send request + request = self._client.delete(url, query_parameters, header_parameters) + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200, 202, 204]: + raise models.ErrorResponseException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + + def delete( + self, resource_group_name, sap_monitor_name, provider_instance_name, custom_headers=None, raw=False, polling=True, **operation_config): + """Deletes a provider instance. + + Deletes a provider instance for the specified subscription, resource + group, SapMonitor name, and resource name. + + :param resource_group_name: Name of the resource group. + :type resource_group_name: str + :param sap_monitor_name: Name of the SAP monitor resource. + :type sap_monitor_name: str + :param provider_instance_name: Name of the provider instance. 
+ :type provider_instance_name: str + :param dict custom_headers: headers that will be added to the request + :param bool raw: The poller return type is ClientRawResponse, the + direct response alongside the deserialized response + :param polling: True for ARMPolling, False for no polling, or a + polling object for personal polling strategy + :return: An instance of LROPoller that returns None or + ClientRawResponse if raw==True + :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or + ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]] + :raises: + :class:`ErrorResponseException` + """ + raw_result = self._delete_initial( + resource_group_name=resource_group_name, + sap_monitor_name=sap_monitor_name, + provider_instance_name=provider_instance_name, + custom_headers=custom_headers, + raw=True, + **operation_config + ) + + def get_long_running_output(response): + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + + lro_delay = operation_config.get( + 'long_running_operation_timeout', + self.config.long_running_operation_timeout) + if polling is True: polling_method = ARMPolling(lro_delay, **operation_config) + elif polling is False: polling_method = NoPolling() + else: polling_method = polling + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) + delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.HanaOnAzure/sapMonitors/{sapMonitorName}/providerInstances/{providerInstanceName}'} diff --git a/sdk/hanaonazure/azure-mgmt-hanaonazure/azure/mgmt/hanaonazure/operations/_sap_monitors_operations.py b/sdk/hanaonazure/azure-mgmt-hanaonazure/azure/mgmt/hanaonazure/operations/_sap_monitors_operations.py index 9f7b87beddb9..08e1be3cf936 100644 --- a/sdk/hanaonazure/azure-mgmt-hanaonazure/azure/mgmt/hanaonazure/operations/_sap_monitors_operations.py +++ b/sdk/hanaonazure/azure-mgmt-hanaonazure/azure/mgmt/hanaonazure/operations/_sap_monitors_operations.py @@ -26,7 +26,7 @@ class SapMonitorsOperations(object): :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. - :ivar api_version: Client API version. Constant value: "2017-11-03-preview". + :ivar api_version: Client API version. Constant value: "2020-02-07-preview". """ models = models @@ -36,7 +36,7 @@ def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer - self.api_version = "2017-11-03-preview" + self.api_version = "2020-02-07-preview" self.config = config @@ -367,7 +367,7 @@ def update( :type resource_group_name: str :param sap_monitor_name: Name of the SAP monitor resource. :type sap_monitor_name: str - :param tags: Tags field of the HANA instance. + :param tags: Tags field of the resource. :type tags: dict[str, str] :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the diff --git a/sdk/hanaonazure/azure-mgmt-hanaonazure/azure/mgmt/hanaonazure/version.py b/sdk/hanaonazure/azure-mgmt-hanaonazure/azure/mgmt/hanaonazure/version.py index aeae719acfc3..eba31a2798cf 100644 --- a/sdk/hanaonazure/azure-mgmt-hanaonazure/azure/mgmt/hanaonazure/version.py +++ b/sdk/hanaonazure/azure-mgmt-hanaonazure/azure/mgmt/hanaonazure/version.py @@ -9,5 +9,5 @@ # regenerated. 
# -------------------------------------------------------------------------- -VERSION = "0.13.0" +VERSION = "0.14.0" diff --git a/sdk/hanaonazure/azure-mgmt-hanaonazure/setup.py b/sdk/hanaonazure/azure-mgmt-hanaonazure/setup.py index 8923708522e7..b0bd60a460e0 100644 --- a/sdk/hanaonazure/azure-mgmt-hanaonazure/setup.py +++ b/sdk/hanaonazure/azure-mgmt-hanaonazure/setup.py @@ -36,7 +36,9 @@ pass # Version extraction inspired from 'requests' -with open(os.path.join(package_folder_path, 'version.py'), 'r') as fd: +with open(os.path.join(package_folder_path, 'version.py') + if os.path.exists(os.path.join(package_folder_path, 'version.py')) + else os.path.join(package_folder_path, '_version.py'), 'r') as fd: version = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]', fd.read(), re.MULTILINE).group(1) diff --git a/sdk/hanaonazure/azure-mgmt-hanaonazure/tests/test_azure_mgmt_hanaonazure.py b/sdk/hanaonazure/azure-mgmt-hanaonazure/tests/test_azure_mgmt_hanaonazure.py index d751a6d81f70..982d1f3d5626 100644 --- a/sdk/hanaonazure/azure-mgmt-hanaonazure/tests/test_azure_mgmt_hanaonazure.py +++ b/sdk/hanaonazure/azure-mgmt-hanaonazure/tests/test_azure_mgmt_hanaonazure.py @@ -24,27 +24,27 @@ def setUp(self): def process(self, result): pass - def test_hanainstance_list(self): - hanainstances = list(self.hanaonazure_client.hana_instances.list()) - self.assertEqual(len(hanainstances), 3) - - @ResourceGroupPreparer() - def test_hanainstance_list_by_resource_group(self, resource_group): - - resource_group_name = resource_group.name - - hanainstances = list(self.hanaonazure_client.hana_instances.list_by_resource_group(resource_group_name)) - self.assertEqual(len(hanainstances), 3) - - @ResourceGroupPreparer() - def test_hanainstance_get(self, resource_group): - - resource_group_name = resource_group.name - resource_name = "testhanainstanceresourcename" - - hanainstance = self.hanaonazure_client.hana_instances.get(resource_group_name, resource_name) - self.assertEqual(hanainstance.name, resource_name) + #def test_hanainstance_list(self): + # hanainstances = list(self.hanaonazure_client.hana_instances.list()) + # self.assertEqual(len(hanainstances), 3) + # + #@ResourceGroupPreparer() + #def test_hanainstance_list_by_resource_group(self, resource_group): + # + # resource_group_name = resource_group.name + # + # hanainstances = list(self.hanaonazure_client.hana_instances.list_by_resource_group(resource_group_name)) + # self.assertEqual(len(hanainstances), 3) + + #@ResourceGroupPreparer() + #def test_hanainstance_get(self, resource_group): + + # resource_group_name = resource_group.name + # resource_name = "testhanainstanceresourcename" + + # hanainstance = self.hanaonazure_client.hana_instances.get(resource_group_name, resource_name) + # self.assertEqual(hanainstance.name, resource_name) #------------------------------------------------------------------------------ if __name__ == '__main__': - unittest.main() \ No newline at end of file + unittest.main() From 0e82fae6b881d39497f8eb6890a33969dd9eec31 Mon Sep 17 00:00:00 2001 From: Xiang Yan Date: Tue, 19 May 2020 08:56:27 -0700 Subject: [PATCH 10/28] fix AttributeException (#11463) * fix AttributeException --- sdk/core/azure-core/CHANGELOG.md | 3 + .../azure/core/pipeline/transport/_aiohttp.py | 6 +- .../azure/core/pipeline/transport/_base.py | 6 +- .../core/pipeline/transport/_base_async.py | 2 +- .../pipeline/transport/_requests_basic.py | 2 +- .../test_stream_generator.py | 107 ++++++++++++++++++ .../azure-core/tests/test_stream_generator.py | 101 
+++++++++++++++++ 7 files changed, 219 insertions(+), 8 deletions(-) create mode 100644 sdk/core/azure-core/tests/azure_core_asynctests/test_stream_generator.py create mode 100644 sdk/core/azure-core/tests/test_stream_generator.py diff --git a/sdk/core/azure-core/CHANGELOG.md b/sdk/core/azure-core/CHANGELOG.md index 508e9f35bad1..dbd114975faa 100644 --- a/sdk/core/azure-core/CHANGELOG.md +++ b/sdk/core/azure-core/CHANGELOG.md @@ -3,6 +3,9 @@ ## 1.5.1 (Unreleased) +### Bug fixes + +- Fix AttributeException in StreamDownloadGenerator #11462 ## 1.5.0 (2020-05-04) diff --git a/sdk/core/azure-core/azure/core/pipeline/transport/_aiohttp.py b/sdk/core/azure-core/azure/core/pipeline/transport/_aiohttp.py index 0f064e618f23..9b0089401513 100644 --- a/sdk/core/azure-core/azure/core/pipeline/transport/_aiohttp.py +++ b/sdk/core/azure-core/azure/core/pipeline/transport/_aiohttp.py @@ -234,12 +234,12 @@ async def __anext__(self): else: await asyncio.sleep(retry_interval) headers = {'range': 'bytes=' + str(self.downloaded) + '-'} - resp = self.pipeline.run(self.request, stream=True, headers=headers) - if resp.status_code == 416: + resp = await self.pipeline.run(self.request, stream=True, headers=headers) + if resp.http_response.status_code == 416: raise chunk = await self.response.internal_response.content.read(self.block_size) if not chunk: - raise StopIteration() + raise StopAsyncIteration() self.downloaded += len(chunk) return chunk continue diff --git a/sdk/core/azure-core/azure/core/pipeline/transport/_base.py b/sdk/core/azure-core/azure/core/pipeline/transport/_base.py index 33d924a4004d..d8f82b34f4da 100644 --- a/sdk/core/azure-core/azure/core/pipeline/transport/_base.py +++ b/sdk/core/azure-core/azure/core/pipeline/transport/_base.py @@ -175,13 +175,13 @@ class HttpTransport( @abc.abstractmethod def send(self, request, **kwargs): - # type: (PipelineRequest, Any) -> PipelineResponse + # type: (HTTPRequestType, Any) -> HTTPResponseType """Send the request using this HTTP sender. :param request: The pipeline request object - :type request: ~azure.core.pipeline.PipelineRequest + :type request: ~azure.core.transport.HTTPRequest :return: The pipeline response object. 
- :rtype: ~azure.core.pipeline.PipelineResponse + :rtype: ~azure.core.pipeline.transport.HttpResponse """ @abc.abstractmethod diff --git a/sdk/core/azure-core/azure/core/pipeline/transport/_base_async.py b/sdk/core/azure-core/azure/core/pipeline/transport/_base_async.py index d79999ed6297..bfc51ef6109b 100644 --- a/sdk/core/azure-core/azure/core/pipeline/transport/_base_async.py +++ b/sdk/core/azure-core/azure/core/pipeline/transport/_base_async.py @@ -28,7 +28,7 @@ import abc from collections.abc import AsyncIterator -from typing import AsyncIterator as AsyncIteratorType, TypeVar, Generic +from typing import AsyncIterator as AsyncIteratorType, TypeVar, Generic, Any from ._base import ( _HttpResponseBase, _HttpClientTransportResponse, diff --git a/sdk/core/azure-core/azure/core/pipeline/transport/_requests_basic.py b/sdk/core/azure-core/azure/core/pipeline/transport/_requests_basic.py index 537f902ece3b..ea0e7b0e4c9c 100644 --- a/sdk/core/azure-core/azure/core/pipeline/transport/_requests_basic.py +++ b/sdk/core/azure-core/azure/core/pipeline/transport/_requests_basic.py @@ -137,7 +137,7 @@ def __next__(self): time.sleep(retry_interval) headers = {'range': 'bytes=' + str(self.downloaded) + '-'} resp = self.pipeline.run(self.request, stream=True, headers=headers) - if resp.status_code == 416: + if resp.http_response.status_code == 416: raise chunk = next(self.iter_content_func) if not chunk: diff --git a/sdk/core/azure-core/tests/azure_core_asynctests/test_stream_generator.py b/sdk/core/azure-core/tests/azure_core_asynctests/test_stream_generator.py new file mode 100644 index 000000000000..f7368159615f --- /dev/null +++ b/sdk/core/azure-core/tests/azure_core_asynctests/test_stream_generator.py @@ -0,0 +1,107 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+# ------------------------------------ +from azure.core.pipeline.transport import ( + HttpRequest, + AsyncHttpResponse, + AsyncHttpTransport, +) +from azure.core.pipeline import AsyncPipeline +from azure.core.pipeline.transport._aiohttp import AioHttpStreamDownloadGenerator +from unittest import mock +import pytest + +@pytest.mark.asyncio +async def test_connection_error_response(): + class MockTransport(AsyncHttpTransport): + def __init__(self): + self._count = 0 + + async def __aexit__(self, exc_type, exc_val, exc_tb): + pass + async def close(self): + pass + async def open(self): + pass + + async def send(self, request, **kwargs): + request = HttpRequest('GET', 'http://127.0.0.1/') + response = AsyncHttpResponse(request, None) + response.status_code = 200 + return response + + class MockContent(): + def __init__(self): + self._first = True + + async def read(self, block_size): + if self._first: + self._first = False + raise ConnectionError + return None + + class MockInternalResponse(): + def __init__(self): + self.headers = {} + self.content = MockContent() + + async def close(self): + pass + + class AsyncMock(mock.MagicMock): + async def __call__(self, *args, **kwargs): + return super(AsyncMock, self).__call__(*args, **kwargs) + + http_request = HttpRequest('GET', 'http://127.0.0.1/') + pipeline = AsyncPipeline(MockTransport()) + http_response = AsyncHttpResponse(http_request, None) + http_response.internal_response = MockInternalResponse() + stream = AioHttpStreamDownloadGenerator(pipeline, http_response) + with mock.patch('asyncio.sleep', new_callable=AsyncMock): + with pytest.raises(StopAsyncIteration): + await stream.__anext__() + +@pytest.mark.asyncio +async def test_connection_error_416(): + class MockTransport(AsyncHttpTransport): + def __init__(self): + self._count = 0 + + async def __aexit__(self, exc_type, exc_val, exc_tb): + pass + async def close(self): + pass + async def open(self): + pass + + async def send(self, request, **kwargs): + request = HttpRequest('GET', 'http://127.0.0.1/') + response = AsyncHttpResponse(request, None) + response.status_code = 416 + return response + + class MockContent(): + async def read(self, block_size): + raise ConnectionError + + class MockInternalResponse(): + def __init__(self): + self.headers = {} + self.content = MockContent() + + async def close(self): + pass + + class AsyncMock(mock.MagicMock): + async def __call__(self, *args, **kwargs): + return super(AsyncMock, self).__call__(*args, **kwargs) + + http_request = HttpRequest('GET', 'http://127.0.0.1/') + pipeline = AsyncPipeline(MockTransport()) + http_response = AsyncHttpResponse(http_request, None) + http_response.internal_response = MockInternalResponse() + stream = AioHttpStreamDownloadGenerator(pipeline, http_response) + with mock.patch('asyncio.sleep', new_callable=AsyncMock): + with pytest.raises(ConnectionError): + await stream.__anext__() diff --git a/sdk/core/azure-core/tests/test_stream_generator.py b/sdk/core/azure-core/tests/test_stream_generator.py new file mode 100644 index 000000000000..4f40bba885eb --- /dev/null +++ b/sdk/core/azure-core/tests/test_stream_generator.py @@ -0,0 +1,101 @@ +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+# ------------------------------------ +import requests +from azure.core.pipeline.transport import ( + HttpRequest, + HttpResponse, + HttpTransport, +) +from azure.core.pipeline import Pipeline, PipelineResponse +from azure.core.pipeline.transport._requests_basic import StreamDownloadGenerator +try: + from unittest import mock +except ImportError: + import mock +import pytest + +def test_connection_error_response(): + class MockTransport(HttpTransport): + def __init__(self): + self._count = 0 + + def __exit__(self, exc_type, exc_val, exc_tb): + pass + def close(self): + pass + def open(self): + pass + + def send(self, request, **kwargs): + request = HttpRequest('GET', 'http://127.0.0.1/') + response = HttpResponse(request, None) + response.status_code = 200 + return response + + def next(self): + self.__next__() + + def __next__(self): + if self._count == 0: + self._count += 1 + raise requests.exceptions.ConnectionError + + class MockInternalResponse(): + def iter_content(self, block_size): + return MockTransport() + + def close(self): + pass + + http_request = HttpRequest('GET', 'http://127.0.0.1/') + pipeline = Pipeline(MockTransport()) + http_response = HttpResponse(http_request, None) + http_response.internal_response = MockInternalResponse() + stream = StreamDownloadGenerator(pipeline, http_response) + with mock.patch('time.sleep', return_value=None): + with pytest.raises(StopIteration): + stream.__next__() + +def test_connection_error_416(): + class MockTransport(HttpTransport): + def __init__(self): + self._count = 0 + + def __exit__(self, exc_type, exc_val, exc_tb): + pass + def close(self): + pass + def open(self): + pass + + def send(self, request, **kwargs): + request = HttpRequest('GET', 'http://127.0.0.1/') + response = HttpResponse(request, None) + response.status_code = 416 + return response + + def next(self): + self.__next__() + + def __next__(self): + if self._count == 0: + self._count += 1 + raise requests.exceptions.ConnectionError + + class MockInternalResponse(): + def iter_content(self, block_size): + return MockTransport() + + def close(self): + pass + + http_request = HttpRequest('GET', 'http://127.0.0.1/') + pipeline = Pipeline(MockTransport()) + http_response = HttpResponse(http_request, None) + http_response.internal_response = MockInternalResponse() + stream = StreamDownloadGenerator(pipeline, http_response) + with mock.patch('time.sleep', return_value=None): + with pytest.raises(requests.exceptions.ConnectionError): + stream.__next__() \ No newline at end of file From dfe5b0957c1af756e125ad625cfd9834b43cdc20 Mon Sep 17 00:00:00 2001 From: annatisch Date: Tue, 19 May 2020 10:44:57 -0700 Subject: [PATCH 11/28] [Core] Support multipart changesets (#10972) * Support multipart pipeline context * Support sending multipart changesets * Added receive tests * Fix pylint + mypy * Update to use recursive requests * CI fix * Update changeset response decoding * Make mypy happy --- .../azure-core/azure/core/pipeline/_base.py | 3 + .../azure/core/pipeline/_base_async.py | 3 + .../azure/core/pipeline/transport/_base.py | 79 +- .../test_basic_transport.py | 690 +++++++++++++++-- .../azure-core/tests/test_basic_transport.py | 708 +++++++++++++++++- 5 files changed, 1375 insertions(+), 108 deletions(-) diff --git a/sdk/core/azure-core/azure/core/pipeline/_base.py b/sdk/core/azure-core/azure/core/pipeline/_base.py index 5d5ca0404390..905111135eaf 100644 --- a/sdk/core/azure-core/azure/core/pipeline/_base.py +++ b/sdk/core/azure-core/azure/core/pipeline/_base.py @@ -166,6 +166,9 
@@ def _prepare_multipart_mixed_request(request): import concurrent.futures def prepare_requests(req): + if req.multipart_mixed_info: + # Recursively update changeset "sub requests" + Pipeline._prepare_multipart_mixed_request(req) context = PipelineContext(None, **pipeline_options) pipeline_request = PipelineRequest(req, context) for policy in policies: diff --git a/sdk/core/azure-core/azure/core/pipeline/_base_async.py b/sdk/core/azure-core/azure/core/pipeline/_base_async.py index 95694a2f90f4..6d9fa19c83ef 100644 --- a/sdk/core/azure-core/azure/core/pipeline/_base_async.py +++ b/sdk/core/azure-core/azure/core/pipeline/_base_async.py @@ -174,6 +174,9 @@ async def _prepare_multipart_mixed_request(self, request: HTTPRequestType) -> No pipeline_options = multipart_mixed_info[3] # type: Dict[str, Any] async def prepare_requests(req): + if req.multipart_mixed_info: + # Recursively update changeset "sub requests" + await self._prepare_multipart_mixed_request(req) context = PipelineContext(None, **pipeline_options) pipeline_request = PipelineRequest(req, context) for policy in policies: diff --git a/sdk/core/azure-core/azure/core/pipeline/transport/_base.py b/sdk/core/azure-core/azure/core/pipeline/transport/_base.py index d8f82b34f4da..d6e14b4f6876 100644 --- a/sdk/core/azure-core/azure/core/pipeline/transport/_base.py +++ b/sdk/core/azure-core/azure/core/pipeline/transport/_base.py @@ -60,6 +60,7 @@ Optional, Tuple, Iterator, + Type ) from six.moves.http_client import HTTPConnection, HTTPResponse as _HTTPResponse @@ -379,27 +380,30 @@ def set_multipart_mixed(self, *requests, **kwargs): :keyword list[SansIOHTTPPolicy] policies: SansIOPolicy to apply at preparation time :keyword str boundary: Optional boundary - :param requests: HttpRequests object """ self.multipart_mixed_info = ( requests, kwargs.pop("policies", []), - kwargs.pop("boundary", []), + kwargs.pop("boundary", None), kwargs ) - def prepare_multipart_body(self): - # type: () -> None + def prepare_multipart_body(self, content_index=0): + # type: (int) -> int """Will prepare the body of this request according to the multipart information. This call assumes the on_request policies have been applied already in their correct context (sync/async) Does nothing if "set_multipart_mixed" was never called. + + :param int content_index: The current index of parts within the batch message. + :returns: The updated index after all parts in this request have been added. 
+ :rtype: int """ if not self.multipart_mixed_info: - return + return 0 requests = self.multipart_mixed_info[0] # type: List[HttpRequest] boundary = self.multipart_mixed_info[2] # type: Optional[str] @@ -409,12 +413,22 @@ def prepare_multipart_body(self): main_message.add_header("Content-Type", "multipart/mixed") if boundary: main_message.set_boundary(boundary) - for i, req in enumerate(requests): + + for req in requests: part_message = Message() - part_message.add_header("Content-Type", "application/http") - part_message.add_header("Content-Transfer-Encoding", "binary") - part_message.add_header("Content-ID", str(i)) - part_message.set_payload(req.serialize()) + if req.multipart_mixed_info: + content_index = req.prepare_multipart_body(content_index=content_index) + part_message.add_header("Content-Type", req.headers['Content-Type']) + payload = req.serialize() + # We need to remove the ~HTTP/1.1 prefix along with the added content-length + payload = payload[payload.index(b'--'):] + else: + part_message.add_header("Content-Type", "application/http") + part_message.add_header("Content-Transfer-Encoding", "binary") + part_message.add_header("Content-ID", str(content_index)) + payload = req.serialize() + content_index += 1 + part_message.set_payload(payload) main_message.attach(part_message) try: @@ -435,6 +449,7 @@ def prepare_multipart_body(self): self.headers["Content-Type"] = ( "multipart/mixed; boundary=" + main_message.get_boundary() ) + return content_index def serialize(self): # type: () -> bytes @@ -485,6 +500,31 @@ def text(self, encoding=None): encoding = "utf-8-sig" return self.body().decode(encoding) + def _decode_parts(self, message, http_response_type, requests): + # type: (Message, Type[_HttpResponseBase], List[HttpRequest]) -> List[HttpResponse] + """Rebuild an HTTP response from pure string.""" + responses = [] + for index, raw_reponse in enumerate(message.get_payload()): + content_type = raw_reponse.get_content_type() + if content_type == "application/http": + responses.append( + _deserialize_response( + raw_reponse.get_payload(decode=True), + requests[index], + http_response_type=http_response_type, + ) + ) + elif content_type == "multipart/mixed" and requests[index].multipart_mixed_info: + # The message batch contains one or more change sets + changeset_requests = requests[index].multipart_mixed_info[0] # type: ignore + changeset_responses = self._decode_parts(raw_reponse, http_response_type, changeset_requests) + responses.extend(changeset_responses) + else: + raise ValueError( + "Multipart doesn't support part other than application/http for now" + ) + return responses + def _get_raw_parts(self, http_response_type=None): # type (Optional[Type[_HttpResponseBase]]) -> Iterator[HttpResponse] """Assuming this body is multipart, return the iterator or parts. 
@@ -503,26 +543,9 @@ def _get_raw_parts(self, http_response_type=None): + b"\r\n\r\n" + body_as_bytes ) - message = message_parser(http_body) # type: Message - - # Rebuild an HTTP response from pure string requests = self.request.multipart_mixed_info[0] # type: List[HttpRequest] - responses = [] - for request, raw_reponse in zip(requests, message.get_payload()): - if raw_reponse.get_content_type() == "application/http": - responses.append( - _deserialize_response( - raw_reponse.get_payload(decode=True), - request, - http_response_type=http_response_type, - ) - ) - else: - raise ValueError( - "Multipart doesn't support part other than application/http for now" - ) - return responses + return self._decode_parts(message, http_response_type, requests) class HttpResponse(_HttpResponseBase): # pylint: disable=abstract-method diff --git a/sdk/core/azure-core/tests/azure_core_asynctests/test_basic_transport.py b/sdk/core/azure-core/tests/azure_core_asynctests/test_basic_transport.py index 58f6761b4c5a..22810b8afffd 100644 --- a/sdk/core/azure-core/tests/azure_core_asynctests/test_basic_transport.py +++ b/sdk/core/azure-core/tests/azure_core_asynctests/test_basic_transport.py @@ -18,6 +18,28 @@ import pytest +# transport = mock.MagicMock(spec=AsyncHttpTransport) +# MagicMock support async cxt manager only after 3.8 +# https://github.com/python/cpython/pull/9296 + +class MockAsyncHttpTransport(AsyncHttpTransport): + async def __aenter__(self): return self + async def __aexit__(self, *args): pass + async def open(self): pass + async def close(self): pass + async def send(self, request, **kwargs): pass + + +class MockResponse(AsyncHttpResponse): + def __init__(self, request, body, content_type): + super(MockResponse, self).__init__(request, None) + self._body = body + self.content_type = content_type + + def body(self): + return self._body + + @pytest.mark.asyncio async def test_basic_options_aiohttp(): @@ -31,18 +53,6 @@ async def test_basic_options_aiohttp(): @pytest.mark.asyncio async def test_multipart_send(): - - # transport = mock.MagicMock(spec=AsyncHttpTransport) - # MagicMock support async cxt manager only after 3.8 - # https://github.com/python/cpython/pull/9296 - - class MockAsyncHttpTransport(AsyncHttpTransport): - async def __aenter__(self): return self - async def __aexit__(self, *args): pass - async def open(self): pass - async def close(self): pass - async def send(self, request, **kwargs): pass - transport = MockAsyncHttpTransport() class RequestPolicy(object): @@ -90,17 +100,6 @@ async def on_request(self, request): @pytest.mark.asyncio async def test_multipart_send_with_context(): - # transport = mock.MagicMock(spec=AsyncHttpTransport) - # MagicMock support async cxt manager only after 3.8 - # https://github.com/python/cpython/pull/9296 - - class MockAsyncHttpTransport(AsyncHttpTransport): - async def __aenter__(self): return self - async def __aexit__(self, *args): pass - async def open(self): pass - async def close(self): pass - async def send(self, request, **kwargs): pass - transport = MockAsyncHttpTransport() header_policy = HeadersPolicy() @@ -150,16 +149,288 @@ async def on_request(self, request): @pytest.mark.asyncio -async def test_multipart_receive(): +async def test_multipart_send_with_one_changeset(): + transport = MockAsyncHttpTransport() + requests = [ + HttpRequest("DELETE", "/container0/blob0"), + HttpRequest("DELETE", "/container1/blob1") + ] + changeset = HttpRequest(None, None) + changeset.set_multipart_mixed( + *requests, + 
boundary="changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525" + ) + + request = HttpRequest("POST", "http://account.blob.core.windows.net/?comp=batch") + request.set_multipart_mixed( + changeset, + boundary="batch_357de4f7-6d0b-4e02-8cd2-6361411a9525" + ) + + async with AsyncPipeline(transport) as pipeline: + await pipeline.run(request) + + assert request.body == ( + b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n' + b'Content-Type: multipart/mixed; boundary=changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n' + b'\r\n' + b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n' + b'Content-Type: application/http\r\n' + b'Content-Transfer-Encoding: binary\r\n' + b'Content-ID: 0\r\n' + b'\r\n' + b'DELETE /container0/blob0 HTTP/1.1\r\n' + b'\r\n' + b'\r\n' + b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n' + b'Content-Type: application/http\r\n' + b'Content-Transfer-Encoding: binary\r\n' + b'Content-ID: 1\r\n' + b'\r\n' + b'DELETE /container1/blob1 HTTP/1.1\r\n' + b'\r\n' + b'\r\n' + b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n' + b'\r\n' + b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n' + ) + + +@pytest.mark.asyncio +async def test_multipart_send_with_multiple_changesets(): + transport = MockAsyncHttpTransport() + changeset1 = HttpRequest(None, None) + changeset1.set_multipart_mixed( + HttpRequest("DELETE", "/container0/blob0"), + HttpRequest("DELETE", "/container1/blob1"), + boundary="changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525" + ) + changeset2 = HttpRequest(None, None) + changeset2.set_multipart_mixed( + HttpRequest("DELETE", "/container2/blob2"), + HttpRequest("DELETE", "/container3/blob3"), + boundary="changeset_8b9e487e-a353-4dcb-a6f4-0688191e0314" + ) + + request = HttpRequest("POST", "http://account.blob.core.windows.net/?comp=batch") + request.set_multipart_mixed( + changeset1, + changeset2, + boundary="batch_357de4f7-6d0b-4e02-8cd2-6361411a9525", + ) + + async with AsyncPipeline(transport) as pipeline: + await pipeline.run(request) + + assert request.body == ( + b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n' + b'Content-Type: multipart/mixed; boundary=changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n' + b'\r\n' + b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n' + b'Content-Type: application/http\r\n' + b'Content-Transfer-Encoding: binary\r\n' + b'Content-ID: 0\r\n' + b'\r\n' + b'DELETE /container0/blob0 HTTP/1.1\r\n' + b'\r\n' + b'\r\n' + b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n' + b'Content-Type: application/http\r\n' + b'Content-Transfer-Encoding: binary\r\n' + b'Content-ID: 1\r\n' + b'\r\n' + b'DELETE /container1/blob1 HTTP/1.1\r\n' + b'\r\n' + b'\r\n' + b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n' + b'\r\n' + b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n' + b'Content-Type: multipart/mixed; boundary=changeset_8b9e487e-a353-4dcb-a6f4-0688191e0314\r\n' + b'\r\n' + b'--changeset_8b9e487e-a353-4dcb-a6f4-0688191e0314\r\n' + b'Content-Type: application/http\r\n' + b'Content-Transfer-Encoding: binary\r\n' + b'Content-ID: 2\r\n' + b'\r\n' + b'DELETE /container2/blob2 HTTP/1.1\r\n' + b'\r\n' + b'\r\n' + b'--changeset_8b9e487e-a353-4dcb-a6f4-0688191e0314\r\n' + b'Content-Type: application/http\r\n' + b'Content-Transfer-Encoding: binary\r\n' + b'Content-ID: 3\r\n' + b'\r\n' + b'DELETE /container3/blob3 HTTP/1.1\r\n' + b'\r\n' + b'\r\n' + b'--changeset_8b9e487e-a353-4dcb-a6f4-0688191e0314--\r\n' + b'\r\n' + b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n' + ) + + +@pytest.mark.asyncio +async def 
test_multipart_send_with_combination_changeset_first(): + transport = MockAsyncHttpTransport() + + changeset = HttpRequest(None, None) + changeset.set_multipart_mixed( + HttpRequest("DELETE", "/container0/blob0"), + HttpRequest("DELETE", "/container1/blob1"), + boundary="changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525" + ) + request = HttpRequest("POST", "http://account.blob.core.windows.net/?comp=batch") + request.set_multipart_mixed( + changeset, + HttpRequest("DELETE", "/container2/blob2"), + boundary="batch_357de4f7-6d0b-4e02-8cd2-6361411a9525" + ) + + async with AsyncPipeline(transport) as pipeline: + await pipeline.run(request) + + assert request.body == ( + b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n' + b'Content-Type: multipart/mixed; boundary=changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n' + b'\r\n' + b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n' + b'Content-Type: application/http\r\n' + b'Content-Transfer-Encoding: binary\r\n' + b'Content-ID: 0\r\n' + b'\r\n' + b'DELETE /container0/blob0 HTTP/1.1\r\n' + b'\r\n' + b'\r\n' + b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n' + b'Content-Type: application/http\r\n' + b'Content-Transfer-Encoding: binary\r\n' + b'Content-ID: 1\r\n' + b'\r\n' + b'DELETE /container1/blob1 HTTP/1.1\r\n' + b'\r\n' + b'\r\n' + b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n' + b'\r\n' + b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n' + b'Content-Type: application/http\r\n' + b'Content-Transfer-Encoding: binary\r\n' + b'Content-ID: 2\r\n' + b'\r\n' + b'DELETE /container2/blob2 HTTP/1.1\r\n' + b'\r\n' + b'\r\n' + b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n' + ) + + +@pytest.mark.asyncio +async def test_multipart_send_with_combination_changeset_last(): + transport = MockAsyncHttpTransport() + changeset = HttpRequest(None, None) + changeset.set_multipart_mixed( + HttpRequest("DELETE", "/container1/blob1"), + HttpRequest("DELETE", "/container2/blob2"), + boundary="changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525" + ) + request = HttpRequest("POST", "http://account.blob.core.windows.net/?comp=batch") + request.set_multipart_mixed( + HttpRequest("DELETE", "/container0/blob0"), + changeset, + boundary="batch_357de4f7-6d0b-4e02-8cd2-6361411a9525" + ) + + async with AsyncPipeline(transport) as pipeline: + await pipeline.run(request) + + assert request.body == ( + b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n' + b'Content-Type: application/http\r\n' + b'Content-Transfer-Encoding: binary\r\n' + b'Content-ID: 0\r\n' + b'\r\n' + b'DELETE /container0/blob0 HTTP/1.1\r\n' + b'\r\n' + b'\r\n' + b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n' + b'Content-Type: multipart/mixed; boundary=changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n' + b'\r\n' + b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n' + b'Content-Type: application/http\r\n' + b'Content-Transfer-Encoding: binary\r\n' + b'Content-ID: 1\r\n' + b'\r\n' + b'DELETE /container1/blob1 HTTP/1.1\r\n' + b'\r\n' + b'\r\n' + b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n' + b'Content-Type: application/http\r\n' + b'Content-Transfer-Encoding: binary\r\n' + b'Content-ID: 2\r\n' + b'\r\n' + b'DELETE /container2/blob2 HTTP/1.1\r\n' + b'\r\n' + b'\r\n' + b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n' + b'\r\n' + b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n' + ) + + +@pytest.mark.asyncio +async def test_multipart_send_with_combination_changeset_middle(): + transport = MockAsyncHttpTransport() + changeset = HttpRequest(None, None) + 
changeset.set_multipart_mixed( + HttpRequest("DELETE", "/container1/blob1"), + boundary="changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525" + ) + request = HttpRequest("POST", "http://account.blob.core.windows.net/?comp=batch") + request.set_multipart_mixed( + HttpRequest("DELETE", "/container0/blob0"), + changeset, + HttpRequest("DELETE", "/container2/blob2"), + boundary="batch_357de4f7-6d0b-4e02-8cd2-6361411a9525" + ) + + async with AsyncPipeline(transport) as pipeline: + await pipeline.run(request) + + assert request.body == ( + b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n' + b'Content-Type: application/http\r\n' + b'Content-Transfer-Encoding: binary\r\n' + b'Content-ID: 0\r\n' + b'\r\n' + b'DELETE /container0/blob0 HTTP/1.1\r\n' + b'\r\n' + b'\r\n' + b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n' + b'Content-Type: multipart/mixed; boundary=changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n' + b'\r\n' + b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n' + b'Content-Type: application/http\r\n' + b'Content-Transfer-Encoding: binary\r\n' + b'Content-ID: 1\r\n' + b'\r\n' + b'DELETE /container1/blob1 HTTP/1.1\r\n' + b'\r\n' + b'\r\n' + b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n' + b'\r\n' + b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n' + b'Content-Type: application/http\r\n' + b'Content-Transfer-Encoding: binary\r\n' + b'Content-ID: 2\r\n' + b'\r\n' + b'DELETE /container2/blob2 HTTP/1.1\r\n' + b'\r\n' + b'\r\n' + b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n' + ) - class MockResponse(AsyncHttpResponse): - def __init__(self, request, body, content_type): - super(MockResponse, self).__init__(request, None) - self._body = body - self.content_type = content_type - def body(self): - return self._body +@pytest.mark.asyncio +async def test_multipart_receive(): class ResponsePolicy(object): def on_response(self, request, response): @@ -232,22 +503,349 @@ async def on_response(self, request, response): @pytest.mark.asyncio -async def test_multipart_receive_with_bom(): +async def test_multipart_receive_with_one_changeset(): + changeset = HttpRequest(None, None) + changeset.set_multipart_mixed( + HttpRequest("DELETE", "/container0/blob0"), + HttpRequest("DELETE", "/container1/blob1") + ) - req0 = HttpRequest("DELETE", "/container0/blob0") + request = HttpRequest("POST", "http://account.blob.core.windows.net/?comp=batch") + request.set_multipart_mixed(changeset) + body_as_bytes = ( + b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed\r\n' + b'Content-Type: multipart/mixed; boundary="changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525"\r\n' + b'\r\n' + b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n' + b'Content-Type: application/http\r\n' + b'Content-Transfer-Encoding: binary\r\n' + b'Content-ID: 0\r\n' + b'\r\n' + b'HTTP/1.1 202 Accepted\r\n' + b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n' + b'x-ms-version: 2018-11-09\r\n' + b'\r\n' + b'\r\n' + b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n' + b'Content-Type: application/http\r\n' + b'Content-Transfer-Encoding: binary\r\n' + b'Content-ID: 1\r\n' + b'\r\n' + b'HTTP/1.1 202 Accepted\r\n' + b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n' + b'x-ms-version: 2018-11-09\r\n' + b'\r\n' + b'\r\n' + b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n' + b'\r\n' + b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed--\r\n' + ) + + response = MockResponse( + request, + body_as_bytes, + "multipart/mixed; boundary=batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed" + ) + + parts 
= [] + async for part in response.parts(): + parts.append(part) + assert len(parts) == 2 + + res0 = parts[0] + assert res0.status_code == 202 + + +@pytest.mark.asyncio +async def test_multipart_receive_with_multiple_changesets(): + + changeset1 = HttpRequest(None, None) + changeset1.set_multipart_mixed( + HttpRequest("DELETE", "/container0/blob0"), + HttpRequest("DELETE", "/container1/blob1") + ) + changeset2 = HttpRequest(None, None) + changeset2.set_multipart_mixed( + HttpRequest("DELETE", "/container2/blob2"), + HttpRequest("DELETE", "/container3/blob3") + ) request = HttpRequest("POST", "http://account.blob.core.windows.net/?comp=batch") - request.set_multipart_mixed(req0) + request.set_multipart_mixed(changeset1, changeset2) + body_as_bytes = ( + b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed\r\n' + b'Content-Type: multipart/mixed; boundary="changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525"\r\n' + b'\r\n' + b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n' + b'Content-Type: application/http\r\n' + b'Content-Transfer-Encoding: binary\r\n' + b'Content-ID: 0\r\n' + b'\r\n' + b'HTTP/1.1 200\r\n' + b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n' + b'x-ms-version: 2018-11-09\r\n' + b'\r\n' + b'\r\n' + b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n' + b'Content-Type: application/http\r\n' + b'Content-Transfer-Encoding: binary\r\n' + b'Content-ID: 1\r\n' + b'\r\n' + b'HTTP/1.1 202\r\n' + b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n' + b'x-ms-version: 2018-11-09\r\n' + b'\r\n' + b'\r\n' + b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n' + b'\r\n' + b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed\r\n' + b'Content-Type: multipart/mixed; boundary="changeset_8b9e487e-a353-4dcb-a6f4-0688191e0314"\r\n' + b'\r\n' + b'--changeset_8b9e487e-a353-4dcb-a6f4-0688191e0314\r\n' + b'Content-Type: application/http\r\n' + b'Content-Transfer-Encoding: binary\r\n' + b'Content-ID: 2\r\n' + b'\r\n' + b'HTTP/1.1 404\r\n' + b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n' + b'x-ms-version: 2018-11-09\r\n' + b'\r\n' + b'\r\n' + b'--changeset_8b9e487e-a353-4dcb-a6f4-0688191e0314\r\n' + b'Content-Type: application/http\r\n' + b'Content-Transfer-Encoding: binary\r\n' + b'Content-ID: 3\r\n' + b'\r\n' + b'HTTP/1.1 409\r\n' + b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n' + b'x-ms-version: 2018-11-09\r\n' + b'\r\n' + b'\r\n' + b'--changeset_8b9e487e-a353-4dcb-a6f4-0688191e0314--\r\n' + b'\r\n' + b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed--\r\n' + ) - class MockResponse(AsyncHttpResponse): - def __init__(self, request, body, content_type): - super(MockResponse, self).__init__(request, None) - self._body = body - self.content_type = content_type + response = MockResponse( + request, + body_as_bytes, + "multipart/mixed; boundary=batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed" + ) - def body(self): - return self._body + parts = [] + async for part in response.parts(): + parts.append(part) + assert len(parts) == 4 + assert parts[0].status_code == 200 + assert parts[1].status_code == 202 + assert parts[2].status_code == 404 + assert parts[3].status_code == 409 + + +@pytest.mark.asyncio +async def test_multipart_receive_with_combination_changeset_first(): + changeset = HttpRequest(None, None) + changeset.set_multipart_mixed( + HttpRequest("DELETE", "/container0/blob0"), + HttpRequest("DELETE", "/container1/blob1") + ) + + request = HttpRequest("POST", "http://account.blob.core.windows.net/?comp=batch") + 
request.set_multipart_mixed(changeset, HttpRequest("DELETE", "/container2/blob2")) + body_as_bytes = ( + b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed\r\n' + b'Content-Type: multipart/mixed; boundary="changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525"\r\n' + b'\r\n' + b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n' + b'Content-Type: application/http\r\n' + b'Content-Transfer-Encoding: binary\r\n' + b'Content-ID: 0\r\n' + b'\r\n' + b'HTTP/1.1 200\r\n' + b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n' + b'x-ms-version: 2018-11-09\r\n' + b'\r\n' + b'\r\n' + b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n' + b'Content-Type: application/http\r\n' + b'Content-Transfer-Encoding: binary\r\n' + b'Content-ID: 1\r\n' + b'\r\n' + b'HTTP/1.1 202\r\n' + b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n' + b'x-ms-version: 2018-11-09\r\n' + b'\r\n' + b'\r\n' + b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n' + b'\r\n' + b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed\r\n' + b'Content-Type: application/http\r\n' + b'Content-Transfer-Encoding: binary\r\n' + b'Content-ID: 2\r\n' + b'\r\n' + b'HTTP/1.1 404\r\n' + b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n' + b'x-ms-version: 2018-11-09\r\n' + b'\r\n' + b'\r\n' + b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed--\r\n' + ) + + response = MockResponse( + request, + body_as_bytes, + "multipart/mixed; boundary=batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed" + ) + + parts = [] + async for part in response.parts(): + parts.append(part) + assert len(parts) == 3 + assert parts[0].status_code == 200 + assert parts[1].status_code == 202 + assert parts[2].status_code == 404 + + +@pytest.mark.asyncio +async def test_multipart_receive_with_combination_changeset_middle(): + + changeset = HttpRequest(None, None) + changeset.set_multipart_mixed(HttpRequest("DELETE", "/container1/blob1")) + + request = HttpRequest("POST", "http://account.blob.core.windows.net/?comp=batch") + request.set_multipart_mixed( + HttpRequest("DELETE", "/container0/blob0"), + changeset, + HttpRequest("DELETE", "/container2/blob2") + ) + body_as_bytes = ( + b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed\r\n' + b'Content-Type: application/http\r\n' + b'Content-Transfer-Encoding: binary\r\n' + b'Content-ID: 2\r\n' + b'\r\n' + b'HTTP/1.1 200\r\n' + b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n' + b'x-ms-version: 2018-11-09\r\n' + b'\r\n' + b'\r\n' + b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed\r\n' + b'Content-Type: multipart/mixed; boundary="changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525"\r\n' + b'\r\n' + b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n' + b'Content-Type: application/http\r\n' + b'Content-Transfer-Encoding: binary\r\n' + b'Content-ID: 0\r\n' + b'\r\n' + b'HTTP/1.1 202\r\n' + b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n' + b'x-ms-version: 2018-11-09\r\n' + b'\r\n' + b'\r\n' + b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n' + b'\r\n' + b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed\r\n' + b'Content-Type: application/http\r\n' + b'Content-Transfer-Encoding: binary\r\n' + b'Content-ID: 2\r\n' + b'\r\n' + b'HTTP/1.1 404\r\n' + b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n' + b'x-ms-version: 2018-11-09\r\n' + b'\r\n' + b'\r\n' + b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed--\r\n' + ) + + response = MockResponse( + request, + body_as_bytes, + "multipart/mixed; 
boundary=batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed" + ) + + parts = [] + async for part in response.parts(): + parts.append(part) + assert len(parts) == 3 + assert parts[0].status_code == 200 + assert parts[1].status_code == 202 + assert parts[2].status_code == 404 + + +@pytest.mark.asyncio +async def test_multipart_receive_with_combination_changeset_last(): + + changeset = HttpRequest(None, None) + changeset.set_multipart_mixed( + HttpRequest("DELETE", "/container1/blob1"), + HttpRequest("DELETE", "/container2/blob2") + ) + + request = HttpRequest("POST", "http://account.blob.core.windows.net/?comp=batch") + request.set_multipart_mixed(HttpRequest("DELETE", "/container0/blob0"), changeset) + + body_as_bytes = ( + b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed\r\n' + b'Content-Type: application/http\r\n' + b'Content-Transfer-Encoding: binary\r\n' + b'Content-ID: 2\r\n' + b'\r\n' + b'HTTP/1.1 200\r\n' + b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n' + b'x-ms-version: 2018-11-09\r\n' + b'\r\n' + b'\r\n' + b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed\r\n' + b'Content-Type: multipart/mixed; boundary="changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525"\r\n' + b'\r\n' + b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n' + b'Content-Type: application/http\r\n' + b'Content-Transfer-Encoding: binary\r\n' + b'Content-ID: 0\r\n' + b'\r\n' + b'HTTP/1.1 202\r\n' + b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n' + b'x-ms-version: 2018-11-09\r\n' + b'\r\n' + b'\r\n' + b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n' + b'Content-Type: application/http\r\n' + b'Content-Transfer-Encoding: binary\r\n' + b'Content-ID: 1\r\n' + b'\r\n' + b'HTTP/1.1 404\r\n' + b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n' + b'x-ms-version: 2018-11-09\r\n' + b'\r\n' + b'\r\n' + b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n' + b'\r\n' + b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed--\r\n' + ) + + response = MockResponse( + request, + body_as_bytes, + "multipart/mixed; boundary=batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed" + ) + + parts = [] + async for part in response.parts(): + parts.append(part) + assert len(parts) == 3 + assert parts[0].status_code == 200 + assert parts[1].status_code == 202 + assert parts[2].status_code == 404 + + +@pytest.mark.asyncio +async def test_multipart_receive_with_bom(): + + req0 = HttpRequest("DELETE", "/container0/blob0") + + request = HttpRequest("POST", "http://account.blob.core.windows.net/?comp=batch") + request.set_multipart_mixed(req0) body_as_bytes = ( b"--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed\n" b"Content-Type: application/http\n" @@ -288,16 +886,6 @@ async def test_recursive_multipart_receive(): request = HttpRequest("POST", "http://account.blob.core.windows.net/?comp=batch") request.set_multipart_mixed(req0) - - class MockResponse(AsyncHttpResponse): - def __init__(self, request, body, content_type): - super(MockResponse, self).__init__(request, None) - self._body = body - self.content_type = content_type - - def body(self): - return self._body - internal_body_as_str = ( "--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed\r\n" "Content-Type: application/http\r\n" diff --git a/sdk/core/azure-core/tests/test_basic_transport.py b/sdk/core/azure-core/tests/test_basic_transport.py index 25909791f9fa..d3d13eac2089 100644 --- a/sdk/core/azure-core/tests/test_basic_transport.py +++ b/sdk/core/azure-core/tests/test_basic_transport.py @@ -21,6 +21,16 @@ import pytest +class 
MockResponse(HttpResponse): + def __init__(self, request, body, content_type): + super(MockResponse, self).__init__(request, None) + self._body = body + self.content_type = content_type + + def body(self): + return self._body + + @pytest.mark.skipif(sys.version_info < (3, 6), reason="Multipart serialization not supported on 2.7 + dict order not deterministic on 3.5") def test_http_request_serialization(): # Method + Url @@ -212,7 +222,6 @@ def test_multipart_send(): @pytest.mark.skipif(sys.version_info < (3, 0), reason="Multipart serialization not supported on 2.7") def test_multipart_send_with_context(): - transport = mock.MagicMock(spec=HttpTransport) header_policy = HeadersPolicy({ @@ -259,16 +268,341 @@ def test_multipart_send_with_context(): ) -def test_multipart_receive(): +@pytest.mark.skipif(sys.version_info < (3, 0), reason="Multipart serialization not supported on 2.7") +def test_multipart_send_with_one_changeset(): + + transport = mock.MagicMock(spec=HttpTransport) + + header_policy = HeadersPolicy({ + 'x-ms-date': 'Thu, 14 Jun 2018 16:46:54 GMT' + }) + + requests = [ + HttpRequest("DELETE", "/container0/blob0"), + HttpRequest("DELETE", "/container1/blob1") + ] + + changeset = HttpRequest(None, None) + changeset.set_multipart_mixed( + *requests, + policies=[header_policy], + boundary="changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525" + ) + + request = HttpRequest("POST", "http://account.blob.core.windows.net/?comp=batch") + request.set_multipart_mixed( + changeset, + boundary="batch_357de4f7-6d0b-4e02-8cd2-6361411a9525", + ) + + with Pipeline(transport) as pipeline: + pipeline.run(request) + + assert request.body == ( + b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n' + b'Content-Type: multipart/mixed; boundary=changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n' + b'\r\n' + b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n' + b'Content-Type: application/http\r\n' + b'Content-Transfer-Encoding: binary\r\n' + b'Content-ID: 0\r\n' + b'\r\n' + b'DELETE /container0/blob0 HTTP/1.1\r\n' + b'x-ms-date: Thu, 14 Jun 2018 16:46:54 GMT\r\n' + b'\r\n' + b'\r\n' + b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n' + b'Content-Type: application/http\r\n' + b'Content-Transfer-Encoding: binary\r\n' + b'Content-ID: 1\r\n' + b'\r\n' + b'DELETE /container1/blob1 HTTP/1.1\r\n' + b'x-ms-date: Thu, 14 Jun 2018 16:46:54 GMT\r\n' + b'\r\n' + b'\r\n' + b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n' + b'\r\n' + b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n' + ) + + +@pytest.mark.skipif(sys.version_info < (3, 0), reason="Multipart serialization not supported on 2.7") +def test_multipart_send_with_multiple_changesets(): + + transport = mock.MagicMock(spec=HttpTransport) + + header_policy = HeadersPolicy({ + 'x-ms-date': 'Thu, 14 Jun 2018 16:46:54 GMT' + }) + + changeset1 = HttpRequest(None, None) + changeset1.set_multipart_mixed( + HttpRequest("DELETE", "/container0/blob0"), + HttpRequest("DELETE", "/container1/blob1"), + policies=[header_policy], + boundary="changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525" + ) + changeset2 = HttpRequest(None, None) + changeset2.set_multipart_mixed( + HttpRequest("DELETE", "/container2/blob2"), + HttpRequest("DELETE", "/container3/blob3"), + policies=[header_policy], + boundary="changeset_8b9e487e-a353-4dcb-a6f4-0688191e0314" + ) + + request = HttpRequest("POST", "http://account.blob.core.windows.net/?comp=batch") + request.set_multipart_mixed( + changeset1, + changeset2, + policies=[header_policy], + 
boundary="batch_357de4f7-6d0b-4e02-8cd2-6361411a9525", + ) + + with Pipeline(transport) as pipeline: + pipeline.run(request) + + assert request.body == ( + b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n' + b'Content-Type: multipart/mixed; boundary=changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n' + b'\r\n' + b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n' + b'Content-Type: application/http\r\n' + b'Content-Transfer-Encoding: binary\r\n' + b'Content-ID: 0\r\n' + b'\r\n' + b'DELETE /container0/blob0 HTTP/1.1\r\n' + b'x-ms-date: Thu, 14 Jun 2018 16:46:54 GMT\r\n' + b'\r\n' + b'\r\n' + b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n' + b'Content-Type: application/http\r\n' + b'Content-Transfer-Encoding: binary\r\n' + b'Content-ID: 1\r\n' + b'\r\n' + b'DELETE /container1/blob1 HTTP/1.1\r\n' + b'x-ms-date: Thu, 14 Jun 2018 16:46:54 GMT\r\n' + b'\r\n' + b'\r\n' + b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n' + b'\r\n' + b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n' + b'Content-Type: multipart/mixed; boundary=changeset_8b9e487e-a353-4dcb-a6f4-0688191e0314\r\n' + b'\r\n' + b'--changeset_8b9e487e-a353-4dcb-a6f4-0688191e0314\r\n' + b'Content-Type: application/http\r\n' + b'Content-Transfer-Encoding: binary\r\n' + b'Content-ID: 2\r\n' + b'\r\n' + b'DELETE /container2/blob2 HTTP/1.1\r\n' + b'x-ms-date: Thu, 14 Jun 2018 16:46:54 GMT\r\n' + b'\r\n' + b'\r\n' + b'--changeset_8b9e487e-a353-4dcb-a6f4-0688191e0314\r\n' + b'Content-Type: application/http\r\n' + b'Content-Transfer-Encoding: binary\r\n' + b'Content-ID: 3\r\n' + b'\r\n' + b'DELETE /container3/blob3 HTTP/1.1\r\n' + b'x-ms-date: Thu, 14 Jun 2018 16:46:54 GMT\r\n' + b'\r\n' + b'\r\n' + b'--changeset_8b9e487e-a353-4dcb-a6f4-0688191e0314--\r\n' + b'\r\n' + b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n' + ) + + +@pytest.mark.skipif(sys.version_info < (3, 0), reason="Multipart serialization not supported on 2.7") +def test_multipart_send_with_combination_changeset_first(): + + transport = mock.MagicMock(spec=HttpTransport) + + header_policy = HeadersPolicy({ + 'x-ms-date': 'Thu, 14 Jun 2018 16:46:54 GMT' + }) + + changeset = HttpRequest(None, None) + changeset.set_multipart_mixed( + HttpRequest("DELETE", "/container0/blob0"), + HttpRequest("DELETE", "/container1/blob1"), + policies=[header_policy], + boundary="changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525" + ) + request = HttpRequest("POST", "http://account.blob.core.windows.net/?comp=batch") + request.set_multipart_mixed( + changeset, + HttpRequest("DELETE", "/container2/blob2"), + policies=[header_policy], + boundary="batch_357de4f7-6d0b-4e02-8cd2-6361411a9525" + ) + + with Pipeline(transport) as pipeline: + pipeline.run(request) + + assert request.body == ( + b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n' + b'Content-Type: multipart/mixed; boundary=changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n' + b'\r\n' + b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n' + b'Content-Type: application/http\r\n' + b'Content-Transfer-Encoding: binary\r\n' + b'Content-ID: 0\r\n' + b'\r\n' + b'DELETE /container0/blob0 HTTP/1.1\r\n' + b'x-ms-date: Thu, 14 Jun 2018 16:46:54 GMT\r\n' + b'\r\n' + b'\r\n' + b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n' + b'Content-Type: application/http\r\n' + b'Content-Transfer-Encoding: binary\r\n' + b'Content-ID: 1\r\n' + b'\r\n' + b'DELETE /container1/blob1 HTTP/1.1\r\n' + b'x-ms-date: Thu, 14 Jun 2018 16:46:54 GMT\r\n' + b'\r\n' + b'\r\n' + b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n' + b'\r\n' + 
b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n' + b'Content-Type: application/http\r\n' + b'Content-Transfer-Encoding: binary\r\n' + b'Content-ID: 2\r\n' + b'\r\n' + b'DELETE /container2/blob2 HTTP/1.1\r\n' + b'x-ms-date: Thu, 14 Jun 2018 16:46:54 GMT\r\n' + b'\r\n' + b'\r\n' + b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n' + ) + +@pytest.mark.skipif(sys.version_info < (3, 0), reason="Multipart serialization not supported on 2.7") +def test_multipart_send_with_combination_changeset_last(): + + transport = mock.MagicMock(spec=HttpTransport) + + header_policy = HeadersPolicy({ + 'x-ms-date': 'Thu, 14 Jun 2018 16:46:54 GMT' + }) + + changeset = HttpRequest(None, None) + changeset.set_multipart_mixed( + HttpRequest("DELETE", "/container1/blob1"), + HttpRequest("DELETE", "/container2/blob2"), + policies=[header_policy], + boundary="changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525" + ) + request = HttpRequest("POST", "http://account.blob.core.windows.net/?comp=batch") + request.set_multipart_mixed( + HttpRequest("DELETE", "/container0/blob0"), + changeset, + policies=[header_policy], + boundary="batch_357de4f7-6d0b-4e02-8cd2-6361411a9525" + ) + + with Pipeline(transport) as pipeline: + pipeline.run(request) + + assert request.body == ( + b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n' + b'Content-Type: application/http\r\n' + b'Content-Transfer-Encoding: binary\r\n' + b'Content-ID: 0\r\n' + b'\r\n' + b'DELETE /container0/blob0 HTTP/1.1\r\n' + b'x-ms-date: Thu, 14 Jun 2018 16:46:54 GMT\r\n' + b'\r\n' + b'\r\n' + b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n' + b'Content-Type: multipart/mixed; boundary=changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n' + b'\r\n' + b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n' + b'Content-Type: application/http\r\n' + b'Content-Transfer-Encoding: binary\r\n' + b'Content-ID: 1\r\n' + b'\r\n' + b'DELETE /container1/blob1 HTTP/1.1\r\n' + b'x-ms-date: Thu, 14 Jun 2018 16:46:54 GMT\r\n' + b'\r\n' + b'\r\n' + b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n' + b'Content-Type: application/http\r\n' + b'Content-Transfer-Encoding: binary\r\n' + b'Content-ID: 2\r\n' + b'\r\n' + b'DELETE /container2/blob2 HTTP/1.1\r\n' + b'x-ms-date: Thu, 14 Jun 2018 16:46:54 GMT\r\n' + b'\r\n' + b'\r\n' + b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n' + b'\r\n' + b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n' + ) + +@pytest.mark.skipif(sys.version_info < (3, 0), reason="Multipart serialization not supported on 2.7") +def test_multipart_send_with_combination_changeset_middle(): + + transport = mock.MagicMock(spec=HttpTransport) + + header_policy = HeadersPolicy({ + 'x-ms-date': 'Thu, 14 Jun 2018 16:46:54 GMT' + }) + + changeset = HttpRequest(None, None) + changeset.set_multipart_mixed( + HttpRequest("DELETE", "/container1/blob1"), + policies=[header_policy], + boundary="changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525" + ) + request = HttpRequest("POST", "http://account.blob.core.windows.net/?comp=batch") + request.set_multipart_mixed( + HttpRequest("DELETE", "/container0/blob0"), + changeset, + HttpRequest("DELETE", "/container2/blob2"), + policies=[header_policy], + boundary="batch_357de4f7-6d0b-4e02-8cd2-6361411a9525" + ) + + with Pipeline(transport) as pipeline: + pipeline.run(request) + + assert request.body == ( + b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n' + b'Content-Type: application/http\r\n' + b'Content-Transfer-Encoding: binary\r\n' + b'Content-ID: 0\r\n' + b'\r\n' + b'DELETE /container0/blob0 HTTP/1.1\r\n' + b'x-ms-date: 
Thu, 14 Jun 2018 16:46:54 GMT\r\n' + b'\r\n' + b'\r\n' + b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n' + b'Content-Type: multipart/mixed; boundary=changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n' + b'\r\n' + b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n' + b'Content-Type: application/http\r\n' + b'Content-Transfer-Encoding: binary\r\n' + b'Content-ID: 1\r\n' + b'\r\n' + b'DELETE /container1/blob1 HTTP/1.1\r\n' + b'x-ms-date: Thu, 14 Jun 2018 16:46:54 GMT\r\n' + b'\r\n' + b'\r\n' + b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n' + b'\r\n' + b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n' + b'Content-Type: application/http\r\n' + b'Content-Transfer-Encoding: binary\r\n' + b'Content-ID: 2\r\n' + b'\r\n' + b'DELETE /container2/blob2 HTTP/1.1\r\n' + b'x-ms-date: Thu, 14 Jun 2018 16:46:54 GMT\r\n' + b'\r\n' + b'\r\n' + b'--batch_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n' + ) - class MockResponse(HttpResponse): - def __init__(self, request, body, content_type): - super(MockResponse, self).__init__(request, None) - self._body = body - self.content_type = content_type - def body(self): - return self._body +def test_multipart_receive(): class ResponsePolicy(object): def on_response(self, request, response): @@ -330,22 +664,347 @@ def on_response(self, request, response): assert res1.status_code == 404 assert res1.headers['x-ms-fun'] == 'true' -def test_multipart_receive_with_bom(): - req0 = HttpRequest("DELETE", "/container0/blob0") +def test_multipart_receive_with_one_changeset(): + + changeset = HttpRequest(None, None) + changeset.set_multipart_mixed( + HttpRequest("DELETE", "/container0/blob0"), + HttpRequest("DELETE", "/container1/blob1") + ) request = HttpRequest("POST", "http://account.blob.core.windows.net/?comp=batch") - request.set_multipart_mixed(req0) + request.set_multipart_mixed(changeset) + + body_as_bytes = ( + b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed\r\n' + b'Content-Type: multipart/mixed; boundary="changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525"\r\n' + b'\r\n' + b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n' + b'Content-Type: application/http\r\n' + b'Content-Transfer-Encoding: binary\r\n' + b'Content-ID: 0\r\n' + b'\r\n' + b'HTTP/1.1 202 Accepted\r\n' + b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n' + b'x-ms-version: 2018-11-09\r\n' + b'\r\n' + b'\r\n' + b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n' + b'Content-Type: application/http\r\n' + b'Content-Transfer-Encoding: binary\r\n' + b'Content-ID: 1\r\n' + b'\r\n' + b'HTTP/1.1 202 Accepted\r\n' + b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n' + b'x-ms-version: 2018-11-09\r\n' + b'\r\n' + b'\r\n' + b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n' + b'\r\n' + b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed--\r\n' + ) + + response = MockResponse( + request, + body_as_bytes, + "multipart/mixed; boundary=batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed" + ) + + parts = [] + for part in response.parts(): + parts.append(part) + assert len(parts) == 2 + + res0 = parts[0] + assert res0.status_code == 202 + + +def test_multipart_receive_with_multiple_changesets(): + + changeset1 = HttpRequest(None, None) + changeset1.set_multipart_mixed( + HttpRequest("DELETE", "/container0/blob0"), + HttpRequest("DELETE", "/container1/blob1") + ) + changeset2 = HttpRequest(None, None) + changeset2.set_multipart_mixed( + HttpRequest("DELETE", "/container2/blob2"), + HttpRequest("DELETE", "/container3/blob3") + ) - class MockResponse(HttpResponse): - 
def __init__(self, request, body, content_type): - super(MockResponse, self).__init__(request, None) - self._body = body - self.content_type = content_type + request = HttpRequest("POST", "http://account.blob.core.windows.net/?comp=batch") + request.set_multipart_mixed(changeset1, changeset2) + body_as_bytes = ( + b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed\r\n' + b'Content-Type: multipart/mixed; boundary="changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525"\r\n' + b'\r\n' + b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n' + b'Content-Type: application/http\r\n' + b'Content-Transfer-Encoding: binary\r\n' + b'Content-ID: 0\r\n' + b'\r\n' + b'HTTP/1.1 200\r\n' + b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n' + b'x-ms-version: 2018-11-09\r\n' + b'\r\n' + b'\r\n' + b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n' + b'Content-Type: application/http\r\n' + b'Content-Transfer-Encoding: binary\r\n' + b'Content-ID: 1\r\n' + b'\r\n' + b'HTTP/1.1 202\r\n' + b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n' + b'x-ms-version: 2018-11-09\r\n' + b'\r\n' + b'\r\n' + b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n' + b'\r\n' + b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed\r\n' + b'Content-Type: multipart/mixed; boundary="changeset_8b9e487e-a353-4dcb-a6f4-0688191e0314"\r\n' + b'\r\n' + b'--changeset_8b9e487e-a353-4dcb-a6f4-0688191e0314\r\n' + b'Content-Type: application/http\r\n' + b'Content-Transfer-Encoding: binary\r\n' + b'Content-ID: 2\r\n' + b'\r\n' + b'HTTP/1.1 404\r\n' + b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n' + b'x-ms-version: 2018-11-09\r\n' + b'\r\n' + b'\r\n' + b'--changeset_8b9e487e-a353-4dcb-a6f4-0688191e0314\r\n' + b'Content-Type: application/http\r\n' + b'Content-Transfer-Encoding: binary\r\n' + b'Content-ID: 3\r\n' + b'\r\n' + b'HTTP/1.1 409\r\n' + b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n' + b'x-ms-version: 2018-11-09\r\n' + b'\r\n' + b'\r\n' + b'--changeset_8b9e487e-a353-4dcb-a6f4-0688191e0314--\r\n' + b'\r\n' + b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed--\r\n' + ) + + response = MockResponse( + request, + body_as_bytes, + "multipart/mixed; boundary=batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed" + ) + + parts = [] + for part in response.parts(): + parts.append(part) + assert len(parts) == 4 + assert parts[0].status_code == 200 + assert parts[1].status_code == 202 + assert parts[2].status_code == 404 + assert parts[3].status_code == 409 - def body(self): - return self._body +def test_multipart_receive_with_combination_changeset_first(): + + changeset = HttpRequest(None, None) + changeset.set_multipart_mixed( + HttpRequest("DELETE", "/container0/blob0"), + HttpRequest("DELETE", "/container1/blob1") + ) + + request = HttpRequest("POST", "http://account.blob.core.windows.net/?comp=batch") + request.set_multipart_mixed(changeset, HttpRequest("DELETE", "/container2/blob2")) + body_as_bytes = ( + b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed\r\n' + b'Content-Type: multipart/mixed; boundary="changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525"\r\n' + b'\r\n' + b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n' + b'Content-Type: application/http\r\n' + b'Content-Transfer-Encoding: binary\r\n' + b'Content-ID: 0\r\n' + b'\r\n' + b'HTTP/1.1 200\r\n' + b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n' + b'x-ms-version: 2018-11-09\r\n' + b'\r\n' + b'\r\n' + b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n' + b'Content-Type: application/http\r\n' + 
b'Content-Transfer-Encoding: binary\r\n' + b'Content-ID: 1\r\n' + b'\r\n' + b'HTTP/1.1 202\r\n' + b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n' + b'x-ms-version: 2018-11-09\r\n' + b'\r\n' + b'\r\n' + b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n' + b'\r\n' + b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed\r\n' + b'Content-Type: application/http\r\n' + b'Content-Transfer-Encoding: binary\r\n' + b'Content-ID: 2\r\n' + b'\r\n' + b'HTTP/1.1 404\r\n' + b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n' + b'x-ms-version: 2018-11-09\r\n' + b'\r\n' + b'\r\n' + b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed--\r\n' + ) + + response = MockResponse( + request, + body_as_bytes, + "multipart/mixed; boundary=batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed" + ) + + parts = [] + for part in response.parts(): + parts.append(part) + assert len(parts) == 3 + assert parts[0].status_code == 200 + assert parts[1].status_code == 202 + assert parts[2].status_code == 404 + + +def test_multipart_receive_with_combination_changeset_middle(): + + changeset = HttpRequest(None, None) + changeset.set_multipart_mixed(HttpRequest("DELETE", "/container1/blob1")) + + request = HttpRequest("POST", "http://account.blob.core.windows.net/?comp=batch") + request.set_multipart_mixed( + HttpRequest("DELETE", "/container0/blob0"), + changeset, + HttpRequest("DELETE", "/container2/blob2") + ) + body_as_bytes = ( + b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed\r\n' + b'Content-Type: application/http\r\n' + b'Content-Transfer-Encoding: binary\r\n' + b'Content-ID: 2\r\n' + b'\r\n' + b'HTTP/1.1 200\r\n' + b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n' + b'x-ms-version: 2018-11-09\r\n' + b'\r\n' + b'\r\n' + b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed\r\n' + b'Content-Type: multipart/mixed; boundary="changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525"\r\n' + b'\r\n' + b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n' + b'Content-Type: application/http\r\n' + b'Content-Transfer-Encoding: binary\r\n' + b'Content-ID: 0\r\n' + b'\r\n' + b'HTTP/1.1 202\r\n' + b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n' + b'x-ms-version: 2018-11-09\r\n' + b'\r\n' + b'\r\n' + b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n' + b'\r\n' + b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed\r\n' + b'Content-Type: application/http\r\n' + b'Content-Transfer-Encoding: binary\r\n' + b'Content-ID: 2\r\n' + b'\r\n' + b'HTTP/1.1 404\r\n' + b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n' + b'x-ms-version: 2018-11-09\r\n' + b'\r\n' + b'\r\n' + b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed--\r\n' + ) + + response = MockResponse( + request, + body_as_bytes, + "multipart/mixed; boundary=batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed" + ) + + parts = [] + for part in response.parts(): + parts.append(part) + assert len(parts) == 3 + assert parts[0].status_code == 200 + assert parts[1].status_code == 202 + assert parts[2].status_code == 404 + + +def test_multipart_receive_with_combination_changeset_last(): + + changeset = HttpRequest(None, None) + changeset.set_multipart_mixed( + HttpRequest("DELETE", "/container1/blob1"), + HttpRequest("DELETE", "/container2/blob2") + ) + + request = HttpRequest("POST", "http://account.blob.core.windows.net/?comp=batch") + request.set_multipart_mixed(HttpRequest("DELETE", "/container0/blob0"), changeset) + + body_as_bytes = ( + b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed\r\n' + b'Content-Type: 
application/http\r\n' + b'Content-Transfer-Encoding: binary\r\n' + b'Content-ID: 2\r\n' + b'\r\n' + b'HTTP/1.1 200\r\n' + b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n' + b'x-ms-version: 2018-11-09\r\n' + b'\r\n' + b'\r\n' + b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed\r\n' + b'Content-Type: multipart/mixed; boundary="changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525"\r\n' + b'\r\n' + b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n' + b'Content-Type: application/http\r\n' + b'Content-Transfer-Encoding: binary\r\n' + b'Content-ID: 0\r\n' + b'\r\n' + b'HTTP/1.1 202\r\n' + b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n' + b'x-ms-version: 2018-11-09\r\n' + b'\r\n' + b'\r\n' + b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525\r\n' + b'Content-Type: application/http\r\n' + b'Content-Transfer-Encoding: binary\r\n' + b'Content-ID: 1\r\n' + b'\r\n' + b'HTTP/1.1 404\r\n' + b'x-ms-request-id: 778fdc83-801e-0000-62ff-0334671e284f\r\n' + b'x-ms-version: 2018-11-09\r\n' + b'\r\n' + b'\r\n' + b'--changeset_357de4f7-6d0b-4e02-8cd2-6361411a9525--\r\n' + b'\r\n' + b'--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed--\r\n' + ) + + response = MockResponse( + request, + body_as_bytes, + "multipart/mixed; boundary=batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed" + ) + + parts = [] + for part in response.parts(): + parts.append(part) + assert len(parts) == 3 + assert parts[0].status_code == 200 + assert parts[1].status_code == 202 + assert parts[2].status_code == 404 + + +def test_multipart_receive_with_bom(): + + req0 = HttpRequest("DELETE", "/container0/blob0") + + request = HttpRequest("POST", "http://account.blob.core.windows.net/?comp=batch") + request.set_multipart_mixed(req0) body_as_bytes = ( b"--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed\n" b"Content-Type: application/http\n" @@ -383,16 +1042,6 @@ def test_recursive_multipart_receive(): request = HttpRequest("POST", "http://account.blob.core.windows.net/?comp=batch") request.set_multipart_mixed(req0) - - class MockResponse(HttpResponse): - def __init__(self, request, body, content_type): - super(MockResponse, self).__init__(request, None) - self._body = body - self.content_type = content_type - - def body(self): - return self._body - internal_body_as_str = ( "--batchresponse_66925647-d0cb-4109-b6d3-28efe3e1e5ed\r\n" "Content-Type: application/http\r\n" @@ -435,6 +1084,7 @@ def body(self): internal_response0 = internal_response[0] assert internal_response0.status_code == 400 + def test_close_unopened_transport(): transport = RequestsTransport() transport.close() From 016e7c66427918b7f19fc897a041d9abe66d0bb9 Mon Sep 17 00:00:00 2001 From: Bryan Van de Ven Date: Tue, 19 May 2020 12:23:24 -0700 Subject: [PATCH 12/28] Update CODEOWNERS (#11516) --- .github/CODEOWNERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 6d7d66d65bae..f14d45781737 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -10,7 +10,7 @@ # Core /sdk/core/ @lmazuel @xiangyan99 @johanste -/sdk/core/azure-core/ @xiangyan99 @bryevdv @lmazuel +/sdk/core/azure-core/ @xiangyan99 @lmazuel # Service team /sdk/identity/ @chlowell @schaabs From cb52715da120c495a9fbd640fd91931748f9bacf Mon Sep 17 00:00:00 2001 From: annatisch Date: Tue, 19 May 2020 13:35:58 -0700 Subject: [PATCH 13/28] [cosmos] readme review feedback (#11527) * readme review feedback * Removed extra section --- sdk/cosmos/azure-cosmos/CHANGELOG.md | 2 +- sdk/cosmos/azure-cosmos/README.md | 97 
+++++++++++++---------
 2 files changed, 47 insertions(+), 52 deletions(-)

diff --git a/sdk/cosmos/azure-cosmos/CHANGELOG.md b/sdk/cosmos/azure-cosmos/CHANGELOG.md
index b64254da1684..ed50a4607451 100644
--- a/sdk/cosmos/azure-cosmos/CHANGELOG.md
+++ b/sdk/cosmos/azure-cosmos/CHANGELOG.md
@@ -1,6 +1,6 @@
 # Release History
 
-## 4.0.0 (2020-05-18)
+## 4.0.0 (2020-05-20)
 
 - Stable release.
 - Added HttpLoggingPolicy to pipeline to enable passing in a custom logger for request and response headers.
diff --git a/sdk/cosmos/azure-cosmos/README.md b/sdk/cosmos/azure-cosmos/README.md
index 064191219d8b..71a7889bc7fd 100644
--- a/sdk/cosmos/azure-cosmos/README.md
+++ b/sdk/cosmos/azure-cosmos/README.md
@@ -13,7 +13,7 @@ Use the Azure Cosmos DB SQL API SDK for Python to manage databases and the JSON
 
 ## Getting started
 
-
+### Prerequisites
 * Azure subscription - [Create a free account][azure_sub]
 * Azure [Cosmos DB account][cosmos_account] - SQL API
 * [Python 2.7 or 3.5.3+][python]
@@ -25,13 +25,13 @@ If you need a Cosmos DB SQL API account, you can create one with this [Azure CLI
 az cosmosdb create --resource-group  --name 
 ```
 
-## Installation
+### Install the package
 
 ```bash
 pip install azure-cosmos
 ```
 
-### Configure a virtual environment (optional)
+#### Configure a virtual environment (optional)
 
 Although not required, you can keep your base system and Azure SDK environments isolated from one another if you use a virtual environment. Execute the following commands to configure and then enter a virtual environment with [venv][venv]:
 
@@ -39,13 +39,10 @@ Although not required, you can keep your base system and Azure SDK environments
 python3 -m venv azure-cosmosdb-sdk-environment
 source azure-cosmosdb-sdk-environment/bin/activate
 ```
-
-## Key concepts
+### Authenticate the client
 
 Interaction with Cosmos DB starts with an instance of the [CosmosClient][ref_cosmosclient] class. You need an **account**, its **URI**, and one of its **account keys** to instantiate the client object.
 
-### Get credentials
-
 Use the Azure CLI snippet below to populate two environment variables with the database account URI and its primary master key (you can also find these values in the Azure portal). The snippet is formatted for the Bash shell.
 
 ```Bash
@@ -55,8 +52,7 @@ ACCT_NAME=
 
 export ACCOUNT_URI=$(az cosmosdb show --resource-group $RES_GROUP --name $ACCT_NAME --query documentEndpoint --output tsv)
 export ACCOUNT_KEY=$(az cosmosdb list-keys --resource-group $RES_GROUP --name $ACCT_NAME --query primaryMasterKey --output tsv)
 ```
-
-### Create client
+### Create the client
 
 Once you've populated the `ACCOUNT_URI` and `ACCOUNT_KEY` environment variables, you can create the [CosmosClient][ref_cosmosclient].
 
@@ -69,7 +65,7 @@ key = os.environ['ACCOUNT_KEY']
 client = CosmosClient(url, credential=key)
 ```
 
-## Usage
+## Key concepts
 
 Once you've initialized a [CosmosClient][ref_cosmosclient], you can interact with the primary resource types in Cosmos DB:
 
@@ -155,8 +151,9 @@ for i in range(1, 10):
 
 To delete items from a container, use [ContainerProxy.delete_item][ref_container_delete_item]. The SQL API in Cosmos DB does not support the SQL `DELETE` statement.
```Python -for item in container.query_items(query='SELECT * FROM products p WHERE p.productModel = "DISCONTINUED"', - enable_cross_partition_query=True): +for item in container.query_items( + query='SELECT * FROM products p WHERE p.productModel = "DISCONTINUED"', + enable_cross_partition_query=True): container.delete_item(item, partition_key='Pager') ``` @@ -175,8 +172,8 @@ container = database.get_container_client(container_name) # Enumerate the returned items import json for item in container.query_items( - query='SELECT * FROM mycontainer r WHERE r.id="item3"', - enable_cross_partition_query=True): + query='SELECT * FROM mycontainer r WHERE r.id="item3"', + enable_cross_partition_query=True): print(json.dumps(item, indent=True)) ``` @@ -215,10 +212,11 @@ Certain properties of an existing container can be modified. This example sets t ```Python database = client.get_database_client(database_name) container = database.get_container_client(container_name) -database.replace_container(container, - partition_key=PartitionKey(path="/productName"), - default_ttl=10, - ) +database.replace_container( + container, + partition_key=PartitionKey(path="/productName"), + default_ttl=10, +) # Display the new TTL setting for the container container_props = container.read() print(json.dumps(container_props['defaultTtl'])) @@ -226,39 +224,6 @@ print(json.dumps(container_props['defaultTtl'])) For more information on TTL, see [Time to Live for Azure Cosmos DB data][cosmos_ttl]. -## Optional Configuration - -Optional keyword arguments that can be passed in at the client and per-operation level. - -### Retry Policy configuration - -Use the following keyword arguments when instantiating a client to configure the retry policy: - -* __retry_total__ (int): Total number of retries to allow. Takes precedence over other counts. -Pass in `retry_total=0` if you do not want to retry on requests. Defaults to 10. -* __retry_connect__ (int): How many connection-related errors to retry on. Defaults to 3. -* __retry_read__ (int): How many times to retry on read errors. Defaults to 3. -* __retry_status__ (int): How many times to retry on bad status codes. Defaults to 3. - -### Other client / per-operation configuration - -Other optional configuration keyword arguments that can be specified on the client or per-operation. - -**Client keyword arguments:** - -* __enable_endpoint_discovery__ (bool): Enable endpoint discovery for geo-replicated database accounts. Default is `True`. -* __preferred_locations__ (list[str]): The preferred locations for geo-replicated database accounts. -* __connection_timeout__ (int): Optionally sets the connect and read timeout value, in seconds. -* __transport__ (Any): User-provided transport to send the HTTP request. - -**Per-operation keyword arguments:** - -* __raw_response_hook__ (callable): The given callback uses the response returned from the service. -* __user_agent__ (str): Appends the custom value to the user-agent header to be sent with the request. -* __logging_enable__ (bool): Enables logging at the DEBUG level. Defaults to False. Can also be passed in at -the client level to enable it for all requests. -* __headers__ (dict): Pass in custom headers as key, value pairs. E.g. `headers={'CustomValue': value}` -* __timeout__ (int): An absolute timeout in seconds, for the combined HTTP request and response processing. ## Troubleshooting @@ -278,6 +243,36 @@ except exceptions.CosmosResourceExistsError: HTTP status code 409: The ID (name) provided for the container is already in use. 
The container name must be unique within the database.""") +``` +### Logging +This library uses the standard +[logging](https://docs.python.org/3.5/library/logging.html) library for logging. +Basic information about HTTP sessions (URLs, headers, etc.) is logged at INFO +level. + +Detailed DEBUG level logging, including request/response bodies and unredacted +headers, can be enabled on a client with the `logging_enable` argument: +```python +import sys +import logging +from azure.cosmos import CosmosClient + +# Create a logger for the 'azure' SDK +logger = logging.getLogger('azure') +logger.setLevel(logging.DEBUG) + +# Configure a console output +handler = logging.StreamHandler(stream=sys.stdout) +logger.addHandler(handler) + +# This client will log detailed information about its HTTP sessions, at DEBUG level +client = CosmosClient(url, credential=key, logging_enable=True) +``` + +Similarly, `logging_enable` can enable detailed logging for a single operation, +even when it isn't enabled for the client: +```py +database = client.create_database(database_name, logging_enable=True) ``` ## Next steps From af6b719e7b6403ae9d51089c0e066321fc3dc988 Mon Sep 17 00:00:00 2001 From: Krista Pratico Date: Tue, 19 May 2020 14:16:50 -0700 Subject: [PATCH 14/28] rename input parameters (#11518) --- .../azure-ai-formrecognizer/CHANGELOG.md | 4 ++ .../azure-ai-formrecognizer/README.md | 2 +- .../formrecognizer/_form_recognizer_client.py | 48 +++++++++---------- .../aio/_form_recognizer_client_async.py | 48 +++++++++---------- ...s_trained_with_and_without_labels_async.py | 8 ++-- .../sample_get_bounding_boxes_async.py | 6 +-- .../sample_recognize_content_async.py | 6 +-- .../sample_recognize_custom_forms_async.py | 5 +- .../sample_recognize_receipts_async.py | 5 +- ...ample_recognize_receipts_from_url_async.py | 2 +- ..._models_trained_with_and_without_labels.py | 4 +- .../samples/sample_get_bounding_boxes.py | 2 +- .../samples/sample_recognize_content.py | 2 +- .../samples/sample_recognize_custom_forms.py | 2 +- .../samples/sample_recognize_receipts.py | 2 +- .../sample_recognize_receipts_from_url.py | 2 +- .../tests/test_custom_forms.py | 8 ++-- .../tests/test_custom_forms_async.py | 8 ++-- .../tests/test_custom_forms_from_url.py | 10 ++-- .../tests/test_custom_forms_from_url_async.py | 10 ++-- .../tests/test_receipt.py | 6 +-- .../tests/test_receipt_async.py | 6 +-- .../tests/test_receipt_from_url.py | 4 +- 23 files changed, 100 insertions(+), 100 deletions(-) diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/CHANGELOG.md b/sdk/formrecognizer/azure-ai-formrecognizer/CHANGELOG.md index 584b5c4032b4..c0bb755487c9 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/CHANGELOG.md +++ b/sdk/formrecognizer/azure-ai-formrecognizer/CHANGELOG.md @@ -14,6 +14,10 @@ - `FormField` does not have a page_number. - `begin_recognize_receipts` APIs now return `RecognizedReceipt` instead of `USReceipt` - `USReceiptType` is renamed to `ReceiptType` +- `stream` and `url` parameters found on methods for `FormRecognizerClient` have been renamed to `form` and `form_url`, respectively. +For recognize receipt methods, parameters have been renamed to `receipt` and `receipt_url`. 
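+
+For example, existing call sites update like this (`client`, `model_id`, `myfile`, and `url` are placeholder names used only for illustration):
+
+```python
+poller = client.begin_recognize_custom_forms(model_id=model_id, form=myfile)  # previously stream=myfile
+poller = client.begin_recognize_receipts_from_url(receipt_url=url)            # previously url=url
+```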
+ + **New features** diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/README.md b/sdk/formrecognizer/azure-ai-formrecognizer/README.md index 9e5545fe0e1f..d166e8ee201a 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/README.md +++ b/sdk/formrecognizer/azure-ai-formrecognizer/README.md @@ -172,7 +172,7 @@ model_id = "" with open("", "rb") as fd: form = fd.read() -poller = form_recognizer_client.begin_recognize_custom_forms(model_id=model_id, stream=form) +poller = form_recognizer_client.begin_recognize_custom_forms(model_id=model_id, form=form) result = poller.result() for recognized_form in result: diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_form_recognizer_client.py b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_form_recognizer_client.py index 38f5aaf25fd0..ea7a0f23d819 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_form_recognizer_client.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_form_recognizer_client.py @@ -77,15 +77,15 @@ def _receipt_callback(self, raw_response, _, headers): # pylint: disable=unused return prepare_us_receipt(analyze_result) @distributed_trace - def begin_recognize_receipts(self, stream, **kwargs): + def begin_recognize_receipts(self, receipt, **kwargs): # type: (Union[bytes, IO[bytes]], Any) -> LROPoller """Extract field text and semantic values from a given US sales receipt. The input document must be of one of the supported content types - 'application/pdf', 'image/jpeg', 'image/png' or 'image/tiff'. - :param stream: JPEG, PNG, PDF and TIFF type file stream or bytes. + :param receipt: JPEG, PNG, PDF and TIFF type file stream or bytes. Currently only supports US sales receipts. - :type stream: bytes or IO[bytes] + :type receipt: bytes or IO[bytes] :keyword bool include_text_content: Whether or not to include text elements such as lines and words in addition to form fields. :keyword str content_type: Media type of the body sent to the API. Content-type is @@ -116,10 +116,10 @@ def begin_recognize_receipts(self, stream, **kwargs): include_text_content = kwargs.pop("include_text_content", False) if content_type is None: - content_type = get_content_type(stream) + content_type = get_content_type(receipt) return self._client.begin_analyze_receipt_async( - file_stream=stream, + file_stream=receipt, content_type=content_type, include_text_details=include_text_content, cls=kwargs.pop("cls", self._receipt_callback), @@ -129,12 +129,12 @@ def begin_recognize_receipts(self, stream, **kwargs): ) @distributed_trace - def begin_recognize_receipts_from_url(self, url, **kwargs): + def begin_recognize_receipts_from_url(self, receipt_url, **kwargs): # type: (str, Any) -> LROPoller """Extract field text and semantic values from a given US sales receipt. The input document must be the location (Url) of the receipt to be analyzed. - :param str url: The url of the receipt to analyze. The input must be a valid, encoded url + :param str receipt_url: The url of the receipt to analyze. The input must be a valid, encoded url of one of the supported formats: JPEG, PNG, PDF and TIFF. Currently only supports US sales receipts. 
:keyword bool include_text_content: @@ -160,7 +160,7 @@ def begin_recognize_receipts_from_url(self, url, **kwargs): include_text_content = kwargs.pop("include_text_content", False) return self._client.begin_analyze_receipt_async( - file_stream={"source": url}, + file_stream={"source": receipt_url}, include_text_details=include_text_content, cls=kwargs.pop("cls", self._receipt_callback), polling=LROBasePolling(timeout=polling_interval, **kwargs), @@ -173,14 +173,14 @@ def _content_callback(self, raw_response, _, headers): # pylint: disable=unused return prepare_content_result(analyze_result) @distributed_trace - def begin_recognize_content(self, stream, **kwargs): + def begin_recognize_content(self, form, **kwargs): # type: (Union[bytes, IO[bytes]], Any) -> LROPoller """Extract text and content/layout information from a given document. The input document must be of one of the supported content types - 'application/pdf', 'image/jpeg', 'image/png' or 'image/tiff'. - :param stream: JPEG, PNG, PDF and TIFF type file stream or bytes. - :type stream: bytes or IO[bytes] + :param form: JPEG, PNG, PDF and TIFF type file stream or bytes. + :type form: bytes or IO[bytes] :keyword str content_type: Media type of the body sent to the API. Content-type is auto-detected, but can be overridden by passing this keyword argument. For options, see :class:`~azure.ai.formrecognizer.FormContentType`. @@ -207,10 +207,10 @@ def begin_recognize_content(self, stream, **kwargs): raise TypeError("Call begin_recognize_content_from_url() to analyze a document from a url.") if content_type is None: - content_type = get_content_type(stream) + content_type = get_content_type(form) return self._client.begin_analyze_layout_async( - file_stream=stream, + file_stream=form, content_type=content_type, cls=kwargs.pop("cls", self._content_callback), polling=LROBasePolling(timeout=polling_interval, **kwargs), @@ -219,12 +219,12 @@ def begin_recognize_content(self, stream, **kwargs): ) @distributed_trace - def begin_recognize_content_from_url(self, url, **kwargs): + def begin_recognize_content_from_url(self, form_url, **kwargs): # type: (str, Any) -> LROPoller """Extract text and layout information from a given document. The input document must be the location (Url) of the document to be analyzed. - :param str url: The url of the form to analyze. The input must be a valid, encoded url + :param str form_url: The url of the form to analyze. The input must be a valid, encoded url of one of the supported formats: JPEG, PNG, PDF and TIFF. :keyword int polling_interval: Waiting time between two polls for LRO operations if no Retry-After header is present. Defaults to 5 seconds. @@ -237,7 +237,7 @@ def begin_recognize_content_from_url(self, url, **kwargs): polling_interval = kwargs.pop("polling_interval", POLLING_INTERVAL) return self._client.begin_analyze_layout_async( - file_stream={"source": url}, + file_stream={"source": form_url}, cls=kwargs.pop("cls", self._content_callback), polling=LROBasePolling(timeout=polling_interval, **kwargs), error_map=error_map, @@ -245,7 +245,7 @@ def begin_recognize_content_from_url(self, url, **kwargs): ) @distributed_trace - def begin_recognize_custom_forms(self, model_id, stream, **kwargs): + def begin_recognize_custom_forms(self, model_id, form, **kwargs): # type: (str, Union[bytes, IO[bytes]], Any) -> LROPoller """Analyze a custom form with a model trained with or without labels. The form to analyze should be of the same type as the forms that were used to train the model. 
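
For illustration, a minimal call site under the renamed signature could look like the sketch below (the file path and `model_id` value are placeholders; it mirrors the updated samples later in this patch):

```python
with open("sample_forms/forms/Form_1.jpg", "rb") as f:
    poller = form_recognizer_client.begin_recognize_custom_forms(model_id=model_id, form=f.read())
recognized_forms = poller.result()
```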
@@ -253,8 +253,8 @@ def begin_recognize_custom_forms(self, model_id, stream, **kwargs): 'image/jpeg', 'image/png' or 'image/tiff'. :param str model_id: Custom model identifier. - :param stream: JPEG, PNG, PDF and TIFF type file stream or bytes. - :type stream: bytes or IO[bytes] + :param form: JPEG, PNG, PDF and TIFF type file stream or bytes. + :type form: bytes or IO[bytes] :keyword bool include_text_content: Whether or not to include text elements such as lines and words in addition to form fields. :keyword str content_type: Media type of the body sent to the API. Content-type is @@ -285,7 +285,7 @@ def begin_recognize_custom_forms(self, model_id, stream, **kwargs): include_text_content = kwargs.pop("include_text_content", False) if content_type is None: - content_type = get_content_type(stream) + content_type = get_content_type(form) def analyze_callback(raw_response, _, headers): # pylint: disable=unused-argument analyze_result = self._client._deserialize(AnalyzeOperationResult, raw_response) @@ -293,7 +293,7 @@ def analyze_callback(raw_response, _, headers): # pylint: disable=unused-argume deserialization_callback = cls if cls else analyze_callback return self._client.begin_analyze_with_custom_model( - file_stream=stream, + file_stream=form, model_id=model_id, include_text_details=include_text_content, content_type=content_type, @@ -304,14 +304,14 @@ def analyze_callback(raw_response, _, headers): # pylint: disable=unused-argume ) @distributed_trace - def begin_recognize_custom_forms_from_url(self, model_id, url, **kwargs): + def begin_recognize_custom_forms_from_url(self, model_id, form_url, **kwargs): # type: (str, str, Any) -> LROPoller """Analyze a custom form with a model trained with or without labels. The form to analyze should be of the same type as the forms that were used to train the model. The input document must be the location (Url) of the document to be analyzed. :param str model_id: Custom model identifier. - :param str url: The url of the form to analyze. The input must be a valid, encoded url + :param str form_url: The url of the form to analyze. The input must be a valid, encoded url of one of the supported formats: JPEG, PNG, PDF and TIFF. :keyword bool include_text_content: Whether or not to include text elements such as lines and words in addition to form fields. @@ -333,7 +333,7 @@ def analyze_callback(raw_response, _, headers): # pylint: disable=unused-argume deserialization_callback = cls if cls else analyze_callback return self._client.begin_analyze_with_custom_model( - file_stream={"source": url}, + file_stream={"source": form_url}, model_id=model_id, include_text_details=include_text_content, cls=deserialization_callback, diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/aio/_form_recognizer_client_async.py b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/aio/_form_recognizer_client_async.py index 215ce186a275..5a24374a718b 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/aio/_form_recognizer_client_async.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/aio/_form_recognizer_client_async.py @@ -89,16 +89,16 @@ def _receipt_callback(self, raw_response, _, headers): # pylint: disable=unused @distributed_trace_async async def recognize_receipts( self, - stream: Union[bytes, IO[bytes]], + receipt: Union[bytes, IO[bytes]], **kwargs: Any ) -> List["RecognizedReceipt"]: """Extract field text and semantic values from a given US sales receipt. 
The input document must be of one of the supported content types - 'application/pdf', 'image/jpeg', 'image/png' or 'image/tiff'. - :param stream: JPEG, PNG, PDF and TIFF type file stream or bytes. + :param receipt: JPEG, PNG, PDF and TIFF type file stream or bytes. Currently only supports US sales receipts. - :type stream: bytes or IO[bytes] + :type receipt: bytes or IO[bytes] :keyword bool include_text_content: Whether or not to include text elements such as lines and words in addition to form fields. :keyword str content_type: Media type of the body sent to the API. Content-type is @@ -128,10 +128,10 @@ async def recognize_receipts( include_text_content = kwargs.pop("include_text_content", False) if content_type is None: - content_type = get_content_type(stream) + content_type = get_content_type(receipt) return await self._client.analyze_receipt_async( # type: ignore - file_stream=stream, + file_stream=receipt, content_type=content_type, include_text_details=include_text_content, cls=kwargs.pop("cls", self._receipt_callback), @@ -143,13 +143,13 @@ async def recognize_receipts( @distributed_trace_async async def recognize_receipts_from_url( self, - url: str, + receipt_url: str, **kwargs: Any ) -> List["RecognizedReceipt"]: """Extract field text and semantic values from a given US sales receipt. The input document must be the location (Url) of the receipt to be analyzed. - :param str url: The url of the receipt to analyze. The input must be a valid, encoded url + :param str receipt_url: The url of the receipt to analyze. The input must be a valid, encoded url of one of the supported formats: JPEG, PNG, PDF and TIFF. Currently only supports US sales receipts. :keyword bool include_text_content: @@ -174,7 +174,7 @@ async def recognize_receipts_from_url( include_text_content = kwargs.pop("include_text_content", False) return await self._client.analyze_receipt_async( # type: ignore - file_stream={"source": url}, + file_stream={"source": receipt_url}, include_text_details=include_text_content, cls=kwargs.pop("cls", self._receipt_callback), polling=AsyncLROBasePolling(timeout=polling_interval, **kwargs), @@ -187,13 +187,13 @@ def _content_callback(self, raw_response, _, headers): # pylint: disable=unused return prepare_content_result(analyze_result) @distributed_trace_async - async def recognize_content(self, stream: Union[bytes, IO[bytes]], **kwargs: Any) -> List["FormPage"]: + async def recognize_content(self, form: Union[bytes, IO[bytes]], **kwargs: Any) -> List["FormPage"]: """Extract text and content/layout information from a given document. The input document must be of one of the supported content types - 'application/pdf', 'image/jpeg', 'image/png' or 'image/tiff'. - :param stream: JPEG, PNG, PDF and TIFF type file stream or bytes. - :type stream: bytes or IO[bytes] + :param form: JPEG, PNG, PDF and TIFF type file stream or bytes. + :type form: bytes or IO[bytes] :keyword str content_type: Media type of the body sent to the API. Content-type is auto-detected, but can be overridden by passing this keyword argument. For options, see :class:`~azure.ai.formrecognizer.FormContentType`. 
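
Likewise, a sketch of the renamed async content call (assuming `form_recognizer_client` is an open `FormRecognizerClient` from `azure.ai.formrecognizer.aio`; the file path is a placeholder):

```python
# inside an async function, with the client opened via "async with"
with open("sample_forms/forms/Invoice_1.pdf", "rb") as f:
    contents = await form_recognizer_client.recognize_content(form=f.read())
```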
@@ -219,10 +219,10 @@ async def recognize_content(self, stream: Union[bytes, IO[bytes]], **kwargs: Any raise TypeError("Call begin_recognize_content_from_url() to analyze a document from a url.") if content_type is None: - content_type = get_content_type(stream) + content_type = get_content_type(form) return await self._client.analyze_layout_async( # type: ignore - file_stream=stream, + file_stream=form, content_type=content_type, cls=kwargs.pop("cls", self._content_callback), polling=AsyncLROBasePolling(timeout=polling_interval, **kwargs), @@ -231,11 +231,11 @@ async def recognize_content(self, stream: Union[bytes, IO[bytes]], **kwargs: Any ) @distributed_trace_async - async def recognize_content_from_url(self, url: str, **kwargs: Any) -> List["FormPage"]: + async def recognize_content_from_url(self, form_url: str, **kwargs: Any) -> List["FormPage"]: """Extract text and layout information from a given document. The input document must be the location (Url) of the document to be analyzed. - :param str url: The url of the form to analyze. The input must be a valid, encoded url + :param str form_url: The url of the form to analyze. The input must be a valid, encoded url of one of the supported formats: JPEG, PNG, PDF and TIFF. :keyword int polling_interval: Waiting time between two polls for LRO operations if no Retry-After header is present. Defaults to 5 seconds. @@ -246,7 +246,7 @@ async def recognize_content_from_url(self, url: str, **kwargs: Any) -> List["For polling_interval = kwargs.pop("polling_interval", POLLING_INTERVAL) return await self._client.analyze_layout_async( # type: ignore - file_stream={"source": url}, + file_stream={"source": form_url}, cls=kwargs.pop("cls", self._content_callback), polling=AsyncLROBasePolling(timeout=polling_interval, **kwargs), error_map=error_map, @@ -257,7 +257,7 @@ async def recognize_content_from_url(self, url: str, **kwargs: Any) -> List["For async def recognize_custom_forms( self, model_id: str, - stream: Union[bytes, IO[bytes]], + form: Union[bytes, IO[bytes]], **kwargs: Any ) -> List["RecognizedForm"]: """Analyze a custom form with a model trained with or without labels. The form @@ -266,8 +266,8 @@ async def recognize_custom_forms( 'image/jpeg', 'image/png' or 'image/tiff'. :param str model_id: Custom model identifier. - :param stream: JPEG, PNG, PDF and TIFF type file stream or bytes. - :type stream: bytes or IO[bytes] + :param form: JPEG, PNG, PDF and TIFF type file stream or bytes. + :type form: bytes or IO[bytes] :keyword bool include_text_content: Whether or not to include text elements such as lines and words in addition to form fields. :keyword str content_type: Media type of the body sent to the API. 
Content-type is @@ -298,7 +298,7 @@ async def recognize_custom_forms( include_text_content = kwargs.pop("include_text_content", False) if content_type is None: - content_type = get_content_type(stream) + content_type = get_content_type(form) def analyze_callback(raw_response, _, headers): # pylint: disable=unused-argument analyze_result = self._client._deserialize(AnalyzeOperationResult, raw_response) @@ -306,7 +306,7 @@ def analyze_callback(raw_response, _, headers): # pylint: disable=unused-argume deserialization_callback = cls if cls else analyze_callback return await self._client.analyze_with_custom_model( # type: ignore - file_stream=stream, + file_stream=form, model_id=model_id, include_text_details=include_text_content, content_type=content_type, @@ -320,7 +320,7 @@ def analyze_callback(raw_response, _, headers): # pylint: disable=unused-argume async def recognize_custom_forms_from_url( self, model_id: str, - url: str, + form_url: str, **kwargs: Any ) -> List["RecognizedForm"]: """Analyze a custom form with a model trained with or without labels. The form @@ -328,7 +328,7 @@ async def recognize_custom_forms_from_url( The input document must be the location (Url) of the document to be analyzed. :param str model_id: Custom model identifier. - :param str url: The url of the form to analyze. The input must be a valid, encoded url + :param str form_url: The url of the form to analyze. The input must be a valid, encoded url of one of the supported formats: JPEG, PNG, PDF and TIFF. :keyword bool include_text_content: Whether or not to include text elements such as lines and words in addition to form fields. @@ -349,7 +349,7 @@ def analyze_callback(raw_response, _, headers): # pylint: disable=unused-argume deserialization_callback = cls if cls else analyze_callback return await self._client.analyze_with_custom_model( # type: ignore - file_stream={"source": url}, + file_stream={"source": form_url}, model_id=model_id, include_text_details=include_text_content, cls=deserialization_callback, diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/async_samples/sample_differentiate_output_models_trained_with_and_without_labels_async.py b/sdk/formrecognizer/azure-ai-formrecognizer/samples/async_samples/sample_differentiate_output_models_trained_with_and_without_labels_async.py index f8fb42fb4571..fc49182b4054 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/samples/async_samples/sample_differentiate_output_models_trained_with_and_without_labels_async.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/async_samples/sample_differentiate_output_models_trained_with_and_without_labels_async.py @@ -41,18 +41,20 @@ class DifferentiateOutputModelsTrainedWithAndWithoutLabelsSampleAsync(object): async def recognize_custom_forms(self): from azure.core.credentials import AzureKeyCredential from azure.ai.formrecognizer.aio import FormRecognizerClient + + path_to_sample_forms = os.path.abspath(os.path.join(os.path.abspath(__file__), "..", "..", "./sample_forms/forms/Form_1.jpg")) async with FormRecognizerClient( endpoint=self.endpoint, credential=AzureKeyCredential(self.key) ) as form_recognizer_client: # Make sure your form's type is included in the list of form types the custom model can recognize - with open("sample_forms/forms/Form_1.jpg", "rb") as f: + with open(path_to_sample_forms, "rb") as f: stream = f.read() forms_with_labeled_model = await form_recognizer_client.recognize_custom_forms( - model_id=self.model_trained_with_labels_id, stream=stream + 
model_id=self.model_trained_with_labels_id, form=stream ) forms_with_unlabeled_model = await form_recognizer_client.recognize_custom_forms( - model_id=self.model_trained_without_labels_id, stream=stream + model_id=self.model_trained_without_labels_id, form=stream ) # With a form recognized by a model trained with labels, this 'name' key will be its diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/async_samples/sample_get_bounding_boxes_async.py b/sdk/formrecognizer/azure-ai-formrecognizer/samples/async_samples/sample_get_bounding_boxes_async.py index 5c7b80d05f1b..b1f77c26cb0b 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/samples/async_samples/sample_get_bounding_boxes_async.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/async_samples/sample_get_bounding_boxes_async.py @@ -23,7 +23,6 @@ import os import asyncio -from pathlib import Path def format_bounding_box(bounding_box): @@ -38,8 +37,7 @@ class GetBoundingBoxesSampleAsync(object): model_id = os.environ["CUSTOM_TRAINED_MODEL_ID"] async def get_bounding_boxes(self): - # the sample forms are located in this file's parent's parent's files. - path_to_sample_forms = Path(__file__).parent.parent.absolute() / Path("sample_forms/forms/Form_1.jpg") + path_to_sample_forms = os.path.abspath(os.path.join(os.path.abspath(__file__), "..", "..", "./sample_forms/forms/Form_1.jpg")) from azure.ai.formrecognizer import FormWord, FormLine from azure.core.credentials import AzureKeyCredential from azure.ai.formrecognizer.aio import FormRecognizerClient @@ -52,7 +50,7 @@ async def get_bounding_boxes(self): # Make sure your form's type is included in the list of form types the custom model can recognize with open(path_to_sample_forms, "rb") as f: forms = await form_recognizer_client.recognize_custom_forms( - model_id=self.model_id, stream=f.read(), include_text_content=True + model_id=self.model_id, form=f.read(), include_text_content=True ) for idx, form in enumerate(forms): diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/async_samples/sample_recognize_content_async.py b/sdk/formrecognizer/azure-ai-formrecognizer/samples/async_samples/sample_recognize_content_async.py index 2f51830f7aae..1d402543f1fb 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/samples/async_samples/sample_recognize_content_async.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/async_samples/sample_recognize_content_async.py @@ -23,7 +23,6 @@ import os import asyncio -from pathlib import Path def format_bounding_box(bounding_box): @@ -37,8 +36,7 @@ class RecognizeContentSampleAsync(object): key = os.environ["AZURE_FORM_RECOGNIZER_KEY"] async def recognize_content(self): - # the sample forms are located in this file's parent's parent's files. 
- path_to_sample_forms = Path(__file__).parent.parent.absolute() / Path("sample_forms/forms/Invoice_1.pdf") + path_to_sample_forms = os.path.abspath(os.path.join(os.path.abspath(__file__), "..", "..", "./sample_forms/forms/Invoice_1.pdf")) # [START recognize_content_async] from azure.core.credentials import AzureKeyCredential from azure.ai.formrecognizer.aio import FormRecognizerClient @@ -47,7 +45,7 @@ async def recognize_content(self): ) as form_recognizer_client: with open(path_to_sample_forms, "rb") as f: - contents = await form_recognizer_client.recognize_content(stream=f.read()) + contents = await form_recognizer_client.recognize_content(form=f.read()) for idx, content in enumerate(contents): print("----Recognizing content from page #{}----".format(idx)) diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/async_samples/sample_recognize_custom_forms_async.py b/sdk/formrecognizer/azure-ai-formrecognizer/samples/async_samples/sample_recognize_custom_forms_async.py index 989b9a8439e9..2b87cd737372 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/samples/async_samples/sample_recognize_custom_forms_async.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/async_samples/sample_recognize_custom_forms_async.py @@ -35,8 +35,7 @@ class RecognizeCustomFormsSampleAsync(object): model_id = os.environ["CUSTOM_TRAINED_MODEL_ID"] async def recognize_custom_forms(self): - # the sample forms are located in this file's parent's parent's files. - path_to_sample_forms = Path(__file__).parent.parent.absolute() / Path("sample_forms/forms/Form_1.jpg") + path_to_sample_forms = os.path.abspath(os.path.join(os.path.abspath(__file__), "..", "..", "./sample_forms/forms/Form_1.jpg")) # [START recognize_custom_forms_async] from azure.core.credentials import AzureKeyCredential from azure.ai.formrecognizer.aio import FormRecognizerClient @@ -47,7 +46,7 @@ async def recognize_custom_forms(self): # Make sure your form's type is included in the list of form types the custom model can recognize with open(path_to_sample_forms, "rb") as f: forms = await form_recognizer_client.recognize_custom_forms( - model_id=self.model_id, stream=f.read() + model_id=self.model_id, form=f.read() ) for idx, form in enumerate(forms): diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/async_samples/sample_recognize_receipts_async.py b/sdk/formrecognizer/azure-ai-formrecognizer/samples/async_samples/sample_recognize_receipts_async.py index 8046443fbf6a..4135184bb7e0 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/samples/async_samples/sample_recognize_receipts_async.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/async_samples/sample_recognize_receipts_async.py @@ -31,8 +31,7 @@ class RecognizeReceiptsSampleAsync(object): key = os.environ["AZURE_FORM_RECOGNIZER_KEY"] async def recognize_receipts(self): - # the sample forms are located in this file's parent's parent's files. 
-        path_to_sample_forms = Path(__file__).parent.parent.absolute() / Path("sample_forms/receipt/contoso-allinone.jpg")
+        path_to_sample_forms = os.path.abspath(os.path.join(os.path.abspath(__file__), "..", "..", "./sample_forms/receipt/contoso-allinone.jpg"))
         # [START recognize_receipts_async]
         from azure.core.credentials import AzureKeyCredential
         from azure.ai.formrecognizer.aio import FormRecognizerClient
@@ -41,7 +40,7 @@ async def recognize_receipts(self):
         ) as form_recognizer_client:
 
             with open(path_to_sample_forms, "rb") as f:
-                receipts = await form_recognizer_client.recognize_receipts(stream=f.read())
+                receipts = await form_recognizer_client.recognize_receipts(receipt=f.read())
 
             for idx, receipt in enumerate(receipts):
                 print("--------Recognizing receipt #{}--------".format(idx))
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/async_samples/sample_recognize_receipts_from_url_async.py b/sdk/formrecognizer/azure-ai-formrecognizer/samples/async_samples/sample_recognize_receipts_from_url_async.py
index 688b07da1997..2e5c49216290 100644
--- a/sdk/formrecognizer/azure-ai-formrecognizer/samples/async_samples/sample_recognize_receipts_from_url_async.py
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/async_samples/sample_recognize_receipts_from_url_async.py
@@ -37,7 +37,7 @@ async def recognize_receipts_from_url(self):
             endpoint=self.endpoint, credential=AzureKeyCredential(self.key)
         ) as form_recognizer_client:
             url = "https://raw.githubusercontent.com/Azure/azure-sdk-for-python/master/sdk/formrecognizer/azure-ai-formrecognizer/tests/sample_forms/receipt/contoso-receipt.png"
-            receipts = await form_recognizer_client.recognize_receipts_from_url(url=url)
+            receipts = await form_recognizer_client.recognize_receipts_from_url(receipt_url=url)
 
             for idx, receipt in enumerate(receipts):
                 print("--------Recognizing receipt #{}--------".format(idx))
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/sample_differentiate_output_models_trained_with_and_without_labels.py b/sdk/formrecognizer/azure-ai-formrecognizer/samples/sample_differentiate_output_models_trained_with_and_without_labels.py
index 7ef68f1a4070..b9a66e4cd484 100644
--- a/sdk/formrecognizer/azure-ai-formrecognizer/samples/sample_differentiate_output_models_trained_with_and_without_labels.py
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/sample_differentiate_output_models_trained_with_and_without_labels.py
@@ -49,10 +49,10 @@ def recognize_custom_forms(self):
         with open("sample_forms/forms/Form_1.jpg", "rb") as f:
             stream = f.read()
             forms_with_labeled_model_poller = form_recognizer_client.begin_recognize_custom_forms(
-                model_id=self.model_trained_with_labels_id, stream=stream
+                model_id=self.model_trained_with_labels_id, form=stream
             )
             forms_with_unlabeled_model_poller = form_recognizer_client.begin_recognize_custom_forms(
-                model_id=self.model_trained_without_labels_id, stream=stream
+                model_id=self.model_trained_without_labels_id, form=stream
             )
 
             # Calling result after kicking off each call allows for server-side parallelization
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/sample_get_bounding_boxes.py b/sdk/formrecognizer/azure-ai-formrecognizer/samples/sample_get_bounding_boxes.py
index 0924014c7bdc..55acc5189ed2 100644
--- a/sdk/formrecognizer/azure-ai-formrecognizer/samples/sample_get_bounding_boxes.py
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/sample_get_bounding_boxes.py
@@ -46,7 +46,7 @@ def get_bounding_boxes(self):
         # Make sure your form's type is included in the 
list of form types the custom model can recognize with open("sample_forms/forms/Form_1.jpg", "rb") as f: poller = form_recognizer_client.begin_recognize_custom_forms( - model_id=self.model_id, stream=f, include_text_content=True + model_id=self.model_id, form=f, include_text_content=True ) forms = poller.result() diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/sample_recognize_content.py b/sdk/formrecognizer/azure-ai-formrecognizer/samples/sample_recognize_content.py index cffed4bb3135..7da45debdd95 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/samples/sample_recognize_content.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/sample_recognize_content.py @@ -38,7 +38,7 @@ def recognize_content(self): from azure.ai.formrecognizer import FormRecognizerClient form_recognizer_client = FormRecognizerClient(endpoint=self.endpoint, credential=AzureKeyCredential(self.key)) with open("sample_forms/forms/Invoice_1.pdf", "rb") as f: - poller = form_recognizer_client.begin_recognize_content(stream=f) + poller = form_recognizer_client.begin_recognize_content(form=f) contents = poller.result() for idx, content in enumerate(contents): diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/sample_recognize_custom_forms.py b/sdk/formrecognizer/azure-ai-formrecognizer/samples/sample_recognize_custom_forms.py index e7298a6b3a37..81f7d3161992 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/samples/sample_recognize_custom_forms.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/sample_recognize_custom_forms.py @@ -43,7 +43,7 @@ def recognize_custom_forms(self): # Make sure your form's type is included in the list of form types the custom model can recognize with open("sample_forms/forms/Form_1.jpg", "rb") as f: poller = form_recognizer_client.begin_recognize_custom_forms( - model_id=self.model_id, stream=f + model_id=self.model_id, form=f ) forms = poller.result() diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/sample_recognize_receipts.py b/sdk/formrecognizer/azure-ai-formrecognizer/samples/sample_recognize_receipts.py index 8adc6c4c7814..6df1b641aae6 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/samples/sample_recognize_receipts.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/sample_recognize_receipts.py @@ -36,7 +36,7 @@ def recognize_receipts(self): endpoint=self.endpoint, credential=AzureKeyCredential(self.key) ) with open("sample_forms/receipt/contoso-allinone.jpg", "rb") as f: - poller = form_recognizer_client.begin_recognize_receipts(stream=f) + poller = form_recognizer_client.begin_recognize_receipts(receipt=f) receipts = poller.result() for idx, receipt in enumerate(receipts): diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/sample_recognize_receipts_from_url.py b/sdk/formrecognizer/azure-ai-formrecognizer/samples/sample_recognize_receipts_from_url.py index c83451078441..332897b9bdfc 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/samples/sample_recognize_receipts_from_url.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/sample_recognize_receipts_from_url.py @@ -36,7 +36,7 @@ def recognize_receipts_from_url(self): endpoint=self.endpoint, credential=AzureKeyCredential(self.key) ) url = "https://raw.githubusercontent.com/Azure/azure-sdk-for-python/master/sdk/formrecognizer/azure-ai-formrecognizer/tests/sample_forms/receipt/contoso-receipt.png" - poller = form_recognizer_client.begin_recognize_receipts_from_url(url=url) + poller = 
form_recognizer_client.begin_recognize_receipts_from_url(receipt_url=url) receipts = poller.result() for idx, receipt in enumerate(receipts): diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_custom_forms.py b/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_custom_forms.py index e21805a9549f..2eecb77a3e8a 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_custom_forms.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_custom_forms.py @@ -25,20 +25,20 @@ def test_custom_form_bad_endpoint(self, resource_group, location, form_recognize myfile = fd.read() with self.assertRaises(ServiceRequestError): client = FormRecognizerClient("http://notreal.azure.com", AzureKeyCredential(form_recognizer_account_key)) - poller = client.begin_recognize_custom_forms(model_id="xx", stream=myfile) + poller = client.begin_recognize_custom_forms(model_id="xx", form=myfile) @GlobalFormRecognizerAccountPreparer() def test_authentication_bad_key(self, resource_group, location, form_recognizer_account, form_recognizer_account_key): client = FormRecognizerClient(form_recognizer_account, AzureKeyCredential("xxxx")) with self.assertRaises(ClientAuthenticationError): - poller = client.begin_recognize_custom_forms(model_id="xx", stream=b"xx", content_type="image/jpeg") + poller = client.begin_recognize_custom_forms(model_id="xx", form=b"xx", content_type="image/jpeg") @GlobalFormRecognizerAccountPreparer() def test_passing_unsupported_url_content_type(self, resource_group, location, form_recognizer_account, form_recognizer_account_key): client = FormRecognizerClient(form_recognizer_account, AzureKeyCredential(form_recognizer_account_key)) with self.assertRaises(TypeError): - poller = client.begin_recognize_custom_forms(model_id="xx", stream="https://badurl.jpg", content_type="application/json") + poller = client.begin_recognize_custom_forms(model_id="xx", form="https://badurl.jpg", content_type="application/json") @GlobalFormRecognizerAccountPreparer() def test_auto_detect_unsupported_stream_content(self, resource_group, location, form_recognizer_account, form_recognizer_account_key): @@ -50,7 +50,7 @@ def test_auto_detect_unsupported_stream_content(self, resource_group, location, with self.assertRaises(ValueError): poller = client.begin_recognize_custom_forms( model_id="xxx", - stream=myfile, + form=myfile, ) @GlobalFormRecognizerAccountPreparer() diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_custom_forms_async.py b/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_custom_forms_async.py index 6c0fe5b94b7e..aa7c002fc709 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_custom_forms_async.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_custom_forms_async.py @@ -27,20 +27,20 @@ async def test_custom_form_bad_endpoint(self, resource_group, location, form_rec myfile = fd.read() with self.assertRaises(ServiceRequestError): client = FormRecognizerClient("http://notreal.azure.com", AzureKeyCredential(form_recognizer_account_key)) - result = await client.recognize_custom_forms(model_id="xx", stream=myfile) + result = await client.recognize_custom_forms(model_id="xx", form=myfile) @GlobalFormRecognizerAccountPreparer() async def test_authentication_bad_key(self, resource_group, location, form_recognizer_account, form_recognizer_account_key): client = FormRecognizerClient(form_recognizer_account, AzureKeyCredential("xxxx")) with self.assertRaises(ClientAuthenticationError): - result = await 
client.recognize_custom_forms(model_id="xx", stream=b"xx", content_type="image/jpeg") + result = await client.recognize_custom_forms(model_id="xx", form=b"xx", content_type="image/jpeg") @GlobalFormRecognizerAccountPreparer() async def test_passing_unsupported_url_content_type(self, resource_group, location, form_recognizer_account, form_recognizer_account_key): client = FormRecognizerClient(form_recognizer_account, AzureKeyCredential(form_recognizer_account_key)) with self.assertRaises(TypeError): - result = await client.recognize_custom_forms(model_id="xx", stream="https://badurl.jpg", content_type="application/json") + result = await client.recognize_custom_forms(model_id="xx", form="https://badurl.jpg", content_type="application/json") @GlobalFormRecognizerAccountPreparer() async def test_auto_detect_unsupported_stream_content(self, resource_group, location, form_recognizer_account, form_recognizer_account_key): @@ -52,7 +52,7 @@ async def test_auto_detect_unsupported_stream_content(self, resource_group, loca with self.assertRaises(ValueError): poller = await client.recognize_custom_forms( model_id="xxx", - stream=myfile, + form=myfile, ) @GlobalFormRecognizerAccountPreparer() diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_custom_forms_from_url.py b/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_custom_forms_from_url.py index 4be0b5913b5c..2b7182247b9e 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_custom_forms_from_url.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_custom_forms_from_url.py @@ -23,20 +23,20 @@ class TestCustomFormsFromUrl(FormRecognizerTest): def test_custom_form_url_bad_endpoint(self, resource_group, location, form_recognizer_account, form_recognizer_account_key): with self.assertRaises(ServiceRequestError): client = FormRecognizerClient("http://notreal.azure.com", AzureKeyCredential(form_recognizer_account_key)) - result = client.begin_recognize_custom_forms_from_url(model_id="xx", url=self.form_url_jpg) + result = client.begin_recognize_custom_forms_from_url(model_id="xx", form_url=self.form_url_jpg) @GlobalFormRecognizerAccountPreparer() def test_url_authentication_bad_key(self, resource_group, location, form_recognizer_account, form_recognizer_account_key): client = FormRecognizerClient(form_recognizer_account, AzureKeyCredential("xxxx")) with self.assertRaises(ClientAuthenticationError): - result = client.begin_recognize_custom_forms_from_url(model_id="xx", url=self.form_url_jpg) + result = client.begin_recognize_custom_forms_from_url(model_id="xx", form_url=self.form_url_jpg) @GlobalFormRecognizerAccountPreparer() def test_passing_bad_url(self, resource_group, location, form_recognizer_account, form_recognizer_account_key): client = FormRecognizerClient(form_recognizer_account, AzureKeyCredential(form_recognizer_account_key)) with self.assertRaises(HttpResponseError): - poller = client.begin_recognize_custom_forms_from_url(model_id="xx", url="https://badurl.jpg") + poller = client.begin_recognize_custom_forms_from_url(model_id="xx", form_url="https://badurl.jpg") @GlobalFormRecognizerAccountPreparer() def test_pass_stream_into_url(self, resource_group, location, form_recognizer_account, form_recognizer_account_key): @@ -46,7 +46,7 @@ def test_pass_stream_into_url(self, resource_group, location, form_recognizer_ac with self.assertRaises(HttpResponseError): poller = client.begin_recognize_custom_forms_from_url( model_id="xxx", - url=fd, + form_url=fd, ) @GlobalFormRecognizerAccountPreparer() @@ -60,7 
+60,7 @@ def test_custom_form_bad_url(self, client, container_sas_url): with self.assertRaises(HttpResponseError): poller = fr_client.begin_recognize_custom_forms_from_url( model.model_id, - url="https://badurl.jpg" + form_url="https://badurl.jpg" ) form = poller.result() diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_custom_forms_from_url_async.py b/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_custom_forms_from_url_async.py index 3c196abf0b2e..32a7fd801e25 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_custom_forms_from_url_async.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_custom_forms_from_url_async.py @@ -23,20 +23,20 @@ class TestCustomFormsFromUrlAsync(AsyncFormRecognizerTest): async def test_custom_form_url_bad_endpoint(self, resource_group, location, form_recognizer_account, form_recognizer_account_key): with self.assertRaises(ServiceRequestError): client = FormRecognizerClient("http://notreal.azure.com", AzureKeyCredential(form_recognizer_account_key)) - result = await client.recognize_custom_forms_from_url(model_id="xx", url=self.form_url_jpg) + result = await client.recognize_custom_forms_from_url(model_id="xx", form_url=self.form_url_jpg) @GlobalFormRecognizerAccountPreparer() async def test_url_authentication_bad_key(self, resource_group, location, form_recognizer_account, form_recognizer_account_key): client = FormRecognizerClient(form_recognizer_account, AzureKeyCredential("xxxx")) with self.assertRaises(ClientAuthenticationError): - result = await client.recognize_custom_forms_from_url(model_id="xx", url=self.form_url_jpg) + result = await client.recognize_custom_forms_from_url(model_id="xx", form_url=self.form_url_jpg) @GlobalFormRecognizerAccountPreparer() async def test_passing_bad_url(self, resource_group, location, form_recognizer_account, form_recognizer_account_key): client = FormRecognizerClient(form_recognizer_account, AzureKeyCredential(form_recognizer_account_key)) with self.assertRaises(HttpResponseError): - result = await client.recognize_custom_forms_from_url(model_id="xx", url="https://badurl.jpg") + result = await client.recognize_custom_forms_from_url(model_id="xx", form_url="https://badurl.jpg") @GlobalFormRecognizerAccountPreparer() async def test_pass_stream_into_url(self, resource_group, location, form_recognizer_account, form_recognizer_account_key): @@ -46,7 +46,7 @@ async def test_pass_stream_into_url(self, resource_group, location, form_recogni with self.assertRaises(HttpResponseError): result = await client.recognize_custom_forms_from_url( model_id="xxx", - url=fd, + form_url=fd, ) @GlobalFormRecognizerAccountPreparer() @@ -59,7 +59,7 @@ async def test_form_bad_url(self, client, container_sas_url): with self.assertRaises(HttpResponseError): form = await fr_client.recognize_custom_forms_from_url( model.model_id, - url="https://badurl.jpg" + form_url="https://badurl.jpg" ) @GlobalFormRecognizerAccountPreparer() diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_receipt.py b/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_receipt.py index bbe1a6fc0d23..5273ddb654bc 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_receipt.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_receipt.py @@ -144,7 +144,7 @@ def callback(raw_response, _, headers): myfile = fd.read() poller = client.begin_recognize_receipts( - stream=myfile, + receipt=myfile, include_text_content=True, cls=callback ) @@ -209,7 +209,7 @@ def callback(raw_response, _, headers): 
myfile = fd.read() poller = client.begin_recognize_receipts( - stream=myfile, + receipt=myfile, include_text_content=True, cls=callback ) @@ -379,7 +379,7 @@ def callback(raw_response, _, headers): myfile = fd.read() poller = client.begin_recognize_receipts( - stream=myfile, + receipt=myfile, include_text_content=True, cls=callback ) diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_receipt_async.py b/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_receipt_async.py index c6cdc981a616..f66a582a1ef9 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_receipt_async.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_receipt_async.py @@ -143,7 +143,7 @@ def callback(raw_response, _, headers): myfile = fd.read() result = await client.recognize_receipts( - stream=myfile, + receipt=myfile, include_text_content=True, cls=callback ) @@ -207,7 +207,7 @@ def callback(raw_response, _, headers): myfile = fd.read() result = await client.recognize_receipts( - stream=myfile, + receipt=myfile, include_text_content=True, cls=callback ) @@ -373,7 +373,7 @@ def callback(raw_response, _, headers): myfile = fd.read() result = await client.recognize_receipts( - stream=myfile, + receipt=myfile, include_text_content=True, cls=callback ) diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_receipt_from_url.py b/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_receipt_from_url.py index d54d5117bfa9..241715bd34ad 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_receipt_from_url.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_receipt_from_url.py @@ -70,7 +70,7 @@ def callback(raw_response, _, headers): responses.append(extracted_receipt) poller = client.begin_recognize_receipts_from_url( - url=self.receipt_url_jpg, + receipt_url=self.receipt_url_jpg, include_text_content=True, cls=callback ) @@ -132,7 +132,7 @@ def callback(raw_response, _, headers): responses.append(extracted_receipt) poller = client.begin_recognize_receipts_from_url( - url=self.receipt_url_png, + receipt_url=self.receipt_url_png, include_text_content=True, cls=callback ) From 30e60ef9dcc54c63d773787227a49f00ebf3f729 Mon Sep 17 00:00:00 2001 From: Azure SDK Bot <53356347+azure-sdk@users.noreply.github.com> Date: Tue, 19 May 2020 15:36:36 -0700 Subject: [PATCH 15/28] Sync eng/common directory with azure-sdk-tools repository (#11472) --- .../templates/steps/docs-metadata-release.yml | 3 --- eng/common/scripts/artifact-metadata-parsing.ps1 | 12 ++++++------ eng/common/scripts/create-tags-and-git-release.ps1 | 4 ++-- eng/common/scripts/update-docs-metadata.ps1 | 2 +- 4 files changed, 9 insertions(+), 12 deletions(-) diff --git a/eng/common/pipelines/templates/steps/docs-metadata-release.yml b/eng/common/pipelines/templates/steps/docs-metadata-release.yml index 3ffdd5ec60a6..89d12d4ac1d1 100644 --- a/eng/common/pipelines/templates/steps/docs-metadata-release.yml +++ b/eng/common/pipelines/templates/steps/docs-metadata-release.yml @@ -56,6 +56,3 @@ steps: BaseBranchName: smoke-test WorkingDirectory: ${{parameters.WorkingDirectory}}/repo ScriptDirectory: ${{parameters.WorkingDirectory}}/${{parameters.ScriptDirectory}} - - - diff --git a/eng/common/scripts/artifact-metadata-parsing.ps1 b/eng/common/scripts/artifact-metadata-parsing.ps1 index 51ee8eb56b84..66e090ed6f6d 100644 --- a/eng/common/scripts/artifact-metadata-parsing.ps1 +++ b/eng/common/scripts/artifact-metadata-parsing.ps1 @@ -358,7 +358,7 @@ function GetExistingTags($apiUrl) { } # Walk across all 
build artifacts, check them against the appropriate repository, return a list of tags/releases
-function VerifyPackages($pkgRepository, $artifactLocation, $workingDirectory, $apiUrl, $releaseSha, $exitOnError = $True) {
+function VerifyPackages($pkgRepository, $artifactLocation, $workingDirectory, $apiUrl, $releaseSha, $continueOnError = $false) {
   $pkgList = [array]@()
   $ParsePkgInfoFn = ""
   $packagePattern = ""
@@ -404,7 +404,7 @@ function VerifyPackages($pkgRepository, $artifactLocation, $workingDirectory, $a
       continue
     }
 
-    if ($parsedPackage.Deployable -ne $True -and $exitOnError) {
+    if ($parsedPackage.Deployable -ne $True -and !$continueOnError) {
       Write-Host "Package $($parsedPackage.PackageId) is marked with version $($parsedPackage.PackageVersion), the version $($parsedPackage.PackageVersion) has already been deployed to the target repository."
       Write-Host "Maybe a pkg version wasn't updated properly?"
       exit(1)
@@ -430,8 +430,8 @@ function VerifyPackages($pkgRepository, $artifactLocation, $workingDirectory, $a
 
   $intersect = $results | % { $_.Tag } | ? { $existingTags -contains $_ }
 
-  if ($intersect.Length -gt 0 -and $exitOnError) {
-    CheckArtifactShaAgainstTagsList -priorExistingTagList $intersect -releaseSha $releaseSha -apiUrl $apiUrl -exitOnError $exitOnError
+  if ($intersect.Length -gt 0 -and !$continueOnError) {
+    CheckArtifactShaAgainstTagsList -priorExistingTagList $intersect -releaseSha $releaseSha -apiUrl $apiUrl -continueOnError $continueOnError
 
     # all the tags are clean. remove them from the list of releases we will publish.
     $results = $results | ? { -not ($intersect -contains $_.Tag ) }
@@ -443,7 +443,7 @@ function VerifyPackages($pkgRepository, $artifactLocation, $workingDirectory, $a
 # given a set of tags that we want to release, we need to check whether they already exist.
 # if they DO exist, quietly exit if the commit sha of the artifact matches that of the tag
 # if the commit sha does not match, exit with error and report both problem shas
-function CheckArtifactShaAgainstTagsList($priorExistingTagList, $releaseSha, $apiUrl, $exitOnError) {
+function CheckArtifactShaAgainstTagsList($priorExistingTagList, $releaseSha, $apiUrl, $continueOnError) {
   $headers = @{
     "Content-Type" = "application/json"
     "Authorization" = "token $($env:GH_TOKEN)"
@@ -465,7 +465,7 @@ function CheckArtifactShaAgainstTagsList($priorExistingTagList, $releaseSha, $ap
     }
   }
 
-  if ($unmatchedTags.Length -gt 0 -and $exitOnError) {
+  if ($unmatchedTags.Length -gt 0 -and !$continueOnError) {
     Write-Host "Tags already existing with different SHA versions. Exiting."
     exit(1)
   }
diff --git a/eng/common/scripts/create-tags-and-git-release.ps1 b/eng/common/scripts/create-tags-and-git-release.ps1
index 56e3f22a4274..f87c90997839 100644
--- a/eng/common/scripts/create-tags-and-git-release.ps1
+++ b/eng/common/scripts/create-tags-and-git-release.ps1
@@ -15,7 +15,7 @@ param (
   $repoOwner = "", # the owning organization of the repository. EG "Azure"
   $repoName = "", # the name of the repository. EG "azure-sdk-for-java"
   $repoId = "$repoOwner/$repoName", # full repo id.
EG azure/azure-sdk-for-net DevOps: $(Build.Repository.Id), - [switch]$forceCreate = $false + [switch]$continueOnError = $false ) Write-Host "> $PSCommandPath $args" @@ -26,7 +26,7 @@ $apiUrl = "https://api.github.com/repos/$repoId" Write-Host "Using API URL $apiUrl" # VERIFY PACKAGES -$pkgList = VerifyPackages -pkgRepository $packageRepository -artifactLocation $artifactLocation -workingDirectory $workingDirectory -apiUrl $apiUrl -releaseSha $releaseSha +$pkgList = VerifyPackages -pkgRepository $packageRepository -artifactLocation $artifactLocation -workingDirectory $workingDirectory -apiUrl $apiUrl -releaseSha $releaseSha -continueOnError $continueOnError if ($pkgList) { Write-Host "Given the visible artifacts, github releases will be created for the following:" diff --git a/eng/common/scripts/update-docs-metadata.ps1 b/eng/common/scripts/update-docs-metadata.ps1 index a162cbf8514e..b4cc2ac7849e 100644 --- a/eng/common/scripts/update-docs-metadata.ps1 +++ b/eng/common/scripts/update-docs-metadata.ps1 @@ -86,7 +86,7 @@ $pkgs = VerifyPackages -pkgRepository $Repository ` -workingDirectory $WorkDirectory ` -apiUrl $apiUrl ` -releaseSha $ReleaseSHA ` - -exitOnError $False + -continueOnError $True if ($pkgs) { Write-Host "Given the visible artifacts, readmes will be copied for the following packages" From 06a16ee292f200bf4a6bf455068233a76c7fedf7 Mon Sep 17 00:00:00 2001 From: "Adam Ling (MSFT)" <47871814+yunhaoling@users.noreply.github.com> Date: Tue, 19 May 2020 16:29:18 -0700 Subject: [PATCH 16/28] update unit test according to the latest uamqp update (#11533) --- sdk/eventhub/azure-eventhub/tests/unittest/test_event_data.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sdk/eventhub/azure-eventhub/tests/unittest/test_event_data.py b/sdk/eventhub/azure-eventhub/tests/unittest/test_event_data.py index 50502563681c..f32cec472067 100644 --- a/sdk/eventhub/azure-eventhub/tests/unittest/test_event_data.py +++ b/sdk/eventhub/azure-eventhub/tests/unittest/test_event_data.py @@ -93,6 +93,6 @@ def test_event_data_batch(): batch.add(EventData("A")) assert str(batch) == "EventDataBatch(max_size_in_bytes=100, partition_id=None, partition_key='par', event_count=1)" assert repr(batch) == "EventDataBatch(max_size_in_bytes=100, partition_id=None, partition_key='par', event_count=1)" - assert batch.size_in_bytes == 89 and len(batch) == 1 + assert batch.size_in_bytes == 97 and len(batch) == 1 with pytest.raises(ValueError): batch.add(EventData("A")) From 2598959164c9d1e87f20060e8ca76ec563d180d6 Mon Sep 17 00:00:00 2001 From: Bryan Van de Ven Date: Tue, 19 May 2020 16:51:40 -0700 Subject: [PATCH 17/28] Search Renames / Regen (#11342) * add regenerated swagger * Analyzer -> LexicalAnalyzer * StandardAnalyzer -> LuceneStandardAnalyzer * StandardTokenizer -> LuceneStandardTokenizer * DataSource -> SearchIndexerDataSource * DataContainer -> SearchIndexerDataContainer * Skillset -> SearchIndexerSkillset * Skill -> SearchIndexerSkill * TokenInfo -> AnalyzedTokenInfo * EncryptionKey -> SearchResourceEncryptionKey * IndexerExecutionInfo -> SearchIndexerStatus * Indexer -> SearchIndexer * Tokenizer -> LexicalTokenizer * Field -> SearchField * Index -> SearchIndex * update for AccessCondition * update tests * regenerate w/o x-ms-pageable * remove conflict merge junk * pylint --- .../azure/search/documents/__init__.py | 54 +- .../documents/_index/_generated/__init__.py | 2 +- .../_index/_generated/_configuration.py | 2 +- .../_index/_generated/_search_index_client.py | 2 +- 
.../_index/_generated/aio/__init__.py | 2 +- .../_generated/aio/_configuration_async.py | 2 +- .../aio/_search_index_client_async.py | 2 +- .../aio/operations_async/__init__.py | 2 +- .../_documents_operations_async.py | 49 +- .../_index/_generated/models/__init__.py | 8 +- .../_index/_generated/models/_models.py | 28 +- .../_index/_generated/models/_models_py3.py | 28 +- .../models/_search_index_client_enums.py | 24 +- .../_index/_generated/operations/__init__.py | 2 +- .../operations/_documents_operations.py | 49 +- .../documents/_service/_datasources_client.py | 42 +- .../documents/_service/_generated/__init__.py | 10 +- .../_service/_generated/_configuration.py | 10 +- .../_generated/_search_service_client.py | 21 +- .../_service/_generated/aio/__init__.py | 4 +- .../_generated/aio/_configuration_async.py | 4 +- .../aio/_search_service_client_async.py | 15 +- .../aio/operations_async/__init__.py | 4 +- .../_data_sources_operations_async.py | 151 +- .../_indexers_operations_async.py | 188 +- .../_indexes_operations_async.py | 180 +- ..._search_service_client_operations_async.py | 15 +- .../_skillsets_operations_async.py | 151 +- .../_synonym_maps_operations_async.py | 117 +- .../_service/_generated/models/__init__.py | 140 +- .../_service/_generated/models/_models.py | 2979 +++++++++-------- .../_service/_generated/models/_models_py3.py | 2904 ++++++++-------- .../models/_search_service_client_enums.py | 900 ++--- .../_generated/operations/__init__.py | 4 +- .../operations/_data_sources_operations.py | 161 +- .../operations/_indexers_operations.py | 198 +- .../operations/_indexes_operations.py | 190 +- .../_search_service_client_operations.py | 25 +- .../operations/_skillsets_operations.py | 161 +- .../operations/_synonym_maps_operations.py | 127 +- .../azure/search/documents/_service/_index.py | 14 +- .../documents/_service/_indexers_client.py | 75 +- .../documents/_service/_indexes_client.py | 52 +- .../search/documents/_service/_models.py | 6 +- .../documents/_service/_skillsets_client.py | 97 +- .../_service/_synonym_maps_client.py | 15 +- .../azure/search/documents/_service/_utils.py | 20 +- .../_service/aio/_datasources_client.py | 44 +- .../_service/aio/_indexers_client.py | 67 +- .../documents/_service/aio/_indexes_client.py | 52 +- .../_service/aio/_skillsets_client.py | 97 +- .../_service/aio/_synonym_maps_client.py | 15 +- .../async_tests/test_service_live_async.py | 43 +- .../tests/test_regex_flags.py | 12 +- .../tests/test_service_live.py | 43 +- 55 files changed, 4925 insertions(+), 4684 deletions(-) diff --git a/sdk/search/azure-search-documents/azure/search/documents/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/__init__.py index 3abdc091320c..7632f0b5daa7 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/__init__.py @@ -43,9 +43,9 @@ edm, ) from ._service._generated.models import ( - Analyzer, AnalyzeRequest, AnalyzeResult, + AnalyzedTokenInfo, AsciiFoldingTokenFilter, AzureActiveDirectoryApplicationCredentials, CharFilter, @@ -55,24 +55,18 @@ ConditionalSkill, CorsOptions, CustomAnalyzer, - DataSource, DataSourceCredentials, - DataContainer, DictionaryDecompounderTokenFilter, DistanceScoringFunction, DistanceScoringParameters, EdgeNGramTokenFilter, EdgeNGramTokenizer, ElisionTokenFilter, - EncryptionKey, EntityRecognitionSkill, - Field, FreshnessScoringFunction, FreshnessScoringParameters, GetIndexStatisticsResult, ImageAnalysisSkill, - Index, - 
Indexer, IndexingSchedule, IndexingParameters, InputFieldMappingEntry, @@ -82,7 +76,11 @@ KeywordTokenizer, LanguageDetectionSkill, LengthTokenFilter, + LexicalAnalyzer, + LexicalTokenizer, LimitTokenFilter, + LuceneStandardAnalyzer, + LuceneStandardTokenizer, MagnitudeScoringFunction, MagnitudeScoringParameters, MappingCharFilter, @@ -98,16 +96,20 @@ PatternReplaceTokenFilter, PhoneticTokenFilter, RegexFlags, + SearchField, + SearchIndex, + SearchIndexer, + SearchIndexerDataContainer, + SearchIndexerDataSource, + SearchIndexerSkillset, + SearchResourceEncryptionKey, ScoringFunction, ScoringProfile, SentimentSkill, ShaperSkill, ShingleTokenFilter, - Skillset, SnowballTokenFilter, SplitSkill, - StandardAnalyzer, - StandardTokenizer, StemmerOverrideTokenFilter, StemmerTokenFilter, StopAnalyzer, @@ -120,8 +122,6 @@ TextTranslationSkill, TextWeights, TokenFilter, - TokenInfo, - Tokenizer, TruncateTokenFilter, UaxUrlEmailTokenizer, UniqueTokenFilter, @@ -142,7 +142,7 @@ __all__ = ( "AnalyzeRequest", "AnalyzeResult", - "Analyzer", + "AnalyzedTokenInfo", "AsciiFoldingTokenFilter", "AutocompleteQuery", "AzureActiveDirectoryApplicationCredentials", @@ -154,24 +154,18 @@ "ConditionalSkill", "CorsOptions", "CustomAnalyzer", - "DataSource", "DataSourceCredentials", - "DataContainer", "DictionaryDecompounderTokenFilter", "DistanceScoringFunction", "DistanceScoringParameters", "EdgeNGramTokenFilter", "EdgeNGramTokenizer", "ElisionTokenFilter", - "EncryptionKey", "EntityRecognitionSkill", - "Field", "FreshnessScoringFunction", "FreshnessScoringParameters", "GetIndexStatisticsResult", "ImageAnalysisSkill", - "Index", - "Indexer", "IndexingSchedule", "IndexingParameters", "IndexAction", @@ -184,7 +178,11 @@ "KeywordTokenizer", "LanguageDetectionSkill", "LengthTokenFilter", + "LexicalAnalyzer", + "LexicalTokenizer", "LimitTokenFilter", + "LuceneStandardAnalyzer", + "LuceneStandardTokenizer", "MagnitudeScoringFunction", "MagnitudeScoringParameters", "MappingCharFilter", @@ -204,25 +202,29 @@ "RegexFlags", "ScoringFunction", "ScoringProfile", - "SearchClient", "SearchDataSourcesClient", + "SearchClient", + "SearchField", + "SearchIndex", + "SearchIndexer", + "SearchIndexerDataContainer", + "SearchIndexerDataSource", + "SearchIndexerSkillset", "SearchIndexersClient", "SearchIndexesClient", - "SearchSkillsetsClient", - "SearchSynonymMapsClient", "SearchItemPaged", "SearchQuery", + "SearchResourceEncryptionKey", "SearchServiceClient", + "SearchSkillsetsClient", + "SearchSynonymMapsClient", "SearchableField", "SentimentSkill", "ShaperSkill", "ShingleTokenFilter", "SimpleField", - "Skillset", "SnowballTokenFilter", "SplitSkill", - "StandardAnalyzer", - "StandardTokenizer", "StemmerOverrideTokenFilter", "StemmerTokenFilter", "StopAnalyzer", @@ -236,8 +238,6 @@ "TextTranslationSkill", "TextWeights", "TokenFilter", - "TokenInfo", - "Tokenizer", "TruncateTokenFilter", "UaxUrlEmailTokenizer", "UniqueTokenFilter", diff --git a/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/__init__.py index ac06514f5327..7363c23e25cd 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/__init__.py @@ -1,6 +1,6 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6246, generator: 
{generator}) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator}) # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- diff --git a/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/_configuration.py b/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/_configuration.py index 3010c29cca9f..4ad630688a33 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/_configuration.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/_configuration.py @@ -1,6 +1,6 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6246, generator: {generator}) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator}) # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- diff --git a/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/_search_index_client.py b/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/_search_index_client.py index 7f112a9888de..8182e533ba81 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/_search_index_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/_search_index_client.py @@ -1,6 +1,6 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6246, generator: {generator}) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator}) # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- diff --git a/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/aio/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/aio/__init__.py index a06ffca12355..921a60ecf1d2 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/aio/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/aio/__init__.py @@ -1,6 +1,6 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6246, generator: {generator}) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator}) # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
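
For consumers, the public-surface renames in this commit are drop-in import changes; the model shapes themselves are unchanged. A minimal sketch of updated application code, assuming an app that previously imported Index, Field, Indexer, DataSource, StandardAnalyzer, and EncryptionKey (the "hotels" index name is a placeholder):

    from azure.search.documents import (
        LuceneStandardAnalyzer,       # formerly StandardAnalyzer
        SearchField,                  # formerly Field
        SearchIndex,                  # formerly Index
        SearchIndexer,                # formerly Indexer
        SearchIndexerDataSource,      # formerly DataSource
        SearchResourceEncryptionKey,  # formerly EncryptionKey
    )

    # Constructors and attributes are untouched by the rename, so existing code
    # updates by renaming imports and any type annotations.
    index = SearchIndex(name="hotels", fields=[])
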
# -------------------------------------------------------------------------- diff --git a/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/aio/_configuration_async.py b/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/aio/_configuration_async.py index 2fe06bf7f544..54942faa0d61 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/aio/_configuration_async.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/aio/_configuration_async.py @@ -1,6 +1,6 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6246, generator: {generator}) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator}) # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- diff --git a/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/aio/_search_index_client_async.py b/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/aio/_search_index_client_async.py index 06ec78b349c8..36f6ad7726e5 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/aio/_search_index_client_async.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/aio/_search_index_client_async.py @@ -1,6 +1,6 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6246, generator: {generator}) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator}) # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- diff --git a/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/aio/operations_async/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/aio/operations_async/__init__.py index 6b51d112c132..c3fee199ca19 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/aio/operations_async/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/aio/operations_async/__init__.py @@ -1,6 +1,6 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6246, generator: {generator}) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator}) # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- diff --git a/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/aio/operations_async/_documents_operations_async.py b/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/aio/operations_async/_documents_operations_async.py index dc288eb01147..0a25976ad253 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/aio/operations_async/_documents_operations_async.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/aio/operations_async/_documents_operations_async.py @@ -1,12 +1,12 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6246, generator: {generator}) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator}) # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar, Union import warnings -from azure.core.exceptions import map_error +from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest @@ -52,7 +52,7 @@ async def count( :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType[int] - error_map = kwargs.pop('error_map', {}) + error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) _x_ms_client_request_id = None if request_options is not None: @@ -84,7 +84,8 @@ async def count( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.SearchErrorException.from_response(response, self._deserialize) + error = self._deserialize(models.SearchError, response) + raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('long', pipeline_response) @@ -116,7 +117,7 @@ async def search_get( :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.SearchDocumentsResult"] - error_map = kwargs.pop('error_map', {}) + error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) _include_total_result_count = None _facets = None @@ -215,7 +216,8 @@ async def search_get( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.SearchErrorException.from_response(response, self._deserialize) + error = self._deserialize(models.SearchError, response) + raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('SearchDocumentsResult', pipeline_response) @@ -243,7 +245,7 @@ async def search_post( :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.SearchDocumentsResult"] - error_map = kwargs.pop('error_map', {}) + error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) _x_ms_client_request_id = None if request_options is not None: @@ -280,7 +282,8 @@ async def search_post( if response.status_code not in [200]: map_error(status_code=response.status_code, 
response=response, error_map=error_map) - raise models.SearchErrorException.from_response(response, self._deserialize) + error = self._deserialize(models.SearchError, response) + raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('SearchDocumentsResult', pipeline_response) @@ -312,7 +315,7 @@ async def get( :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType[object] - error_map = kwargs.pop('error_map', {}) + error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) _x_ms_client_request_id = None if request_options is not None: @@ -347,7 +350,8 @@ async def get( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.SearchErrorException.from_response(response, self._deserialize) + error = self._deserialize(models.SearchError, response) + raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('object', pipeline_response) @@ -383,7 +387,7 @@ async def suggest_get( :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.SuggestDocumentsResult"] - error_map = kwargs.pop('error_map', {}) + error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) _filter = None _use_fuzzy_matching = None @@ -454,7 +458,8 @@ async def suggest_get( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.SearchErrorException.from_response(response, self._deserialize) + error = self._deserialize(models.SearchError, response) + raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('SuggestDocumentsResult', pipeline_response) @@ -482,7 +487,7 @@ async def suggest_post( :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.SuggestDocumentsResult"] - error_map = kwargs.pop('error_map', {}) + error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) _x_ms_client_request_id = None if request_options is not None: @@ -519,7 +524,8 @@ async def suggest_post( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.SearchErrorException.from_response(response, self._deserialize) + error = self._deserialize(models.SearchError, response) + raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('SuggestDocumentsResult', pipeline_response) @@ -547,7 +553,7 @@ async def index( :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.IndexDocumentsResult"] - error_map = kwargs.pop('error_map', {}) + error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) _x_ms_client_request_id = None if request_options is not None: @@ -584,7 +590,8 @@ async def index( if response.status_code not in [200, 207]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.SearchErrorException.from_response(response, self._deserialize) + error = self._deserialize(models.SearchError, response) + raise HttpResponseError(response=response, model=error) deserialized = None if response.status_code == 200: @@ -624,7 +631,7 @@ async def autocomplete_get( :raises: ~azure.core.exceptions.HttpResponseError """ 
cls = kwargs.pop('cls', None) # type: ClsType["models.AutocompleteResult"] - error_map = kwargs.pop('error_map', {}) + error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) _x_ms_client_request_id = None _autocomplete_mode = None @@ -691,7 +698,8 @@ async def autocomplete_get( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.SearchErrorException.from_response(response, self._deserialize) + error = self._deserialize(models.SearchError, response) + raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('AutocompleteResult', pipeline_response) @@ -719,7 +727,7 @@ async def autocomplete_post( :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.AutocompleteResult"] - error_map = kwargs.pop('error_map', {}) + error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) _x_ms_client_request_id = None if request_options is not None: @@ -756,7 +764,8 @@ async def autocomplete_post( if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) - raise models.SearchErrorException.from_response(response, self._deserialize) + error = self._deserialize(models.SearchError, response) + raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('AutocompleteResult', pipeline_response) diff --git a/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/models/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/models/__init__.py index ad760a1d771e..5971f054dfba 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/models/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/models/__init__.py @@ -1,6 +1,6 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6246, generator: {generator}) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator}) # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
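
With this regeneration, failed operations no longer raise the service-specific SearchErrorException; the error body is deserialized as SearchError and attached to azure-core's shared HttpResponseError, while the widened error_map surfaces 404 and 409 as ResourceNotFoundError and ResourceExistsError. A minimal sketch of caller-side handling, assuming placeholder endpoint, key, and index names:

    from azure.core.credentials import AzureKeyCredential
    from azure.core.exceptions import HttpResponseError, ResourceNotFoundError
    from azure.search.documents import SearchClient

    client = SearchClient(
        endpoint="<service endpoint>",
        index_name="<index name>",
        credential=AzureKeyCredential("<api key>"),
    )
    try:
        doc = client.get_document(key="does-not-exist")
    except ResourceNotFoundError:
        # 404s now map to ResourceNotFoundError via the default error_map.
        print("no such document")
    except HttpResponseError as e:
        # Other service errors carry the deserialized SearchError as e.model.
        print(e.message)

Note that ResourceNotFoundError subclasses HttpResponseError, so the more specific except clause must come first.
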
# -------------------------------------------------------------------------- @@ -16,7 +16,7 @@ from ._models_py3 import IndexingResult from ._models_py3 import RequestOptions from ._models_py3 import SearchDocumentsResult - from ._models_py3 import SearchError, SearchErrorException + from ._models_py3 import SearchError from ._models_py3 import SearchOptions from ._models_py3 import SearchRequest from ._models_py3 import SearchResult @@ -36,7 +36,7 @@ from ._models import IndexingResult # type: ignore from ._models import RequestOptions # type: ignore from ._models import SearchDocumentsResult # type: ignore - from ._models import SearchError, SearchErrorException # type: ignore + from ._models import SearchError # type: ignore from ._models import SearchOptions # type: ignore from ._models import SearchRequest # type: ignore from ._models import SearchResult # type: ignore @@ -64,7 +64,7 @@ 'IndexingResult', 'RequestOptions', 'SearchDocumentsResult', - 'SearchError', 'SearchErrorException', + 'SearchError', 'SearchOptions', 'SearchRequest', 'SearchResult', diff --git a/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/models/_models.py b/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/models/_models.py index 88b4aacde572..69693a04bf57 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/models/_models.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/models/_models.py @@ -1,6 +1,6 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6246, generator: {generator}) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator}) # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- @@ -449,31 +449,6 @@ def __init__( self.next_link = None -class SearchErrorException(HttpResponseError): - """Server responded with exception of type: 'SearchError'. - - :param response: Server response to be deserialized. - :param error_model: A deserialized model of the response body as model. - """ - - def __init__(self, response, error_model): - self.error = error_model - super(SearchErrorException, self).__init__(response=response, error_model=error_model) - - @classmethod - def from_response(cls, response, deserialize): - """Deserialize this response as this exception, or a subclass of this exception. - - :param response: Server response to be deserialized. - :param deserialize: A deserializer - """ - model_name = 'SearchError' - error = deserialize(model_name, response) - if error is None: - error = deserialize.dependencies[model_name]() - return error._EXCEPTION_TYPE(response, error) - - class SearchError(msrest.serialization.Model): """Describes an error condition for the Azure Cognitive Search API. @@ -488,7 +463,6 @@ class SearchError(msrest.serialization.Model): :ivar details: An array of details about specific errors that led to this reported error. 
:vartype details: list[~search_index_client.models.SearchError] """ - _EXCEPTION_TYPE = SearchErrorException _validation = { 'code': {'readonly': True}, diff --git a/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/models/_models_py3.py b/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/models/_models_py3.py index 4b8f7bda6f7f..7e394ea69d39 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/models/_models_py3.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/models/_models_py3.py @@ -1,6 +1,6 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6246, generator: {generator}) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator}) # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- @@ -480,31 +480,6 @@ def __init__( self.next_link = None -class SearchErrorException(HttpResponseError): - """Server responded with exception of type: 'SearchError'. - - :param response: Server response to be deserialized. - :param error_model: A deserialized model of the response body as model. - """ - - def __init__(self, response, error_model): - self.error = error_model - super(SearchErrorException, self).__init__(response=response, error_model=error_model) - - @classmethod - def from_response(cls, response, deserialize): - """Deserialize this response as this exception, or a subclass of this exception. - - :param response: Server response to be deserialized. - :param deserialize: A deserializer - """ - model_name = 'SearchError' - error = deserialize(model_name, response) - if error is None: - error = deserialize.dependencies[model_name]() - return error._EXCEPTION_TYPE(response, error) - - class SearchError(msrest.serialization.Model): """Describes an error condition for the Azure Cognitive Search API. @@ -519,7 +494,6 @@ class SearchError(msrest.serialization.Model): :ivar details: An array of details about specific errors that led to this reported error. :vartype details: list[~search_index_client.models.SearchError] """ - _EXCEPTION_TYPE = SearchErrorException _validation = { 'code': {'readonly': True}, diff --git a/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/models/_search_index_client_enums.py b/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/models/_search_index_client_enums.py index ec3e46a89f5a..f8c0578dc65c 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/models/_search_index_client_enums.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/models/_search_index_client_enums.py @@ -1,6 +1,6 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6246, generator: {generator}) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator}) # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- @@ -10,23 +10,23 @@ class IndexActionType(str, Enum): """The operation to perform on a document in an indexing batch. 
""" - upload = "upload" - merge = "merge" - merge_or_upload = "mergeOrUpload" - delete = "delete" + upload = "upload" #: Inserts the document into the index if it is new and updates it if it exists. All fields are replaced in the update case. + merge = "merge" #: Merges the specified field values with an existing document. If the document does not exist, the merge will fail. Any field you specify in a merge will replace the existing field in the document. This also applies to collections of primitive and complex types. + merge_or_upload = "mergeOrUpload" #: Behaves like merge if a document with the given key already exists in the index. If the document does not exist, it behaves like upload with a new document. + delete = "delete" #: Removes the specified document from the index. Any field you specify in a delete operation other than the key field will be ignored. If you want to remove an individual field from a document, use merge instead and set the field explicitly to null. class QueryType(str, Enum): - simple = "simple" - full = "full" + simple = "simple" #: Uses the simple query syntax for searches. Search text is interpreted using a simple query language that allows for symbols such as +, * and "". Queries are evaluated across all searchable fields by default, unless the searchFields parameter is specified. + full = "full" #: Uses the full Lucene query syntax for searches. Search text is interpreted using the Lucene query language which allows field-specific and weighted searches, as well as other advanced features. class SearchMode(str, Enum): - any = "any" - all = "all" + any = "any" #: Any of the search terms must be matched in order to count the document as a match. + all = "all" #: All of the search terms must be matched in order to count the document as a match. class AutocompleteMode(str, Enum): - one_term = "oneTerm" - two_terms = "twoTerms" - one_term_with_context = "oneTermWithContext" + one_term = "oneTerm" #: Only one term is suggested. If the query has two terms, only the last term is completed. For example, if the input is 'washington medic', the suggested terms could include 'medicaid', 'medicare', and 'medicine'. + two_terms = "twoTerms" #: Matching two-term phrases in the index will be suggested. For example, if the input is 'medic', the suggested terms could include 'medicare coverage' and 'medical assistant'. + one_term_with_context = "oneTermWithContext" #: Completes the last term in a query with two or more terms, where the last two terms are a phrase that exists in the index. For example, if the input is 'washington medic', the suggested terms could include 'washington medicaid' and 'washington medical'. diff --git a/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/operations/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/operations/__init__.py index 1de4dc8bc765..bcb01fb453e2 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/operations/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/operations/__init__.py @@ -1,6 +1,6 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6246, generator: {generator}) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator}) # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
 # --------------------------------------------------------------------------
diff --git a/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/operations/_documents_operations.py b/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/operations/_documents_operations.py
index 718835b3c195..923e4fb4c95b 100644
--- a/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/operations/_documents_operations.py
+++ b/sdk/search/azure-search-documents/azure/search/documents/_index/_generated/operations/_documents_operations.py
@@ -1,12 +1,12 @@
 # coding=utf-8
 # --------------------------------------------------------------------------
-# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6246, generator: {generator})
+# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator})
 # Changes may cause incorrect behavior and will be lost if the code is regenerated.
 # --------------------------------------------------------------------------
 from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar, Union
 import warnings

-from azure.core.exceptions import map_error
+from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
 from azure.core.pipeline import PipelineResponse
 from azure.core.pipeline.transport import HttpRequest, HttpResponse

@@ -53,7 +53,7 @@ def count(
         :raises: ~azure.core.exceptions.HttpResponseError
         """
         cls = kwargs.pop('cls', None)  # type: ClsType[int]
-        error_map = kwargs.pop('error_map', {})
+        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})

         _x_ms_client_request_id = None
         if request_options is not None:
@@ -85,7 +85,8 @@ def count(

         if response.status_code not in [200]:
             map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.SearchErrorException.from_response(response, self._deserialize)
+            error = self._deserialize(models.SearchError, response)
+            raise HttpResponseError(response=response, model=error)

         deserialized = self._deserialize('long', pipeline_response)

@@ -118,7 +119,7 @@ def search_get(
         :raises: ~azure.core.exceptions.HttpResponseError
         """
         cls = kwargs.pop('cls', None)  # type: ClsType["models.SearchDocumentsResult"]
-        error_map = kwargs.pop('error_map', {})
+        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})

         _include_total_result_count = None
         _facets = None
@@ -217,7 +218,8 @@ def search_get(

         if response.status_code not in [200]:
             map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.SearchErrorException.from_response(response, self._deserialize)
+            error = self._deserialize(models.SearchError, response)
+            raise HttpResponseError(response=response, model=error)

         deserialized = self._deserialize('SearchDocumentsResult', pipeline_response)

@@ -246,7 +248,7 @@ def search_post(
         :raises: ~azure.core.exceptions.HttpResponseError
         """
         cls = kwargs.pop('cls', None)  # type: ClsType["models.SearchDocumentsResult"]
-        error_map = kwargs.pop('error_map', {})
+        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})

         _x_ms_client_request_id = None
         if request_options is not None:
@@ -283,7 +285,8 @@ def search_post(

         if response.status_code not in [200]:
             map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.SearchErrorException.from_response(response, self._deserialize)
+            error = self._deserialize(models.SearchError, response)
+            raise HttpResponseError(response=response, model=error)

         deserialized = self._deserialize('SearchDocumentsResult', pipeline_response)

@@ -316,7 +319,7 @@ def get(
         :raises: ~azure.core.exceptions.HttpResponseError
         """
         cls = kwargs.pop('cls', None)  # type: ClsType[object]
-        error_map = kwargs.pop('error_map', {})
+        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})

         _x_ms_client_request_id = None
         if request_options is not None:
@@ -351,7 +354,8 @@ def get(

         if response.status_code not in [200]:
             map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.SearchErrorException.from_response(response, self._deserialize)
+            error = self._deserialize(models.SearchError, response)
+            raise HttpResponseError(response=response, model=error)

         deserialized = self._deserialize('object', pipeline_response)

@@ -388,7 +392,7 @@ def suggest_get(
         :raises: ~azure.core.exceptions.HttpResponseError
         """
         cls = kwargs.pop('cls', None)  # type: ClsType["models.SuggestDocumentsResult"]
-        error_map = kwargs.pop('error_map', {})
+        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})

         _filter = None
         _use_fuzzy_matching = None
@@ -459,7 +463,8 @@ def suggest_get(

         if response.status_code not in [200]:
             map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.SearchErrorException.from_response(response, self._deserialize)
+            error = self._deserialize(models.SearchError, response)
+            raise HttpResponseError(response=response, model=error)

         deserialized = self._deserialize('SuggestDocumentsResult', pipeline_response)

@@ -488,7 +493,7 @@ def suggest_post(
         :raises: ~azure.core.exceptions.HttpResponseError
         """
         cls = kwargs.pop('cls', None)  # type: ClsType["models.SuggestDocumentsResult"]
-        error_map = kwargs.pop('error_map', {})
+        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})

         _x_ms_client_request_id = None
         if request_options is not None:
@@ -525,7 +530,8 @@ def suggest_post(

         if response.status_code not in [200]:
             map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.SearchErrorException.from_response(response, self._deserialize)
+            error = self._deserialize(models.SearchError, response)
+            raise HttpResponseError(response=response, model=error)

         deserialized = self._deserialize('SuggestDocumentsResult', pipeline_response)

@@ -554,7 +560,7 @@ def index(
         :raises: ~azure.core.exceptions.HttpResponseError
         """
         cls = kwargs.pop('cls', None)  # type: ClsType["models.IndexDocumentsResult"]
-        error_map = kwargs.pop('error_map', {})
+        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})

         _x_ms_client_request_id = None
         if request_options is not None:
@@ -591,7 +597,8 @@ def index(

         if response.status_code not in [200, 207]:
             map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.SearchErrorException.from_response(response, self._deserialize)
+            error = self._deserialize(models.SearchError, response)
+            raise HttpResponseError(response=response, model=error)

         deserialized = None
         if response.status_code == 200:
@@ -632,7 +639,7 @@ def autocomplete_get(
         :raises: ~azure.core.exceptions.HttpResponseError
         """
         cls = kwargs.pop('cls', None)  # type: ClsType["models.AutocompleteResult"]
-        error_map = kwargs.pop('error_map', {})
+        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})

         _x_ms_client_request_id = None
         _autocomplete_mode = None
@@ -699,7 +706,8 @@ def autocomplete_get(

         if response.status_code not in [200]:
             map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.SearchErrorException.from_response(response, self._deserialize)
+            error = self._deserialize(models.SearchError, response)
+            raise HttpResponseError(response=response, model=error)

         deserialized = self._deserialize('AutocompleteResult', pipeline_response)

@@ -728,7 +736,7 @@ def autocomplete_post(
         :raises: ~azure.core.exceptions.HttpResponseError
         """
         cls = kwargs.pop('cls', None)  # type: ClsType["models.AutocompleteResult"]
-        error_map = kwargs.pop('error_map', {})
+        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})

         _x_ms_client_request_id = None
         if request_options is not None:
@@ -765,7 +773,8 @@ def autocomplete_post(

         if response.status_code not in [200]:
             map_error(status_code=response.status_code, response=response, error_map=error_map)
-            raise models.SearchErrorException.from_response(response, self._deserialize)
+            error = self._deserialize(models.SearchError, response)
+            raise HttpResponseError(response=response, model=error)

         deserialized = self._deserialize('AutocompleteResult', pipeline_response)

diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_datasources_client.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_datasources_client.py
index 83ca286a1f36..0f04fe5407f8 100644
--- a/sdk/search/azure-search-documents/azure/search/documents/_service/_datasources_client.py
+++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_datasources_client.py
@@ -15,7 +15,7 @@
 if TYPE_CHECKING:
     # pylint:disable=unused-import,ungrouped-imports
-    from ._generated.models import DataSource
+    from ._generated.models import SearchIndexerDataSource
     from typing import Any, Dict, Optional, Sequence, Union
     from azure.core.credentials import AzureKeyCredential

@@ -57,12 +57,12 @@ def close(self):

     @distributed_trace
     def create_datasource(self, data_source, **kwargs):
-        # type: (DataSource, **Any) -> Dict[str, Any]
+        # type: (SearchIndexerDataSource, **Any) -> Dict[str, Any]
         """Creates a new datasource.

         :param data_source: The definition of the datasource to create.
-        :type data_source: ~search.models.DataSource
-        :return: The created DataSource
+        :type data_source: ~search.models.SearchIndexerDataSource
+        :return: The created SearchIndexerDataSource
         :rtype: dict

         .. admonition:: Example:
@@ -80,28 +80,27 @@ def create_datasource(self, data_source, **kwargs):

     @distributed_trace
     def create_or_update_datasource(self, data_source, name=None, **kwargs):
-        # type: (DataSource, Optional[str], **Any) -> Dict[str, Any]
+        # type: (SearchIndexerDataSource, Optional[str], **Any) -> Dict[str, Any]
         """Creates a new datasource or updates a datasource if it already exists.

         :param name: The name of the datasource to create or update.
         :type name: str
         :param data_source: The definition of the datasource to create or update.
-        :type data_source: ~search.models.DataSource
+        :type data_source: ~search.models.SearchIndexerDataSource
         :keyword match_condition: The match condition to use upon the etag
         :type match_condition: ~azure.core.MatchConditions
-        :return: The created DataSource
+        :return: The created SearchIndexerDataSource
         :rtype: dict

         """
         kwargs["headers"] = self._merge_client_headers(kwargs.get("headers"))
         error_map, access_condition = get_access_conditions(
-            data_source,
-            kwargs.pop('match_condition', MatchConditions.Unconditionally)
+            data_source, kwargs.pop("match_condition", MatchConditions.Unconditionally)
         )
+        kwargs.update(access_condition)
         if not name:
             name = data_source.name
         result = self._client.data_sources.create_or_update(
             data_source_name=name,
             data_source=data_source,
-            access_condition=access_condition,
             error_map=error_map,
             **kwargs
         )
@@ -114,7 +113,7 @@ def get_datasource(self, name, **kwargs):

         :param name: The name of the datasource to retrieve.
         :type name: str
-        :return: The DataSource that is fetched.
+        :return: The SearchIndexerDataSource that is fetched.
         :rtype: dict

         .. admonition:: Example:
@@ -124,7 +123,7 @@
             :end-before: [END get_data_source]
             :language: python
             :dedent: 4
-            :caption: Retrieve a DataSource
+            :caption: Retrieve a SearchIndexerDataSource
         """
         kwargs["headers"] = self._merge_client_headers(kwargs.get("headers"))
         result = self._client.data_sources.get(name, **kwargs)
@@ -132,7 +131,7 @@ def get_datasources(self, **kwargs):

-        # type: (**Any) -> Sequence[DataSource]
+        # type: (**Any) -> Sequence[SearchIndexerDataSource]
         """Lists all datasources available for a search service.

         :return: List of all the data sources.
@@ -145,7 +144,7 @@ def get_datasources(self, **kwargs):
             :end-before: [END list_data_source]
             :language: python
             :dedent: 4
-            :caption: List all the DataSources
+            :caption: List all the SearchIndexerDataSources
         """
         kwargs["headers"] = self._merge_client_headers(kwargs.get("headers"))
         result = self._client.data_sources.list(**kwargs)
@@ -153,13 +152,13 @@ def delete_datasource(self, data_source, **kwargs):

-        # type: (Union[str, DataSource], **Any) -> None
+        # type: (Union[str, SearchIndexerDataSource], **Any) -> None
         """Deletes a datasource. To use access conditions, the Datasource model must be
         provided instead of the name. It is enough to provide the name of the datasource
         to delete unconditionally

         :param data_source: The datasource to delete.
-        :type data_source: str or ~search.models.DataSource
+        :type data_source: str or ~search.models.SearchIndexerDataSource
         :keyword match_condition: The match condition to use upon the etag
         :type match_condition: ~azure.core.MatchConditions
         :return: None
@@ -172,20 +171,17 @@
             :end-before: [END delete_data_source]
             :language: python
             :dedent: 4
-            :caption: Delete a DataSource
+            :caption: Delete a SearchIndexerDataSource
         """
         kwargs["headers"] = self._merge_client_headers(kwargs.get("headers"))
         error_map, access_condition = get_access_conditions(
-            data_source,
-            kwargs.pop('match_condition', MatchConditions.Unconditionally)
+            data_source, kwargs.pop("match_condition", MatchConditions.Unconditionally)
         )
+        kwargs.update(access_condition)
         try:
             name = data_source.name
         except AttributeError:
             name = data_source
         self._client.data_sources.delete(
-            data_source_name=name,
-            access_condition=access_condition,
-            error_map=error_map,
-            **kwargs
+            data_source_name=name, error_map=error_map, **kwargs
         )
diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/__init__.py
index 4e8378902f12..43602c89e078 100644
--- a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/__init__.py
+++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/__init__.py
@@ -1,8 +1,16 @@
 # coding=utf-8
 # --------------------------------------------------------------------------
-# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator})
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
 # Changes may cause incorrect behavior and will be lost if the code is regenerated.
 # --------------------------------------------------------------------------

 from ._search_service_client import SearchServiceClient
 __all__ = ['SearchServiceClient']
+
+try:
+    from ._patch import patch_sdk
+    patch_sdk()
+except ImportError:
+    pass
diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/_configuration.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/_configuration.py
index 28368a23abde..a398a3277c31 100644
--- a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/_configuration.py
+++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/_configuration.py
@@ -1,14 +1,20 @@
 # coding=utf-8
 # --------------------------------------------------------------------------
-# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator})
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
 # Changes may cause incorrect behavior and will be lost if the code is regenerated.
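# The reworked datasources client above now threads the ETag through the
# ``match_condition`` keyword (folded into headers via kwargs.update) rather
# than a separate AccessCondition model. A minimal sketch of an optimistic
# concurrency round trip; the ``client`` variable and its construction are
# assumptions, only the method names come from the patch:
#
#     from azure.core import MatchConditions
#
#     fetched = client.get_datasource("my-datasource")
#     fetched["description"] = "updated"
#     # Succeeds only if the server-side ETag still matches the fetched copy.
#     client.create_or_update_datasource(
#         fetched, match_condition=MatchConditions.IfNotModified
#     )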
 # --------------------------------------------------------------------------

-from typing import Any
+from typing import TYPE_CHECKING

 from azure.core.configuration import Configuration
 from azure.core.pipeline import policies

+if TYPE_CHECKING:
+    # pylint: disable=unused-import,ungrouped-imports
+    from typing import Any
+
 VERSION = "unknown"

 class SearchServiceClientConfiguration(Configuration):
diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/_search_service_client.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/_search_service_client.py
index f466d50bb2aa..d049c0cf0dc8 100644
--- a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/_search_service_client.py
+++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/_search_service_client.py
@@ -1,14 +1,20 @@
 # coding=utf-8
 # --------------------------------------------------------------------------
-# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator})
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
 # Changes may cause incorrect behavior and will be lost if the code is regenerated.
 # --------------------------------------------------------------------------

-from typing import Any
+from typing import TYPE_CHECKING

 from azure.core import PipelineClient
 from msrest import Deserializer, Serializer

+if TYPE_CHECKING:
+    # pylint: disable=unused-import,ungrouped-imports
+    from typing import Any
+
 from ._configuration import SearchServiceClientConfiguration
 from .operations import DataSourcesOperations
 from .operations import IndexersOperations
@@ -23,17 +29,18 @@ class SearchServiceClient(SearchServiceClientOperationsMixin):
     """Client that can be used to manage and query indexes and documents, as well as manage other resources, on a search service.

     :ivar data_sources: DataSourcesOperations operations
-    :vartype data_sources: search_service_client.operations.DataSourcesOperations
+    :vartype data_sources: azure.search.documents.operations.DataSourcesOperations
     :ivar indexers: IndexersOperations operations
-    :vartype indexers: search_service_client.operations.IndexersOperations
+    :vartype indexers: azure.search.documents.operations.IndexersOperations
     :ivar skillsets: SkillsetsOperations operations
-    :vartype skillsets: search_service_client.operations.SkillsetsOperations
+    :vartype skillsets: azure.search.documents.operations.SkillsetsOperations
     :ivar synonym_maps: SynonymMapsOperations operations
-    :vartype synonym_maps: search_service_client.operations.SynonymMapsOperations
+    :vartype synonym_maps: azure.search.documents.operations.SynonymMapsOperations
     :ivar indexes: IndexesOperations operations
-    :vartype indexes: search_service_client.operations.IndexesOperations
+    :vartype indexes: azure.search.documents.operations.IndexesOperations
     :param endpoint: The endpoint URL of the search service.
     :type endpoint: str
+    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
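    A minimal construction sketch (illustrative only, not part of this patch;
    the import path follows this package's generated layout, and any api-key
    or credential policy is assumed to be wired up by the hand-written
    wrapper clients):

        from azure.search.documents._service._generated import SearchServiceClient

        client = SearchServiceClient(endpoint="https://<service>.search.windows.net")
        datasources = client.data_sources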
""" def __init__( diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/__init__.py index 6ffdee218108..bc2235f978a0 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/__init__.py @@ -1,6 +1,8 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator}) +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/_configuration_async.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/_configuration_async.py index 022214a557bb..d179b1eb3e0e 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/_configuration_async.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/_configuration_async.py @@ -1,6 +1,8 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator}) +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/_search_service_client_async.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/_search_service_client_async.py index 11b0ab3a5fdf..a9b8636149fc 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/_search_service_client_async.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/_search_service_client_async.py @@ -1,6 +1,8 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator}) +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- @@ -23,17 +25,18 @@ class SearchServiceClient(SearchServiceClientOperationsMixin): """Client that can be used to manage and query indexes and documents, as well as manage other resources, on a search service. 
     :ivar data_sources: DataSourcesOperations operations
-    :vartype data_sources: search_service_client.aio.operations_async.DataSourcesOperations
+    :vartype data_sources: azure.search.documents.aio.operations_async.DataSourcesOperations
     :ivar indexers: IndexersOperations operations
-    :vartype indexers: search_service_client.aio.operations_async.IndexersOperations
+    :vartype indexers: azure.search.documents.aio.operations_async.IndexersOperations
     :ivar skillsets: SkillsetsOperations operations
-    :vartype skillsets: search_service_client.aio.operations_async.SkillsetsOperations
+    :vartype skillsets: azure.search.documents.aio.operations_async.SkillsetsOperations
     :ivar synonym_maps: SynonymMapsOperations operations
-    :vartype synonym_maps: search_service_client.aio.operations_async.SynonymMapsOperations
+    :vartype synonym_maps: azure.search.documents.aio.operations_async.SynonymMapsOperations
     :ivar indexes: IndexesOperations operations
-    :vartype indexes: search_service_client.aio.operations_async.IndexesOperations
+    :vartype indexes: azure.search.documents.aio.operations_async.IndexesOperations
     :param endpoint: The endpoint URL of the search service.
     :type endpoint: str
+    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
     """

     def __init__(
diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/__init__.py
index a9e96c765498..7f552e89248c 100644
--- a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/__init__.py
+++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/__init__.py
@@ -1,6 +1,8 @@
 # coding=utf-8
 # --------------------------------------------------------------------------
-# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator})
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
 # Changes may cause incorrect behavior and will be lost if the code is regenerated.
 # --------------------------------------------------------------------------
diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_data_sources_operations_async.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_data_sources_operations_async.py
index 431a492fd958..5e8069a9d406 100644
--- a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_data_sources_operations_async.py
+++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_data_sources_operations_async.py
@@ -1,6 +1,8 @@
 # coding=utf-8
 # --------------------------------------------------------------------------
-# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator})
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
 # Changes may cause incorrect behavior and will be lost if the code is regenerated.
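# In the regenerated operations below, the old AccessCondition parameter group
# is flattened into explicit if_match/if_none_match keyword arguments that map
# directly onto the If-Match/If-None-Match request headers. A minimal sketch
# of a conditional call against this async operation group; the ``client``
# variable and the use of the model's e_tag attribute are assumptions, not
# part of the patch:
#
#     fetched = await client.data_sources.get("my-datasource")
#     # Delete only if the datasource has not changed since it was fetched.
#     await client.data_sources.delete("my-datasource", if_match=fetched.e_tag)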
 # --------------------------------------------------------------------------
 from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union

@@ -22,7 +24,7 @@ class DataSourcesOperations:
     instantiates it for you and attaches it as an attribute.

     :ivar models: Alias to model classes used in this operation group.
-    :type models: ~search_service_client.models
+    :type models: ~azure.search.documents.models
     :param client: Client for service requests.
     :param config: Configuration of service client.
     :param serializer: An object model serializer.
@@ -40,42 +42,44 @@ def __init__(self, client, config, serializer, deserializer) -> None:
     async def create_or_update(
         self,
         data_source_name: str,
-        data_source: "models.DataSource",
+        data_source: "models.SearchIndexerDataSource",
+        if_match: Optional[str] = None,
+        if_none_match: Optional[str] = None,
         request_options: Optional["models.RequestOptions"] = None,
-        access_condition: Optional["models.AccessCondition"] = None,
         **kwargs
-    ) -> "models.DataSource":
+    ) -> "models.SearchIndexerDataSource":
         """Creates a new datasource or updates a datasource if it already exists.

         :param data_source_name: The name of the datasource to create or update.
         :type data_source_name: str
         :param data_source: The definition of the datasource to create or update.
-        :type data_source: ~search_service_client.models.DataSource
+        :type data_source: ~azure.search.documents.models.SearchIndexerDataSource
+        :param if_match: Defines the If-Match condition. The operation will be performed only if the
+         ETag on the server matches this value.
+        :type if_match: str
+        :param if_none_match: Defines the If-None-Match condition. The operation will be performed only
+         if the ETag on the server does not match this value.
+        :type if_none_match: str
         :param request_options: Parameter group.
-        :type request_options: ~search_service_client.models.RequestOptions
-        :param access_condition: Parameter group.
-        :type access_condition: ~search_service_client.models.AccessCondition
+        :type request_options: ~azure.search.documents.models.RequestOptions
         :keyword callable cls: A custom type or function that will be passed the direct response
-        :return: DataSource or the result of cls(response)
-        :rtype: ~search_service_client.models.DataSource or ~search_service_client.models.DataSource
+        :return: SearchIndexerDataSource or the result of cls(response)
+        :rtype: ~azure.search.documents.models.SearchIndexerDataSource
         :raises: ~azure.core.exceptions.HttpResponseError
         """
-        cls = kwargs.pop('cls', None)  # type: ClsType["models.DataSource"]
-        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})
+        cls = kwargs.pop('cls', None)  # type: ClsType["models.SearchIndexerDataSource"]
+        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
+        error_map.update(kwargs.pop('error_map', {}))

         _x_ms_client_request_id = None
-        _if_match = None
-        _if_none_match = None
-        if access_condition is not None:
-            _if_match = access_condition.if_match
-            _if_none_match = access_condition.if_none_match
         if request_options is not None:
             _x_ms_client_request_id = request_options.x_ms_client_request_id
         prefer = "return=representation"
         api_version = "2019-05-06-Preview"
+        content_type = kwargs.pop("content_type", "application/json")

         # Construct URL
-        url = self.create_or_update.metadata['url']
+        url = self.create_or_update.metadata['url']  # type: ignore
         path_format_arguments = {
             'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
             'dataSourceName': self._serialize.url("data_source_name", data_source_name, 'str'),
@@ -90,17 +94,17 @@ async def create_or_update(
         header_parameters = {}  # type: Dict[str, Any]
         if _x_ms_client_request_id is not None:
             header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str')
-        if _if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str')
-        if _if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str')
+        if if_match is not None:
+            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+        if if_none_match is not None:
+            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
         header_parameters['Prefer'] = self._serialize.header("prefer", prefer, 'str')
+        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
         header_parameters['Accept'] = 'application/json'
-        header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json')

         # Construct and send request
         body_content_kwargs = {}  # type: Dict[str, Any]
-        body_content = self._serialize.body(data_source, 'DataSource')
+        body_content = self._serialize.body(data_source, 'SearchIndexerDataSource')
         body_content_kwargs['content'] = body_content
         request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)

@@ -114,52 +118,53 @@ async def create_or_update(
         deserialized = None
         if response.status_code == 200:
-            deserialized = self._deserialize('DataSource', pipeline_response)
+            deserialized = self._deserialize('SearchIndexerDataSource', pipeline_response)

         if response.status_code == 201:
-            deserialized = self._deserialize('DataSource', pipeline_response)
+            deserialized = self._deserialize('SearchIndexerDataSource', pipeline_response)

         if cls:
             return cls(pipeline_response, deserialized, {})

         return deserialized
-    create_or_update.metadata = {'url': '/datasources(\'{dataSourceName}\')'}
+    create_or_update.metadata = {'url': '/datasources(\'{dataSourceName}\')'}  # type: ignore

     async def delete(
         self,
         data_source_name: str,
+        if_match: Optional[str] = None,
+        if_none_match: Optional[str] = None,
         request_options: Optional["models.RequestOptions"] = None,
-        access_condition: Optional["models.AccessCondition"] = None,
         **kwargs
     ) -> None:
         """Deletes a datasource.

         :param data_source_name: The name of the datasource to delete.
         :type data_source_name: str
+        :param if_match: Defines the If-Match condition. The operation will be performed only if the
+         ETag on the server matches this value.
+        :type if_match: str
+        :param if_none_match: Defines the If-None-Match condition. The operation will be performed only
+         if the ETag on the server does not match this value.
+        :type if_none_match: str
         :param request_options: Parameter group.
-        :type request_options: ~search_service_client.models.RequestOptions
-        :param access_condition: Parameter group.
-        :type access_condition: ~search_service_client.models.AccessCondition
+        :type request_options: ~azure.search.documents.models.RequestOptions
        :keyword callable cls: A custom type or function that will be passed the direct response
         :return: None or the result of cls(response)
         :rtype: None
         :raises: ~azure.core.exceptions.HttpResponseError
         """
         cls = kwargs.pop('cls', None)  # type: ClsType[None]
-        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})
+        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
+        error_map.update(kwargs.pop('error_map', {}))

         _x_ms_client_request_id = None
-        _if_match = None
-        _if_none_match = None
-        if access_condition is not None:
-            _if_match = access_condition.if_match
-            _if_none_match = access_condition.if_none_match
         if request_options is not None:
             _x_ms_client_request_id = request_options.x_ms_client_request_id
         api_version = "2019-05-06-Preview"

         # Construct URL
-        url = self.delete.metadata['url']
+        url = self.delete.metadata['url']  # type: ignore
         path_format_arguments = {
             'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
             'dataSourceName': self._serialize.url("data_source_name", data_source_name, 'str'),
@@ -174,10 +179,10 @@ async def delete(
         header_parameters = {}  # type: Dict[str, Any]
         if _x_ms_client_request_id is not None:
             header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str')
-        if _if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str')
-        if _if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str')
+        if if_match is not None:
+            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+        if if_none_match is not None:
+            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')

         # Construct and send request
         request = self._client.delete(url, query_parameters, header_parameters)
@@ -192,27 +197,28 @@ async def delete(
         if cls:
             return cls(pipeline_response, None, {})

-    delete.metadata = {'url': '/datasources(\'{dataSourceName}\')'}
+    delete.metadata = {'url': '/datasources(\'{dataSourceName}\')'}  # type: ignore

     async def get(
         self,
         data_source_name: str,
         request_options: Optional["models.RequestOptions"] = None,
         **kwargs
-    ) -> "models.DataSource":
+    ) -> "models.SearchIndexerDataSource":
         """Retrieves a datasource definition.

         :param data_source_name: The name of the datasource to retrieve.
         :type data_source_name: str
         :param request_options: Parameter group.
-        :type request_options: ~search_service_client.models.RequestOptions
+        :type request_options: ~azure.search.documents.models.RequestOptions
         :keyword callable cls: A custom type or function that will be passed the direct response
-        :return: DataSource or the result of cls(response)
-        :rtype: ~search_service_client.models.DataSource
+        :return: SearchIndexerDataSource or the result of cls(response)
+        :rtype: ~azure.search.documents.models.SearchIndexerDataSource
         :raises: ~azure.core.exceptions.HttpResponseError
         """
-        cls = kwargs.pop('cls', None)  # type: ClsType["models.DataSource"]
-        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})
+        cls = kwargs.pop('cls', None)  # type: ClsType["models.SearchIndexerDataSource"]
+        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
+        error_map.update(kwargs.pop('error_map', {}))

         _x_ms_client_request_id = None
         if request_options is not None:
@@ -220,7 +226,7 @@ async def get(
         api_version = "2019-05-06-Preview"

         # Construct URL
-        url = self.get.metadata['url']
+        url = self.get.metadata['url']  # type: ignore
         path_format_arguments = {
             'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
             'dataSourceName': self._serialize.url("data_source_name", data_source_name, 'str'),
@@ -247,13 +253,13 @@ async def get(
             error = self._deserialize(models.SearchError, response)
             raise HttpResponseError(response=response, model=error)

-        deserialized = self._deserialize('DataSource', pipeline_response)
+        deserialized = self._deserialize('SearchIndexerDataSource', pipeline_response)

         if cls:
             return cls(pipeline_response, deserialized, {})

         return deserialized
-    get.metadata = {'url': '/datasources(\'{dataSourceName}\')'}
+    get.metadata = {'url': '/datasources(\'{dataSourceName}\')'}  # type: ignore

     async def list(
         self,
@@ -268,14 +274,15 @@
         properties.
         :type select: str
         :param request_options: Parameter group.
-        :type request_options: ~search_service_client.models.RequestOptions
+        :type request_options: ~azure.search.documents.models.RequestOptions
         :keyword callable cls: A custom type or function that will be passed the direct response
         :return: ListDataSourcesResult or the result of cls(response)
-        :rtype: ~search_service_client.models.ListDataSourcesResult
+        :rtype: ~azure.search.documents.models.ListDataSourcesResult
         :raises: ~azure.core.exceptions.HttpResponseError
         """
         cls = kwargs.pop('cls', None)  # type: ClsType["models.ListDataSourcesResult"]
-        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})
+        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
+        error_map.update(kwargs.pop('error_map', {}))

         _x_ms_client_request_id = None
         if request_options is not None:
@@ -283,7 +290,7 @@ async def list(
         api_version = "2019-05-06-Preview"

         # Construct URL
-        url = self.list.metadata['url']
+        url = self.list.metadata['url']  # type: ignore
         path_format_arguments = {
             'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
         }
@@ -317,35 +324,37 @@ async def list(
             return cls(pipeline_response, deserialized, {})

         return deserialized
-    list.metadata = {'url': '/datasources'}
+    list.metadata = {'url': '/datasources'}  # type: ignore

     async def create(
         self,
-        data_source: "models.DataSource",
+        data_source: "models.SearchIndexerDataSource",
         request_options: Optional["models.RequestOptions"] = None,
         **kwargs
-    ) -> "models.DataSource":
+    ) -> "models.SearchIndexerDataSource":
         """Creates a new datasource.

         :param data_source: The definition of the datasource to create.
-        :type data_source: ~search_service_client.models.DataSource
+        :type data_source: ~azure.search.documents.models.SearchIndexerDataSource
         :param request_options: Parameter group.
-        :type request_options: ~search_service_client.models.RequestOptions
+        :type request_options: ~azure.search.documents.models.RequestOptions
         :keyword callable cls: A custom type or function that will be passed the direct response
-        :return: DataSource or the result of cls(response)
-        :rtype: ~search_service_client.models.DataSource
+        :return: SearchIndexerDataSource or the result of cls(response)
+        :rtype: ~azure.search.documents.models.SearchIndexerDataSource
         :raises: ~azure.core.exceptions.HttpResponseError
         """
-        cls = kwargs.pop('cls', None)  # type: ClsType["models.DataSource"]
-        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})
+        cls = kwargs.pop('cls', None)  # type: ClsType["models.SearchIndexerDataSource"]
+        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
+        error_map.update(kwargs.pop('error_map', {}))

         _x_ms_client_request_id = None
         if request_options is not None:
             _x_ms_client_request_id = request_options.x_ms_client_request_id
         api_version = "2019-05-06-Preview"
+        content_type = kwargs.pop("content_type", "application/json")

         # Construct URL
-        url = self.create.metadata['url']
+        url = self.create.metadata['url']  # type: ignore
         path_format_arguments = {
             'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
         }
@@ -359,12 +368,12 @@ async def create(
         header_parameters = {}  # type: Dict[str, Any]
         if _x_ms_client_request_id is not None:
             header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str')
+        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
         header_parameters['Accept'] = 'application/json'
-        header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json')

         # Construct and send request
         body_content_kwargs = {}  # type: Dict[str, Any]
-        body_content = self._serialize.body(data_source, 'DataSource')
+        body_content = self._serialize.body(data_source, 'SearchIndexerDataSource')
         body_content_kwargs['content'] = body_content
         request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)

@@ -376,10 +385,10 @@ async def create(
             error = self._deserialize(models.SearchError, response)
             raise HttpResponseError(response=response, model=error)

-        deserialized = self._deserialize('DataSource', pipeline_response)
+        deserialized = self._deserialize('SearchIndexerDataSource', pipeline_response)

         if cls:
             return cls(pipeline_response, deserialized, {})

         return deserialized
-    create.metadata = {'url': '/datasources'}
+    create.metadata = {'url': '/datasources'}  # type: ignore
diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_indexers_operations_async.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_indexers_operations_async.py
index acd91482da73..4d5b5c3466a9 100644
--- a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_indexers_operations_async.py
+++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_indexers_operations_async.py
@@ -1,6 +1,8 @@
 # coding=utf-8
 # --------------------------------------------------------------------------
-# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator})
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
 # Changes may cause incorrect behavior and will be lost if the code is regenerated.
 # --------------------------------------------------------------------------
 from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union

@@ -22,7 +24,7 @@ class IndexersOperations:
     instantiates it for you and attaches it as an attribute.

     :ivar models: Alias to model classes used in this operation group.
-    :type models: ~search_service_client.models
+    :type models: ~azure.search.documents.models
     :param client: Client for service requests.
     :param config: Configuration of service client.
     :param serializer: An object model serializer.
@@ -48,14 +50,15 @@ async def reset(
         :param indexer_name: The name of the indexer to reset.
         :type indexer_name: str
         :param request_options: Parameter group.
-        :type request_options: ~search_service_client.models.RequestOptions
+        :type request_options: ~azure.search.documents.models.RequestOptions
         :keyword callable cls: A custom type or function that will be passed the direct response
         :return: None or the result of cls(response)
         :rtype: None
         :raises: ~azure.core.exceptions.HttpResponseError
         """
         cls = kwargs.pop('cls', None)  # type: ClsType[None]
-        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})
+        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
+        error_map.update(kwargs.pop('error_map', {}))

         _x_ms_client_request_id = None
         if request_options is not None:
@@ -63,7 +66,7 @@ async def reset(
         api_version = "2019-05-06-Preview"

         # Construct URL
-        url = self.reset.metadata['url']
+        url = self.reset.metadata['url']  # type: ignore
         path_format_arguments = {
             'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
             'indexerName': self._serialize.url("indexer_name", indexer_name, 'str'),
@@ -92,7 +95,7 @@ async def reset(
         if cls:
             return cls(pipeline_response, None, {})

-    reset.metadata = {'url': '/indexers(\'{indexerName}\')/search.reset'}
+    reset.metadata = {'url': '/indexers(\'{indexerName}\')/search.reset'}  # type: ignore

     async def run(
         self,
@@ -105,14 +108,15 @@ async def run(
         :param indexer_name: The name of the indexer to run.
         :type indexer_name: str
         :param request_options: Parameter group.
-        :type request_options: ~search_service_client.models.RequestOptions
+        :type request_options: ~azure.search.documents.models.RequestOptions
         :keyword callable cls: A custom type or function that will be passed the direct response
         :return: None or the result of cls(response)
         :rtype: None
         :raises: ~azure.core.exceptions.HttpResponseError
         """
         cls = kwargs.pop('cls', None)  # type: ClsType[None]
-        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})
+        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
+        error_map.update(kwargs.pop('error_map', {}))

         _x_ms_client_request_id = None
         if request_options is not None:
@@ -120,7 +124,7 @@ async def run(
         api_version = "2019-05-06-Preview"

         # Construct URL
-        url = self.run.metadata['url']
+        url = self.run.metadata['url']  # type: ignore
         path_format_arguments = {
             'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
             'indexerName': self._serialize.url("indexer_name", indexer_name, 'str'),
@@ -149,47 +153,49 @@ async def run(
         if cls:
             return cls(pipeline_response, None, {})

-    run.metadata = {'url': '/indexers(\'{indexerName}\')/search.run'}
+    run.metadata = {'url': '/indexers(\'{indexerName}\')/search.run'}  # type: ignore

     async def create_or_update(
         self,
         indexer_name: str,
-        indexer: "models.Indexer",
+        indexer: "models.SearchIndexer",
+        if_match: Optional[str] = None,
+        if_none_match: Optional[str] = None,
         request_options: Optional["models.RequestOptions"] = None,
-        access_condition: Optional["models.AccessCondition"] = None,
         **kwargs
-    ) -> "models.Indexer":
+    ) -> "models.SearchIndexer":
         """Creates a new indexer or updates an indexer if it already exists.

         :param indexer_name: The name of the indexer to create or update.
         :type indexer_name: str
         :param indexer: The definition of the indexer to create or update.
-        :type indexer: ~search_service_client.models.Indexer
+        :type indexer: ~azure.search.documents.models.SearchIndexer
+        :param if_match: Defines the If-Match condition. The operation will be performed only if the
+         ETag on the server matches this value.
+        :type if_match: str
+        :param if_none_match: Defines the If-None-Match condition. The operation will be performed only
+         if the ETag on the server does not match this value.
+        :type if_none_match: str
         :param request_options: Parameter group.
-        :type request_options: ~search_service_client.models.RequestOptions
-        :param access_condition: Parameter group.
-        :type access_condition: ~search_service_client.models.AccessCondition
+        :type request_options: ~azure.search.documents.models.RequestOptions
         :keyword callable cls: A custom type or function that will be passed the direct response
-        :return: Indexer or the result of cls(response)
-        :rtype: ~search_service_client.models.Indexer or ~search_service_client.models.Indexer
+        :return: SearchIndexer or the result of cls(response)
+        :rtype: ~azure.search.documents.models.SearchIndexer
         :raises: ~azure.core.exceptions.HttpResponseError
         """
-        cls = kwargs.pop('cls', None)  # type: ClsType["models.Indexer"]
-        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})
+        cls = kwargs.pop('cls', None)  # type: ClsType["models.SearchIndexer"]
+        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
+        error_map.update(kwargs.pop('error_map', {}))

         _x_ms_client_request_id = None
-        _if_match = None
-        _if_none_match = None
-        if access_condition is not None:
-            _if_match = access_condition.if_match
-            _if_none_match = access_condition.if_none_match
         if request_options is not None:
             _x_ms_client_request_id = request_options.x_ms_client_request_id
         prefer = "return=representation"
         api_version = "2019-05-06-Preview"
+        content_type = kwargs.pop("content_type", "application/json")

         # Construct URL
-        url = self.create_or_update.metadata['url']
+        url = self.create_or_update.metadata['url']  # type: ignore
         path_format_arguments = {
             'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
             'indexerName': self._serialize.url("indexer_name", indexer_name, 'str'),
@@ -204,17 +210,17 @@ async def create_or_update(
         header_parameters = {}  # type: Dict[str, Any]
         if _x_ms_client_request_id is not None:
             header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str')
-        if _if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str')
-        if _if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str')
+        if if_match is not None:
+            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+        if if_none_match is not None:
+            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')
         header_parameters['Prefer'] = self._serialize.header("prefer", prefer, 'str')
+        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
         header_parameters['Accept'] = 'application/json'
-        header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json')

         # Construct and send request
         body_content_kwargs = {}  # type: Dict[str, Any]
-        body_content = self._serialize.body(indexer, 'Indexer')
+        body_content = self._serialize.body(indexer, 'SearchIndexer')
         body_content_kwargs['content'] = body_content
         request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)

@@ -228,52 +234,53 @@ async def create_or_update(
         deserialized = None
         if response.status_code == 200:
-            deserialized = self._deserialize('Indexer', pipeline_response)
+            deserialized = self._deserialize('SearchIndexer', pipeline_response)

         if response.status_code == 201:
-            deserialized = self._deserialize('Indexer', pipeline_response)
+            deserialized = self._deserialize('SearchIndexer', pipeline_response)

         if cls:
             return cls(pipeline_response, deserialized, {})

         return deserialized
-    create_or_update.metadata = {'url': '/indexers(\'{indexerName}\')'}
+    create_or_update.metadata = {'url': '/indexers(\'{indexerName}\')'}  # type: ignore

     async def delete(
         self,
         indexer_name: str,
+        if_match: Optional[str] = None,
+        if_none_match: Optional[str] = None,
         request_options: Optional["models.RequestOptions"] = None,
-        access_condition: Optional["models.AccessCondition"] = None,
         **kwargs
     ) -> None:
         """Deletes an indexer.

         :param indexer_name: The name of the indexer to delete.
         :type indexer_name: str
+        :param if_match: Defines the If-Match condition. The operation will be performed only if the
+         ETag on the server matches this value.
+        :type if_match: str
+        :param if_none_match: Defines the If-None-Match condition. The operation will be performed only
+         if the ETag on the server does not match this value.
+        :type if_none_match: str
         :param request_options: Parameter group.
-        :type request_options: ~search_service_client.models.RequestOptions
-        :param access_condition: Parameter group.
-        :type access_condition: ~search_service_client.models.AccessCondition
+        :type request_options: ~azure.search.documents.models.RequestOptions
         :keyword callable cls: A custom type or function that will be passed the direct response
         :return: None or the result of cls(response)
         :rtype: None
         :raises: ~azure.core.exceptions.HttpResponseError
         """
         cls = kwargs.pop('cls', None)  # type: ClsType[None]
-        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})
+        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
+        error_map.update(kwargs.pop('error_map', {}))

         _x_ms_client_request_id = None
-        _if_match = None
-        _if_none_match = None
-        if access_condition is not None:
-            _if_match = access_condition.if_match
-            _if_none_match = access_condition.if_none_match
         if request_options is not None:
             _x_ms_client_request_id = request_options.x_ms_client_request_id
         api_version = "2019-05-06-Preview"

         # Construct URL
-        url = self.delete.metadata['url']
+        url = self.delete.metadata['url']  # type: ignore
         path_format_arguments = {
             'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
             'indexerName': self._serialize.url("indexer_name", indexer_name, 'str'),
@@ -288,10 +295,10 @@ async def delete(
         header_parameters = {}  # type: Dict[str, Any]
         if _x_ms_client_request_id is not None:
             header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str')
-        if _if_match is not None:
-            header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str')
-        if _if_none_match is not None:
-            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str')
+        if if_match is not None:
+            header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
+        if if_none_match is not None:
+            header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str')

         # Construct and send request
         request = self._client.delete(url, query_parameters, header_parameters)
@@ -306,27 +313,28 @@ async def delete(
         if cls:
             return cls(pipeline_response, None, {})

-    delete.metadata = {'url': '/indexers(\'{indexerName}\')'}
+    delete.metadata = {'url': '/indexers(\'{indexerName}\')'}  # type: ignore

     async def get(
         self,
         indexer_name: str,
         request_options: Optional["models.RequestOptions"] = None,
         **kwargs
-    ) -> "models.Indexer":
+    ) -> "models.SearchIndexer":
         """Retrieves an indexer definition.
         :param indexer_name: The name of the indexer to retrieve.
         :type indexer_name: str
         :param request_options: Parameter group.
-        :type request_options: ~search_service_client.models.RequestOptions
+        :type request_options: ~azure.search.documents.models.RequestOptions
         :keyword callable cls: A custom type or function that will be passed the direct response
-        :return: Indexer or the result of cls(response)
-        :rtype: ~search_service_client.models.Indexer
+        :return: SearchIndexer or the result of cls(response)
+        :rtype: ~azure.search.documents.models.SearchIndexer
         :raises: ~azure.core.exceptions.HttpResponseError
         """
-        cls = kwargs.pop('cls', None)  # type: ClsType["models.Indexer"]
-        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})
+        cls = kwargs.pop('cls', None)  # type: ClsType["models.SearchIndexer"]
+        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
+        error_map.update(kwargs.pop('error_map', {}))

         _x_ms_client_request_id = None
         if request_options is not None:
@@ -334,7 +342,7 @@ async def get(
         api_version = "2019-05-06-Preview"

         # Construct URL
-        url = self.get.metadata['url']
+        url = self.get.metadata['url']  # type: ignore
         path_format_arguments = {
             'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
             'indexerName': self._serialize.url("indexer_name", indexer_name, 'str'),
@@ -361,13 +369,13 @@ async def get(
             error = self._deserialize(models.SearchError, response)
             raise HttpResponseError(response=response, model=error)

-        deserialized = self._deserialize('Indexer', pipeline_response)
+        deserialized = self._deserialize('SearchIndexer', pipeline_response)

         if cls:
             return cls(pipeline_response, deserialized, {})

         return deserialized
-    get.metadata = {'url': '/indexers(\'{indexerName}\')'}
+    get.metadata = {'url': '/indexers(\'{indexerName}\')'}  # type: ignore

     async def list(
         self,
@@ -382,14 +390,15 @@
         properties.
         :type select: str
         :param request_options: Parameter group.
-        :type request_options: ~search_service_client.models.RequestOptions
+        :type request_options: ~azure.search.documents.models.RequestOptions
         :keyword callable cls: A custom type or function that will be passed the direct response
         :return: ListIndexersResult or the result of cls(response)
-        :rtype: ~search_service_client.models.ListIndexersResult
+        :rtype: ~azure.search.documents.models.ListIndexersResult
         :raises: ~azure.core.exceptions.HttpResponseError
         """
         cls = kwargs.pop('cls', None)  # type: ClsType["models.ListIndexersResult"]
-        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})
+        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
+        error_map.update(kwargs.pop('error_map', {}))

         _x_ms_client_request_id = None
         if request_options is not None:
@@ -397,7 +406,7 @@ async def list(
         api_version = "2019-05-06-Preview"

         # Construct URL
-        url = self.list.metadata['url']
+        url = self.list.metadata['url']  # type: ignore
         path_format_arguments = {
             'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
         }
@@ -431,35 +440,37 @@ async def list(
             return cls(pipeline_response, deserialized, {})

         return deserialized
-    list.metadata = {'url': '/indexers'}
+    list.metadata = {'url': '/indexers'}  # type: ignore

     async def create(
         self,
-        indexer: "models.Indexer",
+        indexer: "models.SearchIndexer",
         request_options: Optional["models.RequestOptions"] = None,
         **kwargs
-    ) -> "models.Indexer":
+    ) -> "models.SearchIndexer":
         """Creates a new indexer.

         :param indexer: The definition of the indexer to create.
-        :type indexer: ~search_service_client.models.Indexer
+        :type indexer: ~azure.search.documents.models.SearchIndexer
         :param request_options: Parameter group.
-        :type request_options: ~search_service_client.models.RequestOptions
+        :type request_options: ~azure.search.documents.models.RequestOptions
         :keyword callable cls: A custom type or function that will be passed the direct response
-        :return: Indexer or the result of cls(response)
-        :rtype: ~search_service_client.models.Indexer
+        :return: SearchIndexer or the result of cls(response)
+        :rtype: ~azure.search.documents.models.SearchIndexer
         :raises: ~azure.core.exceptions.HttpResponseError
         """
-        cls = kwargs.pop('cls', None)  # type: ClsType["models.Indexer"]
-        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})
+        cls = kwargs.pop('cls', None)  # type: ClsType["models.SearchIndexer"]
+        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
+        error_map.update(kwargs.pop('error_map', {}))

         _x_ms_client_request_id = None
         if request_options is not None:
             _x_ms_client_request_id = request_options.x_ms_client_request_id
         api_version = "2019-05-06-Preview"
+        content_type = kwargs.pop("content_type", "application/json")

         # Construct URL
-        url = self.create.metadata['url']
+        url = self.create.metadata['url']  # type: ignore
         path_format_arguments = {
             'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
         }
@@ -473,12 +484,12 @@ async def create(
         header_parameters = {}  # type: Dict[str, Any]
         if _x_ms_client_request_id is not None:
             header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str')
+        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
         header_parameters['Accept'] = 'application/json'
-        header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json')

         # Construct and send request
         body_content_kwargs = {}  # type: Dict[str, Any]
-        body_content = self._serialize.body(indexer, 'Indexer')
+        body_content = self._serialize.body(indexer, 'SearchIndexer')
         body_content_kwargs['content'] = body_content
         request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)

@@ -490,33 +501,34 @@ async def create(
             error = self._deserialize(models.SearchError, response)
             raise HttpResponseError(response=response, model=error)

-        deserialized = self._deserialize('Indexer', pipeline_response)
+        deserialized = self._deserialize('SearchIndexer', pipeline_response)

         if cls:
             return cls(pipeline_response, deserialized, {})

         return deserialized
-    create.metadata = {'url': '/indexers'}
+    create.metadata = {'url': '/indexers'}  # type: ignore

     async def get_status(
         self,
         indexer_name: str,
         request_options: Optional["models.RequestOptions"] = None,
         **kwargs
-    ) -> "models.IndexerExecutionInfo":
+    ) -> "models.SearchIndexerStatus":
         """Returns the current status and execution history of an indexer.

         :param indexer_name: The name of the indexer for which to retrieve status.
         :type indexer_name: str
         :param request_options: Parameter group.
-        :type request_options: ~search_service_client.models.RequestOptions
+        :type request_options: ~azure.search.documents.models.RequestOptions
         :keyword callable cls: A custom type or function that will be passed the direct response
-        :return: IndexerExecutionInfo or the result of cls(response)
-        :rtype: ~search_service_client.models.IndexerExecutionInfo
+        :return: SearchIndexerStatus or the result of cls(response)
+        :rtype: ~azure.search.documents.models.SearchIndexerStatus
         :raises: ~azure.core.exceptions.HttpResponseError
         """
-        cls = kwargs.pop('cls', None)  # type: ClsType["models.IndexerExecutionInfo"]
-        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})
+        cls = kwargs.pop('cls', None)  # type: ClsType["models.SearchIndexerStatus"]
+        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
+        error_map.update(kwargs.pop('error_map', {}))

         _x_ms_client_request_id = None
         if request_options is not None:
@@ -524,7 +536,7 @@ async def get_status(
         api_version = "2019-05-06-Preview"

         # Construct URL
-        url = self.get_status.metadata['url']
+        url = self.get_status.metadata['url']  # type: ignore
         path_format_arguments = {
             'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
             'indexerName': self._serialize.url("indexer_name", indexer_name, 'str'),
@@ -551,10 +563,10 @@ async def get_status(
             error = self._deserialize(models.SearchError, response)
             raise HttpResponseError(response=response, model=error)

-        deserialized = self._deserialize('IndexerExecutionInfo', pipeline_response)
+        deserialized = self._deserialize('SearchIndexerStatus', pipeline_response)

         if cls:
             return cls(pipeline_response, deserialized, {})

         return deserialized
-    get_status.metadata = {'url': '/indexers(\'{indexerName}\')/search.status'}
+    get_status.metadata = {'url': '/indexers(\'{indexerName}\')/search.status'}  # type: ignore
diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_indexes_operations_async.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_indexes_operations_async.py
index ddebc3f096c9..ce6b938f8148 100644
--- a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_indexes_operations_async.py
+++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_indexes_operations_async.py
@@ -1,6 +1,8 @@
 # coding=utf-8
 # --------------------------------------------------------------------------
-# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator})
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# Code generated by Microsoft (R) AutoRest Code Generator.
 # Changes may cause incorrect behavior and will be lost if the code is regenerated.
 # --------------------------------------------------------------------------
 from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union

@@ -22,7 +24,7 @@ class IndexesOperations:
     instantiates it for you and attaches it as an attribute.

     :ivar models: Alias to model classes used in this operation group.
-    :type models: ~search_service_client.models
+    :type models: ~azure.search.documents.models
     :param client: Client for service requests.
     :param config: Configuration of service client.
     :param serializer: An object model serializer.
@@ -39,31 +41,33 @@ def __init__(self, client, config, serializer, deserializer) -> None:
     async def create(
         self,
-        index: "models.Index",
+        index: "models.SearchIndex",
         request_options: Optional["models.RequestOptions"] = None,
         **kwargs
-    ) -> "models.Index":
+    ) -> "models.SearchIndex":
         """Creates a new search index.

         :param index: The definition of the index to create.
-        :type index: ~search_service_client.models.Index
+        :type index: ~azure.search.documents.models.SearchIndex
         :param request_options: Parameter group.
- :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response - :return: Index or the result of cls(response) - :rtype: ~search_service_client.models.Index + :return: SearchIndex or the result of cls(response) + :rtype: ~azure.search.documents.models.SearchIndex :raises: ~azure.core.exceptions.HttpResponseError """ - cls = kwargs.pop('cls', None) # type: ClsType["models.Index"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + cls = kwargs.pop('cls', None) # type: ClsType["models.SearchIndex"] + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: _x_ms_client_request_id = request_options.x_ms_client_request_id api_version = "2019-05-06-Preview" + content_type = kwargs.pop("content_type", "application/json") # Construct URL - url = self.create.metadata['url'] + url = self.create.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -77,12 +81,12 @@ async def create( header_parameters = {} # type: Dict[str, Any] if _x_ms_client_request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = 'application/json' - header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json') # Construct and send request body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(index, 'Index') + body_content = self._serialize.body(index, 'SearchIndex') body_content_kwargs['content'] = body_content request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) @@ -94,13 +98,13 @@ async def create( error = self._deserialize(models.SearchError, response) raise HttpResponseError(response=response, model=error) - deserialized = self._deserialize('Index', pipeline_response) + deserialized = self._deserialize('SearchIndex', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized - create.metadata = {'url': '/indexes'} + create.metadata = {'url': '/indexes'} # type: ignore async def list( self, @@ -115,14 +119,15 @@ async def list( default is all properties. :type select: str :param request_options: Parameter group. 
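Every operation in this patch now seeds its error map from the 404/409 defaults and merges any caller-supplied mapping on top, instead of letting a passed `error_map` kwarg replace the defaults wholesale. A minimal sketch of the new semantics (the helper name is illustrative, not part of the generated code):

    from azure.core.exceptions import ResourceExistsError, ResourceNotFoundError

    def build_error_map(**kwargs):
        # Defaults shared by every operation in this patch.
        error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
        # Merge, rather than replace, caller overrides: passing
        # error_map={409: ValueError} no longer drops the 404 default.
        error_map.update(kwargs.pop('error_map', {}))
        return error_map

    # build_error_map(error_map={409: ValueError})
    # -> {404: ResourceNotFoundError, 409: ValueError}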
- :type request_options: ~search_service_client.models.RequestOptions
+ :type request_options: ~azure.search.documents.models.RequestOptions
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ListIndexesResult or the result of cls(response)
- :rtype: ~search_service_client.models.ListIndexesResult
+ :rtype: ~azure.search.documents.models.ListIndexesResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ListIndexesResult"]
- error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})
+ error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
+ error_map.update(kwargs.pop('error_map', {}))

_x_ms_client_request_id = None
if request_options is not None:
@@ -130,7 +135,7 @@ async def list(
api_version = "2019-05-06-Preview"

# Construct URL
- url = self.list.metadata['url']
+ url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
@@ -164,54 +169,56 @@ async def list(
return cls(pipeline_response, deserialized, {})

return deserialized
- list.metadata = {'url': '/indexes'}
+ list.metadata = {'url': '/indexes'} # type: ignore

async def create_or_update(
self,
index_name: str,
- index: "models.Index",
+ index: "models.SearchIndex",
allow_index_downtime: Optional[bool] = None,
+ if_match: Optional[str] = None,
+ if_none_match: Optional[str] = None,
request_options: Optional["models.RequestOptions"] = None,
- access_condition: Optional["models.AccessCondition"] = None,
**kwargs
- ) -> "models.Index":
+ ) -> "models.SearchIndex":
"""Creates a new search index or updates an index if it already exists.

:param index_name: The name of the index to create or update.
:type index_name: str
:param index: The definition of the index to create or update.
- :type index: ~search_service_client.models.Index
+ :type index: ~azure.search.documents.models.SearchIndex
:param allow_index_downtime: Allows new analyzers, tokenizers, token filters, or char filters
to be added to an index by taking the index offline for at least a few seconds. This
temporarily causes indexing and query requests to fail. Performance and write availability of
the index can be impaired for several minutes after the index is updated, or longer for very
large indexes.
:type allow_index_downtime: bool
+ :param if_match: Defines the If-Match condition. The operation will be performed only if the
+ ETag on the server matches this value.
+ :type if_match: str
+ :param if_none_match: Defines the If-None-Match condition. The operation will be performed only
+ if the ETag on the server does not match this value.
+ :type if_none_match: str
:param request_options: Parameter group.
- :type access_condition: ~search_service_client.models.AccessCondition + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response - :return: Index or the result of cls(response) - :rtype: ~search_service_client.models.Index or ~search_service_client.models.Index + :return: SearchIndex or the result of cls(response) + :rtype: ~azure.search.documents.models.SearchIndex :raises: ~azure.core.exceptions.HttpResponseError """ - cls = kwargs.pop('cls', None) # type: ClsType["models.Index"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + cls = kwargs.pop('cls', None) # type: ClsType["models.SearchIndex"] + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None - _if_match = None - _if_none_match = None - if access_condition is not None: - _if_match = access_condition.if_match - _if_none_match = access_condition.if_none_match if request_options is not None: _x_ms_client_request_id = request_options.x_ms_client_request_id prefer = "return=representation" api_version = "2019-05-06-Preview" + content_type = kwargs.pop("content_type", "application/json") # Construct URL - url = self.create_or_update.metadata['url'] + url = self.create_or_update.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'indexName': self._serialize.url("index_name", index_name, 'str'), @@ -228,17 +235,17 @@ async def create_or_update( header_parameters = {} # type: Dict[str, Any] if _x_ms_client_request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') header_parameters['Prefer'] = self._serialize.header("prefer", prefer, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = 'application/json' - header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json') # Construct and send request body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(index, 'Index') + body_content = self._serialize.body(index, 'SearchIndex') body_content_kwargs['content'] = body_content request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) @@ -252,52 +259,53 @@ async def create_or_update( deserialized = None if response.status_code == 200: - deserialized = self._deserialize('Index', pipeline_response) + deserialized = self._deserialize('SearchIndex', pipeline_response) if response.status_code == 201: - deserialized = self._deserialize('Index', pipeline_response) + deserialized = self._deserialize('SearchIndex', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized - create_or_update.metadata = {'url': 
'/indexes(\'{indexName}\')'} + create_or_update.metadata = {'url': '/indexes(\'{indexName}\')'} # type: ignore async def delete( self, index_name: str, + if_match: Optional[str] = None, + if_none_match: Optional[str] = None, request_options: Optional["models.RequestOptions"] = None, - access_condition: Optional["models.AccessCondition"] = None, **kwargs ) -> None: - """Deletes a search index and all the documents it contains. + """Deletes a search index and all the documents it contains. This operation is permanent, with no recovery option. Make sure you have a master copy of your index definition, data ingestion code, and a backup of the primary data source in case you need to re-build the index. :param index_name: The name of the index to delete. :type index_name: str + :param if_match: Defines the If-Match condition. The operation will be performed only if the + ETag on the server matches this value. + :type if_match: str + :param if_none_match: Defines the If-None-Match condition. The operation will be performed only + if the ETag on the server does not match this value. + :type if_none_match: str :param request_options: Parameter group. - :type request_options: ~search_service_client.models.RequestOptions - :param access_condition: Parameter group. - :type access_condition: ~search_service_client.models.AccessCondition + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: None or the result of cls(response) :rtype: None :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None - _if_match = None - _if_none_match = None - if access_condition is not None: - _if_match = access_condition.if_match - _if_none_match = access_condition.if_none_match if request_options is not None: _x_ms_client_request_id = request_options.x_ms_client_request_id api_version = "2019-05-06-Preview" # Construct URL - url = self.delete.metadata['url'] + url = self.delete.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'indexName': self._serialize.url("index_name", index_name, 'str'), @@ -312,10 +320,10 @@ async def delete( header_parameters = {} # type: Dict[str, Any] if _x_ms_client_request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') # Construct and send request request = self._client.delete(url, query_parameters, header_parameters) @@ -330,27 +338,28 @@ async def delete( if cls: return cls(pipeline_response, None, {}) - delete.metadata = {'url': '/indexes(\'{indexName}\')'} + delete.metadata = {'url': 
'/indexes(\'{indexName}\')'} # type: ignore async def get( self, index_name: str, request_options: Optional["models.RequestOptions"] = None, **kwargs - ) -> "models.Index": + ) -> "models.SearchIndex": """Retrieves an index definition. :param index_name: The name of the index to retrieve. :type index_name: str :param request_options: Parameter group. - :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response - :return: Index or the result of cls(response) - :rtype: ~search_service_client.models.Index + :return: SearchIndex or the result of cls(response) + :rtype: ~azure.search.documents.models.SearchIndex :raises: ~azure.core.exceptions.HttpResponseError """ - cls = kwargs.pop('cls', None) # type: ClsType["models.Index"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + cls = kwargs.pop('cls', None) # type: ClsType["models.SearchIndex"] + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: @@ -358,7 +367,7 @@ async def get( api_version = "2019-05-06-Preview" # Construct URL - url = self.get.metadata['url'] + url = self.get.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'indexName': self._serialize.url("index_name", index_name, 'str'), @@ -385,13 +394,13 @@ async def get( error = self._deserialize(models.SearchError, response) raise HttpResponseError(response=response, model=error) - deserialized = self._deserialize('Index', pipeline_response) + deserialized = self._deserialize('SearchIndex', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized - get.metadata = {'url': '/indexes(\'{indexName}\')'} + get.metadata = {'url': '/indexes(\'{indexName}\')'} # type: ignore async def get_statistics( self, @@ -404,14 +413,15 @@ async def get_statistics( :param index_name: The name of the index for which to retrieve statistics. :type index_name: str :param request_options: Parameter group. 
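The `AccessCondition` parameter group is flattened into explicit `if_match`/`if_none_match` keyword arguments throughout, and they map straight onto the standard HTTP precondition headers. A sketch of that mapping, mirroring the header logic the operations now inline (the standalone function is illustrative):

    from typing import Dict, Optional

    def etag_headers(if_match: Optional[str] = None,
                     if_none_match: Optional[str] = None) -> Dict[str, str]:
        headers: Dict[str, str] = {}
        if if_match is not None:
            # A quoted ETag value, e.g. '"0x8D7A5..."'; '*' means "only if it exists".
            headers['If-Match'] = if_match
        if if_none_match is not None:
            # '*' here means "only if it does not already exist".
            headers['If-None-Match'] = if_none_match
        return headers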
- :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: GetIndexStatisticsResult or the result of cls(response) - :rtype: ~search_service_client.models.GetIndexStatisticsResult + :rtype: ~azure.search.documents.models.GetIndexStatisticsResult :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.GetIndexStatisticsResult"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: @@ -419,7 +429,7 @@ async def get_statistics( api_version = "2019-05-06-Preview" # Construct URL - url = self.get_statistics.metadata['url'] + url = self.get_statistics.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'indexName': self._serialize.url("index_name", index_name, 'str'), @@ -452,7 +462,7 @@ async def get_statistics( return cls(pipeline_response, deserialized, {}) return deserialized - get_statistics.metadata = {'url': '/indexes(\'{indexName}\')/search.stats'} + get_statistics.metadata = {'url': '/indexes(\'{indexName}\')/search.stats'} # type: ignore async def analyze( self, @@ -466,24 +476,26 @@ async def analyze( :param index_name: The name of the index for which to test an analyzer. :type index_name: str :param request: The text and analyzer or analysis components to test. - :type request: ~search_service_client.models.AnalyzeRequest + :type request: ~azure.search.documents.models.AnalyzeRequest :param request_options: Parameter group. 
- :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: AnalyzeResult or the result of cls(response) - :rtype: ~search_service_client.models.AnalyzeResult + :rtype: ~azure.search.documents.models.AnalyzeResult :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.AnalyzeResult"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: _x_ms_client_request_id = request_options.x_ms_client_request_id api_version = "2019-05-06-Preview" + content_type = kwargs.pop("content_type", "application/json") # Construct URL - url = self.analyze.metadata['url'] + url = self.analyze.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'indexName': self._serialize.url("index_name", index_name, 'str'), @@ -498,8 +510,8 @@ async def analyze( header_parameters = {} # type: Dict[str, Any] if _x_ms_client_request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = 'application/json' - header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json') # Construct and send request body_content_kwargs = {} # type: Dict[str, Any] @@ -521,4 +533,4 @@ async def analyze( return cls(pipeline_response, deserialized, {}) return deserialized - analyze.metadata = {'url': '/indexes(\'{indexName}\')/search.analyze'} + analyze.metadata = {'url': '/indexes(\'{indexName}\')/search.analyze'} # type: ignore diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_search_service_client_operations_async.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_search_service_client_operations_async.py index 1a1707edf14b..473c7c33fa3b 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_search_service_client_operations_async.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_search_service_client_operations_async.py @@ -1,6 +1,8 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator}) +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from typing import Any, Callable, Dict, Generic, Optional, TypeVar @@ -25,14 +27,15 @@ async def get_service_statistics( """Gets service level statistics for a search service. :param request_options: Parameter group. 
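Each `metadata` read and assignment now carries `# type: ignore` because mypy rejects ad-hoc attributes on function objects; the URL template is stored on the method itself. A reduced reproduction of the pattern (names simplified):

    async def get_service_statistics():  # stand-in for the real coroutine method
        url = get_service_statistics.metadata['url']  # type: ignore
        return url

    # Without the suppressions, mypy flags both the read above and this
    # assignment: function objects declare no `metadata` attribute.
    get_service_statistics.metadata = {'url': '/servicestats'}  # type: ignore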
- :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: ServiceStatistics or the result of cls(response) - :rtype: ~search_service_client.models.ServiceStatistics + :rtype: ~azure.search.documents.models.ServiceStatistics :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.ServiceStatistics"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: @@ -40,7 +43,7 @@ async def get_service_statistics( api_version = "2019-05-06-Preview" # Construct URL - url = self.get_service_statistics.metadata['url'] + url = self.get_service_statistics.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -72,4 +75,4 @@ async def get_service_statistics( return cls(pipeline_response, deserialized, {}) return deserialized - get_service_statistics.metadata = {'url': '/servicestats'} + get_service_statistics.metadata = {'url': '/servicestats'} # type: ignore diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_skillsets_operations_async.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_skillsets_operations_async.py index b286e25f0b34..693760d1612b 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_skillsets_operations_async.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_skillsets_operations_async.py @@ -1,6 +1,8 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator}) +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union @@ -22,7 +24,7 @@ class SkillsetsOperations: instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. - :type models: ~search_service_client.models + :type models: ~azure.search.documents.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. 
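Body-sending operations also now pop the `content_type` override once, up front, and run it through `self._serialize.header(...)` like every other header, instead of popping it inline while the header dict is being built. A rough sketch of the new flow, assuming a trivial stand-in for the serializer:

    def build_headers(serialize_header, **kwargs):
        # Popped once, before URL and header construction.
        content_type = kwargs.pop("content_type", "application/json")
        header_parameters = {}  # type: dict
        header_parameters['Content-Type'] = serialize_header("content_type", content_type, 'str')
        header_parameters['Accept'] = 'application/json'
        return header_parameters

    # build_headers(lambda name, value, kind: str(value))
    # -> {'Content-Type': 'application/json', 'Accept': 'application/json'}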
@@ -40,43 +42,45 @@ def __init__(self, client, config, serializer, deserializer) -> None: async def create_or_update( self, skillset_name: str, - skillset: "models.Skillset", + skillset: "models.SearchIndexerSkillset", + if_match: Optional[str] = None, + if_none_match: Optional[str] = None, request_options: Optional["models.RequestOptions"] = None, - access_condition: Optional["models.AccessCondition"] = None, **kwargs - ) -> "models.Skillset": + ) -> "models.SearchIndexerSkillset": """Creates a new skillset in a search service or updates the skillset if it already exists. :param skillset_name: The name of the skillset to create or update. :type skillset_name: str :param skillset: The skillset containing one or more skills to create or update in a search service. - :type skillset: ~search_service_client.models.Skillset + :type skillset: ~azure.search.documents.models.SearchIndexerSkillset + :param if_match: Defines the If-Match condition. The operation will be performed only if the + ETag on the server matches this value. + :type if_match: str + :param if_none_match: Defines the If-None-Match condition. The operation will be performed only + if the ETag on the server does not match this value. + :type if_none_match: str :param request_options: Parameter group. - :type request_options: ~search_service_client.models.RequestOptions - :param access_condition: Parameter group. - :type access_condition: ~search_service_client.models.AccessCondition + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response - :return: Skillset or the result of cls(response) - :rtype: ~search_service_client.models.Skillset or ~search_service_client.models.Skillset + :return: SearchIndexerSkillset or the result of cls(response) + :rtype: ~azure.search.documents.models.SearchIndexerSkillset :raises: ~azure.core.exceptions.HttpResponseError """ - cls = kwargs.pop('cls', None) # type: ClsType["models.Skillset"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + cls = kwargs.pop('cls', None) # type: ClsType["models.SearchIndexerSkillset"] + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None - _if_match = None - _if_none_match = None - if access_condition is not None: - _if_match = access_condition.if_match - _if_none_match = access_condition.if_none_match if request_options is not None: _x_ms_client_request_id = request_options.x_ms_client_request_id prefer = "return=representation" api_version = "2019-05-06-Preview" + content_type = kwargs.pop("content_type", "application/json") # Construct URL - url = self.create_or_update.metadata['url'] + url = self.create_or_update.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'skillsetName': self._serialize.url("skillset_name", skillset_name, 'str'), @@ -91,17 +95,17 @@ async def create_or_update( header_parameters = {} # type: Dict[str, Any] if _x_ms_client_request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", 
_if_none_match, 'str') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') header_parameters['Prefer'] = self._serialize.header("prefer", prefer, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = 'application/json' - header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json') # Construct and send request body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(skillset, 'Skillset') + body_content = self._serialize.body(skillset, 'SearchIndexerSkillset') body_content_kwargs['content'] = body_content request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) @@ -115,52 +119,53 @@ async def create_or_update( deserialized = None if response.status_code == 200: - deserialized = self._deserialize('Skillset', pipeline_response) + deserialized = self._deserialize('SearchIndexerSkillset', pipeline_response) if response.status_code == 201: - deserialized = self._deserialize('Skillset', pipeline_response) + deserialized = self._deserialize('SearchIndexerSkillset', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized - create_or_update.metadata = {'url': '/skillsets(\'{skillsetName}\')'} + create_or_update.metadata = {'url': '/skillsets(\'{skillsetName}\')'} # type: ignore async def delete( self, skillset_name: str, + if_match: Optional[str] = None, + if_none_match: Optional[str] = None, request_options: Optional["models.RequestOptions"] = None, - access_condition: Optional["models.AccessCondition"] = None, **kwargs ) -> None: """Deletes a skillset in a search service. :param skillset_name: The name of the skillset to delete. :type skillset_name: str + :param if_match: Defines the If-Match condition. The operation will be performed only if the + ETag on the server matches this value. + :type if_match: str + :param if_none_match: Defines the If-None-Match condition. The operation will be performed only + if the ETag on the server does not match this value. + :type if_none_match: str :param request_options: Parameter group. - :type request_options: ~search_service_client.models.RequestOptions - :param access_condition: Parameter group. 
- :type access_condition: ~search_service_client.models.AccessCondition + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: None or the result of cls(response) :rtype: None :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None - _if_match = None - _if_none_match = None - if access_condition is not None: - _if_match = access_condition.if_match - _if_none_match = access_condition.if_none_match if request_options is not None: _x_ms_client_request_id = request_options.x_ms_client_request_id api_version = "2019-05-06-Preview" # Construct URL - url = self.delete.metadata['url'] + url = self.delete.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'skillsetName': self._serialize.url("skillset_name", skillset_name, 'str'), @@ -175,10 +180,10 @@ async def delete( header_parameters = {} # type: Dict[str, Any] if _x_ms_client_request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') # Construct and send request request = self._client.delete(url, query_parameters, header_parameters) @@ -193,27 +198,28 @@ async def delete( if cls: return cls(pipeline_response, None, {}) - delete.metadata = {'url': '/skillsets(\'{skillsetName}\')'} + delete.metadata = {'url': '/skillsets(\'{skillsetName}\')'} # type: ignore async def get( self, skillset_name: str, request_options: Optional["models.RequestOptions"] = None, **kwargs - ) -> "models.Skillset": + ) -> "models.SearchIndexerSkillset": """Retrieves a skillset in a search service. :param skillset_name: The name of the skillset to retrieve. :type skillset_name: str :param request_options: Parameter group. 
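On non-success status codes every operation deserializes the payload into `models.SearchError` and raises `HttpResponseError(response=response, model=error)`. What a caller sees, sketched under the assumption that `client` is one of these operation groups (mapped 404/409 errors arrive as subclasses, which this handler still matches):

    from azure.core.exceptions import HttpResponseError

    async def show_service_error(client):
        try:
            await client.get(skillset_name="does-not-exist")
        except HttpResponseError as exc:
            # exc.model is the deserialized models.SearchError when the
            # service returned a structured error body.
            print(exc.status_code, exc.message)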
- :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response - :return: Skillset or the result of cls(response) - :rtype: ~search_service_client.models.Skillset + :return: SearchIndexerSkillset or the result of cls(response) + :rtype: ~azure.search.documents.models.SearchIndexerSkillset :raises: ~azure.core.exceptions.HttpResponseError """ - cls = kwargs.pop('cls', None) # type: ClsType["models.Skillset"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + cls = kwargs.pop('cls', None) # type: ClsType["models.SearchIndexerSkillset"] + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: @@ -221,7 +227,7 @@ async def get( api_version = "2019-05-06-Preview" # Construct URL - url = self.get.metadata['url'] + url = self.get.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'skillsetName': self._serialize.url("skillset_name", skillset_name, 'str'), @@ -248,13 +254,13 @@ async def get( error = self._deserialize(models.SearchError, response) raise HttpResponseError(response=response, model=error) - deserialized = self._deserialize('Skillset', pipeline_response) + deserialized = self._deserialize('SearchIndexerSkillset', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized - get.metadata = {'url': '/skillsets(\'{skillsetName}\')'} + get.metadata = {'url': '/skillsets(\'{skillsetName}\')'} # type: ignore async def list( self, @@ -269,14 +275,15 @@ async def list( properties. :type select: str :param request_options: Parameter group. - :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: ListSkillsetsResult or the result of cls(response) - :rtype: ~search_service_client.models.ListSkillsetsResult + :rtype: ~azure.search.documents.models.ListSkillsetsResult :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.ListSkillsetsResult"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: @@ -284,7 +291,7 @@ async def list( api_version = "2019-05-06-Preview" # Construct URL - url = self.list.metadata['url'] + url = self.list.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -318,35 +325,37 @@ async def list( return cls(pipeline_response, deserialized, {}) return deserialized - list.metadata = {'url': '/skillsets'} + list.metadata = {'url': '/skillsets'} # type: ignore async def create( self, - skillset: "models.Skillset", + skillset: "models.SearchIndexerSkillset", request_options: Optional["models.RequestOptions"] = None, **kwargs - ) -> "models.Skillset": + ) -> "models.SearchIndexerSkillset": """Creates a new skillset in a search service. 
:param skillset: The skillset containing one or more skills to create in a search service. - :type skillset: ~search_service_client.models.Skillset + :type skillset: ~azure.search.documents.models.SearchIndexerSkillset :param request_options: Parameter group. - :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response - :return: Skillset or the result of cls(response) - :rtype: ~search_service_client.models.Skillset + :return: SearchIndexerSkillset or the result of cls(response) + :rtype: ~azure.search.documents.models.SearchIndexerSkillset :raises: ~azure.core.exceptions.HttpResponseError """ - cls = kwargs.pop('cls', None) # type: ClsType["models.Skillset"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + cls = kwargs.pop('cls', None) # type: ClsType["models.SearchIndexerSkillset"] + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: _x_ms_client_request_id = request_options.x_ms_client_request_id api_version = "2019-05-06-Preview" + content_type = kwargs.pop("content_type", "application/json") # Construct URL - url = self.create.metadata['url'] + url = self.create.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -360,12 +369,12 @@ async def create( header_parameters = {} # type: Dict[str, Any] if _x_ms_client_request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = 'application/json' - header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json') # Construct and send request body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(skillset, 'Skillset') + body_content = self._serialize.body(skillset, 'SearchIndexerSkillset') body_content_kwargs['content'] = body_content request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) @@ -377,10 +386,10 @@ async def create( error = self._deserialize(models.SearchError, response) raise HttpResponseError(response=response, model=error) - deserialized = self._deserialize('Skillset', pipeline_response) + deserialized = self._deserialize('SearchIndexerSkillset', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized - create.metadata = {'url': '/skillsets'} + create.metadata = {'url': '/skillsets'} # type: ignore diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_synonym_maps_operations_async.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_synonym_maps_operations_async.py index 445c1f96f0b4..1df4d8001380 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_synonym_maps_operations_async.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/aio/operations_async/_synonym_maps_operations_async.py @@ -1,6 +1,8 @@ # coding=utf-8 # 
-------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator}) +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union @@ -22,7 +24,7 @@ class SynonymMapsOperations: instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. - :type models: ~search_service_client.models + :type models: ~azure.search.documents.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. @@ -41,8 +43,9 @@ async def create_or_update( self, synonym_map_name: str, synonym_map: "models.SynonymMap", + if_match: Optional[str] = None, + if_none_match: Optional[str] = None, request_options: Optional["models.RequestOptions"] = None, - access_condition: Optional["models.AccessCondition"] = None, **kwargs ) -> "models.SynonymMap": """Creates a new synonym map or updates a synonym map if it already exists. @@ -50,32 +53,33 @@ async def create_or_update( :param synonym_map_name: The name of the synonym map to create or update. :type synonym_map_name: str :param synonym_map: The definition of the synonym map to create or update. - :type synonym_map: ~search_service_client.models.SynonymMap + :type synonym_map: ~azure.search.documents.models.SynonymMap + :param if_match: Defines the If-Match condition. The operation will be performed only if the + ETag on the server matches this value. + :type if_match: str + :param if_none_match: Defines the If-None-Match condition. The operation will be performed only + if the ETag on the server does not match this value. + :type if_none_match: str :param request_options: Parameter group. - :type request_options: ~search_service_client.models.RequestOptions - :param access_condition: Parameter group. 
- :type access_condition: ~search_service_client.models.AccessCondition + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: SynonymMap or the result of cls(response) - :rtype: ~search_service_client.models.SynonymMap or ~search_service_client.models.SynonymMap + :rtype: ~azure.search.documents.models.SynonymMap :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.SynonymMap"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None - _if_match = None - _if_none_match = None - if access_condition is not None: - _if_match = access_condition.if_match - _if_none_match = access_condition.if_none_match if request_options is not None: _x_ms_client_request_id = request_options.x_ms_client_request_id prefer = "return=representation" api_version = "2019-05-06-Preview" + content_type = kwargs.pop("content_type", "application/json") # Construct URL - url = self.create_or_update.metadata['url'] + url = self.create_or_update.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'synonymMapName': self._serialize.url("synonym_map_name", synonym_map_name, 'str'), @@ -90,13 +94,13 @@ async def create_or_update( header_parameters = {} # type: Dict[str, Any] if _x_ms_client_request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') header_parameters['Prefer'] = self._serialize.header("prefer", prefer, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = 'application/json' - header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json') # Construct and send request body_content_kwargs = {} # type: Dict[str, Any] @@ -123,43 +127,44 @@ async def create_or_update( return cls(pipeline_response, deserialized, {}) return deserialized - create_or_update.metadata = {'url': '/synonymmaps(\'{synonymMapName}\')'} + create_or_update.metadata = {'url': '/synonymmaps(\'{synonymMapName}\')'} # type: ignore async def delete( self, synonym_map_name: str, + if_match: Optional[str] = None, + if_none_match: Optional[str] = None, request_options: Optional["models.RequestOptions"] = None, - access_condition: Optional["models.AccessCondition"] = None, **kwargs ) -> None: """Deletes a synonym map. :param synonym_map_name: The name of the synonym map to delete. :type synonym_map_name: str + :param if_match: Defines the If-Match condition. The operation will be performed only if the + ETag on the server matches this value. 
+ :type if_match: str + :param if_none_match: Defines the If-None-Match condition. The operation will be performed only + if the ETag on the server does not match this value. + :type if_none_match: str :param request_options: Parameter group. - :type request_options: ~search_service_client.models.RequestOptions - :param access_condition: Parameter group. - :type access_condition: ~search_service_client.models.AccessCondition + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: None or the result of cls(response) :rtype: None :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None - _if_match = None - _if_none_match = None - if access_condition is not None: - _if_match = access_condition.if_match - _if_none_match = access_condition.if_none_match if request_options is not None: _x_ms_client_request_id = request_options.x_ms_client_request_id api_version = "2019-05-06-Preview" # Construct URL - url = self.delete.metadata['url'] + url = self.delete.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'synonymMapName': self._serialize.url("synonym_map_name", synonym_map_name, 'str'), @@ -174,10 +179,10 @@ async def delete( header_parameters = {} # type: Dict[str, Any] if _x_ms_client_request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') # Construct and send request request = self._client.delete(url, query_parameters, header_parameters) @@ -192,7 +197,7 @@ async def delete( if cls: return cls(pipeline_response, None, {}) - delete.metadata = {'url': '/synonymmaps(\'{synonymMapName}\')'} + delete.metadata = {'url': '/synonymmaps(\'{synonymMapName}\')'} # type: ignore async def get( self, @@ -205,14 +210,15 @@ async def get( :param synonym_map_name: The name of the synonym map to retrieve. :type synonym_map_name: str :param request_options: Parameter group. 
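For callers, the flattening means ETag preconditions are passed directly instead of being wrapped first; an illustrative before/after, assuming `client` is the synonym-maps operation group:

    # Before: access_condition=models.AccessCondition(if_match=etag)
    # After, with the flattened keyword arguments:
    async def delete_if_unchanged(client, etag: str) -> None:
        await client.delete("my-synonyms", if_match=etag)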
- :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: SynonymMap or the result of cls(response) - :rtype: ~search_service_client.models.SynonymMap + :rtype: ~azure.search.documents.models.SynonymMap :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.SynonymMap"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: @@ -220,7 +226,7 @@ async def get( api_version = "2019-05-06-Preview" # Construct URL - url = self.get.metadata['url'] + url = self.get.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'synonymMapName': self._serialize.url("synonym_map_name", synonym_map_name, 'str'), @@ -253,7 +259,7 @@ async def get( return cls(pipeline_response, deserialized, {}) return deserialized - get.metadata = {'url': '/synonymmaps(\'{synonymMapName}\')'} + get.metadata = {'url': '/synonymmaps(\'{synonymMapName}\')'} # type: ignore async def list( self, @@ -268,14 +274,15 @@ async def list( properties. :type select: str :param request_options: Parameter group. - :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: ListSynonymMapsResult or the result of cls(response) - :rtype: ~search_service_client.models.ListSynonymMapsResult + :rtype: ~azure.search.documents.models.ListSynonymMapsResult :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.ListSynonymMapsResult"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: @@ -283,7 +290,7 @@ async def list( api_version = "2019-05-06-Preview" # Construct URL - url = self.list.metadata['url'] + url = self.list.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -317,7 +324,7 @@ async def list( return cls(pipeline_response, deserialized, {}) return deserialized - list.metadata = {'url': '/synonymmaps'} + list.metadata = {'url': '/synonymmaps'} # type: ignore async def create( self, @@ -328,24 +335,26 @@ async def create( """Creates a new synonym map. :param synonym_map: The definition of the synonym map to create. - :type synonym_map: ~search_service_client.models.SynonymMap + :type synonym_map: ~azure.search.documents.models.SynonymMap :param request_options: Parameter group. 
- :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: SynonymMap or the result of cls(response) - :rtype: ~search_service_client.models.SynonymMap + :rtype: ~azure.search.documents.models.SynonymMap :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.SynonymMap"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: _x_ms_client_request_id = request_options.x_ms_client_request_id api_version = "2019-05-06-Preview" + content_type = kwargs.pop("content_type", "application/json") # Construct URL - url = self.create.metadata['url'] + url = self.create.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -359,8 +368,8 @@ async def create( header_parameters = {} # type: Dict[str, Any] if _x_ms_client_request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = 'application/json' - header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json') # Construct and send request body_content_kwargs = {} # type: Dict[str, Any] @@ -382,4 +391,4 @@ async def create( return cls(pipeline_response, deserialized, {}) return deserialized - create.metadata = {'url': '/synonymmaps'} + create.metadata = {'url': '/synonymmaps'} # type: ignore diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/models/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/models/__init__.py index 9e8de4e9d799..cc66379bb7a5 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/models/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/models/__init__.py @@ -1,18 +1,21 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator}) +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- try: - from ._models_py3 import AccessCondition from ._models_py3 import AnalyzeRequest from ._models_py3 import AnalyzeResult - from ._models_py3 import Analyzer + from ._models_py3 import AnalyzedTokenInfo from ._models_py3 import AsciiFoldingTokenFilter from ._models_py3 import AzureActiveDirectoryApplicationCredentials + from ._models_py3 import BM25Similarity from ._models_py3 import CharFilter from ._models_py3 import CjkBigramTokenFilter + from ._models_py3 import ClassicSimilarity from ._models_py3 import ClassicTokenizer from ._models_py3 import CognitiveServicesAccount from ._models_py3 import CognitiveServicesAccountKey @@ -21,9 +24,7 @@ from ._models_py3 import CorsOptions from ._models_py3 import CustomAnalyzer from ._models_py3 import DataChangeDetectionPolicy - from ._models_py3 import DataContainer from ._models_py3 import DataDeletionDetectionPolicy - from ._models_py3 import DataSource from ._models_py3 import DataSourceCredentials from ._models_py3 import DefaultCognitiveServicesAccount from ._models_py3 import DictionaryDecompounderTokenFilter @@ -33,9 +34,7 @@ from ._models_py3 import EdgeNGramTokenFilterV2 from ._models_py3 import EdgeNGramTokenizer from ._models_py3 import ElisionTokenFilter - from ._models_py3 import EncryptionKey from ._models_py3 import EntityRecognitionSkill - from ._models_py3 import Field from ._models_py3 import FieldMapping from ._models_py3 import FieldMappingFunction from ._models_py3 import FreshnessScoringFunction @@ -43,16 +42,10 @@ from ._models_py3 import GetIndexStatisticsResult from ._models_py3 import HighWaterMarkChangeDetectionPolicy from ._models_py3 import ImageAnalysisSkill - from ._models_py3 import Index - from ._models_py3 import Indexer - from ._models_py3 import IndexerExecutionInfo from ._models_py3 import IndexerExecutionResult - from ._models_py3 import IndexerLimits from ._models_py3 import IndexingParameters from ._models_py3 import IndexingSchedule from ._models_py3 import InputFieldMappingEntry - from ._models_py3 import ItemError - from ._models_py3 import ItemWarning from ._models_py3 import KeepTokenFilter from ._models_py3 import KeyPhraseExtractionSkill from ._models_py3 import KeywordMarkerTokenFilter @@ -60,12 +53,17 @@ from ._models_py3 import KeywordTokenizerV2 from ._models_py3 import LanguageDetectionSkill from ._models_py3 import LengthTokenFilter + from ._models_py3 import LexicalAnalyzer + from ._models_py3 import LexicalTokenizer from ._models_py3 import LimitTokenFilter from ._models_py3 import ListDataSourcesResult from ._models_py3 import ListIndexersResult from ._models_py3 import ListIndexesResult from ._models_py3 import ListSkillsetsResult from ._models_py3 import ListSynonymMapsResult + from ._models_py3 import LuceneStandardAnalyzer + from ._models_py3 import LuceneStandardTokenizer + from ._models_py3 import LuceneStandardTokenizerV2 from ._models_py3 import MagnitudeScoringFunction from ._models_py3 import MagnitudeScoringParameters from ._models_py3 import MappingCharFilter @@ -89,21 +87,29 @@ from ._models_py3 import ScoringFunction from ._models_py3 import ScoringProfile from ._models_py3 import SearchError + from ._models_py3 import SearchField + from ._models_py3 import SearchIndex + from ._models_py3 import SearchIndexer + from ._models_py3 import SearchIndexerDataContainer + from ._models_py3 import SearchIndexerDataSource + from ._models_py3 import SearchIndexerError + from ._models_py3 import 
SearchIndexerLimits + from ._models_py3 import SearchIndexerSkill + from ._models_py3 import SearchIndexerSkillset + from ._models_py3 import SearchIndexerStatus + from ._models_py3 import SearchIndexerWarning + from ._models_py3 import SearchResourceEncryptionKey from ._models_py3 import SentimentSkill from ._models_py3 import ServiceCounters from ._models_py3 import ServiceLimits from ._models_py3 import ServiceStatistics from ._models_py3 import ShaperSkill from ._models_py3 import ShingleTokenFilter - from ._models_py3 import Skill - from ._models_py3 import Skillset + from ._models_py3 import Similarity from ._models_py3 import SnowballTokenFilter from ._models_py3 import SoftDeleteColumnDeletionDetectionPolicy from ._models_py3 import SplitSkill from ._models_py3 import SqlIntegratedChangeTrackingPolicy - from ._models_py3 import StandardAnalyzer - from ._models_py3 import StandardTokenizer - from ._models_py3 import StandardTokenizerV2 from ._models_py3 import StemmerOverrideTokenFilter from ._models_py3 import StemmerTokenFilter from ._models_py3 import StopAnalyzer @@ -116,22 +122,21 @@ from ._models_py3 import TextTranslationSkill from ._models_py3 import TextWeights from ._models_py3 import TokenFilter - from ._models_py3 import TokenInfo - from ._models_py3 import Tokenizer from ._models_py3 import TruncateTokenFilter from ._models_py3 import UaxUrlEmailTokenizer from ._models_py3 import UniqueTokenFilter from ._models_py3 import WebApiSkill from ._models_py3 import WordDelimiterTokenFilter except (SyntaxError, ImportError): - from ._models import AccessCondition # type: ignore from ._models import AnalyzeRequest # type: ignore from ._models import AnalyzeResult # type: ignore - from ._models import Analyzer # type: ignore + from ._models import AnalyzedTokenInfo # type: ignore from ._models import AsciiFoldingTokenFilter # type: ignore from ._models import AzureActiveDirectoryApplicationCredentials # type: ignore + from ._models import BM25Similarity # type: ignore from ._models import CharFilter # type: ignore from ._models import CjkBigramTokenFilter # type: ignore + from ._models import ClassicSimilarity # type: ignore from ._models import ClassicTokenizer # type: ignore from ._models import CognitiveServicesAccount # type: ignore from ._models import CognitiveServicesAccountKey # type: ignore @@ -140,9 +145,7 @@ from ._models import CorsOptions # type: ignore from ._models import CustomAnalyzer # type: ignore from ._models import DataChangeDetectionPolicy # type: ignore - from ._models import DataContainer # type: ignore from ._models import DataDeletionDetectionPolicy # type: ignore - from ._models import DataSource # type: ignore from ._models import DataSourceCredentials # type: ignore from ._models import DefaultCognitiveServicesAccount # type: ignore from ._models import DictionaryDecompounderTokenFilter # type: ignore @@ -152,9 +155,7 @@ from ._models import EdgeNGramTokenFilterV2 # type: ignore from ._models import EdgeNGramTokenizer # type: ignore from ._models import ElisionTokenFilter # type: ignore - from ._models import EncryptionKey # type: ignore from ._models import EntityRecognitionSkill # type: ignore - from ._models import Field # type: ignore from ._models import FieldMapping # type: ignore from ._models import FieldMappingFunction # type: ignore from ._models import FreshnessScoringFunction # type: ignore @@ -162,16 +163,10 @@ from ._models import GetIndexStatisticsResult # type: ignore from ._models import HighWaterMarkChangeDetectionPolicy # type: 
ignore from ._models import ImageAnalysisSkill # type: ignore - from ._models import Index # type: ignore - from ._models import Indexer # type: ignore - from ._models import IndexerExecutionInfo # type: ignore from ._models import IndexerExecutionResult # type: ignore - from ._models import IndexerLimits # type: ignore from ._models import IndexingParameters # type: ignore from ._models import IndexingSchedule # type: ignore from ._models import InputFieldMappingEntry # type: ignore - from ._models import ItemError # type: ignore - from ._models import ItemWarning # type: ignore from ._models import KeepTokenFilter # type: ignore from ._models import KeyPhraseExtractionSkill # type: ignore from ._models import KeywordMarkerTokenFilter # type: ignore @@ -179,12 +174,17 @@ from ._models import KeywordTokenizerV2 # type: ignore from ._models import LanguageDetectionSkill # type: ignore from ._models import LengthTokenFilter # type: ignore + from ._models import LexicalAnalyzer # type: ignore + from ._models import LexicalTokenizer # type: ignore from ._models import LimitTokenFilter # type: ignore from ._models import ListDataSourcesResult # type: ignore from ._models import ListIndexersResult # type: ignore from ._models import ListIndexesResult # type: ignore from ._models import ListSkillsetsResult # type: ignore from ._models import ListSynonymMapsResult # type: ignore + from ._models import LuceneStandardAnalyzer # type: ignore + from ._models import LuceneStandardTokenizer # type: ignore + from ._models import LuceneStandardTokenizerV2 # type: ignore from ._models import MagnitudeScoringFunction # type: ignore from ._models import MagnitudeScoringParameters # type: ignore from ._models import MappingCharFilter # type: ignore @@ -208,21 +208,29 @@ from ._models import ScoringFunction # type: ignore from ._models import ScoringProfile # type: ignore from ._models import SearchError # type: ignore + from ._models import SearchField # type: ignore + from ._models import SearchIndex # type: ignore + from ._models import SearchIndexer # type: ignore + from ._models import SearchIndexerDataContainer # type: ignore + from ._models import SearchIndexerDataSource # type: ignore + from ._models import SearchIndexerError # type: ignore + from ._models import SearchIndexerLimits # type: ignore + from ._models import SearchIndexerSkill # type: ignore + from ._models import SearchIndexerSkillset # type: ignore + from ._models import SearchIndexerStatus # type: ignore + from ._models import SearchIndexerWarning # type: ignore + from ._models import SearchResourceEncryptionKey # type: ignore from ._models import SentimentSkill # type: ignore from ._models import ServiceCounters # type: ignore from ._models import ServiceLimits # type: ignore from ._models import ServiceStatistics # type: ignore from ._models import ShaperSkill # type: ignore from ._models import ShingleTokenFilter # type: ignore - from ._models import Skill # type: ignore - from ._models import Skillset # type: ignore + from ._models import Similarity # type: ignore from ._models import SnowballTokenFilter # type: ignore from ._models import SoftDeleteColumnDeletionDetectionPolicy # type: ignore from ._models import SplitSkill # type: ignore from ._models import SqlIntegratedChangeTrackingPolicy # type: ignore - from ._models import StandardAnalyzer # type: ignore - from ._models import StandardTokenizer # type: ignore - from ._models import StandardTokenizerV2 # type: ignore from ._models import StemmerOverrideTokenFilter # type: 
ignore from ._models import StemmerTokenFilter # type: ignore from ._models import StopAnalyzer # type: ignore @@ -235,8 +243,6 @@ from ._models import TextTranslationSkill # type: ignore from ._models import TextWeights # type: ignore from ._models import TokenFilter # type: ignore - from ._models import TokenInfo # type: ignore - from ._models import Tokenizer # type: ignore from ._models import TruncateTokenFilter # type: ignore from ._models import UaxUrlEmailTokenizer # type: ignore from ._models import UniqueTokenFilter # type: ignore @@ -244,10 +250,7 @@ from ._models import WordDelimiterTokenFilter # type: ignore from ._search_service_client_enums import ( - AnalyzerName, CjkBigramTokenFilterScripts, - DataSourceType, - DataType, EdgeNGramTokenFilterSide, EntityCategory, EntityRecognitionSkillLanguage, @@ -256,6 +259,8 @@ IndexerExecutionStatus, IndexerStatus, KeyPhraseExtractionSkillLanguage, + LexicalAnalyzerName, + LexicalTokenizerName, MicrosoftStemmingTokenizerLanguage, MicrosoftTokenizerLanguage, OcrSkillLanguage, @@ -263,6 +268,8 @@ RegexFlags, ScoringFunctionAggregation, ScoringFunctionInterpolation, + SearchFieldDataType, + SearchIndexerDataSourceType, SentimentSkillLanguage, SnowballTokenFilterLanguage, SplitSkillLanguage, @@ -273,19 +280,19 @@ TextTranslationSkillLanguage, TokenCharacterKind, TokenFilterName, - TokenizerName, VisualFeature, ) __all__ = [ - 'AccessCondition', 'AnalyzeRequest', 'AnalyzeResult', - 'Analyzer', + 'AnalyzedTokenInfo', 'AsciiFoldingTokenFilter', 'AzureActiveDirectoryApplicationCredentials', + 'BM25Similarity', 'CharFilter', 'CjkBigramTokenFilter', + 'ClassicSimilarity', 'ClassicTokenizer', 'CognitiveServicesAccount', 'CognitiveServicesAccountKey', @@ -294,9 +301,7 @@ 'CorsOptions', 'CustomAnalyzer', 'DataChangeDetectionPolicy', - 'DataContainer', 'DataDeletionDetectionPolicy', - 'DataSource', 'DataSourceCredentials', 'DefaultCognitiveServicesAccount', 'DictionaryDecompounderTokenFilter', @@ -306,9 +311,7 @@ 'EdgeNGramTokenFilterV2', 'EdgeNGramTokenizer', 'ElisionTokenFilter', - 'EncryptionKey', 'EntityRecognitionSkill', - 'Field', 'FieldMapping', 'FieldMappingFunction', 'FreshnessScoringFunction', @@ -316,16 +319,10 @@ 'GetIndexStatisticsResult', 'HighWaterMarkChangeDetectionPolicy', 'ImageAnalysisSkill', - 'Index', - 'Indexer', - 'IndexerExecutionInfo', 'IndexerExecutionResult', - 'IndexerLimits', 'IndexingParameters', 'IndexingSchedule', 'InputFieldMappingEntry', - 'ItemError', - 'ItemWarning', 'KeepTokenFilter', 'KeyPhraseExtractionSkill', 'KeywordMarkerTokenFilter', @@ -333,12 +330,17 @@ 'KeywordTokenizerV2', 'LanguageDetectionSkill', 'LengthTokenFilter', + 'LexicalAnalyzer', + 'LexicalTokenizer', 'LimitTokenFilter', 'ListDataSourcesResult', 'ListIndexersResult', 'ListIndexesResult', 'ListSkillsetsResult', 'ListSynonymMapsResult', + 'LuceneStandardAnalyzer', + 'LuceneStandardTokenizer', + 'LuceneStandardTokenizerV2', 'MagnitudeScoringFunction', 'MagnitudeScoringParameters', 'MappingCharFilter', @@ -362,21 +364,29 @@ 'ScoringFunction', 'ScoringProfile', 'SearchError', + 'SearchField', + 'SearchIndex', + 'SearchIndexer', + 'SearchIndexerDataContainer', + 'SearchIndexerDataSource', + 'SearchIndexerError', + 'SearchIndexerLimits', + 'SearchIndexerSkill', + 'SearchIndexerSkillset', + 'SearchIndexerStatus', + 'SearchIndexerWarning', + 'SearchResourceEncryptionKey', 'SentimentSkill', 'ServiceCounters', 'ServiceLimits', 'ServiceStatistics', 'ShaperSkill', 'ShingleTokenFilter', - 'Skill', - 'Skillset', + 'Similarity', 'SnowballTokenFilter', 
'SoftDeleteColumnDeletionDetectionPolicy', 'SplitSkill', 'SqlIntegratedChangeTrackingPolicy', - 'StandardAnalyzer', - 'StandardTokenizer', - 'StandardTokenizerV2', 'StemmerOverrideTokenFilter', 'StemmerTokenFilter', 'StopAnalyzer', @@ -389,17 +399,12 @@ 'TextTranslationSkill', 'TextWeights', 'TokenFilter', - 'TokenInfo', - 'Tokenizer', 'TruncateTokenFilter', 'UaxUrlEmailTokenizer', 'UniqueTokenFilter', 'WebApiSkill', 'WordDelimiterTokenFilter', - 'AnalyzerName', 'CjkBigramTokenFilterScripts', - 'DataSourceType', - 'DataType', 'EdgeNGramTokenFilterSide', 'EntityCategory', 'EntityRecognitionSkillLanguage', @@ -408,6 +413,8 @@ 'IndexerExecutionStatus', 'IndexerStatus', 'KeyPhraseExtractionSkillLanguage', + 'LexicalAnalyzerName', + 'LexicalTokenizerName', 'MicrosoftStemmingTokenizerLanguage', 'MicrosoftTokenizerLanguage', 'OcrSkillLanguage', @@ -415,6 +422,8 @@ 'RegexFlags', 'ScoringFunctionAggregation', 'ScoringFunctionInterpolation', + 'SearchFieldDataType', + 'SearchIndexerDataSourceType', 'SentimentSkillLanguage', 'SnowballTokenFilterLanguage', 'SplitSkillLanguage', @@ -425,6 +434,5 @@ 'TextTranslationSkillLanguage', 'TokenCharacterKind', 'TokenFilterName', - 'TokenizerName', 'VisualFeature', ] diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/models/_models.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/models/_models.py index 4474329fee6d..23293adcf2b6 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/models/_models.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/models/_models.py @@ -1,6 +1,8 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator}) +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- @@ -8,69 +10,49 @@ import msrest.serialization -class AccessCondition(msrest.serialization.Model): - """Parameter group. - - :param if_match: Defines the If-Match condition. The operation will be performed only if the - ETag on the server matches this value. - :type if_match: str - :param if_none_match: Defines the If-None-Match condition. The operation will be performed only - if the ETag on the server does not match this value. - :type if_none_match: str - """ - - _attribute_map = { - 'if_match': {'key': 'If-Match', 'type': 'str'}, - 'if_none_match': {'key': 'If-None-Match', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(AccessCondition, self).__init__(**kwargs) - self.if_match = kwargs.get('if_match', None) - self.if_none_match = kwargs.get('if_none_match', None) - - -class Analyzer(msrest.serialization.Model): - """Base type for analyzers. +class AnalyzedTokenInfo(msrest.serialization.Model): + """Information about a token returned by an analyzer. - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: CustomAnalyzer, PatternAnalyzer, StandardAnalyzer, StopAnalyzer. + Variables are only populated by the server, and will be ignored when sending a request. 
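# The __init__.py hunks above rename several generated models rather than
# dropping them. A hedged migration crib, derived only from the import changes
# in this diff (old name -> new name):
#   Analyzer         -> LexicalAnalyzer
#   Tokenizer        -> LexicalTokenizer
#   StandardAnalyzer -> LuceneStandardAnalyzer
#   TokenInfo        -> AnalyzedTokenInfo
#   Skill            -> SearchIndexerSkill
#   Skillset         -> SearchIndexerSkillset
#   DataSource       -> SearchIndexerDataSource
#   EncryptionKey    -> SearchResourceEncryptionKey
#   Field            -> SearchField
#   DataType         -> SearchFieldDataType
# Code importing the old names needs updating, for example:
from azure.search.documents._service._generated.models import (
    LexicalAnalyzer,        # was: Analyzer
    SearchIndexerSkillset,  # was: Skillset
)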
All required parameters must be populated in order to send to Azure. - :param odata_type: Required. Identifies the concrete type of the analyzer.Constant filled by - server. - :type odata_type: str - :param name: Required. The name of the analyzer. It must only contain letters, digits, spaces, - dashes or underscores, can only start and end with alphanumeric characters, and is limited to - 128 characters. - :type name: str + :ivar token: Required. The token returned by the analyzer. + :vartype token: str + :ivar start_offset: Required. The index of the first character of the token in the input text. + :vartype start_offset: int + :ivar end_offset: Required. The index of the last character of the token in the input text. + :vartype end_offset: int + :ivar position: Required. The position of the token in the input text relative to other tokens. + The first token in the input text has position 0, the next has position 1, and so on. Depending + on the analyzer used, some tokens might have the same position, for example if they are + synonyms of each other. + :vartype position: int """ _validation = { - 'odata_type': {'required': True}, - 'name': {'required': True}, + 'token': {'required': True, 'readonly': True}, + 'start_offset': {'required': True, 'readonly': True}, + 'end_offset': {'required': True, 'readonly': True}, + 'position': {'required': True, 'readonly': True}, } _attribute_map = { - 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - } - - _subtype_map = { - 'odata_type': {'#Microsoft.Azure.Search.CustomAnalyzer': 'CustomAnalyzer', '#Microsoft.Azure.Search.PatternAnalyzer': 'PatternAnalyzer', '#Microsoft.Azure.Search.StandardAnalyzer': 'StandardAnalyzer', '#Microsoft.Azure.Search.StopAnalyzer': 'StopAnalyzer'} + 'token': {'key': 'token', 'type': 'str'}, + 'start_offset': {'key': 'startOffset', 'type': 'int'}, + 'end_offset': {'key': 'endOffset', 'type': 'int'}, + 'position': {'key': 'position', 'type': 'int'}, } def __init__( self, **kwargs ): - super(Analyzer, self).__init__(**kwargs) - self.odata_type = None - self.name = kwargs.get('name', None) + super(AnalyzedTokenInfo, self).__init__(**kwargs) + self.token = None + self.start_offset = None + self.end_offset = None + self.position = None class AnalyzeRequest(msrest.serialization.Model): @@ -82,33 +64,33 @@ class AnalyzeRequest(msrest.serialization.Model): :type text: str :param analyzer: The name of the analyzer to use to break the given text. If this parameter is not specified, you must specify a tokenizer instead. The tokenizer and analyzer parameters are - mutually exclusive. 
Possible values include: 'ar.microsoft', 'ar.lucene', 'hy.lucene', - 'bn.microsoft', 'eu.lucene', 'bg.microsoft', 'bg.lucene', 'ca.microsoft', 'ca.lucene', 'zh- - Hans.microsoft', 'zh-Hans.lucene', 'zh-Hant.microsoft', 'zh-Hant.lucene', 'hr.microsoft', - 'cs.microsoft', 'cs.lucene', 'da.microsoft', 'da.lucene', 'nl.microsoft', 'nl.lucene', - 'en.microsoft', 'en.lucene', 'et.microsoft', 'fi.microsoft', 'fi.lucene', 'fr.microsoft', - 'fr.lucene', 'gl.lucene', 'de.microsoft', 'de.lucene', 'el.microsoft', 'el.lucene', - 'gu.microsoft', 'he.microsoft', 'hi.microsoft', 'hi.lucene', 'hu.microsoft', 'hu.lucene', - 'is.microsoft', 'id.microsoft', 'id.lucene', 'ga.lucene', 'it.microsoft', 'it.lucene', - 'ja.microsoft', 'ja.lucene', 'kn.microsoft', 'ko.microsoft', 'ko.lucene', 'lv.microsoft', - 'lv.lucene', 'lt.microsoft', 'ml.microsoft', 'ms.microsoft', 'mr.microsoft', 'nb.microsoft', - 'no.lucene', 'fa.lucene', 'pl.microsoft', 'pl.lucene', 'pt-BR.microsoft', 'pt-BR.lucene', 'pt- - PT.microsoft', 'pt-PT.lucene', 'pa.microsoft', 'ro.microsoft', 'ro.lucene', 'ru.microsoft', - 'ru.lucene', 'sr-cyrillic.microsoft', 'sr-latin.microsoft', 'sk.microsoft', 'sl.microsoft', - 'es.microsoft', 'es.lucene', 'sv.microsoft', 'sv.lucene', 'ta.microsoft', 'te.microsoft', - 'th.microsoft', 'th.lucene', 'tr.microsoft', 'tr.lucene', 'uk.microsoft', 'ur.microsoft', - 'vi.microsoft', 'standard.lucene', 'standardasciifolding.lucene', 'keyword', 'pattern', - 'simple', 'stop', 'whitespace'. - :type analyzer: str or ~search_service_client.models.AnalyzerName + mutually exclusive. Possible values include: "ar.microsoft", "ar.lucene", "hy.lucene", + "bn.microsoft", "eu.lucene", "bg.microsoft", "bg.lucene", "ca.microsoft", "ca.lucene", "zh- + Hans.microsoft", "zh-Hans.lucene", "zh-Hant.microsoft", "zh-Hant.lucene", "hr.microsoft", + "cs.microsoft", "cs.lucene", "da.microsoft", "da.lucene", "nl.microsoft", "nl.lucene", + "en.microsoft", "en.lucene", "et.microsoft", "fi.microsoft", "fi.lucene", "fr.microsoft", + "fr.lucene", "gl.lucene", "de.microsoft", "de.lucene", "el.microsoft", "el.lucene", + "gu.microsoft", "he.microsoft", "hi.microsoft", "hi.lucene", "hu.microsoft", "hu.lucene", + "is.microsoft", "id.microsoft", "id.lucene", "ga.lucene", "it.microsoft", "it.lucene", + "ja.microsoft", "ja.lucene", "kn.microsoft", "ko.microsoft", "ko.lucene", "lv.microsoft", + "lv.lucene", "lt.microsoft", "ml.microsoft", "ms.microsoft", "mr.microsoft", "nb.microsoft", + "no.lucene", "fa.lucene", "pl.microsoft", "pl.lucene", "pt-BR.microsoft", "pt-BR.lucene", "pt- + PT.microsoft", "pt-PT.lucene", "pa.microsoft", "ro.microsoft", "ro.lucene", "ru.microsoft", + "ru.lucene", "sr-cyrillic.microsoft", "sr-latin.microsoft", "sk.microsoft", "sl.microsoft", + "es.microsoft", "es.lucene", "sv.microsoft", "sv.lucene", "ta.microsoft", "te.microsoft", + "th.microsoft", "th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", + "vi.microsoft", "standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", + "simple", "stop", "whitespace". + :type analyzer: str or ~azure.search.documents.models.LexicalAnalyzerName :param tokenizer: The name of the tokenizer to use to break the given text. If this parameter is not specified, you must specify an analyzer instead. The tokenizer and analyzer parameters - are mutually exclusive. 
Possible values include: 'classic', 'edgeNGram', 'keyword_v2', - 'letter', 'lowercase', 'microsoft_language_tokenizer', 'microsoft_language_stemming_tokenizer', - 'nGram', 'path_hierarchy_v2', 'pattern', 'standard_v2', 'uax_url_email', 'whitespace'. - :type tokenizer: str or ~search_service_client.models.TokenizerName + are mutually exclusive. Possible values include: "classic", "edgeNGram", "keyword_v2", + "letter", "lowercase", "microsoft_language_tokenizer", "microsoft_language_stemming_tokenizer", + "nGram", "path_hierarchy_v2", "pattern", "standard_v2", "uax_url_email", "whitespace". + :type tokenizer: str or ~azure.search.documents.models.LexicalTokenizerName :param token_filters: An optional list of token filters to use when breaking the given text. This parameter can only be set when using the tokenizer parameter. - :type token_filters: list[str or ~search_service_client.models.TokenFilterName] + :type token_filters: list[str or ~azure.search.documents.models.TokenFilterName] :param char_filters: An optional list of character filters to use when breaking the given text. This parameter can only be set when using the tokenizer parameter. :type char_filters: list[str] @@ -131,7 +113,7 @@ def __init__( **kwargs ): super(AnalyzeRequest, self).__init__(**kwargs) - self.text = kwargs.get('text', None) + self.text = kwargs['text'] self.analyzer = kwargs.get('analyzer', None) self.tokenizer = kwargs.get('tokenizer', None) self.token_filters = kwargs.get('token_filters', None) @@ -144,7 +126,7 @@ class AnalyzeResult(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. :param tokens: Required. The list of tokens returned by the analyzer specified in the request. - :type tokens: list[~search_service_client.models.TokenInfo] + :type tokens: list[~azure.search.documents.models.AnalyzedTokenInfo] """ _validation = { @@ -152,7 +134,7 @@ class AnalyzeResult(msrest.serialization.Model): } _attribute_map = { - 'tokens': {'key': 'tokens', 'type': '[TokenInfo]'}, + 'tokens': {'key': 'tokens', 'type': '[AnalyzedTokenInfo]'}, } def __init__( @@ -160,7 +142,7 @@ def __init__( **kwargs ): super(AnalyzeResult, self).__init__(**kwargs) - self.tokens = kwargs.get('tokens', None) + self.tokens = kwargs['tokens'] class TokenFilter(msrest.serialization.Model): @@ -200,7 +182,7 @@ def __init__( ): super(TokenFilter, self).__init__(**kwargs) self.odata_type = None - self.name = kwargs.get('name', None) + self.name = kwargs['name'] class AsciiFoldingTokenFilter(TokenFilter): @@ -267,10 +249,79 @@ def __init__( **kwargs ): super(AzureActiveDirectoryApplicationCredentials, self).__init__(**kwargs) - self.application_id = kwargs.get('application_id', None) + self.application_id = kwargs['application_id'] self.application_secret = kwargs.get('application_secret', None) +class Similarity(msrest.serialization.Model): + """Base type for similarity algorithms. Similarity algorithms are used to calculate scores that tie queries to documents. The higher the score, the more relevant the document is to that specific query. Those scores are used to rank the search results. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: BM25Similarity, ClassicSimilarity. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. 
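# AnalyzeRequest (above) now fails fast when `text` is missing
# (kwargs['text'] raises KeyError) and documents analyzer/tokenizer as
# mutually exclusive. A hedged usage sketch against this generated package;
# the analyzer/tokenizer/filter names come from the docstring value lists:
from azure.search.documents._service._generated.models import AnalyzeRequest

request = AnalyzeRequest(text="The quick brown fox", analyzer="en.lucene")
# Alternatively tokenize explicitly; token_filters requires the tokenizer form:
request = AnalyzeRequest(
    text="The quick brown fox",
    tokenizer="whitespace",
    token_filters=["lowercase"],
)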
+    :type odata_type: str
+    """
+
+    _validation = {
+        'odata_type': {'required': True},
+    }
+
+    _attribute_map = {
+        'odata_type': {'key': '@odata\\.type', 'type': 'str'},
+    }
+
+    _subtype_map = {
+        'odata_type': {'#Microsoft.Azure.Search.BM25Similarity': 'BM25Similarity', '#Microsoft.Azure.Search.ClassicSimilarity': 'ClassicSimilarity'}
+    }
+
+    def __init__(
+        self,
+        **kwargs
+    ):
+        super(Similarity, self).__init__(**kwargs)
+        self.odata_type = None
+
+
+class BM25Similarity(Similarity):
+    """Ranking function based on the Okapi BM25 similarity algorithm. BM25 is a TF-IDF-like algorithm that includes length normalization (controlled by the 'b' parameter) as well as term frequency saturation (controlled by the 'k1' parameter).
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param odata_type: Required. Constant filled by server.
+    :type odata_type: str
+    :param k1: This property controls the scaling function between the term frequency of each
+     matching term and the final relevance score of a document-query pair. By default, a value of
+     1.2 is used. A value of 0.0 means the score does not scale with an increase in term frequency.
+    :type k1: float
+    :param b: This property controls how the length of a document affects the relevance score. By
+     default, a value of 0.75 is used. A value of 0.0 means no length normalization is applied,
+     while a value of 1.0 means the score is fully normalized by the length of the document.
+    :type b: float
+    """
+
+    _validation = {
+        'odata_type': {'required': True},
+    }
+
+    _attribute_map = {
+        'odata_type': {'key': '@odata\\.type', 'type': 'str'},
+        'k1': {'key': 'k1', 'type': 'float'},
+        'b': {'key': 'b', 'type': 'float'},
+    }
+
+    def __init__(
+        self,
+        **kwargs
+    ):
+        super(BM25Similarity, self).__init__(**kwargs)
+        self.odata_type = '#Microsoft.Azure.Search.BM25Similarity'
+        self.k1 = kwargs.get('k1', None)
+        self.b = kwargs.get('b', None)
+
+
 class CharFilter(msrest.serialization.Model):
     """Base type for character filters.
 
@@ -308,11 +359,11 @@ def __init__(
     ):
         super(CharFilter, self).__init__(**kwargs)
         self.odata_type = None
-        self.name = kwargs.get('name', None)
+        self.name = kwargs['name']
 
 
 class CjkBigramTokenFilter(TokenFilter):
-    """Forms bigrams of CJK terms that are generated from StandardTokenizer. This token filter is implemented using Apache Lucene.
+    """Forms bigrams of CJK terms that are generated from the standard tokenizer. This token filter is implemented using Apache Lucene.
 
     All required parameters must be populated in order to send to Azure.
 
@@ -324,7 +375,7 @@ class CjkBigramTokenFilter(TokenFilter):
     limited to 128 characters.
     :type name: str
     :param ignore_scripts: The scripts to ignore.
-    :type ignore_scripts: list[str or ~search_service_client.models.CjkBigramTokenFilterScripts]
+    :type ignore_scripts: list[str or ~azure.search.documents.models.CjkBigramTokenFilterScripts]
     :param output_unigrams: A value indicating whether to output both unigrams and bigrams (if
     true), or just bigrams (if false). Default is false.
     :type output_unigrams: bool
@@ -352,11 +403,36 @@ def __init__(
         self.output_unigrams = kwargs.get('output_unigrams', False)
 
 
-class Tokenizer(msrest.serialization.Model):
+class ClassicSimilarity(Similarity):
+    """Legacy similarity algorithm which uses the Lucene TFIDFSimilarity implementation of TF-IDF. This variation of TF-IDF introduces static document length normalization as well as coordinating factors that penalize documents that only partially match the searched queries.
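# BM25Similarity (above) exposes the two BM25 knobs its docstring describes:
# k1 (term-frequency saturation, default 1.2) and b (length normalization,
# default 0.75). Hedged sketch; the values are illustrative only:
from azure.search.documents._service._generated.models import BM25Similarity

similarity = BM25Similarity(k1=1.5, b=0.9)
assert similarity.odata_type == '#Microsoft.Azure.Search.BM25Similarity'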
+ + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + """ + + _validation = { + 'odata_type': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(ClassicSimilarity, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Azure.Search.ClassicSimilarity' + + +class LexicalTokenizer(msrest.serialization.Model): """Base type for tokenizers. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: ClassicTokenizer, EdgeNGramTokenizer, KeywordTokenizer, KeywordTokenizerV2, MicrosoftLanguageStemmingTokenizer, MicrosoftLanguageTokenizer, NGramTokenizer, PathHierarchyTokenizerV2, PatternTokenizer, StandardTokenizer, StandardTokenizerV2, UaxUrlEmailTokenizer. + sub-classes are: ClassicTokenizer, EdgeNGramTokenizer, KeywordTokenizer, KeywordTokenizerV2, MicrosoftLanguageStemmingTokenizer, MicrosoftLanguageTokenizer, NGramTokenizer, PathHierarchyTokenizerV2, PatternTokenizer, LuceneStandardTokenizer, LuceneStandardTokenizerV2, UaxUrlEmailTokenizer. All required parameters must be populated in order to send to Azure. @@ -380,19 +456,19 @@ class Tokenizer(msrest.serialization.Model): } _subtype_map = { - 'odata_type': {'#Microsoft.Azure.Search.ClassicTokenizer': 'ClassicTokenizer', '#Microsoft.Azure.Search.EdgeNGramTokenizer': 'EdgeNGramTokenizer', '#Microsoft.Azure.Search.KeywordTokenizer': 'KeywordTokenizer', '#Microsoft.Azure.Search.KeywordTokenizerV2': 'KeywordTokenizerV2', '#Microsoft.Azure.Search.MicrosoftLanguageStemmingTokenizer': 'MicrosoftLanguageStemmingTokenizer', '#Microsoft.Azure.Search.MicrosoftLanguageTokenizer': 'MicrosoftLanguageTokenizer', '#Microsoft.Azure.Search.NGramTokenizer': 'NGramTokenizer', '#Microsoft.Azure.Search.PathHierarchyTokenizerV2': 'PathHierarchyTokenizerV2', '#Microsoft.Azure.Search.PatternTokenizer': 'PatternTokenizer', '#Microsoft.Azure.Search.StandardTokenizer': 'StandardTokenizer', '#Microsoft.Azure.Search.StandardTokenizerV2': 'StandardTokenizerV2', '#Microsoft.Azure.Search.UaxUrlEmailTokenizer': 'UaxUrlEmailTokenizer'} + 'odata_type': {'#Microsoft.Azure.Search.ClassicTokenizer': 'ClassicTokenizer', '#Microsoft.Azure.Search.EdgeNGramTokenizer': 'EdgeNGramTokenizer', '#Microsoft.Azure.Search.KeywordTokenizer': 'KeywordTokenizer', '#Microsoft.Azure.Search.KeywordTokenizerV2': 'KeywordTokenizerV2', '#Microsoft.Azure.Search.MicrosoftLanguageStemmingTokenizer': 'MicrosoftLanguageStemmingTokenizer', '#Microsoft.Azure.Search.MicrosoftLanguageTokenizer': 'MicrosoftLanguageTokenizer', '#Microsoft.Azure.Search.NGramTokenizer': 'NGramTokenizer', '#Microsoft.Azure.Search.PathHierarchyTokenizerV2': 'PathHierarchyTokenizerV2', '#Microsoft.Azure.Search.PatternTokenizer': 'PatternTokenizer', '#Microsoft.Azure.Search.StandardTokenizer': 'LuceneStandardTokenizer', '#Microsoft.Azure.Search.StandardTokenizerV2': 'LuceneStandardTokenizerV2', '#Microsoft.Azure.Search.UaxUrlEmailTokenizer': 'UaxUrlEmailTokenizer'} } def __init__( self, **kwargs ): - super(Tokenizer, self).__init__(**kwargs) + super(LexicalTokenizer, self).__init__(**kwargs) self.odata_type = None - self.name = kwargs.get('name', None) + self.name = kwargs['name'] -class ClassicTokenizer(Tokenizer): +class ClassicTokenizer(LexicalTokenizer): """Grammar-based tokenizer that is suitable for processing most European-language documents. 
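# The _subtype_map above keeps the wire-format @odata.type discriminators
# (e.g. '#Microsoft.Azure.Search.StandardTokenizer') while binding them to the
# renamed Python classes, so existing service payloads still deserialize.
# Hedged sketch, assuming msrest's polymorphic Model.deserialize:
from azure.search.documents._service._generated.models import LexicalTokenizer

tokenizer = LexicalTokenizer.deserialize({
    "@odata.type": "#Microsoft.Azure.Search.ClassicTokenizer",
    "name": "my-classic-tokenizer",
})
# The discriminator resolves the payload to the ClassicTokenizer subclass.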
This tokenizer is implemented using Apache Lucene. All required parameters must be populated in order to send to Azure. @@ -499,7 +575,7 @@ def __init__( ): super(CognitiveServicesAccountKey, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.CognitiveServicesByKey' - self.key = kwargs.get('key', None) + self.key = kwargs['key'] class CommonGramTokenFilter(TokenFilter): @@ -545,12 +621,12 @@ def __init__( ): super(CommonGramTokenFilter, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.CommonGramTokenFilter' - self.common_words = kwargs.get('common_words', None) + self.common_words = kwargs['common_words'] self.ignore_case = kwargs.get('ignore_case', False) self.use_query_mode = kwargs.get('use_query_mode', False) -class Skill(msrest.serialization.Model): +class SearchIndexerSkill(msrest.serialization.Model): """Base type for skills. You probably want to use the sub-classes and not this class directly. Known @@ -573,10 +649,10 @@ class Skill(msrest.serialization.Model): :type context: str :param inputs: Required. Inputs of the skills could be a column in the source data set, or the output of an upstream skill. - :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :type inputs: list[~azure.search.documents.models.InputFieldMappingEntry] :param outputs: Required. The output of a skill is either a field in a search index, or a value that can be consumed as an input by another skill. - :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + :type outputs: list[~azure.search.documents.models.OutputFieldMappingEntry] """ _validation = { @@ -602,16 +678,16 @@ def __init__( self, **kwargs ): - super(Skill, self).__init__(**kwargs) + super(SearchIndexerSkill, self).__init__(**kwargs) self.odata_type = None self.name = kwargs.get('name', None) self.description = kwargs.get('description', None) self.context = kwargs.get('context', None) - self.inputs = kwargs.get('inputs', None) - self.outputs = kwargs.get('outputs', None) + self.inputs = kwargs['inputs'] + self.outputs = kwargs['outputs'] -class ConditionalSkill(Skill): +class ConditionalSkill(SearchIndexerSkill): """A skill that enables scenarios that require a Boolean operation to determine the data to assign to an output. All required parameters must be populated in order to send to Azure. @@ -631,10 +707,10 @@ class ConditionalSkill(Skill): :type context: str :param inputs: Required. Inputs of the skills could be a column in the source data set, or the output of an upstream skill. - :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :type inputs: list[~azure.search.documents.models.InputFieldMappingEntry] :param outputs: Required. The output of a skill is either a field in a search index, or a value that can be consumed as an input by another skill. - :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + :type outputs: list[~azure.search.documents.models.OutputFieldMappingEntry] """ _validation = { @@ -688,11 +764,51 @@ def __init__( **kwargs ): super(CorsOptions, self).__init__(**kwargs) - self.allowed_origins = kwargs.get('allowed_origins', None) + self.allowed_origins = kwargs['allowed_origins'] self.max_age_in_seconds = kwargs.get('max_age_in_seconds', None) -class CustomAnalyzer(Analyzer): +class LexicalAnalyzer(msrest.serialization.Model): + """Base type for analyzers. + + You probably want to use the sub-classes and not this class directly. 
Known
+    sub-classes are: CustomAnalyzer, PatternAnalyzer, LuceneStandardAnalyzer, StopAnalyzer.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param odata_type: Required. Identifies the concrete type of the analyzer. Constant filled by
+     server.
+    :type odata_type: str
+    :param name: Required. The name of the analyzer. It must only contain letters, digits, spaces,
+     dashes or underscores, can only start and end with alphanumeric characters, and is limited to
+     128 characters.
+    :type name: str
+    """
+
+    _validation = {
+        'odata_type': {'required': True},
+        'name': {'required': True},
+    }
+
+    _attribute_map = {
+        'odata_type': {'key': '@odata\\.type', 'type': 'str'},
+        'name': {'key': 'name', 'type': 'str'},
+    }
+
+    _subtype_map = {
+        'odata_type': {'#Microsoft.Azure.Search.CustomAnalyzer': 'CustomAnalyzer', '#Microsoft.Azure.Search.PatternAnalyzer': 'PatternAnalyzer', '#Microsoft.Azure.Search.StandardAnalyzer': 'LuceneStandardAnalyzer', '#Microsoft.Azure.Search.StopAnalyzer': 'StopAnalyzer'}
+    }
+
+    def __init__(
+        self,
+        **kwargs
+    ):
+        super(LexicalAnalyzer, self).__init__(**kwargs)
+        self.odata_type = None
+        self.name = kwargs['name']
+
+
+class CustomAnalyzer(LexicalAnalyzer):
     """Allows you to take control over the process of converting text into indexable/searchable tokens. It's a user-defined configuration consisting of a single predefined tokenizer and one or more filters. The tokenizer is responsible for breaking text into tokens, and the filters for modifying tokens emitted by the tokenizer.
 
     All required parameters must be populated in order to send to Azure.
 
@@ -705,15 +821,15 @@ class CustomAnalyzer(Analyzer):
     128 characters.
     :type name: str
     :param tokenizer: Required. The name of the tokenizer to use to divide continuous text into a
-    sequence of tokens, such as breaking a sentence into words. Possible values include: 'classic',
-    'edgeNGram', 'keyword_v2', 'letter', 'lowercase', 'microsoft_language_tokenizer',
-    'microsoft_language_stemming_tokenizer', 'nGram', 'path_hierarchy_v2', 'pattern',
-    'standard_v2', 'uax_url_email', 'whitespace'.
-    :type tokenizer: str or ~search_service_client.models.TokenizerName
+    sequence of tokens, such as breaking a sentence into words. Possible values include: "classic",
+    "edgeNGram", "keyword_v2", "letter", "lowercase", "microsoft_language_tokenizer",
+    "microsoft_language_stemming_tokenizer", "nGram", "path_hierarchy_v2", "pattern",
+    "standard_v2", "uax_url_email", "whitespace".
+    :type tokenizer: str or ~azure.search.documents.models.LexicalTokenizerName
     :param token_filters: A list of token filters used to filter out or modify the tokens generated
     by a tokenizer. For example, you can specify a lowercase filter that converts all characters to
     lowercase. The filters are run in the order in which they are listed.
-    :type token_filters: list[str or ~search_service_client.models.TokenFilterName]
+    :type token_filters: list[str or ~azure.search.documents.models.TokenFilterName]
     :param char_filters: A list of character filters used to prepare input text before it is
     processed by the tokenizer. For instance, they can replace certain characters or symbols. The
     filters are run in the order in which they are listed.
@@ -740,7 +856,7 @@ def __init__( ): super(CustomAnalyzer, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.CustomAnalyzer' - self.tokenizer = kwargs.get('tokenizer', None) + self.tokenizer = kwargs['tokenizer'] self.token_filters = kwargs.get('token_filters', None) self.char_filters = kwargs.get('char_filters', None) @@ -778,37 +894,6 @@ def __init__( self.odata_type = None -class DataContainer(msrest.serialization.Model): - """Represents information about the entity (such as Azure SQL table or CosmosDB collection) that will be indexed. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. The name of the table or view (for Azure SQL data source) or collection - (for CosmosDB data source) that will be indexed. - :type name: str - :param query: A query that is applied to this data container. The syntax and meaning of this - parameter is datasource-specific. Not supported by Azure SQL datasources. - :type query: str - """ - - _validation = { - 'name': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'query': {'key': 'query', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(DataContainer, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.query = kwargs.get('query', None) - - class DataDeletionDetectionPolicy(msrest.serialization.Model): """Base type for data deletion detection policies. @@ -842,63 +927,6 @@ def __init__( self.odata_type = None -class DataSource(msrest.serialization.Model): - """Represents a datasource definition, which can be used to configure an indexer. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. The name of the datasource. - :type name: str - :param description: The description of the datasource. - :type description: str - :param type: Required. The type of the datasource. Possible values include: 'azuresql', - 'cosmosdb', 'azureblob', 'azuretable', 'mysql'. - :type type: str or ~search_service_client.models.DataSourceType - :param credentials: Required. Credentials for the datasource. - :type credentials: ~search_service_client.models.DataSourceCredentials - :param container: Required. The data container for the datasource. - :type container: ~search_service_client.models.DataContainer - :param data_change_detection_policy: The data change detection policy for the datasource. - :type data_change_detection_policy: ~search_service_client.models.DataChangeDetectionPolicy - :param data_deletion_detection_policy: The data deletion detection policy for the datasource. - :type data_deletion_detection_policy: ~search_service_client.models.DataDeletionDetectionPolicy - :param e_tag: The ETag of the DataSource. 
- :type e_tag: str - """ - - _validation = { - 'name': {'required': True}, - 'type': {'required': True}, - 'credentials': {'required': True}, - 'container': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'description': {'key': 'description', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'str'}, - 'credentials': {'key': 'credentials', 'type': 'DataSourceCredentials'}, - 'container': {'key': 'container', 'type': 'DataContainer'}, - 'data_change_detection_policy': {'key': 'dataChangeDetectionPolicy', 'type': 'DataChangeDetectionPolicy'}, - 'data_deletion_detection_policy': {'key': 'dataDeletionDetectionPolicy', 'type': 'DataDeletionDetectionPolicy'}, - 'e_tag': {'key': '@odata\\.etag', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(DataSource, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.description = kwargs.get('description', None) - self.type = kwargs.get('type', None) - self.credentials = kwargs.get('credentials', None) - self.container = kwargs.get('container', None) - self.data_change_detection_policy = kwargs.get('data_change_detection_policy', None) - self.data_deletion_detection_policy = kwargs.get('data_deletion_detection_policy', None) - self.e_tag = kwargs.get('e_tag', None) - - class DataSourceCredentials(msrest.serialization.Model): """Represents credentials that can be used to connect to a datasource. @@ -1000,7 +1028,7 @@ def __init__( ): super(DictionaryDecompounderTokenFilter, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.DictionaryDecompounderTokenFilter' - self.word_list = kwargs.get('word_list', None) + self.word_list = kwargs['word_list'] self.min_word_size = kwargs.get('min_word_size', 5) self.min_subword_size = kwargs.get('min_subword_size', 2) self.max_subword_size = kwargs.get('max_subword_size', 15) @@ -1024,9 +1052,9 @@ class ScoringFunction(msrest.serialization.Model): 1.0. :type boost: float :param interpolation: A value indicating how boosting will be interpolated across document - scores; defaults to "Linear". Possible values include: 'linear', 'constant', 'quadratic', - 'logarithmic'. - :type interpolation: str or ~search_service_client.models.ScoringFunctionInterpolation + scores; defaults to "Linear". Possible values include: "linear", "constant", "quadratic", + "logarithmic". + :type interpolation: str or ~azure.search.documents.models.ScoringFunctionInterpolation """ _validation = { @@ -1052,8 +1080,8 @@ def __init__( ): super(ScoringFunction, self).__init__(**kwargs) self.type = None - self.field_name = kwargs.get('field_name', None) - self.boost = kwargs.get('boost', None) + self.field_name = kwargs['field_name'] + self.boost = kwargs['boost'] self.interpolation = kwargs.get('interpolation', None) @@ -1071,11 +1099,11 @@ class DistanceScoringFunction(ScoringFunction): 1.0. :type boost: float :param interpolation: A value indicating how boosting will be interpolated across document - scores; defaults to "Linear". Possible values include: 'linear', 'constant', 'quadratic', - 'logarithmic'. - :type interpolation: str or ~search_service_client.models.ScoringFunctionInterpolation + scores; defaults to "Linear". Possible values include: "linear", "constant", "quadratic", + "logarithmic". + :type interpolation: str or ~azure.search.documents.models.ScoringFunctionInterpolation :param parameters: Required. Parameter values for the distance scoring function. 
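# A recurring change in these hunks: required constructor parameters move from
# kwargs.get('x', None) to kwargs['x'], so omitting them now raises KeyError
# at construction time instead of producing an invalid request body. Hedged
# sketch using two models changed above:
from azure.search.documents._service._generated.models import (
    CorsOptions,
    DictionaryDecompounderTokenFilter,
)

cors = CorsOptions(allowed_origins=["https://example.org"])  # now required
token_filter = DictionaryDecompounderTokenFilter(
    name="decompounder",
    word_list=["Donau", "dampf", "schiff"],  # required; sizes default to 5/2/15
)
# CorsOptions() without allowed_origins would now raise KeyError.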
- :type parameters: ~search_service_client.models.DistanceScoringParameters + :type parameters: ~azure.search.documents.models.DistanceScoringParameters """ _validation = { @@ -1099,7 +1127,7 @@ def __init__( ): super(DistanceScoringFunction, self).__init__(**kwargs) self.type = 'distance' - self.parameters = kwargs.get('parameters', None) + self.parameters = kwargs['parameters'] class DistanceScoringParameters(msrest.serialization.Model): @@ -1130,8 +1158,8 @@ def __init__( **kwargs ): super(DistanceScoringParameters, self).__init__(**kwargs) - self.reference_point_parameter = kwargs.get('reference_point_parameter', None) - self.boosting_distance = kwargs.get('boosting_distance', None) + self.reference_point_parameter = kwargs['reference_point_parameter'] + self.boosting_distance = kwargs['boosting_distance'] class EdgeNGramTokenFilter(TokenFilter): @@ -1152,8 +1180,8 @@ class EdgeNGramTokenFilter(TokenFilter): :param max_gram: The maximum n-gram length. Default is 2. :type max_gram: int :param side: Specifies which side of the input the n-gram should be generated from. Default is - "front". Possible values include: 'front', 'back'. - :type side: str or ~search_service_client.models.EdgeNGramTokenFilterSide + "front". Possible values include: "front", "back". + :type side: str or ~azure.search.documents.models.EdgeNGramTokenFilterSide """ _validation = { @@ -1198,8 +1226,8 @@ class EdgeNGramTokenFilterV2(TokenFilter): :param max_gram: The maximum n-gram length. Default is 2. Maximum is 300. :type max_gram: int :param side: Specifies which side of the input the n-gram should be generated from. Default is - "front". Possible values include: 'front', 'back'. - :type side: str or ~search_service_client.models.EdgeNGramTokenFilterSide + "front". Possible values include: "front", "back". + :type side: str or ~azure.search.documents.models.EdgeNGramTokenFilterSide """ _validation = { @@ -1228,7 +1256,7 @@ def __init__( self.side = kwargs.get('side', None) -class EdgeNGramTokenizer(Tokenizer): +class EdgeNGramTokenizer(LexicalTokenizer): """Tokenizes the input from an edge into n-grams of the given size(s). This tokenizer is implemented using Apache Lucene. All required parameters must be populated in order to send to Azure. @@ -1246,7 +1274,7 @@ class EdgeNGramTokenizer(Tokenizer): :param max_gram: The maximum n-gram length. Default is 2. Maximum is 300. :type max_gram: int :param token_chars: Character classes to keep in the tokens. - :type token_chars: list[str or ~search_service_client.models.TokenCharacterKind] + :type token_chars: list[str or ~azure.search.documents.models.TokenCharacterKind] """ _validation = { @@ -1311,53 +1339,8 @@ def __init__( self.articles = kwargs.get('articles', None) -class EncryptionKey(msrest.serialization.Model): - """A customer-managed encryption key in Azure Key Vault. Keys that you create and manage can be used to encrypt or decrypt data-at-rest in Azure Cognitive Search, such as indexes and synonym maps. - - All required parameters must be populated in order to send to Azure. - - :param key_vault_key_name: Required. The name of your Azure Key Vault key to be used to encrypt - your data at rest. - :type key_vault_key_name: str - :param key_vault_key_version: Required. The version of your Azure Key Vault key to be used to - encrypt your data at rest. - :type key_vault_key_version: str - :param key_vault_uri: Required. The URI of your Azure Key Vault, also referred to as DNS name, - that contains the key to be used to encrypt your data at rest. 
An example URI might be - https://my-keyvault-name.vault.azure.net. - :type key_vault_uri: str - :param access_credentials: Optional Azure Active Directory credentials used for accessing your - Azure Key Vault. Not required if using managed identity instead. - :type access_credentials: - ~search_service_client.models.AzureActiveDirectoryApplicationCredentials - """ - - _validation = { - 'key_vault_key_name': {'required': True}, - 'key_vault_key_version': {'required': True}, - 'key_vault_uri': {'required': True}, - } - - _attribute_map = { - 'key_vault_key_name': {'key': 'keyVaultKeyName', 'type': 'str'}, - 'key_vault_key_version': {'key': 'keyVaultKeyVersion', 'type': 'str'}, - 'key_vault_uri': {'key': 'keyVaultUri', 'type': 'str'}, - 'access_credentials': {'key': 'accessCredentials', 'type': 'AzureActiveDirectoryApplicationCredentials'}, - } - - def __init__( - self, - **kwargs - ): - super(EncryptionKey, self).__init__(**kwargs) - self.key_vault_key_name = kwargs.get('key_vault_key_name', None) - self.key_vault_key_version = kwargs.get('key_vault_key_version', None) - self.key_vault_uri = kwargs.get('key_vault_uri', None) - self.access_credentials = kwargs.get('access_credentials', None) - - -class EntityRecognitionSkill(Skill): - """Text analytics entity recognition. +class EntityRecognitionSkill(SearchIndexerSkill): + """Text analytics entity recognition. All required parameters must be populated in order to send to Azure. @@ -1376,17 +1359,17 @@ class EntityRecognitionSkill(Skill): :type context: str :param inputs: Required. Inputs of the skills could be a column in the source data set, or the output of an upstream skill. - :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :type inputs: list[~azure.search.documents.models.InputFieldMappingEntry] :param outputs: Required. The output of a skill is either a field in a search index, or a value that can be consumed as an input by another skill. - :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + :type outputs: list[~azure.search.documents.models.OutputFieldMappingEntry] :param categories: A list of entity categories that should be extracted. - :type categories: list[str or ~search_service_client.models.EntityCategory] + :type categories: list[str or ~azure.search.documents.models.EntityCategory] :param default_language_code: A value indicating which language code to use. Default is en. - Possible values include: 'ar', 'cs', 'zh-Hans', 'zh-Hant', 'da', 'nl', 'en', 'fi', 'fr', 'de', - 'el', 'hu', 'it', 'ja', 'ko', 'no', 'pl', 'pt-PT', 'pt-BR', 'ru', 'es', 'sv', 'tr'. + Possible values include: "ar", "cs", "zh-Hans", "zh-Hant", "da", "nl", "en", "fi", "fr", "de", + "el", "hu", "it", "ja", "ko", "no", "pl", "pt-PT", "pt-BR", "ru", "es", "sv", "tr". :type default_language_code: str or - ~search_service_client.models.EntityRecognitionSkillLanguage + ~azure.search.documents.models.EntityRecognitionSkillLanguage :param include_typeless_entities: Determines whether or not to include entities which are well known but don't conform to a pre-defined type. If this configuration is not set (default), set to null or set to false, entities which don't conform to one of the pre-defined types will not @@ -1429,184 +1412,6 @@ def __init__( self.minimum_precision = kwargs.get('minimum_precision', None) -class Field(msrest.serialization.Model): - """Represents a field in an index definition, which describes the name, data type, and search behavior of a field. 
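# EntityRecognitionSkill (above) is re-based on SearchIndexerSkill; inputs and
# outputs remain required. A hedged sketch, assuming InputFieldMappingEntry and
# OutputFieldMappingEntry keep their name/source and name/target_name fields:
from azure.search.documents._service._generated.models import (
    EntityRecognitionSkill,
    InputFieldMappingEntry,
    OutputFieldMappingEntry,
)

skill = EntityRecognitionSkill(
    inputs=[InputFieldMappingEntry(name="text", source="/document/content")],
    outputs=[OutputFieldMappingEntry(name="organizations", target_name="orgs")],
    categories=["organization"],
    default_language_code="en",
)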
- - All required parameters must be populated in order to send to Azure. - - :param name: Required. The name of the field, which must be unique within the fields collection - of the index or parent field. - :type name: str - :param type: Required. The data type of the field. Possible values include: 'Edm.String', - 'Edm.Int32', 'Edm.Int64', 'Edm.Double', 'Edm.Boolean', 'Edm.DateTimeOffset', - 'Edm.GeographyPoint', 'Edm.ComplexType'. - :type type: str or ~search_service_client.models.DataType - :param key: A value indicating whether the field uniquely identifies documents in the index. - Exactly one top-level field in each index must be chosen as the key field and it must be of - type Edm.String. Key fields can be used to look up documents directly and update or delete - specific documents. Default is false for simple fields and null for complex fields. - :type key: bool - :param retrievable: A value indicating whether the field can be returned in a search result. - You can disable this option if you want to use a field (for example, margin) as a filter, - sorting, or scoring mechanism but do not want the field to be visible to the end user. This - property must be true for key fields, and it must be null for complex fields. This property can - be changed on existing fields. Enabling this property does not cause any increase in index - storage requirements. Default is true for simple fields and null for complex fields. - :type retrievable: bool - :param searchable: A value indicating whether the field is full-text searchable. This means it - will undergo analysis such as word-breaking during indexing. If you set a searchable field to a - value like "sunny day", internally it will be split into the individual tokens "sunny" and - "day". This enables full-text searches for these terms. Fields of type Edm.String or - Collection(Edm.String) are searchable by default. This property must be false for simple fields - of other non-string data types, and it must be null for complex fields. Note: searchable fields - consume extra space in your index since Azure Cognitive Search will store an additional - tokenized version of the field value for full-text searches. If you want to save space in your - index and you don't need a field to be included in searches, set searchable to false. - :type searchable: bool - :param filterable: A value indicating whether to enable the field to be referenced in $filter - queries. filterable differs from searchable in how strings are handled. Fields of type - Edm.String or Collection(Edm.String) that are filterable do not undergo word-breaking, so - comparisons are for exact matches only. For example, if you set such a field f to "sunny day", - $filter=f eq 'sunny' will find no matches, but $filter=f eq 'sunny day' will. This property - must be null for complex fields. Default is true for simple fields and null for complex fields. - :type filterable: bool - :param sortable: A value indicating whether to enable the field to be referenced in $orderby - expressions. By default Azure Cognitive Search sorts results by score, but in many experiences - users will want to sort by fields in the documents. A simple field can be sortable only if it - is single-valued (it has a single value in the scope of the parent document). Simple collection - fields cannot be sortable, since they are multi-valued. Simple sub-fields of complex - collections are also multi-valued, and therefore cannot be sortable. 
This is true whether it's - an immediate parent field, or an ancestor field, that's the complex collection. Complex fields - cannot be sortable and the sortable property must be null for such fields. The default for - sortable is true for single-valued simple fields, false for multi-valued simple fields, and - null for complex fields. - :type sortable: bool - :param facetable: A value indicating whether to enable the field to be referenced in facet - queries. Typically used in a presentation of search results that includes hit count by category - (for example, search for digital cameras and see hits by brand, by megapixels, by price, and so - on). This property must be null for complex fields. Fields of type Edm.GeographyPoint or - Collection(Edm.GeographyPoint) cannot be facetable. Default is true for all other simple - fields. - :type facetable: bool - :param analyzer: The name of the analyzer to use for the field. This option can be used only - with searchable fields and it can't be set together with either searchAnalyzer or - indexAnalyzer. Once the analyzer is chosen, it cannot be changed for the field. Must be null - for complex fields. Possible values include: 'ar.microsoft', 'ar.lucene', 'hy.lucene', - 'bn.microsoft', 'eu.lucene', 'bg.microsoft', 'bg.lucene', 'ca.microsoft', 'ca.lucene', 'zh- - Hans.microsoft', 'zh-Hans.lucene', 'zh-Hant.microsoft', 'zh-Hant.lucene', 'hr.microsoft', - 'cs.microsoft', 'cs.lucene', 'da.microsoft', 'da.lucene', 'nl.microsoft', 'nl.lucene', - 'en.microsoft', 'en.lucene', 'et.microsoft', 'fi.microsoft', 'fi.lucene', 'fr.microsoft', - 'fr.lucene', 'gl.lucene', 'de.microsoft', 'de.lucene', 'el.microsoft', 'el.lucene', - 'gu.microsoft', 'he.microsoft', 'hi.microsoft', 'hi.lucene', 'hu.microsoft', 'hu.lucene', - 'is.microsoft', 'id.microsoft', 'id.lucene', 'ga.lucene', 'it.microsoft', 'it.lucene', - 'ja.microsoft', 'ja.lucene', 'kn.microsoft', 'ko.microsoft', 'ko.lucene', 'lv.microsoft', - 'lv.lucene', 'lt.microsoft', 'ml.microsoft', 'ms.microsoft', 'mr.microsoft', 'nb.microsoft', - 'no.lucene', 'fa.lucene', 'pl.microsoft', 'pl.lucene', 'pt-BR.microsoft', 'pt-BR.lucene', 'pt- - PT.microsoft', 'pt-PT.lucene', 'pa.microsoft', 'ro.microsoft', 'ro.lucene', 'ru.microsoft', - 'ru.lucene', 'sr-cyrillic.microsoft', 'sr-latin.microsoft', 'sk.microsoft', 'sl.microsoft', - 'es.microsoft', 'es.lucene', 'sv.microsoft', 'sv.lucene', 'ta.microsoft', 'te.microsoft', - 'th.microsoft', 'th.lucene', 'tr.microsoft', 'tr.lucene', 'uk.microsoft', 'ur.microsoft', - 'vi.microsoft', 'standard.lucene', 'standardasciifolding.lucene', 'keyword', 'pattern', - 'simple', 'stop', 'whitespace'. - :type analyzer: str or ~search_service_client.models.AnalyzerName - :param search_analyzer: The name of the analyzer used at search time for the field. This option - can be used only with searchable fields. It must be set together with indexAnalyzer and it - cannot be set together with the analyzer option. This property cannot be set to the name of a - language analyzer; use the analyzer property instead if you need a language analyzer. This - analyzer can be updated on an existing field. Must be null for complex fields. 
Possible values - include: 'ar.microsoft', 'ar.lucene', 'hy.lucene', 'bn.microsoft', 'eu.lucene', 'bg.microsoft', - 'bg.lucene', 'ca.microsoft', 'ca.lucene', 'zh-Hans.microsoft', 'zh-Hans.lucene', 'zh- - Hant.microsoft', 'zh-Hant.lucene', 'hr.microsoft', 'cs.microsoft', 'cs.lucene', 'da.microsoft', - 'da.lucene', 'nl.microsoft', 'nl.lucene', 'en.microsoft', 'en.lucene', 'et.microsoft', - 'fi.microsoft', 'fi.lucene', 'fr.microsoft', 'fr.lucene', 'gl.lucene', 'de.microsoft', - 'de.lucene', 'el.microsoft', 'el.lucene', 'gu.microsoft', 'he.microsoft', 'hi.microsoft', - 'hi.lucene', 'hu.microsoft', 'hu.lucene', 'is.microsoft', 'id.microsoft', 'id.lucene', - 'ga.lucene', 'it.microsoft', 'it.lucene', 'ja.microsoft', 'ja.lucene', 'kn.microsoft', - 'ko.microsoft', 'ko.lucene', 'lv.microsoft', 'lv.lucene', 'lt.microsoft', 'ml.microsoft', - 'ms.microsoft', 'mr.microsoft', 'nb.microsoft', 'no.lucene', 'fa.lucene', 'pl.microsoft', - 'pl.lucene', 'pt-BR.microsoft', 'pt-BR.lucene', 'pt-PT.microsoft', 'pt-PT.lucene', - 'pa.microsoft', 'ro.microsoft', 'ro.lucene', 'ru.microsoft', 'ru.lucene', 'sr- - cyrillic.microsoft', 'sr-latin.microsoft', 'sk.microsoft', 'sl.microsoft', 'es.microsoft', - 'es.lucene', 'sv.microsoft', 'sv.lucene', 'ta.microsoft', 'te.microsoft', 'th.microsoft', - 'th.lucene', 'tr.microsoft', 'tr.lucene', 'uk.microsoft', 'ur.microsoft', 'vi.microsoft', - 'standard.lucene', 'standardasciifolding.lucene', 'keyword', 'pattern', 'simple', 'stop', - 'whitespace'. - :type search_analyzer: str or ~search_service_client.models.AnalyzerName - :param index_analyzer: The name of the analyzer used at indexing time for the field. This - option can be used only with searchable fields. It must be set together with searchAnalyzer and - it cannot be set together with the analyzer option. This property cannot be set to the name of - a language analyzer; use the analyzer property instead if you need a language analyzer. Once - the analyzer is chosen, it cannot be changed for the field. Must be null for complex fields. - Possible values include: 'ar.microsoft', 'ar.lucene', 'hy.lucene', 'bn.microsoft', 'eu.lucene', - 'bg.microsoft', 'bg.lucene', 'ca.microsoft', 'ca.lucene', 'zh-Hans.microsoft', 'zh- - Hans.lucene', 'zh-Hant.microsoft', 'zh-Hant.lucene', 'hr.microsoft', 'cs.microsoft', - 'cs.lucene', 'da.microsoft', 'da.lucene', 'nl.microsoft', 'nl.lucene', 'en.microsoft', - 'en.lucene', 'et.microsoft', 'fi.microsoft', 'fi.lucene', 'fr.microsoft', 'fr.lucene', - 'gl.lucene', 'de.microsoft', 'de.lucene', 'el.microsoft', 'el.lucene', 'gu.microsoft', - 'he.microsoft', 'hi.microsoft', 'hi.lucene', 'hu.microsoft', 'hu.lucene', 'is.microsoft', - 'id.microsoft', 'id.lucene', 'ga.lucene', 'it.microsoft', 'it.lucene', 'ja.microsoft', - 'ja.lucene', 'kn.microsoft', 'ko.microsoft', 'ko.lucene', 'lv.microsoft', 'lv.lucene', - 'lt.microsoft', 'ml.microsoft', 'ms.microsoft', 'mr.microsoft', 'nb.microsoft', 'no.lucene', - 'fa.lucene', 'pl.microsoft', 'pl.lucene', 'pt-BR.microsoft', 'pt-BR.lucene', 'pt-PT.microsoft', - 'pt-PT.lucene', 'pa.microsoft', 'ro.microsoft', 'ro.lucene', 'ru.microsoft', 'ru.lucene', 'sr- - cyrillic.microsoft', 'sr-latin.microsoft', 'sk.microsoft', 'sl.microsoft', 'es.microsoft', - 'es.lucene', 'sv.microsoft', 'sv.lucene', 'ta.microsoft', 'te.microsoft', 'th.microsoft', - 'th.lucene', 'tr.microsoft', 'tr.lucene', 'uk.microsoft', 'ur.microsoft', 'vi.microsoft', - 'standard.lucene', 'standardasciifolding.lucene', 'keyword', 'pattern', 'simple', 'stop', - 'whitespace'. 
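# Editor's sketch of the analyzer rules described in the docstring above,
# using the Field model as defined in this file. This patch removes/renames
# the class elsewhere, so both the class name and the import path are
# assumptions here, not part of the diff:
from azure.search.documents.models import Field

# A language analyzer covers both indexing and querying, and excludes the
# searchAnalyzer/indexAnalyzer pair:
title = Field(name="title", type="Edm.String", searchable=True,
              analyzer="en.lucene")

# Alternatively, set search_analyzer and index_analyzer together (they must
# be set as a pair, and cannot be combined with 'analyzer'):
tags = Field(name="tags", type="Collection(Edm.String)", searchable=True,
             search_analyzer="whitespace", index_analyzer="whitespace")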
- :type index_analyzer: str or ~search_service_client.models.AnalyzerName - :param synonym_maps: A list of the names of synonym maps to associate with this field. This - option can be used only with searchable fields. Currently only one synonym map per field is - supported. Assigning a synonym map to a field ensures that query terms targeting that field are - expanded at query-time using the rules in the synonym map. This attribute can be changed on - existing fields. Must be null or an empty collection for complex fields. - :type synonym_maps: list[str] - :param fields: A list of sub-fields if this is a field of type Edm.ComplexType or - Collection(Edm.ComplexType). Must be null or empty for simple fields. - :type fields: list[~search_service_client.models.Field] - """ - - _validation = { - 'name': {'required': True}, - 'type': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'str'}, - 'key': {'key': 'key', 'type': 'bool'}, - 'retrievable': {'key': 'retrievable', 'type': 'bool'}, - 'searchable': {'key': 'searchable', 'type': 'bool'}, - 'filterable': {'key': 'filterable', 'type': 'bool'}, - 'sortable': {'key': 'sortable', 'type': 'bool'}, - 'facetable': {'key': 'facetable', 'type': 'bool'}, - 'analyzer': {'key': 'analyzer', 'type': 'str'}, - 'search_analyzer': {'key': 'searchAnalyzer', 'type': 'str'}, - 'index_analyzer': {'key': 'indexAnalyzer', 'type': 'str'}, - 'synonym_maps': {'key': 'synonymMaps', 'type': '[str]'}, - 'fields': {'key': 'fields', 'type': '[Field]'}, - } - - def __init__( - self, - **kwargs - ): - super(Field, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.type = kwargs.get('type', None) - self.key = kwargs.get('key', None) - self.retrievable = kwargs.get('retrievable', None) - self.searchable = kwargs.get('searchable', None) - self.filterable = kwargs.get('filterable', None) - self.sortable = kwargs.get('sortable', None) - self.facetable = kwargs.get('facetable', None) - self.analyzer = kwargs.get('analyzer', None) - self.search_analyzer = kwargs.get('search_analyzer', None) - self.index_analyzer = kwargs.get('index_analyzer', None) - self.synonym_maps = kwargs.get('synonym_maps', None) - self.fields = kwargs.get('fields', None) - - class FieldMapping(msrest.serialization.Model): """Defines a mapping between a field in a data source and a target field in an index. @@ -1618,7 +1423,7 @@ class FieldMapping(msrest.serialization.Model): name by default. :type target_field_name: str :param mapping_function: A function to apply to each source field value before indexing. - :type mapping_function: ~search_service_client.models.FieldMappingFunction + :type mapping_function: ~azure.search.documents.models.FieldMappingFunction """ _validation = { @@ -1636,7 +1441,7 @@ def __init__( **kwargs ): super(FieldMapping, self).__init__(**kwargs) - self.source_field_name = kwargs.get('source_field_name', None) + self.source_field_name = kwargs['source_field_name'] self.target_field_name = kwargs.get('target_field_name', None) self.mapping_function = kwargs.get('mapping_function', None) @@ -1667,7 +1472,7 @@ def __init__( **kwargs ): super(FieldMappingFunction, self).__init__(**kwargs) - self.name = kwargs.get('name', None) + self.name = kwargs['name'] self.parameters = kwargs.get('parameters', None) @@ -1685,11 +1490,11 @@ class FreshnessScoringFunction(ScoringFunction): 1.0. 
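# The recurring change in this patch — kwargs.get('x', None) becoming
# kwargs['x'] for required parameters, as in FieldMapping.source_field_name
# above — makes construction fail fast. A minimal sketch of the difference
# (import path assumed, matching the updated cross-references):
from azure.search.documents.models import FieldMapping

FieldMapping(source_field_name="_id", target_field_name="hotelId")  # fine
try:
    FieldMapping(target_field_name="hotelId")  # required kwarg omitted
except KeyError:
    pass  # previously this built a mapping with source_field_name=None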
:type boost: float :param interpolation: A value indicating how boosting will be interpolated across document - scores; defaults to "Linear". Possible values include: 'linear', 'constant', 'quadratic', - 'logarithmic'. - :type interpolation: str or ~search_service_client.models.ScoringFunctionInterpolation + scores; defaults to "Linear". Possible values include: "linear", "constant", "quadratic", + "logarithmic". + :type interpolation: str or ~azure.search.documents.models.ScoringFunctionInterpolation :param parameters: Required. Parameter values for the freshness scoring function. - :type parameters: ~search_service_client.models.FreshnessScoringParameters + :type parameters: ~azure.search.documents.models.FreshnessScoringParameters """ _validation = { @@ -1713,7 +1518,7 @@ def __init__( ): super(FreshnessScoringFunction, self).__init__(**kwargs) self.type = 'freshness' - self.parameters = kwargs.get('parameters', None) + self.parameters = kwargs['parameters'] class FreshnessScoringParameters(msrest.serialization.Model): @@ -1739,7 +1544,7 @@ def __init__( **kwargs ): super(FreshnessScoringParameters, self).__init__(**kwargs) - self.boosting_duration = kwargs.get('boosting_duration', None) + self.boosting_duration = kwargs['boosting_duration'] class GetIndexStatisticsResult(msrest.serialization.Model): @@ -1802,10 +1607,10 @@ def __init__( ): super(HighWaterMarkChangeDetectionPolicy, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy' - self.high_water_mark_column_name = kwargs.get('high_water_mark_column_name', None) + self.high_water_mark_column_name = kwargs['high_water_mark_column_name'] -class ImageAnalysisSkill(Skill): +class ImageAnalysisSkill(SearchIndexerSkill): """A skill that analyzes image files. It extracts a rich set of visual features based on the image content. All required parameters must be populated in order to send to Azure. @@ -1825,17 +1630,17 @@ class ImageAnalysisSkill(Skill): :type context: str :param inputs: Required. Inputs of the skills could be a column in the source data set, or the output of an upstream skill. - :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :type inputs: list[~azure.search.documents.models.InputFieldMappingEntry] :param outputs: Required. The output of a skill is either a field in a search index, or a value that can be consumed as an input by another skill. - :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + :type outputs: list[~azure.search.documents.models.OutputFieldMappingEntry] :param default_language_code: A value indicating which language code to use. Default is en. - Possible values include: 'en', 'es', 'ja', 'pt', 'zh'. - :type default_language_code: str or ~search_service_client.models.ImageAnalysisSkillLanguage + Possible values include: "en", "es", "ja", "pt", "zh". + :type default_language_code: str or ~azure.search.documents.models.ImageAnalysisSkillLanguage :param visual_features: A list of visual features. - :type visual_features: list[str or ~search_service_client.models.VisualFeature] + :type visual_features: list[str or ~azure.search.documents.models.VisualFeature] :param details: A string indicating which domain-specific details to return. 
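# FreshnessScoringParameters.boosting_duration is required and is an msrest
# 'duration', so a timedelta round-trips as ISO-8601 (P30D). A sketch;
# field_name and boost come from the ScoringFunction base class, which is
# not shown in this hunk, and the field name is hypothetical:
from datetime import timedelta
from azure.search.documents.models import (
    FreshnessScoringFunction,
    FreshnessScoringParameters,
)

fresh = FreshnessScoringFunction(
    field_name="lastRenovationDate",
    boost=2.0,
    interpolation="quadratic",
    parameters=FreshnessScoringParameters(boosting_duration=timedelta(days=30)),
)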
- :type details: list[str or ~search_service_client.models.ImageDetail] + :type details: list[str or ~azure.search.documents.models.ImageDetail] """ _validation = { @@ -1867,329 +1672,98 @@ def __init__( self.details = kwargs.get('details', None) -class Index(msrest.serialization.Model): - """Represents a search index definition, which describes the fields and search behavior of an index. +class IndexerExecutionResult(msrest.serialization.Model): + """Represents the result of an individual indexer execution. + + Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. - :param name: Required. The name of the index. - :type name: str - :param fields: Required. The fields of the index. - :type fields: list[~search_service_client.models.Field] - :param scoring_profiles: The scoring profiles for the index. - :type scoring_profiles: list[~search_service_client.models.ScoringProfile] - :param default_scoring_profile: The name of the scoring profile to use if none is specified in - the query. If this property is not set and no scoring profile is specified in the query, then - default scoring (tf-idf) will be used. - :type default_scoring_profile: str - :param cors_options: Options to control Cross-Origin Resource Sharing (CORS) for the index. - :type cors_options: ~search_service_client.models.CorsOptions - :param suggesters: The suggesters for the index. - :type suggesters: list[~search_service_client.models.Suggester] - :param analyzers: The analyzers for the index. - :type analyzers: list[~search_service_client.models.Analyzer] - :param tokenizers: The tokenizers for the index. - :type tokenizers: list[~search_service_client.models.Tokenizer] - :param token_filters: The token filters for the index. - :type token_filters: list[~search_service_client.models.TokenFilter] - :param char_filters: The character filters for the index. - :type char_filters: list[~search_service_client.models.CharFilter] - :param encryption_key: A description of an encryption key that you create in Azure Key Vault. - This key is used to provide an additional level of encryption-at-rest for your data when you - want full assurance that no one, not even Microsoft, can decrypt your data in Azure Cognitive - Search. Once you have encrypted your data, it will always remain encrypted. Azure Cognitive - Search will ignore attempts to set this property to null. You can change this property as - needed if you want to rotate your encryption key; Your data will be unaffected. Encryption with - customer-managed keys is not available for free search services, and is only available for paid - services created on or after January 1, 2019. - :type encryption_key: ~search_service_client.models.EncryptionKey - :param e_tag: The ETag of the index. - :type e_tag: str + :ivar status: Required. The outcome of this indexer execution. Possible values include: + "transientFailure", "success", "inProgress", "reset". + :vartype status: str or ~azure.search.documents.models.IndexerExecutionStatus + :ivar error_message: The error message indicating the top-level error, if any. + :vartype error_message: str + :ivar start_time: The start time of this indexer execution. + :vartype start_time: ~datetime.datetime + :ivar end_time: The end time of this indexer execution, if the execution has already completed. + :vartype end_time: ~datetime.datetime + :ivar errors: Required. The item-level indexing errors. 
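# A sketch wiring the ImageAnalysisSkill above to its required inputs and
# outputs. The field names and the "/document/normalized_images/*" context
# are hypothetical, and the import path follows the cross-references added
# in this patch:
from azure.search.documents.models import (
    ImageAnalysisSkill,
    InputFieldMappingEntry,
    OutputFieldMappingEntry,
)

skill = ImageAnalysisSkill(
    context="/document/normalized_images/*",
    inputs=[InputFieldMappingEntry(name="image",
                                   source="/document/normalized_images/*")],
    outputs=[OutputFieldMappingEntry(name="description",
                                     target_name="imageDescription")],
    default_language_code="en",
    visual_features=["description", "tags"],  # VisualFeature values
)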
+ :vartype errors: list[~azure.search.documents.models.SearchIndexerError] + :ivar warnings: Required. The item-level indexing warnings. + :vartype warnings: list[~azure.search.documents.models.SearchIndexerWarning] + :ivar item_count: Required. The number of items that were processed during this indexer + execution. This includes both successfully processed items and items where indexing was + attempted but failed. + :vartype item_count: int + :ivar failed_item_count: Required. The number of items that failed to be indexed during this + indexer execution. + :vartype failed_item_count: int + :ivar initial_tracking_state: Change tracking state with which an indexer execution started. + :vartype initial_tracking_state: str + :ivar final_tracking_state: Change tracking state with which an indexer execution finished. + :vartype final_tracking_state: str """ _validation = { - 'name': {'required': True}, - 'fields': {'required': True}, + 'status': {'required': True, 'readonly': True}, + 'error_message': {'readonly': True}, + 'start_time': {'readonly': True}, + 'end_time': {'readonly': True}, + 'errors': {'required': True, 'readonly': True}, + 'warnings': {'required': True, 'readonly': True}, + 'item_count': {'required': True, 'readonly': True}, + 'failed_item_count': {'required': True, 'readonly': True}, + 'initial_tracking_state': {'readonly': True}, + 'final_tracking_state': {'readonly': True}, } _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'fields': {'key': 'fields', 'type': '[Field]'}, - 'scoring_profiles': {'key': 'scoringProfiles', 'type': '[ScoringProfile]'}, - 'default_scoring_profile': {'key': 'defaultScoringProfile', 'type': 'str'}, - 'cors_options': {'key': 'corsOptions', 'type': 'CorsOptions'}, - 'suggesters': {'key': 'suggesters', 'type': '[Suggester]'}, - 'analyzers': {'key': 'analyzers', 'type': '[Analyzer]'}, - 'tokenizers': {'key': 'tokenizers', 'type': '[Tokenizer]'}, - 'token_filters': {'key': 'tokenFilters', 'type': '[TokenFilter]'}, - 'char_filters': {'key': 'charFilters', 'type': '[CharFilter]'}, - 'encryption_key': {'key': 'encryptionKey', 'type': 'EncryptionKey'}, - 'e_tag': {'key': '@odata\\.etag', 'type': 'str'}, + 'status': {'key': 'status', 'type': 'str'}, + 'error_message': {'key': 'errorMessage', 'type': 'str'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + 'errors': {'key': 'errors', 'type': '[SearchIndexerError]'}, + 'warnings': {'key': 'warnings', 'type': '[SearchIndexerWarning]'}, + 'item_count': {'key': 'itemsProcessed', 'type': 'int'}, + 'failed_item_count': {'key': 'itemsFailed', 'type': 'int'}, + 'initial_tracking_state': {'key': 'initialTrackingState', 'type': 'str'}, + 'final_tracking_state': {'key': 'finalTrackingState', 'type': 'str'}, } def __init__( self, **kwargs ): - super(Index, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.fields = kwargs.get('fields', None) - self.scoring_profiles = kwargs.get('scoring_profiles', None) - self.default_scoring_profile = kwargs.get('default_scoring_profile', None) - self.cors_options = kwargs.get('cors_options', None) - self.suggesters = kwargs.get('suggesters', None) - self.analyzers = kwargs.get('analyzers', None) - self.tokenizers = kwargs.get('tokenizers', None) - self.token_filters = kwargs.get('token_filters', None) - self.char_filters = kwargs.get('char_filters', None) - self.encryption_key = kwargs.get('encryption_key', None) - self.e_tag = kwargs.get('e_tag', None) + super(IndexerExecutionResult, 
self).__init__(**kwargs) + self.status = None + self.error_message = None + self.start_time = None + self.end_time = None + self.errors = None + self.warnings = None + self.item_count = None + self.failed_item_count = None + self.initial_tracking_state = None + self.final_tracking_state = None -class Indexer(msrest.serialization.Model): - """Represents an indexer. +class IndexingParameters(msrest.serialization.Model): + """Represents parameters for indexer execution. - All required parameters must be populated in order to send to Azure. - - :param name: Required. The name of the indexer. - :type name: str - :param description: The description of the indexer. - :type description: str - :param data_source_name: Required. The name of the datasource from which this indexer reads - data. - :type data_source_name: str - :param skillset_name: The name of the skillset executing with this indexer. - :type skillset_name: str - :param target_index_name: Required. The name of the index to which this indexer writes data. - :type target_index_name: str - :param schedule: The schedule for this indexer. - :type schedule: ~search_service_client.models.IndexingSchedule - :param parameters: Parameters for indexer execution. - :type parameters: ~search_service_client.models.IndexingParameters - :param field_mappings: Defines mappings between fields in the data source and corresponding - target fields in the index. - :type field_mappings: list[~search_service_client.models.FieldMapping] - :param output_field_mappings: Output field mappings are applied after enrichment and - immediately before indexing. - :type output_field_mappings: list[~search_service_client.models.FieldMapping] - :param is_disabled: A value indicating whether the indexer is disabled. Default is false. - :type is_disabled: bool - :param e_tag: The ETag of the Indexer. - :type e_tag: str - """ - - _validation = { - 'name': {'required': True}, - 'data_source_name': {'required': True}, - 'target_index_name': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'description': {'key': 'description', 'type': 'str'}, - 'data_source_name': {'key': 'dataSourceName', 'type': 'str'}, - 'skillset_name': {'key': 'skillsetName', 'type': 'str'}, - 'target_index_name': {'key': 'targetIndexName', 'type': 'str'}, - 'schedule': {'key': 'schedule', 'type': 'IndexingSchedule'}, - 'parameters': {'key': 'parameters', 'type': 'IndexingParameters'}, - 'field_mappings': {'key': 'fieldMappings', 'type': '[FieldMapping]'}, - 'output_field_mappings': {'key': 'outputFieldMappings', 'type': '[FieldMapping]'}, - 'is_disabled': {'key': 'disabled', 'type': 'bool'}, - 'e_tag': {'key': '@odata\\.etag', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(Indexer, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.description = kwargs.get('description', None) - self.data_source_name = kwargs.get('data_source_name', None) - self.skillset_name = kwargs.get('skillset_name', None) - self.target_index_name = kwargs.get('target_index_name', None) - self.schedule = kwargs.get('schedule', None) - self.parameters = kwargs.get('parameters', None) - self.field_mappings = kwargs.get('field_mappings', None) - self.output_field_mappings = kwargs.get('output_field_mappings', None) - self.is_disabled = kwargs.get('is_disabled', False) - self.e_tag = kwargs.get('e_tag', None) - - -class IndexerExecutionInfo(msrest.serialization.Model): - """Represents the current status and execution history of an indexer. 
- - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar status: Required. Overall indexer status. Possible values include: 'unknown', 'error', - 'running'. - :vartype status: str or ~search_service_client.models.IndexerStatus - :ivar last_result: The result of the most recent or an in-progress indexer execution. - :vartype last_result: ~search_service_client.models.IndexerExecutionResult - :ivar execution_history: Required. History of the recent indexer executions, sorted in reverse - chronological order. - :vartype execution_history: list[~search_service_client.models.IndexerExecutionResult] - :ivar limits: Required. The execution limits for the indexer. - :vartype limits: ~search_service_client.models.IndexerLimits - """ - - _validation = { - 'status': {'required': True, 'readonly': True}, - 'last_result': {'readonly': True}, - 'execution_history': {'required': True, 'readonly': True}, - 'limits': {'required': True, 'readonly': True}, - } - - _attribute_map = { - 'status': {'key': 'status', 'type': 'str'}, - 'last_result': {'key': 'lastResult', 'type': 'IndexerExecutionResult'}, - 'execution_history': {'key': 'executionHistory', 'type': '[IndexerExecutionResult]'}, - 'limits': {'key': 'limits', 'type': 'IndexerLimits'}, - } - - def __init__( - self, - **kwargs - ): - super(IndexerExecutionInfo, self).__init__(**kwargs) - self.status = None - self.last_result = None - self.execution_history = None - self.limits = None - - -class IndexerExecutionResult(msrest.serialization.Model): - """Represents the result of an individual indexer execution. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar status: Required. The outcome of this indexer execution. Possible values include: - 'transientFailure', 'success', 'inProgress', 'reset'. - :vartype status: str or ~search_service_client.models.IndexerExecutionStatus - :ivar error_message: The error message indicating the top-level error, if any. - :vartype error_message: str - :ivar start_time: The start time of this indexer execution. - :vartype start_time: ~datetime.datetime - :ivar end_time: The end time of this indexer execution, if the execution has already completed. - :vartype end_time: ~datetime.datetime - :ivar errors: Required. The item-level indexing errors. - :vartype errors: list[~search_service_client.models.ItemError] - :ivar warnings: Required. The item-level indexing warnings. - :vartype warnings: list[~search_service_client.models.ItemWarning] - :ivar item_count: Required. The number of items that were processed during this indexer - execution. This includes both successfully processed items and items where indexing was - attempted but failed. - :vartype item_count: int - :ivar failed_item_count: Required. The number of items that failed to be indexed during this - indexer execution. - :vartype failed_item_count: int - :ivar initial_tracking_state: Change tracking state with which an indexer execution started. - :vartype initial_tracking_state: str - :ivar final_tracking_state: Change tracking state with which an indexer execution finished. 
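# IndexerExecutionInfo and IndexerExecutionResult mark every field readonly
# and reset them to None in __init__, so locally constructed instances carry
# no data; values appear only on objects deserialized from a service
# response. A sketch of that behavior:
from azure.search.documents.models import IndexerExecutionResult

local = IndexerExecutionResult(status="success")  # readonly kwarg is ignored
assert local.status is None and local.errors is None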
- :vartype final_tracking_state: str - """ - - _validation = { - 'status': {'required': True, 'readonly': True}, - 'error_message': {'readonly': True}, - 'start_time': {'readonly': True}, - 'end_time': {'readonly': True}, - 'errors': {'required': True, 'readonly': True}, - 'warnings': {'required': True, 'readonly': True}, - 'item_count': {'required': True, 'readonly': True}, - 'failed_item_count': {'required': True, 'readonly': True}, - 'initial_tracking_state': {'readonly': True}, - 'final_tracking_state': {'readonly': True}, - } - - _attribute_map = { - 'status': {'key': 'status', 'type': 'str'}, - 'error_message': {'key': 'errorMessage', 'type': 'str'}, - 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, - 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, - 'errors': {'key': 'errors', 'type': '[ItemError]'}, - 'warnings': {'key': 'warnings', 'type': '[ItemWarning]'}, - 'item_count': {'key': 'itemsProcessed', 'type': 'int'}, - 'failed_item_count': {'key': 'itemsFailed', 'type': 'int'}, - 'initial_tracking_state': {'key': 'initialTrackingState', 'type': 'str'}, - 'final_tracking_state': {'key': 'finalTrackingState', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(IndexerExecutionResult, self).__init__(**kwargs) - self.status = None - self.error_message = None - self.start_time = None - self.end_time = None - self.errors = None - self.warnings = None - self.item_count = None - self.failed_item_count = None - self.initial_tracking_state = None - self.final_tracking_state = None - - -class IndexerLimits(msrest.serialization.Model): - """IndexerLimits. - - Variables are only populated by the server, and will be ignored when sending a request. - - :ivar max_run_time: The maximum duration that the indexer is permitted to run for one - execution. - :vartype max_run_time: ~datetime.timedelta - :ivar max_document_extraction_size: The maximum size of a document, in bytes, which will be - considered valid for indexing. - :vartype max_document_extraction_size: long - :ivar max_document_content_characters_to_extract: The maximum number of characters that will be - extracted from a document picked up for indexing. - :vartype max_document_content_characters_to_extract: long - """ - - _validation = { - 'max_run_time': {'readonly': True}, - 'max_document_extraction_size': {'readonly': True}, - 'max_document_content_characters_to_extract': {'readonly': True}, - } - - _attribute_map = { - 'max_run_time': {'key': 'maxRunTime', 'type': 'duration'}, - 'max_document_extraction_size': {'key': 'maxDocumentExtractionSize', 'type': 'long'}, - 'max_document_content_characters_to_extract': {'key': 'maxDocumentContentCharactersToExtract', 'type': 'long'}, - } - - def __init__( - self, - **kwargs - ): - super(IndexerLimits, self).__init__(**kwargs) - self.max_run_time = None - self.max_document_extraction_size = None - self.max_document_content_characters_to_extract = None - - -class IndexingParameters(msrest.serialization.Model): - """Represents parameters for indexer execution. - - :param batch_size: The number of items that are read from the data source and indexed as a - single batch in order to improve performance. The default depends on the data source type. - :type batch_size: int - :param max_failed_items: The maximum number of items that can fail indexing for indexer - execution to still be considered successful. -1 means no limit. Default is 0. 
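# IndexingParameters sketch — the failure limits described here use -1 for
# "no limit", and configuration keys are service-defined (the "parsingMode"
# key below is an assumption for illustration, not part of this diff):
from azure.search.documents.models import IndexingParameters

params = IndexingParameters(
    batch_size=100,
    max_failed_items=-1,            # tolerate any number of failed items
    max_failed_items_per_batch=-1,
    configuration={"parsingMode": "json"},
)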
- :type max_failed_items: int - :param max_failed_items_per_batch: The maximum number of items in a single batch that can fail - indexing for the batch to still be considered successful. -1 means no limit. Default is 0. - :type max_failed_items_per_batch: int - :param configuration: A dictionary of indexer-specific configuration properties. Each name is - the name of a specific property. Each value must be of a primitive type. - :type configuration: dict[str, object] - """ + :param batch_size: The number of items that are read from the data source and indexed as a + single batch in order to improve performance. The default depends on the data source type. + :type batch_size: int + :param max_failed_items: The maximum number of items that can fail indexing for indexer + execution to still be considered successful. -1 means no limit. Default is 0. + :type max_failed_items: int + :param max_failed_items_per_batch: The maximum number of items in a single batch that can fail + indexing for the batch to still be considered successful. -1 means no limit. Default is 0. + :type max_failed_items_per_batch: int + :param configuration: A dictionary of indexer-specific configuration properties. Each name is + the name of a specific property. Each value must be of a primitive type. + :type configuration: dict[str, object] + """ _attribute_map = { 'batch_size': {'key': 'batchSize', 'type': 'int'}, @@ -2234,7 +1808,7 @@ def __init__( **kwargs ): super(IndexingSchedule, self).__init__(**kwargs) - self.interval = kwargs.get('interval', None) + self.interval = kwargs['interval'] self.start_time = kwargs.get('start_time', None) @@ -2250,7 +1824,7 @@ class InputFieldMappingEntry(msrest.serialization.Model): :param source_context: The source context used for selecting recursive inputs. :type source_context: str :param inputs: The recursive inputs used when creating a complex type. - :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :type inputs: list[~azure.search.documents.models.InputFieldMappingEntry] """ _validation = { @@ -2269,152 +1843,42 @@ def __init__( **kwargs ): super(InputFieldMappingEntry, self).__init__(**kwargs) - self.name = kwargs.get('name', None) + self.name = kwargs['name'] self.source = kwargs.get('source', None) self.source_context = kwargs.get('source_context', None) self.inputs = kwargs.get('inputs', None) -class ItemError(msrest.serialization.Model): - """Represents an item- or document-level indexing error. - - Variables are only populated by the server, and will be ignored when sending a request. +class KeepTokenFilter(TokenFilter): + """A token filter that only keeps tokens with text contained in a specified list of words. This token filter is implemented using Apache Lucene. All required parameters must be populated in order to send to Azure. - :ivar key: The key of the item for which indexing failed. - :vartype key: str - :ivar error_message: Required. The message describing the error that occurred while processing - the item. - :vartype error_message: str - :ivar status_code: Required. The status code indicating why the indexing operation failed. - Possible values include: 400 for a malformed input document, 404 for document not found, 409 - for a version conflict, 422 when the index is temporarily unavailable, or 503 for when the - service is too busy. - :vartype status_code: int - :ivar name: The name of the source at which the error originated. For example, this could refer - to a particular skill in the attached skillset. 
This may not be always available. - :vartype name: str - :ivar details: Additional, verbose details about the error to assist in debugging the indexer. - This may not be always available. - :vartype details: str - :ivar documentation_link: A link to a troubleshooting guide for these classes of errors. This - may not be always available. - :vartype documentation_link: str + :param odata_type: Required. Identifies the concrete type of the token filter.Constant filled + by server. + :type odata_type: str + :param name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :type name: str + :param keep_words: Required. The list of words to keep. + :type keep_words: list[str] + :param lower_case_keep_words: A value indicating whether to lower case all words first. Default + is false. + :type lower_case_keep_words: bool """ _validation = { - 'key': {'readonly': True}, - 'error_message': {'required': True, 'readonly': True}, - 'status_code': {'required': True, 'readonly': True}, - 'name': {'readonly': True}, - 'details': {'readonly': True}, - 'documentation_link': {'readonly': True}, + 'odata_type': {'required': True}, + 'name': {'required': True}, + 'keep_words': {'required': True}, } _attribute_map = { - 'key': {'key': 'key', 'type': 'str'}, - 'error_message': {'key': 'errorMessage', 'type': 'str'}, - 'status_code': {'key': 'statusCode', 'type': 'int'}, + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, - 'details': {'key': 'details', 'type': 'str'}, - 'documentation_link': {'key': 'documentationLink', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(ItemError, self).__init__(**kwargs) - self.key = None - self.error_message = None - self.status_code = None - self.name = None - self.details = None - self.documentation_link = None - - -class ItemWarning(msrest.serialization.Model): - """Represents an item-level warning. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar key: The key of the item which generated a warning. - :vartype key: str - :ivar message: Required. The message describing the warning that occurred while processing the - item. - :vartype message: str - :ivar name: The name of the source at which the warning originated. For example, this could - refer to a particular skill in the attached skillset. This may not be always available. - :vartype name: str - :ivar details: Additional, verbose details about the warning to assist in debugging the - indexer. This may not be always available. - :vartype details: str - :ivar documentation_link: A link to a troubleshooting guide for these classes of warnings. This - may not be always available. 
- :vartype documentation_link: str - """ - - _validation = { - 'key': {'readonly': True}, - 'message': {'required': True, 'readonly': True}, - 'name': {'readonly': True}, - 'details': {'readonly': True}, - 'documentation_link': {'readonly': True}, - } - - _attribute_map = { - 'key': {'key': 'key', 'type': 'str'}, - 'message': {'key': 'message', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'details': {'key': 'details', 'type': 'str'}, - 'documentation_link': {'key': 'documentationLink', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(ItemWarning, self).__init__(**kwargs) - self.key = None - self.message = None - self.name = None - self.details = None - self.documentation_link = None - - -class KeepTokenFilter(TokenFilter): - """A token filter that only keeps tokens with text contained in a specified list of words. This token filter is implemented using Apache Lucene. - - All required parameters must be populated in order to send to Azure. - - :param odata_type: Required. Identifies the concrete type of the token filter.Constant filled - by server. - :type odata_type: str - :param name: Required. The name of the token filter. It must only contain letters, digits, - spaces, dashes or underscores, can only start and end with alphanumeric characters, and is - limited to 128 characters. - :type name: str - :param keep_words: Required. The list of words to keep. - :type keep_words: list[str] - :param lower_case_keep_words: A value indicating whether to lower case all words first. Default - is false. - :type lower_case_keep_words: bool - """ - - _validation = { - 'odata_type': {'required': True}, - 'name': {'required': True}, - 'keep_words': {'required': True}, - } - - _attribute_map = { - 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'keep_words': {'key': 'keepWords', 'type': '[str]'}, - 'lower_case_keep_words': {'key': 'keepWordsCase', 'type': 'bool'}, + 'keep_words': {'key': 'keepWords', 'type': '[str]'}, + 'lower_case_keep_words': {'key': 'keepWordsCase', 'type': 'bool'}, } def __init__( @@ -2423,11 +1887,11 @@ def __init__( ): super(KeepTokenFilter, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.KeepTokenFilter' - self.keep_words = kwargs.get('keep_words', None) + self.keep_words = kwargs['keep_words'] self.lower_case_keep_words = kwargs.get('lower_case_keep_words', False) -class KeyPhraseExtractionSkill(Skill): +class KeyPhraseExtractionSkill(SearchIndexerSkill): """A skill that uses text analytics for key phrase extraction. All required parameters must be populated in order to send to Azure. @@ -2447,15 +1911,15 @@ class KeyPhraseExtractionSkill(Skill): :type context: str :param inputs: Required. Inputs of the skills could be a column in the source data set, or the output of an upstream skill. - :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :type inputs: list[~azure.search.documents.models.InputFieldMappingEntry] :param outputs: Required. The output of a skill is either a field in a search index, or a value that can be consumed as an input by another skill. - :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + :type outputs: list[~azure.search.documents.models.OutputFieldMappingEntry] :param default_language_code: A value indicating which language code to use. Default is en. - Possible values include: 'da', 'nl', 'en', 'fi', 'fr', 'de', 'it', 'ja', 'ko', 'no', 'pl', 'pt- - PT', 'pt-BR', 'ru', 'es', 'sv'. 
+ Possible values include: "da", "nl", "en", "fi", "fr", "de", "it", "ja", "ko", "no", "pl", "pt- + PT", "pt-BR", "ru", "es", "sv". :type default_language_code: str or - ~search_service_client.models.KeyPhraseExtractionSkillLanguage + ~azure.search.documents.models.KeyPhraseExtractionSkillLanguage :param max_key_phrase_count: A number indicating how many key phrases to return. If absent, all identified key phrases will be returned. :type max_key_phrase_count: int @@ -2526,11 +1990,11 @@ def __init__( ): super(KeywordMarkerTokenFilter, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.KeywordMarkerTokenFilter' - self.keywords = kwargs.get('keywords', None) + self.keywords = kwargs['keywords'] self.ignore_case = kwargs.get('ignore_case', False) -class KeywordTokenizer(Tokenizer): +class KeywordTokenizer(LexicalTokenizer): """Emits the entire input as a single token. This tokenizer is implemented using Apache Lucene. All required parameters must be populated in order to send to Azure. @@ -2566,7 +2030,7 @@ def __init__( self.buffer_size = kwargs.get('buffer_size', 256) -class KeywordTokenizerV2(Tokenizer): +class KeywordTokenizerV2(LexicalTokenizer): """Emits the entire input as a single token. This tokenizer is implemented using Apache Lucene. All required parameters must be populated in order to send to Azure. @@ -2604,7 +2068,7 @@ def __init__( self.max_token_length = kwargs.get('max_token_length', 256) -class LanguageDetectionSkill(Skill): +class LanguageDetectionSkill(SearchIndexerSkill): """A skill that detects the language of input text and reports a single language code for every document submitted on the request. The language code is paired with a score indicating the confidence of the analysis. All required parameters must be populated in order to send to Azure. @@ -2624,10 +2088,10 @@ class LanguageDetectionSkill(Skill): :type context: str :param inputs: Required. Inputs of the skills could be a column in the source data set, or the output of an upstream skill. - :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :type inputs: list[~azure.search.documents.models.InputFieldMappingEntry] :param outputs: Required. The output of a skill is either a field in a search index, or a value that can be consumed as an input by another skill. - :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + :type outputs: list[~azure.search.documents.models.OutputFieldMappingEntry] """ _validation = { @@ -2665,25 +2129,25 @@ class LengthTokenFilter(TokenFilter): spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. :type name: str - :param min: The minimum length in characters. Default is 0. Maximum is 300. Must be less than - the value of max. - :type min: int - :param max: The maximum length in characters. Default and maximum is 300. - :type max: int + :param min_length: The minimum length in characters. Default is 0. Maximum is 300. Must be less + than the value of max. + :type min_length: int + :param max_length: The maximum length in characters. Default and maximum is 300. 
+ :type max_length: int """ _validation = { 'odata_type': {'required': True}, 'name': {'required': True}, - 'min': {'maximum': 300}, - 'max': {'maximum': 300}, + 'min_length': {'maximum': 300}, + 'max_length': {'maximum': 300}, } _attribute_map = { 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, - 'min': {'key': 'min', 'type': 'int'}, - 'max': {'key': 'max', 'type': 'int'}, + 'min_length': {'key': 'min', 'type': 'int'}, + 'max_length': {'key': 'max', 'type': 'int'}, } def __init__( @@ -2692,8 +2156,8 @@ def __init__( ): super(LengthTokenFilter, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.LengthTokenFilter' - self.min = kwargs.get('min', 0) - self.max = kwargs.get('max', 300) + self.min_length = kwargs.get('min_length', 0) + self.max_length = kwargs.get('max_length', 300) class LimitTokenFilter(TokenFilter): @@ -2745,7 +2209,7 @@ class ListDataSourcesResult(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. :ivar data_sources: Required. The datasources in the Search service. - :vartype data_sources: list[~search_service_client.models.DataSource] + :vartype data_sources: list[~azure.search.documents.models.SearchIndexerDataSource] """ _validation = { @@ -2753,7 +2217,7 @@ class ListDataSourcesResult(msrest.serialization.Model): } _attribute_map = { - 'data_sources': {'key': 'value', 'type': '[DataSource]'}, + 'data_sources': {'key': 'value', 'type': '[SearchIndexerDataSource]'}, } def __init__( @@ -2772,7 +2236,7 @@ class ListIndexersResult(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. :ivar indexers: Required. The indexers in the Search service. - :vartype indexers: list[~search_service_client.models.Indexer] + :vartype indexers: list[~azure.search.documents.models.SearchIndexer] """ _validation = { @@ -2780,7 +2244,7 @@ class ListIndexersResult(msrest.serialization.Model): } _attribute_map = { - 'indexers': {'key': 'value', 'type': '[Indexer]'}, + 'indexers': {'key': 'value', 'type': '[SearchIndexer]'}, } def __init__( @@ -2799,7 +2263,7 @@ class ListIndexesResult(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. :ivar indexes: Required. The indexes in the Search service. - :vartype indexes: list[~search_service_client.models.Index] + :vartype indexes: list[~azure.search.documents.models.SearchIndex] """ _validation = { @@ -2807,7 +2271,7 @@ class ListIndexesResult(msrest.serialization.Model): } _attribute_map = { - 'indexes': {'key': 'value', 'type': '[Index]'}, + 'indexes': {'key': 'value', 'type': '[SearchIndex]'}, } def __init__( @@ -2819,14 +2283,14 @@ def __init__( class ListSkillsetsResult(msrest.serialization.Model): - """Response from a list Skillset request. If successful, it includes the full definitions of all skillsets. + """Response from a list skillset request. If successful, it includes the full definitions of all skillsets. Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :ivar skillsets: Required. The skillsets defined in the Search service. 
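# The min/max keyword arguments above are renamed to min_length/max_length
# (presumably to avoid the built-in names), while _attribute_map keeps
# serializing them to the original "min"/"max" wire keys, so existing index
# definitions are unaffected. Sketch (assumed import path):
from azure.search.documents.models import LengthTokenFilter

f = LengthTokenFilter(name="len_2_to_50", min_length=2, max_length=50)
assert f.odata_type == '#Microsoft.Azure.Search.LengthTokenFilter'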
- :vartype skillsets: list[~search_service_client.models.Skillset] + :vartype skillsets: list[~azure.search.documents.models.SearchIndexerSkillset] """ _validation = { @@ -2834,7 +2298,7 @@ class ListSkillsetsResult(msrest.serialization.Model): } _attribute_map = { - 'skillsets': {'key': 'value', 'type': '[Skillset]'}, + 'skillsets': {'key': 'value', 'type': '[SearchIndexerSkillset]'}, } def __init__( @@ -2853,7 +2317,7 @@ class ListSynonymMapsResult(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. :ivar synonym_maps: Required. The synonym maps in the Search service. - :vartype synonym_maps: list[~search_service_client.models.SynonymMap] + :vartype synonym_maps: list[~azure.search.documents.models.SynonymMap] """ _validation = { @@ -2872,6 +2336,123 @@ def __init__( self.synonym_maps = None +class LuceneStandardAnalyzer(LexicalAnalyzer): + """Standard Apache Lucene analyzer; Composed of the standard tokenizer, lowercase filter and stop filter. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Identifies the concrete type of the analyzer.Constant filled by + server. + :type odata_type: str + :param name: Required. The name of the analyzer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :type name: str + :param max_token_length: The maximum token length. Default is 255. Tokens longer than the + maximum length are split. The maximum token length that can be used is 300 characters. + :type max_token_length: int + :param stopwords: A list of stopwords. + :type stopwords: list[str] + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + 'max_token_length': {'maximum': 300}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'max_token_length': {'key': 'maxTokenLength', 'type': 'int'}, + 'stopwords': {'key': 'stopwords', 'type': '[str]'}, + } + + def __init__( + self, + **kwargs + ): + super(LuceneStandardAnalyzer, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Azure.Search.StandardAnalyzer' + self.max_token_length = kwargs.get('max_token_length', 255) + self.stopwords = kwargs.get('stopwords', None) + + +class LuceneStandardTokenizer(LexicalTokenizer): + """Breaks text following the Unicode Text Segmentation rules. This tokenizer is implemented using Apache Lucene. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by + server. + :type odata_type: str + :param name: Required. The name of the tokenizer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :type name: str + :param max_token_length: The maximum token length. Default is 255. Tokens longer than the + maximum length are split. 
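# StandardAnalyzer resurfaces as LuceneStandardAnalyzer while keeping the
# original @odata.type, so service-side definitions still round-trip. A
# sketch against the __init__ shown above (import path assumed):
from azure.search.documents.models import LuceneStandardAnalyzer

std = LuceneStandardAnalyzer(name="std_en", max_token_length=300,
                             stopwords=["the", "and"])
assert std.odata_type == '#Microsoft.Azure.Search.StandardAnalyzer'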
+ :type max_token_length: int + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'max_token_length': {'key': 'maxTokenLength', 'type': 'int'}, + } + + def __init__( + self, + **kwargs + ): + super(LuceneStandardTokenizer, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Azure.Search.StandardTokenizer' + self.max_token_length = kwargs.get('max_token_length', 255) + + +class LuceneStandardTokenizerV2(LexicalTokenizer): + """Breaks text following the Unicode Text Segmentation rules. This tokenizer is implemented using Apache Lucene. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by + server. + :type odata_type: str + :param name: Required. The name of the tokenizer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :type name: str + :param max_token_length: The maximum token length. Default is 255. Tokens longer than the + maximum length are split. The maximum token length that can be used is 300 characters. + :type max_token_length: int + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + 'max_token_length': {'maximum': 300}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'max_token_length': {'key': 'maxTokenLength', 'type': 'int'}, + } + + def __init__( + self, + **kwargs + ): + super(LuceneStandardTokenizerV2, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Azure.Search.StandardTokenizerV2' + self.max_token_length = kwargs.get('max_token_length', 255) + + class MagnitudeScoringFunction(ScoringFunction): """Defines a function that boosts scores based on the magnitude of a numeric field. @@ -2886,11 +2467,11 @@ class MagnitudeScoringFunction(ScoringFunction): 1.0. :type boost: float :param interpolation: A value indicating how boosting will be interpolated across document - scores; defaults to "Linear". Possible values include: 'linear', 'constant', 'quadratic', - 'logarithmic'. - :type interpolation: str or ~search_service_client.models.ScoringFunctionInterpolation + scores; defaults to "Linear". Possible values include: "linear", "constant", "quadratic", + "logarithmic". + :type interpolation: str or ~azure.search.documents.models.ScoringFunctionInterpolation :param parameters: Required. Parameter values for the magnitude scoring function. 
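# The magnitude scoring range bounds become required kwargs in the hunk that
# follows. A sketch boosting a hypothetical "rating" field; field_name and
# boost come from the ScoringFunction base class, which is not shown here:
from azure.search.documents.models import (
    MagnitudeScoringFunction,
    MagnitudeScoringParameters,
)

mag = MagnitudeScoringFunction(
    field_name="rating",
    boost=2.0,
    parameters=MagnitudeScoringParameters(boosting_range_start=0,
                                          boosting_range_end=5),
)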
- :type parameters: ~search_service_client.models.MagnitudeScoringParameters + :type parameters: ~azure.search.documents.models.MagnitudeScoringParameters """ _validation = { @@ -2914,7 +2495,7 @@ def __init__( ): super(MagnitudeScoringFunction, self).__init__(**kwargs) self.type = 'magnitude' - self.parameters = kwargs.get('parameters', None) + self.parameters = kwargs['parameters'] class MagnitudeScoringParameters(msrest.serialization.Model): @@ -2947,8 +2528,8 @@ def __init__( **kwargs ): super(MagnitudeScoringParameters, self).__init__(**kwargs) - self.boosting_range_start = kwargs.get('boosting_range_start', None) - self.boosting_range_end = kwargs.get('boosting_range_end', None) + self.boosting_range_start = kwargs['boosting_range_start'] + self.boosting_range_end = kwargs['boosting_range_end'] self.should_boost_beyond_range_by_constant = kwargs.get('should_boost_beyond_range_by_constant', None) @@ -2987,10 +2568,10 @@ def __init__( ): super(MappingCharFilter, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.MappingCharFilter' - self.mappings = kwargs.get('mappings', None) + self.mappings = kwargs['mappings'] -class MergeSkill(Skill): +class MergeSkill(SearchIndexerSkill): """A skill for merging two or more strings into a single unified string, with an optional user-defined delimiter separating each component part. All required parameters must be populated in order to send to Azure. @@ -3010,10 +2591,10 @@ class MergeSkill(Skill): :type context: str :param inputs: Required. Inputs of the skills could be a column in the source data set, or the output of an upstream skill. - :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :type inputs: list[~azure.search.documents.models.InputFieldMappingEntry] :param outputs: Required. The output of a skill is either a field in a search index, or a value that can be consumed as an input by another skill. - :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + :type outputs: list[~azure.search.documents.models.OutputFieldMappingEntry] :param insert_pre_tag: The tag indicates the start of the merged text. By default, the tag is an empty space. :type insert_pre_tag: str @@ -3049,7 +2630,7 @@ def __init__( self.insert_post_tag = kwargs.get('insert_post_tag', " ") -class MicrosoftLanguageStemmingTokenizer(Tokenizer): +class MicrosoftLanguageStemmingTokenizer(LexicalTokenizer): """Divides text using language-specific rules and reduces words to their base forms. All required parameters must be populated in order to send to Azure. @@ -3070,13 +2651,13 @@ class MicrosoftLanguageStemmingTokenizer(Tokenizer): as the search tokenizer, set to false if used as the indexing tokenizer. Default is false. :type is_search_tokenizer: bool :param language: The language to use. The default is English. Possible values include: - 'arabic', 'bangla', 'bulgarian', 'catalan', 'croatian', 'czech', 'danish', 'dutch', 'english', - 'estonian', 'finnish', 'french', 'german', 'greek', 'gujarati', 'hebrew', 'hindi', 'hungarian', - 'icelandic', 'indonesian', 'italian', 'kannada', 'latvian', 'lithuanian', 'malay', 'malayalam', - 'marathi', 'norwegianBokmaal', 'polish', 'portuguese', 'portugueseBrazilian', 'punjabi', - 'romanian', 'russian', 'serbianCyrillic', 'serbianLatin', 'slovak', 'slovenian', 'spanish', - 'swedish', 'tamil', 'telugu', 'turkish', 'ukrainian', 'urdu'. 
- :type language: str or ~search_service_client.models.MicrosoftStemmingTokenizerLanguage + "arabic", "bangla", "bulgarian", "catalan", "croatian", "czech", "danish", "dutch", "english", + "estonian", "finnish", "french", "german", "greek", "gujarati", "hebrew", "hindi", "hungarian", + "icelandic", "indonesian", "italian", "kannada", "latvian", "lithuanian", "malay", "malayalam", + "marathi", "norwegianBokmaal", "polish", "portuguese", "portugueseBrazilian", "punjabi", + "romanian", "russian", "serbianCyrillic", "serbianLatin", "slovak", "slovenian", "spanish", + "swedish", "tamil", "telugu", "turkish", "ukrainian", "urdu". + :type language: str or ~azure.search.documents.models.MicrosoftStemmingTokenizerLanguage """ _validation = { @@ -3104,7 +2685,7 @@ def __init__( self.language = kwargs.get('language', None) -class MicrosoftLanguageTokenizer(Tokenizer): +class MicrosoftLanguageTokenizer(LexicalTokenizer): """Divides text using language-specific rules. All required parameters must be populated in order to send to Azure. @@ -3125,13 +2706,13 @@ class MicrosoftLanguageTokenizer(Tokenizer): as the search tokenizer, set to false if used as the indexing tokenizer. Default is false. :type is_search_tokenizer: bool :param language: The language to use. The default is English. Possible values include: - 'bangla', 'bulgarian', 'catalan', 'chineseSimplified', 'chineseTraditional', 'croatian', - 'czech', 'danish', 'dutch', 'english', 'french', 'german', 'greek', 'gujarati', 'hindi', - 'icelandic', 'indonesian', 'italian', 'japanese', 'kannada', 'korean', 'malay', 'malayalam', - 'marathi', 'norwegianBokmaal', 'polish', 'portuguese', 'portugueseBrazilian', 'punjabi', - 'romanian', 'russian', 'serbianCyrillic', 'serbianLatin', 'slovenian', 'spanish', 'swedish', - 'tamil', 'telugu', 'thai', 'ukrainian', 'urdu', 'vietnamese'. - :type language: str or ~search_service_client.models.MicrosoftTokenizerLanguage + "bangla", "bulgarian", "catalan", "chineseSimplified", "chineseTraditional", "croatian", + "czech", "danish", "dutch", "english", "french", "german", "greek", "gujarati", "hindi", + "icelandic", "indonesian", "italian", "japanese", "kannada", "korean", "malay", "malayalam", + "marathi", "norwegianBokmaal", "polish", "portuguese", "portugueseBrazilian", "punjabi", + "romanian", "russian", "serbianCyrillic", "serbianLatin", "slovenian", "spanish", "swedish", + "tamil", "telugu", "thai", "ukrainian", "urdu", "vietnamese". + :type language: str or ~azure.search.documents.models.MicrosoftTokenizerLanguage """ _validation = { @@ -3243,7 +2824,7 @@ def __init__( self.max_gram = kwargs.get('max_gram', 2) -class NGramTokenizer(Tokenizer): +class NGramTokenizer(LexicalTokenizer): """Tokenizes the input into n-grams of the given size(s). This tokenizer is implemented using Apache Lucene. All required parameters must be populated in order to send to Azure. @@ -3261,7 +2842,7 @@ class NGramTokenizer(Tokenizer): :param max_gram: The maximum n-gram length. Default is 2. Maximum is 300. :type max_gram: int :param token_chars: Character classes to keep in the tokens. - :type token_chars: list[str or ~search_service_client.models.TokenCharacterKind] + :type token_chars: list[str or ~azure.search.documents.models.TokenCharacterKind] """ _validation = { @@ -3290,7 +2871,7 @@ def __init__( self.token_chars = kwargs.get('token_chars', None) -class OcrSkill(Skill): +class OcrSkill(SearchIndexerSkill): """A skill that extracts text from image files. All required parameters must be populated in order to send to Azure. 
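# NGramTokenizer sketch; the token_chars entries are TokenCharacterKind
# values ("letter", "digit", ...) — assumed here, since the enum itself is
# not part of this hunk:
from azure.search.documents.models import NGramTokenizer

ngram = NGramTokenizer(name="ngram_3_4", min_gram=3, max_gram=4,
                       token_chars=["letter", "digit"])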
@@ -3310,18 +2891,18 @@ class OcrSkill(Skill): :type context: str :param inputs: Required. Inputs of the skills could be a column in the source data set, or the output of an upstream skill. - :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :type inputs: list[~azure.search.documents.models.InputFieldMappingEntry] :param outputs: Required. The output of a skill is either a field in a search index, or a value that can be consumed as an input by another skill. - :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + :type outputs: list[~azure.search.documents.models.OutputFieldMappingEntry] :param text_extraction_algorithm: A value indicating which algorithm to use for extracting - text. Default is printed. Possible values include: 'printed', 'handwritten'. - :type text_extraction_algorithm: str or ~search_service_client.models.TextExtractionAlgorithm + text. Default is printed. Possible values include: "printed", "handwritten". + :type text_extraction_algorithm: str or ~azure.search.documents.models.TextExtractionAlgorithm :param default_language_code: A value indicating which language code to use. Default is en. - Possible values include: 'zh-Hans', 'zh-Hant', 'cs', 'da', 'nl', 'en', 'fi', 'fr', 'de', 'el', - 'hu', 'it', 'ja', 'ko', 'nb', 'pl', 'pt', 'ru', 'es', 'sv', 'tr', 'ar', 'ro', 'sr-Cyrl', 'sr- - Latn', 'sk'. - :type default_language_code: str or ~search_service_client.models.OcrSkillLanguage + Possible values include: "zh-Hans", "zh-Hant", "cs", "da", "nl", "en", "fi", "fr", "de", "el", + "hu", "it", "ja", "ko", "nb", "pl", "pt", "ru", "es", "sv", "tr", "ar", "ro", "sr-Cyrl", "sr- + Latn", "sk". + :type default_language_code: str or ~azure.search.documents.models.OcrSkillLanguage :param should_detect_orientation: A value indicating to turn orientation detection on or not. Default is false. :type should_detect_orientation: bool @@ -3381,11 +2962,11 @@ def __init__( **kwargs ): super(OutputFieldMappingEntry, self).__init__(**kwargs) - self.name = kwargs.get('name', None) + self.name = kwargs['name'] self.target_name = kwargs.get('target_name', None) -class PathHierarchyTokenizerV2(Tokenizer): +class PathHierarchyTokenizerV2(LexicalTokenizer): """Tokenizer for path-like hierarchies. This tokenizer is implemented using Apache Lucene. All required parameters must be populated in order to send to Azure. @@ -3439,7 +3020,7 @@ def __init__( self.number_of_tokens_to_skip = kwargs.get('number_of_tokens_to_skip', 0) -class PatternAnalyzer(Analyzer): +class PatternAnalyzer(LexicalAnalyzer): """Flexibly separates text into terms via a regular expression pattern. This analyzer is implemented using Apache Lucene. All required parameters must be populated in order to send to Azure. @@ -3455,11 +3036,11 @@ class PatternAnalyzer(Analyzer): true. :type lower_case_terms: bool :param pattern: A regular expression pattern to match token separators. Default is an - expression that matches one or more whitespace characters. + expression that matches one or more non-word characters. :type pattern: str - :param flags: Regular expression flags. Possible values include: 'CANON_EQ', - 'CASE_INSENSITIVE', 'COMMENTS', 'DOTALL', 'LITERAL', 'MULTILINE', 'UNICODE_CASE', 'UNIX_LINES'. - :type flags: str or ~search_service_client.models.RegexFlags + :param flags: Regular expression flags. Possible values include: "CANON_EQ", + "CASE_INSENSITIVE", "COMMENTS", "DOTALL", "LITERAL", "MULTILINE", "UNICODE_CASE", "UNIX_LINES". 
+    :type flags: str or ~azure.search.documents.models.RegexFlags
     :param stopwords: A list of stopwords.
     :type stopwords: list[str]
     """
@@ -3528,311 +3109,1015 @@ def __init__(
     ):
         super(PatternCaptureTokenFilter, self).__init__(**kwargs)
         self.odata_type = '#Microsoft.Azure.Search.PatternCaptureTokenFilter'
-        self.patterns = kwargs.get('patterns', None)
+        self.patterns = kwargs['patterns']
         self.preserve_original = kwargs.get('preserve_original', True)


-class PatternReplaceCharFilter(CharFilter):
-    """A character filter that replaces characters in the input string. It uses a regular expression to identify character sequences to preserve and a replacement pattern to identify characters to replace. For example, given the input text "aa bb aa bb", pattern "(aa)\s+(bb)", and replacement "$1#$2", the result would be "aa#bb aa#bb". This character filter is implemented using Apache Lucene.
+class PatternReplaceCharFilter(CharFilter):
+    """A character filter that replaces characters in the input string. It uses a regular expression to identify character sequences to preserve and a replacement pattern to identify characters to replace. For example, given the input text "aa bb aa bb", pattern "(aa)\s+(bb)", and replacement "$1#$2", the result would be "aa#bb aa#bb". This character filter is implemented using Apache Lucene.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param odata_type: Required. Identifies the concrete type of the char filter.Constant filled by
+     server.
+    :type odata_type: str
+    :param name: Required. The name of the char filter. It must only contain letters, digits,
+     spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+     limited to 128 characters.
+    :type name: str
+    :param pattern: Required. A regular expression pattern.
+    :type pattern: str
+    :param replacement: Required. The replacement text.
+    :type replacement: str
+    """
+
+    _validation = {
+        'odata_type': {'required': True},
+        'name': {'required': True},
+        'pattern': {'required': True},
+        'replacement': {'required': True},
+    }
+
+    _attribute_map = {
+        'odata_type': {'key': '@odata\\.type', 'type': 'str'},
+        'name': {'key': 'name', 'type': 'str'},
+        'pattern': {'key': 'pattern', 'type': 'str'},
+        'replacement': {'key': 'replacement', 'type': 'str'},
+    }
+
+    def __init__(
+        self,
+        **kwargs
+    ):
+        super(PatternReplaceCharFilter, self).__init__(**kwargs)
+        self.odata_type = '#Microsoft.Azure.Search.PatternReplaceCharFilter'
+        self.pattern = kwargs['pattern']
+        self.replacement = kwargs['replacement']
+
+
+class PatternReplaceTokenFilter(TokenFilter):
+    """A token filter that replaces characters in the input string. It uses a regular expression to identify character sequences to preserve and a replacement pattern to identify characters to replace. For example, given the input text "aa bb aa bb", pattern "(aa)\s+(bb)", and replacement "$1#$2", the result would be "aa#bb aa#bb". This token filter is implemented using Apache Lucene.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param odata_type: Required. Identifies the concrete type of the token filter.Constant filled
+     by server.
+    :type odata_type: str
+    :param name: Required. The name of the token filter. It must only contain letters, digits,
+     spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+     limited to 128 characters.
+    :type name: str
+    :param pattern: Required. A regular expression pattern.
+    :type pattern: str
+    :param replacement: Required. The replacement text.
+    :type replacement: str
+    """
+
+    _validation = {
+        'odata_type': {'required': True},
+        'name': {'required': True},
+        'pattern': {'required': True},
+        'replacement': {'required': True},
+    }
+
+    _attribute_map = {
+        'odata_type': {'key': '@odata\\.type', 'type': 'str'},
+        'name': {'key': 'name', 'type': 'str'},
+        'pattern': {'key': 'pattern', 'type': 'str'},
+        'replacement': {'key': 'replacement', 'type': 'str'},
+    }
+
+    def __init__(
+        self,
+        **kwargs
+    ):
+        super(PatternReplaceTokenFilter, self).__init__(**kwargs)
+        self.odata_type = '#Microsoft.Azure.Search.PatternReplaceTokenFilter'
+        self.pattern = kwargs['pattern']
+        self.replacement = kwargs['replacement']
+
+
+class PatternTokenizer(LexicalTokenizer):
+    """Tokenizer that uses regex pattern matching to construct distinct tokens. This tokenizer is implemented using Apache Lucene.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by
+     server.
+    :type odata_type: str
+    :param name: Required. The name of the tokenizer. It must only contain letters, digits, spaces,
+     dashes or underscores, can only start and end with alphanumeric characters, and is limited to
+     128 characters.
+    :type name: str
+    :param pattern: A regular expression pattern to match token separators. Default is an
+     expression that matches one or more non-word characters.
+    :type pattern: str
+    :param flags: Regular expression flags. Possible values include: "CANON_EQ",
+     "CASE_INSENSITIVE", "COMMENTS", "DOTALL", "LITERAL", "MULTILINE", "UNICODE_CASE", "UNIX_LINES".
+    :type flags: str or ~azure.search.documents.models.RegexFlags
+    :param group: The zero-based ordinal of the matching group in the regular expression pattern to
+     extract into tokens. Use -1 if you want to use the entire pattern to split the input into
+     tokens, irrespective of matching groups. Default is -1.
+    :type group: int
+    """
+
+    _validation = {
+        'odata_type': {'required': True},
+        'name': {'required': True},
+    }
+
+    _attribute_map = {
+        'odata_type': {'key': '@odata\\.type', 'type': 'str'},
+        'name': {'key': 'name', 'type': 'str'},
+        'pattern': {'key': 'pattern', 'type': 'str'},
+        'flags': {'key': 'flags', 'type': 'str'},
+        'group': {'key': 'group', 'type': 'int'},
+    }
+
+    def __init__(
+        self,
+        **kwargs
+    ):
+        super(PatternTokenizer, self).__init__(**kwargs)
+        self.odata_type = '#Microsoft.Azure.Search.PatternTokenizer'
+        self.pattern = kwargs.get('pattern', r"\W+")
+        self.flags = kwargs.get('flags', None)
+        self.group = kwargs.get('group', -1)
+
+
+class PhoneticTokenFilter(TokenFilter):
+    """Create tokens for phonetic matches. This token filter is implemented using Apache Lucene.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :param odata_type: Required. Identifies the concrete type of the token filter.Constant filled
+     by server.
+    :type odata_type: str
+    :param name: Required. The name of the token filter. It must only contain letters, digits,
+     spaces, dashes or underscores, can only start and end with alphanumeric characters, and is
+     limited to 128 characters.
+    :type name: str
+    :param encoder: The phonetic encoder to use. Default is "metaphone". Possible values include:
+     "metaphone", "doubleMetaphone", "soundex", "refinedSoundex", "caverphone1", "caverphone2",
+     "cologne", "nysiis", "koelnerPhonetik", "haasePhonetik", "beiderMorse".
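PatternTokenizer above now defaults its pattern to a raw string, which is the idiomatic way to write regexes in Python. A minimal construction sketch under the same assumed import path as earlier (names are hypothetical):

# Hypothetical sketch: a PatternTokenizer that splits tokens on commas.
from azure.search.documents.models import PatternTokenizer

comma_tokenizer = PatternTokenizer(
    name="comma-split",
    pattern=r",",  # raw strings sidestep invalid-escape warnings like the old "\W+"
    group=-1,      # -1 splits on the whole pattern instead of a capture group
)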
+ :type encoder: str or ~azure.search.documents.models.PhoneticEncoder + :param replace_original_tokens: A value indicating whether encoded tokens should replace + original tokens. If false, encoded tokens are added as synonyms. Default is true. + :type replace_original_tokens: bool + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'encoder': {'key': 'encoder', 'type': 'str'}, + 'replace_original_tokens': {'key': 'replace', 'type': 'bool'}, + } + + def __init__( + self, + **kwargs + ): + super(PhoneticTokenFilter, self).__init__(**kwargs) + self.odata_type = '#Microsoft.Azure.Search.PhoneticTokenFilter' + self.encoder = kwargs.get('encoder', None) + self.replace_original_tokens = kwargs.get('replace_original_tokens', True) + + +class RequestOptions(msrest.serialization.Model): + """Parameter group. + + :param x_ms_client_request_id: The tracking ID sent with the request to help with debugging. + :type x_ms_client_request_id: str + """ + + _attribute_map = { + 'x_ms_client_request_id': {'key': 'x-ms-client-request-id', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(RequestOptions, self).__init__(**kwargs) + self.x_ms_client_request_id = kwargs.get('x_ms_client_request_id', None) + + +class ResourceCounter(msrest.serialization.Model): + """Represents a resource's usage and quota. + + All required parameters must be populated in order to send to Azure. + + :param usage: Required. The resource usage amount. + :type usage: long + :param quota: The resource amount quota. + :type quota: long + """ + + _validation = { + 'usage': {'required': True}, + } + + _attribute_map = { + 'usage': {'key': 'usage', 'type': 'long'}, + 'quota': {'key': 'quota', 'type': 'long'}, + } + + def __init__( + self, + **kwargs + ): + super(ResourceCounter, self).__init__(**kwargs) + self.usage = kwargs['usage'] + self.quota = kwargs.get('quota', None) + + +class ScoringProfile(msrest.serialization.Model): + """Defines parameters for a search index that influence scoring in search queries. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the scoring profile. + :type name: str + :param text_weights: Parameters that boost scoring based on text matches in certain index + fields. + :type text_weights: ~azure.search.documents.models.TextWeights + :param functions: The collection of functions that influence the scoring of documents. + :type functions: list[~azure.search.documents.models.ScoringFunction] + :param function_aggregation: A value indicating how the results of individual scoring functions + should be combined. Defaults to "Sum". Ignored if there are no scoring functions. Possible + values include: "sum", "average", "minimum", "maximum", "firstMatching". 
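A sketch of the phonetic filter defined above; the encoder value is one of the PhoneticEncoder options listed in its docstring, and the import path is assumed as before:

from azure.search.documents.models import PhoneticTokenFilter

phonetic = PhoneticTokenFilter(
    name="my-phonetic",
    encoder="doubleMetaphone",
    replace_original_tokens=False,  # keep originals; encoded tokens become synonyms
)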
+ :type function_aggregation: str or ~azure.search.documents.models.ScoringFunctionAggregation + """ + + _validation = { + 'name': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'text_weights': {'key': 'text', 'type': 'TextWeights'}, + 'functions': {'key': 'functions', 'type': '[ScoringFunction]'}, + 'function_aggregation': {'key': 'functionAggregation', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(ScoringProfile, self).__init__(**kwargs) + self.name = kwargs['name'] + self.text_weights = kwargs.get('text_weights', None) + self.functions = kwargs.get('functions', None) + self.function_aggregation = kwargs.get('function_aggregation', None) + + +class SearchError(msrest.serialization.Model): + """Describes an error condition for the Azure Cognitive Search API. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar code: One of a server-defined set of error codes. + :vartype code: str + :ivar message: Required. A human-readable representation of the error. + :vartype message: str + :ivar details: An array of details about specific errors that led to this reported error. + :vartype details: list[~azure.search.documents.models.SearchError] + """ + + _validation = { + 'code': {'readonly': True}, + 'message': {'required': True, 'readonly': True}, + 'details': {'readonly': True}, + } + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + 'details': {'key': 'details', 'type': '[SearchError]'}, + } + + def __init__( + self, + **kwargs + ): + super(SearchError, self).__init__(**kwargs) + self.code = None + self.message = None + self.details = None + + +class SearchField(msrest.serialization.Model): + """Represents a field in an index definition, which describes the name, data type, and search behavior of a field. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the field, which must be unique within the fields collection + of the index or parent field. + :type name: str + :param type: Required. The data type of the field. Possible values include: "Edm.String", + "Edm.Int32", "Edm.Int64", "Edm.Double", "Edm.Boolean", "Edm.DateTimeOffset", + "Edm.GeographyPoint", "Edm.ComplexType". + :type type: str or ~azure.search.documents.models.SearchFieldDataType + :param key: A value indicating whether the field uniquely identifies documents in the index. + Exactly one top-level field in each index must be chosen as the key field and it must be of + type Edm.String. Key fields can be used to look up documents directly and update or delete + specific documents. Default is false for simple fields and null for complex fields. + :type key: bool + :param retrievable: A value indicating whether the field can be returned in a search result. + You can disable this option if you want to use a field (for example, margin) as a filter, + sorting, or scoring mechanism but do not want the field to be visible to the end user. This + property must be true for key fields, and it must be null for complex fields. This property can + be changed on existing fields. Enabling this property does not cause any increase in index + storage requirements. Default is true for simple fields and null for complex fields. + :type retrievable: bool + :param searchable: A value indicating whether the field is full-text searchable. 
This means it + will undergo analysis such as word-breaking during indexing. If you set a searchable field to a + value like "sunny day", internally it will be split into the individual tokens "sunny" and + "day". This enables full-text searches for these terms. Fields of type Edm.String or + Collection(Edm.String) are searchable by default. This property must be false for simple fields + of other non-string data types, and it must be null for complex fields. Note: searchable fields + consume extra space in your index since Azure Cognitive Search will store an additional + tokenized version of the field value for full-text searches. If you want to save space in your + index and you don't need a field to be included in searches, set searchable to false. + :type searchable: bool + :param filterable: A value indicating whether to enable the field to be referenced in $filter + queries. filterable differs from searchable in how strings are handled. Fields of type + Edm.String or Collection(Edm.String) that are filterable do not undergo word-breaking, so + comparisons are for exact matches only. For example, if you set such a field f to "sunny day", + $filter=f eq 'sunny' will find no matches, but $filter=f eq 'sunny day' will. This property + must be null for complex fields. Default is true for simple fields and null for complex fields. + :type filterable: bool + :param sortable: A value indicating whether to enable the field to be referenced in $orderby + expressions. By default Azure Cognitive Search sorts results by score, but in many experiences + users will want to sort by fields in the documents. A simple field can be sortable only if it + is single-valued (it has a single value in the scope of the parent document). Simple collection + fields cannot be sortable, since they are multi-valued. Simple sub-fields of complex + collections are also multi-valued, and therefore cannot be sortable. This is true whether it's + an immediate parent field, or an ancestor field, that's the complex collection. Complex fields + cannot be sortable and the sortable property must be null for such fields. The default for + sortable is true for single-valued simple fields, false for multi-valued simple fields, and + null for complex fields. + :type sortable: bool + :param facetable: A value indicating whether to enable the field to be referenced in facet + queries. Typically used in a presentation of search results that includes hit count by category + (for example, search for digital cameras and see hits by brand, by megapixels, by price, and so + on). This property must be null for complex fields. Fields of type Edm.GeographyPoint or + Collection(Edm.GeographyPoint) cannot be facetable. Default is true for all other simple + fields. + :type facetable: bool + :param analyzer: The name of the analyzer to use for the field. This option can be used only + with searchable fields and it can't be set together with either searchAnalyzer or + indexAnalyzer. Once the analyzer is chosen, it cannot be changed for the field. Must be null + for complex fields. 
Possible values include: "ar.microsoft", "ar.lucene", "hy.lucene", + "bn.microsoft", "eu.lucene", "bg.microsoft", "bg.lucene", "ca.microsoft", "ca.lucene", "zh- + Hans.microsoft", "zh-Hans.lucene", "zh-Hant.microsoft", "zh-Hant.lucene", "hr.microsoft", + "cs.microsoft", "cs.lucene", "da.microsoft", "da.lucene", "nl.microsoft", "nl.lucene", + "en.microsoft", "en.lucene", "et.microsoft", "fi.microsoft", "fi.lucene", "fr.microsoft", + "fr.lucene", "gl.lucene", "de.microsoft", "de.lucene", "el.microsoft", "el.lucene", + "gu.microsoft", "he.microsoft", "hi.microsoft", "hi.lucene", "hu.microsoft", "hu.lucene", + "is.microsoft", "id.microsoft", "id.lucene", "ga.lucene", "it.microsoft", "it.lucene", + "ja.microsoft", "ja.lucene", "kn.microsoft", "ko.microsoft", "ko.lucene", "lv.microsoft", + "lv.lucene", "lt.microsoft", "ml.microsoft", "ms.microsoft", "mr.microsoft", "nb.microsoft", + "no.lucene", "fa.lucene", "pl.microsoft", "pl.lucene", "pt-BR.microsoft", "pt-BR.lucene", "pt- + PT.microsoft", "pt-PT.lucene", "pa.microsoft", "ro.microsoft", "ro.lucene", "ru.microsoft", + "ru.lucene", "sr-cyrillic.microsoft", "sr-latin.microsoft", "sk.microsoft", "sl.microsoft", + "es.microsoft", "es.lucene", "sv.microsoft", "sv.lucene", "ta.microsoft", "te.microsoft", + "th.microsoft", "th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", + "vi.microsoft", "standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", + "simple", "stop", "whitespace". + :type analyzer: str or ~azure.search.documents.models.LexicalAnalyzerName + :param search_analyzer: The name of the analyzer used at search time for the field. This option + can be used only with searchable fields. It must be set together with indexAnalyzer and it + cannot be set together with the analyzer option. This property cannot be set to the name of a + language analyzer; use the analyzer property instead if you need a language analyzer. This + analyzer can be updated on an existing field. Must be null for complex fields. Possible values + include: "ar.microsoft", "ar.lucene", "hy.lucene", "bn.microsoft", "eu.lucene", "bg.microsoft", + "bg.lucene", "ca.microsoft", "ca.lucene", "zh-Hans.microsoft", "zh-Hans.lucene", "zh- + Hant.microsoft", "zh-Hant.lucene", "hr.microsoft", "cs.microsoft", "cs.lucene", "da.microsoft", + "da.lucene", "nl.microsoft", "nl.lucene", "en.microsoft", "en.lucene", "et.microsoft", + "fi.microsoft", "fi.lucene", "fr.microsoft", "fr.lucene", "gl.lucene", "de.microsoft", + "de.lucene", "el.microsoft", "el.lucene", "gu.microsoft", "he.microsoft", "hi.microsoft", + "hi.lucene", "hu.microsoft", "hu.lucene", "is.microsoft", "id.microsoft", "id.lucene", + "ga.lucene", "it.microsoft", "it.lucene", "ja.microsoft", "ja.lucene", "kn.microsoft", + "ko.microsoft", "ko.lucene", "lv.microsoft", "lv.lucene", "lt.microsoft", "ml.microsoft", + "ms.microsoft", "mr.microsoft", "nb.microsoft", "no.lucene", "fa.lucene", "pl.microsoft", + "pl.lucene", "pt-BR.microsoft", "pt-BR.lucene", "pt-PT.microsoft", "pt-PT.lucene", + "pa.microsoft", "ro.microsoft", "ro.lucene", "ru.microsoft", "ru.lucene", "sr- + cyrillic.microsoft", "sr-latin.microsoft", "sk.microsoft", "sl.microsoft", "es.microsoft", + "es.lucene", "sv.microsoft", "sv.lucene", "ta.microsoft", "te.microsoft", "th.microsoft", + "th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", "vi.microsoft", + "standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", "simple", "stop", + "whitespace". 
+ :type search_analyzer: str or ~azure.search.documents.models.LexicalAnalyzerName + :param index_analyzer: The name of the analyzer used at indexing time for the field. This + option can be used only with searchable fields. It must be set together with searchAnalyzer and + it cannot be set together with the analyzer option. This property cannot be set to the name of + a language analyzer; use the analyzer property instead if you need a language analyzer. Once + the analyzer is chosen, it cannot be changed for the field. Must be null for complex fields. + Possible values include: "ar.microsoft", "ar.lucene", "hy.lucene", "bn.microsoft", "eu.lucene", + "bg.microsoft", "bg.lucene", "ca.microsoft", "ca.lucene", "zh-Hans.microsoft", "zh- + Hans.lucene", "zh-Hant.microsoft", "zh-Hant.lucene", "hr.microsoft", "cs.microsoft", + "cs.lucene", "da.microsoft", "da.lucene", "nl.microsoft", "nl.lucene", "en.microsoft", + "en.lucene", "et.microsoft", "fi.microsoft", "fi.lucene", "fr.microsoft", "fr.lucene", + "gl.lucene", "de.microsoft", "de.lucene", "el.microsoft", "el.lucene", "gu.microsoft", + "he.microsoft", "hi.microsoft", "hi.lucene", "hu.microsoft", "hu.lucene", "is.microsoft", + "id.microsoft", "id.lucene", "ga.lucene", "it.microsoft", "it.lucene", "ja.microsoft", + "ja.lucene", "kn.microsoft", "ko.microsoft", "ko.lucene", "lv.microsoft", "lv.lucene", + "lt.microsoft", "ml.microsoft", "ms.microsoft", "mr.microsoft", "nb.microsoft", "no.lucene", + "fa.lucene", "pl.microsoft", "pl.lucene", "pt-BR.microsoft", "pt-BR.lucene", "pt-PT.microsoft", + "pt-PT.lucene", "pa.microsoft", "ro.microsoft", "ro.lucene", "ru.microsoft", "ru.lucene", "sr- + cyrillic.microsoft", "sr-latin.microsoft", "sk.microsoft", "sl.microsoft", "es.microsoft", + "es.lucene", "sv.microsoft", "sv.lucene", "ta.microsoft", "te.microsoft", "th.microsoft", + "th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", "vi.microsoft", + "standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", "simple", "stop", + "whitespace". + :type index_analyzer: str or ~azure.search.documents.models.LexicalAnalyzerName + :param synonym_maps: A list of the names of synonym maps to associate with this field. This + option can be used only with searchable fields. Currently only one synonym map per field is + supported. Assigning a synonym map to a field ensures that query terms targeting that field are + expanded at query-time using the rules in the synonym map. This attribute can be changed on + existing fields. Must be null or an empty collection for complex fields. + :type synonym_maps: list[str] + :param fields: A list of sub-fields if this is a field of type Edm.ComplexType or + Collection(Edm.ComplexType). Must be null or empty for simple fields. 
+ :type fields: list[~azure.search.documents.models.SearchField] + """ + + _validation = { + 'name': {'required': True}, + 'type': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'key': {'key': 'key', 'type': 'bool'}, + 'retrievable': {'key': 'retrievable', 'type': 'bool'}, + 'searchable': {'key': 'searchable', 'type': 'bool'}, + 'filterable': {'key': 'filterable', 'type': 'bool'}, + 'sortable': {'key': 'sortable', 'type': 'bool'}, + 'facetable': {'key': 'facetable', 'type': 'bool'}, + 'analyzer': {'key': 'analyzer', 'type': 'str'}, + 'search_analyzer': {'key': 'searchAnalyzer', 'type': 'str'}, + 'index_analyzer': {'key': 'indexAnalyzer', 'type': 'str'}, + 'synonym_maps': {'key': 'synonymMaps', 'type': '[str]'}, + 'fields': {'key': 'fields', 'type': '[SearchField]'}, + } + + def __init__( + self, + **kwargs + ): + super(SearchField, self).__init__(**kwargs) + self.name = kwargs['name'] + self.type = kwargs['type'] + self.key = kwargs.get('key', None) + self.retrievable = kwargs.get('retrievable', None) + self.searchable = kwargs.get('searchable', None) + self.filterable = kwargs.get('filterable', None) + self.sortable = kwargs.get('sortable', None) + self.facetable = kwargs.get('facetable', None) + self.analyzer = kwargs.get('analyzer', None) + self.search_analyzer = kwargs.get('search_analyzer', None) + self.index_analyzer = kwargs.get('index_analyzer', None) + self.synonym_maps = kwargs.get('synonym_maps', None) + self.fields = kwargs.get('fields', None) + + +class SearchIndex(msrest.serialization.Model): + """Represents a search index definition, which describes the fields and search behavior of an index. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the index. + :type name: str + :param fields: Required. The fields of the index. + :type fields: list[~azure.search.documents.models.SearchField] + :param scoring_profiles: The scoring profiles for the index. + :type scoring_profiles: list[~azure.search.documents.models.ScoringProfile] + :param default_scoring_profile: The name of the scoring profile to use if none is specified in + the query. If this property is not set and no scoring profile is specified in the query, then + default scoring (tf-idf) will be used. + :type default_scoring_profile: str + :param cors_options: Options to control Cross-Origin Resource Sharing (CORS) for the index. + :type cors_options: ~azure.search.documents.models.CorsOptions + :param suggesters: The suggesters for the index. + :type suggesters: list[~azure.search.documents.models.Suggester] + :param analyzers: The analyzers for the index. + :type analyzers: list[~azure.search.documents.models.LexicalAnalyzer] + :param tokenizers: The tokenizers for the index. + :type tokenizers: list[~azure.search.documents.models.LexicalTokenizer] + :param token_filters: The token filters for the index. + :type token_filters: list[~azure.search.documents.models.TokenFilter] + :param char_filters: The character filters for the index. + :type char_filters: list[~azure.search.documents.models.CharFilter] + :param encryption_key: A description of an encryption key that you create in Azure Key Vault. + This key is used to provide an additional level of encryption-at-rest for your data when you + want full assurance that no one, not even Microsoft, can decrypt your data in Azure Cognitive + Search. Once you have encrypted your data, it will always remain encrypted. 
Azure Cognitive + Search will ignore attempts to set this property to null. You can change this property as + needed if you want to rotate your encryption key; Your data will be unaffected. Encryption with + customer-managed keys is not available for free search services, and is only available for paid + services created on or after January 1, 2019. + :type encryption_key: ~azure.search.documents.models.SearchResourceEncryptionKey + :param similarity: The type of similarity algorithm to be used when scoring and ranking the + documents matching a search query. The similarity algorithm can only be defined at index + creation time and cannot be modified on existing indexes. If null, the ClassicSimilarity + algorithm is used. + :type similarity: ~azure.search.documents.models.Similarity + :param e_tag: The ETag of the index. + :type e_tag: str + """ + + _validation = { + 'name': {'required': True}, + 'fields': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'fields': {'key': 'fields', 'type': '[SearchField]'}, + 'scoring_profiles': {'key': 'scoringProfiles', 'type': '[ScoringProfile]'}, + 'default_scoring_profile': {'key': 'defaultScoringProfile', 'type': 'str'}, + 'cors_options': {'key': 'corsOptions', 'type': 'CorsOptions'}, + 'suggesters': {'key': 'suggesters', 'type': '[Suggester]'}, + 'analyzers': {'key': 'analyzers', 'type': '[LexicalAnalyzer]'}, + 'tokenizers': {'key': 'tokenizers', 'type': '[LexicalTokenizer]'}, + 'token_filters': {'key': 'tokenFilters', 'type': '[TokenFilter]'}, + 'char_filters': {'key': 'charFilters', 'type': '[CharFilter]'}, + 'encryption_key': {'key': 'encryptionKey', 'type': 'SearchResourceEncryptionKey'}, + 'similarity': {'key': 'similarity', 'type': 'Similarity'}, + 'e_tag': {'key': '@odata\\.etag', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(SearchIndex, self).__init__(**kwargs) + self.name = kwargs['name'] + self.fields = kwargs['fields'] + self.scoring_profiles = kwargs.get('scoring_profiles', None) + self.default_scoring_profile = kwargs.get('default_scoring_profile', None) + self.cors_options = kwargs.get('cors_options', None) + self.suggesters = kwargs.get('suggesters', None) + self.analyzers = kwargs.get('analyzers', None) + self.tokenizers = kwargs.get('tokenizers', None) + self.token_filters = kwargs.get('token_filters', None) + self.char_filters = kwargs.get('char_filters', None) + self.encryption_key = kwargs.get('encryption_key', None) + self.similarity = kwargs.get('similarity', None) + self.e_tag = kwargs.get('e_tag', None) + + +class SearchIndexer(msrest.serialization.Model): + """Represents an indexer. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the indexer. + :type name: str + :param description: The description of the indexer. + :type description: str + :param data_source_name: Required. The name of the datasource from which this indexer reads + data. + :type data_source_name: str + :param skillset_name: The name of the skillset executing with this indexer. + :type skillset_name: str + :param target_index_name: Required. The name of the index to which this indexer writes data. + :type target_index_name: str + :param schedule: The schedule for this indexer. + :type schedule: ~azure.search.documents.models.IndexingSchedule + :param parameters: Parameters for indexer execution. 
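SearchField and SearchIndex above now read name, type, and fields with kwargs['...'], so omitting them fails at construction time instead of producing a model full of None values. A minimal index sketch (import path assumed from the docstring references; field names are hypothetical):

from azure.search.documents.models import SearchField, SearchIndex

fields = [
    SearchField(name="hotelId", type="Edm.String", key=True),
    SearchField(name="description", type="Edm.String",
                searchable=True, analyzer="en.lucene"),
]
index = SearchIndex(name="hotels", fields=fields)  # KeyError if name or fields is missing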
+ :type parameters: ~azure.search.documents.models.IndexingParameters + :param field_mappings: Defines mappings between fields in the data source and corresponding + target fields in the index. + :type field_mappings: list[~azure.search.documents.models.FieldMapping] + :param output_field_mappings: Output field mappings are applied after enrichment and + immediately before indexing. + :type output_field_mappings: list[~azure.search.documents.models.FieldMapping] + :param is_disabled: A value indicating whether the indexer is disabled. Default is false. + :type is_disabled: bool + :param e_tag: The ETag of the indexer. + :type e_tag: str + """ + + _validation = { + 'name': {'required': True}, + 'data_source_name': {'required': True}, + 'target_index_name': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'description': {'key': 'description', 'type': 'str'}, + 'data_source_name': {'key': 'dataSourceName', 'type': 'str'}, + 'skillset_name': {'key': 'skillsetName', 'type': 'str'}, + 'target_index_name': {'key': 'targetIndexName', 'type': 'str'}, + 'schedule': {'key': 'schedule', 'type': 'IndexingSchedule'}, + 'parameters': {'key': 'parameters', 'type': 'IndexingParameters'}, + 'field_mappings': {'key': 'fieldMappings', 'type': '[FieldMapping]'}, + 'output_field_mappings': {'key': 'outputFieldMappings', 'type': '[FieldMapping]'}, + 'is_disabled': {'key': 'disabled', 'type': 'bool'}, + 'e_tag': {'key': '@odata\\.etag', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(SearchIndexer, self).__init__(**kwargs) + self.name = kwargs['name'] + self.description = kwargs.get('description', None) + self.data_source_name = kwargs['data_source_name'] + self.skillset_name = kwargs.get('skillset_name', None) + self.target_index_name = kwargs['target_index_name'] + self.schedule = kwargs.get('schedule', None) + self.parameters = kwargs.get('parameters', None) + self.field_mappings = kwargs.get('field_mappings', None) + self.output_field_mappings = kwargs.get('output_field_mappings', None) + self.is_disabled = kwargs.get('is_disabled', False) + self.e_tag = kwargs.get('e_tag', None) + + +class SearchIndexerDataContainer(msrest.serialization.Model): + """Represents information about the entity (such as Azure SQL table or CosmosDB collection) that will be indexed. All required parameters must be populated in order to send to Azure. - :param odata_type: Required. Identifies the concrete type of the char filter.Constant filled by - server. - :type odata_type: str - :param name: Required. The name of the char filter. It must only contain letters, digits, - spaces, dashes or underscores, can only start and end with alphanumeric characters, and is - limited to 128 characters. + :param name: Required. The name of the table or view (for Azure SQL data source) or collection + (for CosmosDB data source) that will be indexed. :type name: str - :param pattern: Required. A regular expression pattern. - :type pattern: str - :param replacement: Required. The replacement text. - :type replacement: str + :param query: A query that is applied to this data container. The syntax and meaning of this + parameter is datasource-specific. Not supported by Azure SQL datasources. 
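The indexer model requires only name, data_source_name, and target_index_name; everything else stays optional. A hypothetical construction under the same import assumption:

from azure.search.documents.models import SearchIndexer

indexer = SearchIndexer(
    name="hotels-indexer",
    data_source_name="hotels-datasource",
    target_index_name="hotels",  # is_disabled defaults to False per __init__
)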
+ :type query: str """ _validation = { - 'odata_type': {'required': True}, 'name': {'required': True}, - 'pattern': {'required': True}, - 'replacement': {'required': True}, } _attribute_map = { - 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, - 'pattern': {'key': 'pattern', 'type': 'str'}, - 'replacement': {'key': 'replacement', 'type': 'str'}, + 'query': {'key': 'query', 'type': 'str'}, } def __init__( self, **kwargs ): - super(PatternReplaceCharFilter, self).__init__(**kwargs) - self.odata_type = '#Microsoft.Azure.Search.PatternReplaceCharFilter' - self.pattern = kwargs.get('pattern', None) - self.replacement = kwargs.get('replacement', None) + super(SearchIndexerDataContainer, self).__init__(**kwargs) + self.name = kwargs['name'] + self.query = kwargs.get('query', None) -class PatternReplaceTokenFilter(TokenFilter): - """A character filter that replaces characters in the input string. It uses a regular expression to identify character sequences to preserve and a replacement pattern to identify characters to replace. For example, given the input text "aa bb aa bb", pattern "(aa)\s+(bb)", and replacement "$1#$2", the result would be "aa#bb aa#bb". This token filter is implemented using Apache Lucene. +class SearchIndexerDataSource(msrest.serialization.Model): + """Represents a datasource definition, which can be used to configure an indexer. All required parameters must be populated in order to send to Azure. - :param odata_type: Required. Identifies the concrete type of the token filter.Constant filled - by server. - :type odata_type: str - :param name: Required. The name of the token filter. It must only contain letters, digits, - spaces, dashes or underscores, can only start and end with alphanumeric characters, and is - limited to 128 characters. + :param name: Required. The name of the datasource. :type name: str - :param pattern: Required. A regular expression pattern. - :type pattern: str - :param replacement: Required. The replacement text. - :type replacement: str + :param description: The description of the datasource. + :type description: str + :param type: Required. The type of the datasource. Possible values include: "azuresql", + "cosmosdb", "azureblob", "azuretable", "mysql". + :type type: str or ~azure.search.documents.models.SearchIndexerDataSourceType + :param credentials: Required. Credentials for the datasource. + :type credentials: ~azure.search.documents.models.DataSourceCredentials + :param container: Required. The data container for the datasource. + :type container: ~azure.search.documents.models.SearchIndexerDataContainer + :param data_change_detection_policy: The data change detection policy for the datasource. + :type data_change_detection_policy: ~azure.search.documents.models.DataChangeDetectionPolicy + :param data_deletion_detection_policy: The data deletion detection policy for the datasource. + :type data_deletion_detection_policy: + ~azure.search.documents.models.DataDeletionDetectionPolicy + :param e_tag: The ETag of the data source. 
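Putting the two datasource models together. This is a sketch: the connection_string keyword on DataSourceCredentials is assumed from the credentials type the docstring references, and the placeholder values are hypothetical:

from azure.search.documents.models import (
    DataSourceCredentials,
    SearchIndexerDataContainer,
    SearchIndexerDataSource,
)

data_source = SearchIndexerDataSource(
    name="hotels-datasource",
    type="azureblob",  # one of the SearchIndexerDataSourceType values above
    credentials=DataSourceCredentials(connection_string="<connection-string>"),
    container=SearchIndexerDataContainer(name="hotel-docs"),
)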
+ :type e_tag: str """ _validation = { - 'odata_type': {'required': True}, 'name': {'required': True}, - 'pattern': {'required': True}, - 'replacement': {'required': True}, + 'type': {'required': True}, + 'credentials': {'required': True}, + 'container': {'required': True}, } _attribute_map = { - 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, - 'pattern': {'key': 'pattern', 'type': 'str'}, - 'replacement': {'key': 'replacement', 'type': 'str'}, + 'description': {'key': 'description', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'credentials': {'key': 'credentials', 'type': 'DataSourceCredentials'}, + 'container': {'key': 'container', 'type': 'SearchIndexerDataContainer'}, + 'data_change_detection_policy': {'key': 'dataChangeDetectionPolicy', 'type': 'DataChangeDetectionPolicy'}, + 'data_deletion_detection_policy': {'key': 'dataDeletionDetectionPolicy', 'type': 'DataDeletionDetectionPolicy'}, + 'e_tag': {'key': '@odata\\.etag', 'type': 'str'}, } def __init__( self, **kwargs ): - super(PatternReplaceTokenFilter, self).__init__(**kwargs) - self.odata_type = '#Microsoft.Azure.Search.PatternReplaceTokenFilter' - self.pattern = kwargs.get('pattern', None) - self.replacement = kwargs.get('replacement', None) + super(SearchIndexerDataSource, self).__init__(**kwargs) + self.name = kwargs['name'] + self.description = kwargs.get('description', None) + self.type = kwargs['type'] + self.credentials = kwargs['credentials'] + self.container = kwargs['container'] + self.data_change_detection_policy = kwargs.get('data_change_detection_policy', None) + self.data_deletion_detection_policy = kwargs.get('data_deletion_detection_policy', None) + self.e_tag = kwargs.get('e_tag', None) -class PatternTokenizer(Tokenizer): - """Tokenizer that uses regex pattern matching to construct distinct tokens. This tokenizer is implemented using Apache Lucene. +class SearchIndexerError(msrest.serialization.Model): + """Represents an item- or document-level indexing error. + + Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. - :param odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by - server. - :type odata_type: str - :param name: Required. The name of the tokenizer. It must only contain letters, digits, spaces, - dashes or underscores, can only start and end with alphanumeric characters, and is limited to - 128 characters. - :type name: str - :param pattern: A regular expression pattern to match token separators. Default is an - expression that matches one or more whitespace characters. - :type pattern: str - :param flags: Regular expression flags. Possible values include: 'CANON_EQ', - 'CASE_INSENSITIVE', 'COMMENTS', 'DOTALL', 'LITERAL', 'MULTILINE', 'UNICODE_CASE', 'UNIX_LINES'. - :type flags: str or ~search_service_client.models.RegexFlags - :param group: The zero-based ordinal of the matching group in the regular expression pattern to - extract into tokens. Use -1 if you want to use the entire pattern to split the input into - tokens, irrespective of matching groups. Default is -1. - :type group: int + :ivar key: The key of the item for which indexing failed. + :vartype key: str + :ivar error_message: Required. The message describing the error that occurred while processing + the item. + :vartype error_message: str + :ivar status_code: Required. The status code indicating why the indexing operation failed. 
+ Possible values include: 400 for a malformed input document, 404 for document not found, 409 + for a version conflict, 422 when the index is temporarily unavailable, or 503 for when the + service is too busy. + :vartype status_code: int + :ivar name: The name of the source at which the error originated. For example, this could refer + to a particular skill in the attached skillset. This may not be always available. + :vartype name: str + :ivar details: Additional, verbose details about the error to assist in debugging the indexer. + This may not be always available. + :vartype details: str + :ivar documentation_link: A link to a troubleshooting guide for these classes of errors. This + may not be always available. + :vartype documentation_link: str """ _validation = { - 'odata_type': {'required': True}, - 'name': {'required': True}, + 'key': {'readonly': True}, + 'error_message': {'required': True, 'readonly': True}, + 'status_code': {'required': True, 'readonly': True}, + 'name': {'readonly': True}, + 'details': {'readonly': True}, + 'documentation_link': {'readonly': True}, } _attribute_map = { - 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'key': {'key': 'key', 'type': 'str'}, + 'error_message': {'key': 'errorMessage', 'type': 'str'}, + 'status_code': {'key': 'statusCode', 'type': 'int'}, 'name': {'key': 'name', 'type': 'str'}, - 'pattern': {'key': 'pattern', 'type': 'str'}, - 'flags': {'key': 'flags', 'type': 'str'}, - 'group': {'key': 'group', 'type': 'int'}, + 'details': {'key': 'details', 'type': 'str'}, + 'documentation_link': {'key': 'documentationLink', 'type': 'str'}, } def __init__( self, **kwargs ): - super(PatternTokenizer, self).__init__(**kwargs) - self.odata_type = '#Microsoft.Azure.Search.PatternTokenizer' - self.pattern = kwargs.get('pattern', "\W+") - self.flags = kwargs.get('flags', None) - self.group = kwargs.get('group', -1) + super(SearchIndexerError, self).__init__(**kwargs) + self.key = None + self.error_message = None + self.status_code = None + self.name = None + self.details = None + self.documentation_link = None -class PhoneticTokenFilter(TokenFilter): - """Create tokens for phonetic matches. This token filter is implemented using Apache Lucene. +class SearchIndexerLimits(msrest.serialization.Model): + """SearchIndexerLimits. - All required parameters must be populated in order to send to Azure. + Variables are only populated by the server, and will be ignored when sending a request. - :param odata_type: Required. Identifies the concrete type of the token filter.Constant filled - by server. - :type odata_type: str - :param name: Required. The name of the token filter. It must only contain letters, digits, - spaces, dashes or underscores, can only start and end with alphanumeric characters, and is - limited to 128 characters. - :type name: str - :param encoder: The phonetic encoder to use. Default is "metaphone". Possible values include: - 'metaphone', 'doubleMetaphone', 'soundex', 'refinedSoundex', 'caverphone1', 'caverphone2', - 'cologne', 'nysiis', 'koelnerPhonetik', 'haasePhonetik', 'beiderMorse'. - :type encoder: str or ~search_service_client.models.PhoneticEncoder - :param replace_original_tokens: A value indicating whether encoded tokens should replace - original tokens. If false, encoded tokens are added as synonyms. Default is true. - :type replace_original_tokens: bool + :ivar max_run_time: The maximum duration that the indexer is permitted to run for one + execution. 
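SearchIndexerError and SearchIndexerLimits are server-populated models: their __init__ resets every field to None regardless of what is passed in, so instances only carry data once msrest deserializes a service response. A quick illustration (hypothetical usage, import path assumed as before):

from azure.search.documents.models import SearchIndexerError

err = SearchIndexerError(error_message="ignored")
assert err.error_message is None  # readonly fields are set by the service only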
+ :vartype max_run_time: ~datetime.timedelta + :ivar max_document_extraction_size: The maximum size of a document, in bytes, which will be + considered valid for indexing. + :vartype max_document_extraction_size: long + :ivar max_document_content_characters_to_extract: The maximum number of characters that will be + extracted from a document picked up for indexing. + :vartype max_document_content_characters_to_extract: long """ _validation = { - 'odata_type': {'required': True}, - 'name': {'required': True}, + 'max_run_time': {'readonly': True}, + 'max_document_extraction_size': {'readonly': True}, + 'max_document_content_characters_to_extract': {'readonly': True}, } _attribute_map = { - 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'encoder': {'key': 'encoder', 'type': 'str'}, - 'replace_original_tokens': {'key': 'replace', 'type': 'bool'}, + 'max_run_time': {'key': 'maxRunTime', 'type': 'duration'}, + 'max_document_extraction_size': {'key': 'maxDocumentExtractionSize', 'type': 'long'}, + 'max_document_content_characters_to_extract': {'key': 'maxDocumentContentCharactersToExtract', 'type': 'long'}, } def __init__( self, **kwargs ): - super(PhoneticTokenFilter, self).__init__(**kwargs) - self.odata_type = '#Microsoft.Azure.Search.PhoneticTokenFilter' - self.encoder = kwargs.get('encoder', None) - self.replace_original_tokens = kwargs.get('replace_original_tokens', True) + super(SearchIndexerLimits, self).__init__(**kwargs) + self.max_run_time = None + self.max_document_extraction_size = None + self.max_document_content_characters_to_extract = None -class RequestOptions(msrest.serialization.Model): - """Parameter group. +class SearchIndexerSkillset(msrest.serialization.Model): + """A list of skills. - :param x_ms_client_request_id: The tracking ID sent with the request to help with debugging. - :type x_ms_client_request_id: str + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the skillset. + :type name: str + :param description: Required. The description of the skillset. + :type description: str + :param skills: Required. A list of skills in the skillset. + :type skills: list[~azure.search.documents.models.SearchIndexerSkill] + :param cognitive_services_account: Details about cognitive services to be used when running + skills. + :type cognitive_services_account: ~azure.search.documents.models.CognitiveServicesAccount + :param e_tag: The ETag of the skillset. 
+ :type e_tag: str """ + _validation = { + 'name': {'required': True}, + 'description': {'required': True}, + 'skills': {'required': True}, + } + _attribute_map = { - 'x_ms_client_request_id': {'key': 'x-ms-client-request-id', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'description': {'key': 'description', 'type': 'str'}, + 'skills': {'key': 'skills', 'type': '[SearchIndexerSkill]'}, + 'cognitive_services_account': {'key': 'cognitiveServices', 'type': 'CognitiveServicesAccount'}, + 'e_tag': {'key': '@odata\\.etag', 'type': 'str'}, } def __init__( self, **kwargs ): - super(RequestOptions, self).__init__(**kwargs) - self.x_ms_client_request_id = kwargs.get('x_ms_client_request_id', None) + super(SearchIndexerSkillset, self).__init__(**kwargs) + self.name = kwargs['name'] + self.description = kwargs['description'] + self.skills = kwargs['skills'] + self.cognitive_services_account = kwargs.get('cognitive_services_account', None) + self.e_tag = kwargs.get('e_tag', None) -class ResourceCounter(msrest.serialization.Model): - """Represents a resource's usage and quota. +class SearchIndexerStatus(msrest.serialization.Model): + """Represents the current status and execution history of an indexer. + + Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. - :param usage: Required. The resource usage amount. - :type usage: long - :param quota: The resource amount quota. - :type quota: long + :ivar status: Required. Overall indexer status. Possible values include: "unknown", "error", + "running". + :vartype status: str or ~azure.search.documents.models.IndexerStatus + :ivar last_result: The result of the most recent or an in-progress indexer execution. + :vartype last_result: ~azure.search.documents.models.IndexerExecutionResult + :ivar execution_history: Required. History of the recent indexer executions, sorted in reverse + chronological order. + :vartype execution_history: list[~azure.search.documents.models.IndexerExecutionResult] + :ivar limits: Required. The execution limits for the indexer. + :vartype limits: ~azure.search.documents.models.SearchIndexerLimits """ _validation = { - 'usage': {'required': True}, + 'status': {'required': True, 'readonly': True}, + 'last_result': {'readonly': True}, + 'execution_history': {'required': True, 'readonly': True}, + 'limits': {'required': True, 'readonly': True}, } _attribute_map = { - 'usage': {'key': 'usage', 'type': 'long'}, - 'quota': {'key': 'quota', 'type': 'long'}, + 'status': {'key': 'status', 'type': 'str'}, + 'last_result': {'key': 'lastResult', 'type': 'IndexerExecutionResult'}, + 'execution_history': {'key': 'executionHistory', 'type': '[IndexerExecutionResult]'}, + 'limits': {'key': 'limits', 'type': 'SearchIndexerLimits'}, } def __init__( self, **kwargs ): - super(ResourceCounter, self).__init__(**kwargs) - self.usage = kwargs.get('usage', None) - self.quota = kwargs.get('quota', None) + super(SearchIndexerStatus, self).__init__(**kwargs) + self.status = None + self.last_result = None + self.execution_history = None + self.limits = None -class ScoringProfile(msrest.serialization.Model): - """Defines parameters for a search index that influence scoring in search queries. +class SearchIndexerWarning(msrest.serialization.Model): + """Represents an item-level warning. + + Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. 
- :param name: Required. The name of the scoring profile. - :type name: str - :param text_weights: Parameters that boost scoring based on text matches in certain index - fields. - :type text_weights: ~search_service_client.models.TextWeights - :param functions: The collection of functions that influence the scoring of documents. - :type functions: list[~search_service_client.models.ScoringFunction] - :param function_aggregation: A value indicating how the results of individual scoring functions - should be combined. Defaults to "Sum". Ignored if there are no scoring functions. Possible - values include: 'sum', 'average', 'minimum', 'maximum', 'firstMatching'. - :type function_aggregation: str or ~search_service_client.models.ScoringFunctionAggregation + :ivar key: The key of the item which generated a warning. + :vartype key: str + :ivar message: Required. The message describing the warning that occurred while processing the + item. + :vartype message: str + :ivar name: The name of the source at which the warning originated. For example, this could + refer to a particular skill in the attached skillset. This may not be always available. + :vartype name: str + :ivar details: Additional, verbose details about the warning to assist in debugging the + indexer. This may not be always available. + :vartype details: str + :ivar documentation_link: A link to a troubleshooting guide for these classes of warnings. This + may not be always available. + :vartype documentation_link: str """ _validation = { - 'name': {'required': True}, + 'key': {'readonly': True}, + 'message': {'required': True, 'readonly': True}, + 'name': {'readonly': True}, + 'details': {'readonly': True}, + 'documentation_link': {'readonly': True}, } _attribute_map = { + 'key': {'key': 'key', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, - 'text_weights': {'key': 'text', 'type': 'TextWeights'}, - 'functions': {'key': 'functions', 'type': '[ScoringFunction]'}, - 'function_aggregation': {'key': 'functionAggregation', 'type': 'str'}, + 'details': {'key': 'details', 'type': 'str'}, + 'documentation_link': {'key': 'documentationLink', 'type': 'str'}, } def __init__( self, **kwargs ): - super(ScoringProfile, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.text_weights = kwargs.get('text_weights', None) - self.functions = kwargs.get('functions', None) - self.function_aggregation = kwargs.get('function_aggregation', None) - + super(SearchIndexerWarning, self).__init__(**kwargs) + self.key = None + self.message = None + self.name = None + self.details = None + self.documentation_link = None -class SearchError(msrest.serialization.Model): - """Describes an error condition for the Azure Cognitive Search API. - Variables are only populated by the server, and will be ignored when sending a request. +class SearchResourceEncryptionKey(msrest.serialization.Model): + """A customer-managed encryption key in Azure Key Vault. Keys that you create and manage can be used to encrypt or decrypt data-at-rest in Azure Cognitive Search, such as indexes and synonym maps. All required parameters must be populated in order to send to Azure. - :ivar code: One of a server-defined set of error codes. - :vartype code: str - :ivar message: Required. A human-readable representation of the error. - :vartype message: str - :ivar details: An array of details about specific errors that led to this reported error. 
- :vartype details: list[~search_service_client.models.SearchError] + :param key_name: Required. The name of your Azure Key Vault key to be used to encrypt your data + at rest. + :type key_name: str + :param key_version: Required. The version of your Azure Key Vault key to be used to encrypt + your data at rest. + :type key_version: str + :param vault_uri: Required. The URI of your Azure Key Vault, also referred to as DNS name, that + contains the key to be used to encrypt your data at rest. An example URI might be https://my- + keyvault-name.vault.azure.net. + :type vault_uri: str + :param access_credentials: Optional Azure Active Directory credentials used for accessing your + Azure Key Vault. Not required if using managed identity instead. + :type access_credentials: + ~azure.search.documents.models.AzureActiveDirectoryApplicationCredentials """ _validation = { - 'code': {'readonly': True}, - 'message': {'required': True, 'readonly': True}, - 'details': {'readonly': True}, + 'key_name': {'required': True}, + 'key_version': {'required': True}, + 'vault_uri': {'required': True}, } _attribute_map = { - 'code': {'key': 'code', 'type': 'str'}, - 'message': {'key': 'message', 'type': 'str'}, - 'details': {'key': 'details', 'type': '[SearchError]'}, + 'key_name': {'key': 'keyVaultKeyName', 'type': 'str'}, + 'key_version': {'key': 'keyVaultKeyVersion', 'type': 'str'}, + 'vault_uri': {'key': 'keyVaultUri', 'type': 'str'}, + 'access_credentials': {'key': 'accessCredentials', 'type': 'AzureActiveDirectoryApplicationCredentials'}, } def __init__( self, **kwargs ): - super(SearchError, self).__init__(**kwargs) - self.code = None - self.message = None - self.details = None + super(SearchResourceEncryptionKey, self).__init__(**kwargs) + self.key_name = kwargs['key_name'] + self.key_version = kwargs['key_version'] + self.vault_uri = kwargs['vault_uri'] + self.access_credentials = kwargs.get('access_credentials', None) -class SentimentSkill(Skill): +class SentimentSkill(SearchIndexerSkill): """Text analytics positive-negative sentiment analysis, scored as a floating point value in a range of zero to 1. All required parameters must be populated in order to send to Azure. @@ -3852,14 +4137,14 @@ class SentimentSkill(Skill): :type context: str :param inputs: Required. Inputs of the skills could be a column in the source data set, or the output of an upstream skill. - :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :type inputs: list[~azure.search.documents.models.InputFieldMappingEntry] :param outputs: Required. The output of a skill is either a field in a search index, or a value that can be consumed as an input by another skill. - :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + :type outputs: list[~azure.search.documents.models.OutputFieldMappingEntry] :param default_language_code: A value indicating which language code to use. Default is en. - Possible values include: 'da', 'nl', 'en', 'fi', 'fr', 'de', 'el', 'it', 'no', 'pl', 'pt-PT', - 'ru', 'es', 'sv', 'tr'. - :type default_language_code: str or ~search_service_client.models.SentimentSkillLanguage + Possible values include: "da", "nl", "en", "fi", "fr", "de", "el", "it", "no", "pl", "pt-PT", + "ru", "es", "sv", "tr". + :type default_language_code: str or ~azure.search.documents.models.SentimentSkillLanguage """ _validation = { @@ -3893,19 +4178,19 @@ class ServiceCounters(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. 
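The recurring change in this patch, kwargs.get('x', None) becoming kwargs['x'] for required parameters, means a missing required argument now raises KeyError immediately. A sketch with the encryption-key model above (vault URI borrowed from its docstring; import path assumed as before):

from azure.search.documents.models import SearchResourceEncryptionKey

key = SearchResourceEncryptionKey(
    key_name="my-key",
    key_version="1",
    vault_uri="https://my-keyvault-name.vault.azure.net",
)

try:
    SearchResourceEncryptionKey(key_name="my-key")  # key_version and vault_uri omitted
except KeyError as missing:
    print("required parameter not supplied:", missing)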
:param document_counter: Required. Total number of documents across all indexes in the service. - :type document_counter: ~search_service_client.models.ResourceCounter + :type document_counter: ~azure.search.documents.models.ResourceCounter :param index_counter: Required. Total number of indexes. - :type index_counter: ~search_service_client.models.ResourceCounter + :type index_counter: ~azure.search.documents.models.ResourceCounter :param indexer_counter: Required. Total number of indexers. - :type indexer_counter: ~search_service_client.models.ResourceCounter + :type indexer_counter: ~azure.search.documents.models.ResourceCounter :param data_source_counter: Required. Total number of data sources. - :type data_source_counter: ~search_service_client.models.ResourceCounter + :type data_source_counter: ~azure.search.documents.models.ResourceCounter :param storage_size_counter: Required. Total size of used storage in bytes. - :type storage_size_counter: ~search_service_client.models.ResourceCounter + :type storage_size_counter: ~azure.search.documents.models.ResourceCounter :param synonym_map_counter: Required. Total number of synonym maps. - :type synonym_map_counter: ~search_service_client.models.ResourceCounter + :type synonym_map_counter: ~azure.search.documents.models.ResourceCounter :param skillset_counter: Required. Total number of skillsets. - :type skillset_counter: ~search_service_client.models.ResourceCounter + :type skillset_counter: ~azure.search.documents.models.ResourceCounter """ _validation = { @@ -3933,13 +4218,13 @@ def __init__( **kwargs ): super(ServiceCounters, self).__init__(**kwargs) - self.document_counter = kwargs.get('document_counter', None) - self.index_counter = kwargs.get('index_counter', None) - self.indexer_counter = kwargs.get('indexer_counter', None) - self.data_source_counter = kwargs.get('data_source_counter', None) - self.storage_size_counter = kwargs.get('storage_size_counter', None) - self.synonym_map_counter = kwargs.get('synonym_map_counter', None) - self.skillset_counter = kwargs.get('skillset_counter', None) + self.document_counter = kwargs['document_counter'] + self.index_counter = kwargs['index_counter'] + self.indexer_counter = kwargs['indexer_counter'] + self.data_source_counter = kwargs['data_source_counter'] + self.storage_size_counter = kwargs['storage_size_counter'] + self.synonym_map_counter = kwargs['synonym_map_counter'] + self.skillset_counter = kwargs['skillset_counter'] class ServiceLimits(msrest.serialization.Model): @@ -3982,9 +4267,9 @@ class ServiceStatistics(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. :param counters: Required. Service level resource counters. - :type counters: ~search_service_client.models.ServiceCounters + :type counters: ~azure.search.documents.models.ServiceCounters :param limits: Required. Service level general limits. - :type limits: ~search_service_client.models.ServiceLimits + :type limits: ~azure.search.documents.models.ServiceLimits """ _validation = { @@ -4002,11 +4287,11 @@ def __init__( **kwargs ): super(ServiceStatistics, self).__init__(**kwargs) - self.counters = kwargs.get('counters', None) - self.limits = kwargs.get('limits', None) + self.counters = kwargs['counters'] + self.limits = kwargs['limits'] -class ShaperSkill(Skill): +class ShaperSkill(SearchIndexerSkill): """A skill for reshaping the outputs. It creates a complex type to support composite fields (also known as multipart fields). 
All required parameters must be populated in order to send to Azure. @@ -4026,10 +4311,10 @@ class ShaperSkill(Skill): :type context: str :param inputs: Required. Inputs of the skills could be a column in the source data set, or the output of an upstream skill. - :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :type inputs: list[~azure.search.documents.models.InputFieldMappingEntry] :param outputs: Required. The output of a skill is either a field in a search index, or a value that can be consumed as an input by another skill. - :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + :type outputs: list[~azure.search.documents.models.OutputFieldMappingEntry] """ _validation = { @@ -4119,50 +4404,6 @@ def __init__( self.filter_token = kwargs.get('filter_token', "_") -class Skillset(msrest.serialization.Model): - """A list of skills. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. The name of the skillset. - :type name: str - :param description: Required. The description of the skillset. - :type description: str - :param skills: Required. A list of skills in the skillset. - :type skills: list[~search_service_client.models.Skill] - :param cognitive_services_account: Details about cognitive services to be used when running - skills. - :type cognitive_services_account: ~search_service_client.models.CognitiveServicesAccount - :param e_tag: The ETag of the skillset. - :type e_tag: str - """ - - _validation = { - 'name': {'required': True}, - 'description': {'required': True}, - 'skills': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'description': {'key': 'description', 'type': 'str'}, - 'skills': {'key': 'skills', 'type': '[Skill]'}, - 'cognitive_services_account': {'key': 'cognitiveServices', 'type': 'CognitiveServicesAccount'}, - 'e_tag': {'key': '@odata\\.etag', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(Skillset, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.description = kwargs.get('description', None) - self.skills = kwargs.get('skills', None) - self.cognitive_services_account = kwargs.get('cognitive_services_account', None) - self.e_tag = kwargs.get('e_tag', None) - - class SnowballTokenFilter(TokenFilter): """A filter that stems words using a Snowball-generated stemmer. This token filter is implemented using Apache Lucene. @@ -4175,11 +4416,11 @@ class SnowballTokenFilter(TokenFilter): spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. :type name: str - :param language: Required. The language to use. Possible values include: 'armenian', 'basque', - 'catalan', 'danish', 'dutch', 'english', 'finnish', 'french', 'german', 'german2', 'hungarian', - 'italian', 'kp', 'lovins', 'norwegian', 'porter', 'portuguese', 'romanian', 'russian', - 'spanish', 'swedish', 'turkish'. - :type language: str or ~search_service_client.models.SnowballTokenFilterLanguage + :param language: Required. The language to use. Possible values include: "armenian", "basque", + "catalan", "danish", "dutch", "english", "finnish", "french", "german", "german2", "hungarian", + "italian", "kp", "lovins", "norwegian", "porter", "portuguese", "romanian", "russian", + "spanish", "swedish", "turkish". 
+ :type language: str or ~azure.search.documents.models.SnowballTokenFilterLanguage """ _validation = { @@ -4200,7 +4441,7 @@ def __init__( ): super(SnowballTokenFilter, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.SnowballTokenFilter' - self.language = kwargs.get('language', None) + self.language = kwargs['language'] class SoftDeleteColumnDeletionDetectionPolicy(DataDeletionDetectionPolicy): @@ -4237,7 +4478,7 @@ def __init__( self.soft_delete_marker_value = kwargs.get('soft_delete_marker_value', None) -class SplitSkill(Skill): +class SplitSkill(SearchIndexerSkill): """A skill to split a string into chunks of text. All required parameters must be populated in order to send to Azure. @@ -4257,16 +4498,16 @@ class SplitSkill(Skill): :type context: str :param inputs: Required. Inputs of the skills could be a column in the source data set, or the output of an upstream skill. - :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :type inputs: list[~azure.search.documents.models.InputFieldMappingEntry] :param outputs: Required. The output of a skill is either a field in a search index, or a value that can be consumed as an input by another skill. - :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + :type outputs: list[~azure.search.documents.models.OutputFieldMappingEntry] :param default_language_code: A value indicating which language code to use. Default is en. - Possible values include: 'da', 'de', 'en', 'es', 'fi', 'fr', 'it', 'ko', 'pt'. - :type default_language_code: str or ~search_service_client.models.SplitSkillLanguage + Possible values include: "da", "de", "en", "es", "fi", "fr", "it", "ko", "pt". + :type default_language_code: str or ~azure.search.documents.models.SplitSkillLanguage :param text_split_mode: A value indicating which split mode to perform. Possible values - include: 'pages', 'sentences'. - :type text_split_mode: str or ~search_service_client.models.TextSplitMode + include: "pages", "sentences". + :type text_split_mode: str or ~azure.search.documents.models.TextSplitMode :param maximum_page_length: The desired maximum page length. Default is 10000. :type maximum_page_length: int """ @@ -4326,123 +4567,6 @@ def __init__( self.odata_type = '#Microsoft.Azure.Search.SqlIntegratedChangeTrackingPolicy' -class StandardAnalyzer(Analyzer): - """Standard Apache Lucene analyzer; Composed of the standard tokenizer, lowercase filter and stop filter. - - All required parameters must be populated in order to send to Azure. - - :param odata_type: Required. Identifies the concrete type of the analyzer.Constant filled by - server. - :type odata_type: str - :param name: Required. The name of the analyzer. It must only contain letters, digits, spaces, - dashes or underscores, can only start and end with alphanumeric characters, and is limited to - 128 characters. - :type name: str - :param max_token_length: The maximum token length. Default is 255. Tokens longer than the - maximum length are split. The maximum token length that can be used is 300 characters. - :type max_token_length: int - :param stopwords: A list of stopwords. 
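# A short sketch of the filter above: `language` is now mandatory, matching its
# _validation entry, and the value is one of the SnowballTokenFilterLanguage
# strings quoted in the docstring. Import path follows the docstring references.
from azure.search.documents.models import SnowballTokenFilter

snowball = SnowballTokenFilter(name="my_snowball", language="english")
# The odata_type discriminator is preset by the constructor, so callers never
# pass '#Microsoft.Azure.Search.SnowballTokenFilter' themselves.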
- :type stopwords: list[str] - """ - - _validation = { - 'odata_type': {'required': True}, - 'name': {'required': True}, - 'max_token_length': {'maximum': 300}, - } - - _attribute_map = { - 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'max_token_length': {'key': 'maxTokenLength', 'type': 'int'}, - 'stopwords': {'key': 'stopwords', 'type': '[str]'}, - } - - def __init__( - self, - **kwargs - ): - super(StandardAnalyzer, self).__init__(**kwargs) - self.odata_type = '#Microsoft.Azure.Search.StandardAnalyzer' - self.max_token_length = kwargs.get('max_token_length', 255) - self.stopwords = kwargs.get('stopwords', None) - - -class StandardTokenizer(Tokenizer): - """Breaks text following the Unicode Text Segmentation rules. This tokenizer is implemented using Apache Lucene. - - All required parameters must be populated in order to send to Azure. - - :param odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by - server. - :type odata_type: str - :param name: Required. The name of the tokenizer. It must only contain letters, digits, spaces, - dashes or underscores, can only start and end with alphanumeric characters, and is limited to - 128 characters. - :type name: str - :param max_token_length: The maximum token length. Default is 255. Tokens longer than the - maximum length are split. - :type max_token_length: int - """ - - _validation = { - 'odata_type': {'required': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'max_token_length': {'key': 'maxTokenLength', 'type': 'int'}, - } - - def __init__( - self, - **kwargs - ): - super(StandardTokenizer, self).__init__(**kwargs) - self.odata_type = '#Microsoft.Azure.Search.StandardTokenizer' - self.max_token_length = kwargs.get('max_token_length', 255) - - -class StandardTokenizerV2(Tokenizer): - """Breaks text following the Unicode Text Segmentation rules. This tokenizer is implemented using Apache Lucene. - - All required parameters must be populated in order to send to Azure. - - :param odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by - server. - :type odata_type: str - :param name: Required. The name of the tokenizer. It must only contain letters, digits, spaces, - dashes or underscores, can only start and end with alphanumeric characters, and is limited to - 128 characters. - :type name: str - :param max_token_length: The maximum token length. Default is 255. Tokens longer than the - maximum length are split. The maximum token length that can be used is 300 characters. - :type max_token_length: int - """ - - _validation = { - 'odata_type': {'required': True}, - 'name': {'required': True}, - 'max_token_length': {'maximum': 300}, - } - - _attribute_map = { - 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'max_token_length': {'key': 'maxTokenLength', 'type': 'int'}, - } - - def __init__( - self, - **kwargs - ): - super(StandardTokenizerV2, self).__init__(**kwargs) - self.odata_type = '#Microsoft.Azure.Search.StandardTokenizerV2' - self.max_token_length = kwargs.get('max_token_length', 255) - - class StemmerOverrideTokenFilter(TokenFilter): """Provides the ability to override other stemming filters with custom dictionary-based stemming. Any dictionary-stemmed terms will be marked as keywords so that they will not be stemmed with stemmers down the chain. 
Must be placed before any stemming filters. This token filter is implemented using Apache Lucene. @@ -4478,7 +4602,7 @@ def __init__( ): super(StemmerOverrideTokenFilter, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.StemmerOverrideTokenFilter' - self.rules = kwargs.get('rules', None) + self.rules = kwargs['rules'] class StemmerTokenFilter(TokenFilter): @@ -4493,16 +4617,16 @@ class StemmerTokenFilter(TokenFilter): spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. :type name: str - :param language: Required. The language to use. Possible values include: 'arabic', 'armenian', - 'basque', 'brazilian', 'bulgarian', 'catalan', 'czech', 'danish', 'dutch', 'dutchKp', - 'english', 'lightEnglish', 'minimalEnglish', 'possessiveEnglish', 'porter2', 'lovins', - 'finnish', 'lightFinnish', 'french', 'lightFrench', 'minimalFrench', 'galician', - 'minimalGalician', 'german', 'german2', 'lightGerman', 'minimalGerman', 'greek', 'hindi', - 'hungarian', 'lightHungarian', 'indonesian', 'irish', 'italian', 'lightItalian', 'sorani', - 'latvian', 'norwegian', 'lightNorwegian', 'minimalNorwegian', 'lightNynorsk', 'minimalNynorsk', - 'portuguese', 'lightPortuguese', 'minimalPortuguese', 'portugueseRslp', 'romanian', 'russian', - 'lightRussian', 'spanish', 'lightSpanish', 'swedish', 'lightSwedish', 'turkish'. - :type language: str or ~search_service_client.models.StemmerTokenFilterLanguage + :param language: Required. The language to use. Possible values include: "arabic", "armenian", + "basque", "brazilian", "bulgarian", "catalan", "czech", "danish", "dutch", "dutchKp", + "english", "lightEnglish", "minimalEnglish", "possessiveEnglish", "porter2", "lovins", + "finnish", "lightFinnish", "french", "lightFrench", "minimalFrench", "galician", + "minimalGalician", "german", "german2", "lightGerman", "minimalGerman", "greek", "hindi", + "hungarian", "lightHungarian", "indonesian", "irish", "italian", "lightItalian", "sorani", + "latvian", "norwegian", "lightNorwegian", "minimalNorwegian", "lightNynorsk", "minimalNynorsk", + "portuguese", "lightPortuguese", "minimalPortuguese", "portugueseRslp", "romanian", "russian", + "lightRussian", "spanish", "lightSpanish", "swedish", "lightSwedish", "turkish". + :type language: str or ~azure.search.documents.models.StemmerTokenFilterLanguage """ _validation = { @@ -4523,10 +4647,10 @@ def __init__( ): super(StemmerTokenFilter, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.StemmerTokenFilter' - self.language = kwargs.get('language', None) + self.language = kwargs['language'] -class StopAnalyzer(Analyzer): +class StopAnalyzer(LexicalAnalyzer): """Divides text at non-letters; Applies the lowercase and stopword token filters. This analyzer is implemented using Apache Lucene. All required parameters must be populated in order to send to Azure. @@ -4578,12 +4702,12 @@ class StopwordsTokenFilter(TokenFilter): both be set. :type stopwords: list[str] :param stopwords_list: A predefined list of stopwords to use. This property and the stopwords - property cannot both be set. Default is English. Possible values include: 'arabic', 'armenian', - 'basque', 'brazilian', 'bulgarian', 'catalan', 'czech', 'danish', 'dutch', 'english', - 'finnish', 'french', 'galician', 'german', 'greek', 'hindi', 'hungarian', 'indonesian', - 'irish', 'italian', 'latvian', 'norwegian', 'persian', 'portuguese', 'romanian', 'russian', - 'sorani', 'spanish', 'swedish', 'thai', 'turkish'. 
- :type stopwords_list: str or ~search_service_client.models.StopwordsList + property cannot both be set. Default is English. Possible values include: "arabic", "armenian", + "basque", "brazilian", "bulgarian", "catalan", "czech", "danish", "dutch", "english", + "finnish", "french", "galician", "german", "greek", "hindi", "hungarian", "indonesian", + "irish", "italian", "latvian", "norwegian", "persian", "portuguese", "romanian", "russian", + "sorani", "spanish", "swedish", "thai", "turkish". + :type stopwords_list: str or ~azure.search.documents.models.StopwordsList :param ignore_case: A value indicating whether to ignore case. If true, all words are converted to lower case first. Default is false. :type ignore_case: bool @@ -4654,8 +4778,8 @@ def __init__( **kwargs ): super(Suggester, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.source_fields = kwargs.get('source_fields', None) + self.name = kwargs['name'] + self.source_fields = kwargs['source_fields'] class SynonymMap(msrest.serialization.Model): @@ -4681,7 +4805,7 @@ class SynonymMap(msrest.serialization.Model): needed if you want to rotate your encryption key; Your data will be unaffected. Encryption with customer-managed keys is not available for free search services, and is only available for paid services created on or after January 1, 2019. - :type encryption_key: ~search_service_client.models.EncryptionKey + :type encryption_key: ~azure.search.documents.models.SearchResourceEncryptionKey :param e_tag: The ETag of the synonym map. :type e_tag: str """ @@ -4696,7 +4820,7 @@ class SynonymMap(msrest.serialization.Model): 'name': {'key': 'name', 'type': 'str'}, 'format': {'key': 'format', 'type': 'str'}, 'synonyms': {'key': 'synonyms', 'type': 'str'}, - 'encryption_key': {'key': 'encryptionKey', 'type': 'EncryptionKey'}, + 'encryption_key': {'key': 'encryptionKey', 'type': 'SearchResourceEncryptionKey'}, 'e_tag': {'key': '@odata\\.etag', 'type': 'str'}, } @@ -4707,8 +4831,8 @@ def __init__( **kwargs ): super(SynonymMap, self).__init__(**kwargs) - self.name = kwargs.get('name', None) - self.synonyms = kwargs.get('synonyms', None) + self.name = kwargs['name'] + self.synonyms = kwargs['synonyms'] self.encryption_key = kwargs.get('encryption_key', None) self.e_tag = kwargs.get('e_tag', None) @@ -4764,7 +4888,7 @@ def __init__( ): super(SynonymTokenFilter, self).__init__(**kwargs) self.odata_type = '#Microsoft.Azure.Search.SynonymTokenFilter' - self.synonyms = kwargs.get('synonyms', None) + self.synonyms = kwargs['synonyms'] self.ignore_case = kwargs.get('ignore_case', False) self.expand = kwargs.get('expand', True) @@ -4783,11 +4907,11 @@ class TagScoringFunction(ScoringFunction): 1.0. :type boost: float :param interpolation: A value indicating how boosting will be interpolated across document - scores; defaults to "Linear". Possible values include: 'linear', 'constant', 'quadratic', - 'logarithmic'. - :type interpolation: str or ~search_service_client.models.ScoringFunctionInterpolation + scores; defaults to "Linear". Possible values include: "linear", "constant", "quadratic", + "logarithmic". + :type interpolation: str or ~azure.search.documents.models.ScoringFunctionInterpolation :param parameters: Required. Parameter values for the tag scoring function. 
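# A sketch combining two changes visible above: SynonymMap now requires `name`
# and `synonyms`, and its encryption_key is typed as SearchResourceEncryptionKey
# (the renamed EncryptionKey). Import path per the docstring references.
from azure.search.documents.models import SearchResourceEncryptionKey, SynonymMap

synonym_map = SynonymMap(
    name="my-synonym-map",
    synonyms="USA, United States, United States of America",  # rules as one string
    encryption_key=SearchResourceEncryptionKey(               # still optional
        key_name="my-key",
        key_version="1",
        vault_uri="https://my-keyvault-name.vault.azure.net",
    ),
)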
- :type parameters: ~search_service_client.models.TagScoringParameters + :type parameters: ~azure.search.documents.models.TagScoringParameters """ _validation = { @@ -4811,7 +4935,7 @@ def __init__( ): super(TagScoringFunction, self).__init__(**kwargs) self.type = 'tag' - self.parameters = kwargs.get('parameters', None) + self.parameters = kwargs['parameters'] class TagScoringParameters(msrest.serialization.Model): @@ -4837,10 +4961,10 @@ def __init__( **kwargs ): super(TagScoringParameters, self).__init__(**kwargs) - self.tags_parameter = kwargs.get('tags_parameter', None) + self.tags_parameter = kwargs['tags_parameter'] -class TextTranslationSkill(Skill): +class TextTranslationSkill(SearchIndexerSkill): """A skill to translate text from one language to another. All required parameters must be populated in order to send to Azure. @@ -4860,37 +4984,37 @@ class TextTranslationSkill(Skill): :type context: str :param inputs: Required. Inputs of the skills could be a column in the source data set, or the output of an upstream skill. - :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :type inputs: list[~azure.search.documents.models.InputFieldMappingEntry] :param outputs: Required. The output of a skill is either a field in a search index, or a value that can be consumed as an input by another skill. - :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + :type outputs: list[~azure.search.documents.models.OutputFieldMappingEntry] :param default_to_language_code: Required. The language code to translate documents into for - documents that don't specify the to language explicitly. Possible values include: 'af', 'ar', - 'bn', 'bs', 'bg', 'yue', 'ca', 'zh-Hans', 'zh-Hant', 'hr', 'cs', 'da', 'nl', 'en', 'et', 'fj', - 'fil', 'fi', 'fr', 'de', 'el', 'ht', 'he', 'hi', 'mww', 'hu', 'is', 'id', 'it', 'ja', 'sw', - 'tlh', 'ko', 'lv', 'lt', 'mg', 'ms', 'mt', 'nb', 'fa', 'pl', 'pt', 'otq', 'ro', 'ru', 'sm', - 'sr-Cyrl', 'sr-Latn', 'sk', 'sl', 'es', 'sv', 'ty', 'ta', 'te', 'th', 'to', 'tr', 'uk', 'ur', - 'vi', 'cy', 'yua'. + documents that don't specify the to language explicitly. Possible values include: "af", "ar", + "bn", "bs", "bg", "yue", "ca", "zh-Hans", "zh-Hant", "hr", "cs", "da", "nl", "en", "et", "fj", + "fil", "fi", "fr", "de", "el", "ht", "he", "hi", "mww", "hu", "is", "id", "it", "ja", "sw", + "tlh", "ko", "lv", "lt", "mg", "ms", "mt", "nb", "fa", "pl", "pt", "otq", "ro", "ru", "sm", + "sr-Cyrl", "sr-Latn", "sk", "sl", "es", "sv", "ty", "ta", "te", "th", "to", "tr", "uk", "ur", + "vi", "cy", "yua". :type default_to_language_code: str or - ~search_service_client.models.TextTranslationSkillLanguage + ~azure.search.documents.models.TextTranslationSkillLanguage :param default_from_language_code: The language code to translate documents from for documents - that don't specify the from language explicitly. Possible values include: 'af', 'ar', 'bn', - 'bs', 'bg', 'yue', 'ca', 'zh-Hans', 'zh-Hant', 'hr', 'cs', 'da', 'nl', 'en', 'et', 'fj', 'fil', - 'fi', 'fr', 'de', 'el', 'ht', 'he', 'hi', 'mww', 'hu', 'is', 'id', 'it', 'ja', 'sw', 'tlh', - 'ko', 'lv', 'lt', 'mg', 'ms', 'mt', 'nb', 'fa', 'pl', 'pt', 'otq', 'ro', 'ru', 'sm', 'sr-Cyrl', - 'sr-Latn', 'sk', 'sl', 'es', 'sv', 'ty', 'ta', 'te', 'th', 'to', 'tr', 'uk', 'ur', 'vi', 'cy', - 'yua'. + that don't specify the from language explicitly. 
Possible values include: "af", "ar", "bn", + "bs", "bg", "yue", "ca", "zh-Hans", "zh-Hant", "hr", "cs", "da", "nl", "en", "et", "fj", "fil", + "fi", "fr", "de", "el", "ht", "he", "hi", "mww", "hu", "is", "id", "it", "ja", "sw", "tlh", + "ko", "lv", "lt", "mg", "ms", "mt", "nb", "fa", "pl", "pt", "otq", "ro", "ru", "sm", "sr-Cyrl", + "sr-Latn", "sk", "sl", "es", "sv", "ty", "ta", "te", "th", "to", "tr", "uk", "ur", "vi", "cy", + "yua". :type default_from_language_code: str or - ~search_service_client.models.TextTranslationSkillLanguage + ~azure.search.documents.models.TextTranslationSkillLanguage :param suggested_from: The language code to translate documents from when neither the fromLanguageCode input nor the defaultFromLanguageCode parameter are provided, and the - automatic language detection is unsuccessful. Default is en. Possible values include: 'af', - 'ar', 'bn', 'bs', 'bg', 'yue', 'ca', 'zh-Hans', 'zh-Hant', 'hr', 'cs', 'da', 'nl', 'en', 'et', - 'fj', 'fil', 'fi', 'fr', 'de', 'el', 'ht', 'he', 'hi', 'mww', 'hu', 'is', 'id', 'it', 'ja', - 'sw', 'tlh', 'ko', 'lv', 'lt', 'mg', 'ms', 'mt', 'nb', 'fa', 'pl', 'pt', 'otq', 'ro', 'ru', - 'sm', 'sr-Cyrl', 'sr-Latn', 'sk', 'sl', 'es', 'sv', 'ty', 'ta', 'te', 'th', 'to', 'tr', 'uk', - 'ur', 'vi', 'cy', 'yua'. - :type suggested_from: str or ~search_service_client.models.TextTranslationSkillLanguage + automatic language detection is unsuccessful. Default is en. Possible values include: "af", + "ar", "bn", "bs", "bg", "yue", "ca", "zh-Hans", "zh-Hant", "hr", "cs", "da", "nl", "en", "et", + "fj", "fil", "fi", "fr", "de", "el", "ht", "he", "hi", "mww", "hu", "is", "id", "it", "ja", + "sw", "tlh", "ko", "lv", "lt", "mg", "ms", "mt", "nb", "fa", "pl", "pt", "otq", "ro", "ru", + "sm", "sr-Cyrl", "sr-Latn", "sk", "sl", "es", "sv", "ty", "ta", "te", "th", "to", "tr", "uk", + "ur", "vi", "cy", "yua". + :type suggested_from: str or ~azure.search.documents.models.TextTranslationSkillLanguage """ _validation = { @@ -4918,7 +5042,7 @@ def __init__( ): super(TextTranslationSkill, self).__init__(**kwargs) self.odata_type = '#Microsoft.Skills.Text.TranslationSkill' - self.default_to_language_code = kwargs.get('default_to_language_code', None) + self.default_to_language_code = kwargs['default_to_language_code'] self.default_from_language_code = kwargs.get('default_from_language_code', None) self.suggested_from = kwargs.get('suggested_from', None) @@ -4946,52 +5070,7 @@ def __init__( **kwargs ): super(TextWeights, self).__init__(**kwargs) - self.weights = kwargs.get('weights', None) - - -class TokenInfo(msrest.serialization.Model): - """Information about a token returned by an analyzer. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar token: Required. The token returned by the analyzer. - :vartype token: str - :ivar start_offset: Required. The index of the first character of the token in the input text. - :vartype start_offset: int - :ivar end_offset: Required. The index of the last character of the token in the input text. - :vartype end_offset: int - :ivar position: Required. The position of the token in the input text relative to other tokens. - The first token in the input text has position 0, the next has position 1, and so on. Depending - on the analyzer used, some tokens might have the same position, for example if they are - synonyms of each other. 
- :vartype position: int - """ - - _validation = { - 'token': {'required': True, 'readonly': True}, - 'start_offset': {'required': True, 'readonly': True}, - 'end_offset': {'required': True, 'readonly': True}, - 'position': {'required': True, 'readonly': True}, - } - - _attribute_map = { - 'token': {'key': 'token', 'type': 'str'}, - 'start_offset': {'key': 'startOffset', 'type': 'int'}, - 'end_offset': {'key': 'endOffset', 'type': 'int'}, - 'position': {'key': 'position', 'type': 'int'}, - } - - def __init__( - self, - **kwargs - ): - super(TokenInfo, self).__init__(**kwargs) - self.token = None - self.start_offset = None - self.end_offset = None - self.position = None + self.weights = kwargs['weights'] class TruncateTokenFilter(TokenFilter): @@ -5031,7 +5110,7 @@ def __init__( self.length = kwargs.get('length', 300) -class UaxUrlEmailTokenizer(Tokenizer): +class UaxUrlEmailTokenizer(LexicalTokenizer): """Tokenizes urls and emails as one token. This tokenizer is implemented using Apache Lucene. All required parameters must be populated in order to send to Azure. @@ -5106,7 +5185,7 @@ def __init__( self.only_on_same_position = kwargs.get('only_on_same_position', False) -class WebApiSkill(Skill): +class WebApiSkill(SearchIndexerSkill): """A skill that can call a Web API endpoint, allowing you to extend a skillset by having it call your custom code. All required parameters must be populated in order to send to Azure. @@ -5126,10 +5205,10 @@ class WebApiSkill(Skill): :type context: str :param inputs: Required. Inputs of the skills could be a column in the source data set, or the output of an upstream skill. - :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :type inputs: list[~azure.search.documents.models.InputFieldMappingEntry] :param outputs: Required. The output of a skill is either a field in a search index, or a value that can be consumed as an input by another skill. - :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + :type outputs: list[~azure.search.documents.models.OutputFieldMappingEntry] :param uri: Required. The url for the Web API. :type uri: str :param http_headers: The headers required to make the http request. @@ -5173,7 +5252,7 @@ def __init__( ): super(WebApiSkill, self).__init__(**kwargs) self.odata_type = '#Microsoft.Skills.Custom.WebApiSkill' - self.uri = kwargs.get('uri', None) + self.uri = kwargs['uri'] self.http_headers = kwargs.get('http_headers', None) self.http_method = kwargs.get('http_method', None) self.timeout = kwargs.get('timeout', None) diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/models/_models_py3.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/models/_models_py3.py index 40d12418b74e..0dd3e1c170f3 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/models/_models_py3.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/models/_models_py3.py @@ -1,6 +1,8 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator}) +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- @@ -10,75 +12,52 @@ from azure.core.exceptions import HttpResponseError import msrest.serialization +from ._search_service_client_enums import * -class AccessCondition(msrest.serialization.Model): - """Parameter group. - - :param if_match: Defines the If-Match condition. The operation will be performed only if the - ETag on the server matches this value. - :type if_match: str - :param if_none_match: Defines the If-None-Match condition. The operation will be performed only - if the ETag on the server does not match this value. - :type if_none_match: str - """ - - _attribute_map = { - 'if_match': {'key': 'If-Match', 'type': 'str'}, - 'if_none_match': {'key': 'If-None-Match', 'type': 'str'}, - } - - def __init__( - self, - *, - if_match: Optional[str] = None, - if_none_match: Optional[str] = None, - **kwargs - ): - super(AccessCondition, self).__init__(**kwargs) - self.if_match = if_match - self.if_none_match = if_none_match +class AnalyzedTokenInfo(msrest.serialization.Model): + """Information about a token returned by an analyzer. -class Analyzer(msrest.serialization.Model): - """Base type for analyzers. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: CustomAnalyzer, PatternAnalyzer, StandardAnalyzer, StopAnalyzer. + Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. - :param odata_type: Required. Identifies the concrete type of the analyzer.Constant filled by - server. - :type odata_type: str - :param name: Required. The name of the analyzer. It must only contain letters, digits, spaces, - dashes or underscores, can only start and end with alphanumeric characters, and is limited to - 128 characters. - :type name: str + :ivar token: Required. The token returned by the analyzer. + :vartype token: str + :ivar start_offset: Required. The index of the first character of the token in the input text. + :vartype start_offset: int + :ivar end_offset: Required. The index of the last character of the token in the input text. + :vartype end_offset: int + :ivar position: Required. The position of the token in the input text relative to other tokens. + The first token in the input text has position 0, the next has position 1, and so on. Depending + on the analyzer used, some tokens might have the same position, for example if they are + synonyms of each other. 
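# A sketch of the read-only semantics introduced below: every AnalyzedTokenInfo
# field is populated by the server, so a locally constructed instance starts
# out as None until a response is deserialized into it.
from azure.search.documents.models import AnalyzedTokenInfo

token_info = AnalyzedTokenInfo()
print(token_info.token, token_info.position)  # None None until deserialized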
+ :vartype position: int """ _validation = { - 'odata_type': {'required': True}, - 'name': {'required': True}, + 'token': {'required': True, 'readonly': True}, + 'start_offset': {'required': True, 'readonly': True}, + 'end_offset': {'required': True, 'readonly': True}, + 'position': {'required': True, 'readonly': True}, } _attribute_map = { - 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - } - - _subtype_map = { - 'odata_type': {'#Microsoft.Azure.Search.CustomAnalyzer': 'CustomAnalyzer', '#Microsoft.Azure.Search.PatternAnalyzer': 'PatternAnalyzer', '#Microsoft.Azure.Search.StandardAnalyzer': 'StandardAnalyzer', '#Microsoft.Azure.Search.StopAnalyzer': 'StopAnalyzer'} + 'token': {'key': 'token', 'type': 'str'}, + 'start_offset': {'key': 'startOffset', 'type': 'int'}, + 'end_offset': {'key': 'endOffset', 'type': 'int'}, + 'position': {'key': 'position', 'type': 'int'}, } def __init__( self, - *, - name: str, **kwargs ): - super(Analyzer, self).__init__(**kwargs) - self.odata_type = None - self.name = name + super(AnalyzedTokenInfo, self).__init__(**kwargs) + self.token = None + self.start_offset = None + self.end_offset = None + self.position = None class AnalyzeRequest(msrest.serialization.Model): @@ -90,33 +69,33 @@ class AnalyzeRequest(msrest.serialization.Model): :type text: str :param analyzer: The name of the analyzer to use to break the given text. If this parameter is not specified, you must specify a tokenizer instead. The tokenizer and analyzer parameters are - mutually exclusive. Possible values include: 'ar.microsoft', 'ar.lucene', 'hy.lucene', - 'bn.microsoft', 'eu.lucene', 'bg.microsoft', 'bg.lucene', 'ca.microsoft', 'ca.lucene', 'zh- - Hans.microsoft', 'zh-Hans.lucene', 'zh-Hant.microsoft', 'zh-Hant.lucene', 'hr.microsoft', - 'cs.microsoft', 'cs.lucene', 'da.microsoft', 'da.lucene', 'nl.microsoft', 'nl.lucene', - 'en.microsoft', 'en.lucene', 'et.microsoft', 'fi.microsoft', 'fi.lucene', 'fr.microsoft', - 'fr.lucene', 'gl.lucene', 'de.microsoft', 'de.lucene', 'el.microsoft', 'el.lucene', - 'gu.microsoft', 'he.microsoft', 'hi.microsoft', 'hi.lucene', 'hu.microsoft', 'hu.lucene', - 'is.microsoft', 'id.microsoft', 'id.lucene', 'ga.lucene', 'it.microsoft', 'it.lucene', - 'ja.microsoft', 'ja.lucene', 'kn.microsoft', 'ko.microsoft', 'ko.lucene', 'lv.microsoft', - 'lv.lucene', 'lt.microsoft', 'ml.microsoft', 'ms.microsoft', 'mr.microsoft', 'nb.microsoft', - 'no.lucene', 'fa.lucene', 'pl.microsoft', 'pl.lucene', 'pt-BR.microsoft', 'pt-BR.lucene', 'pt- - PT.microsoft', 'pt-PT.lucene', 'pa.microsoft', 'ro.microsoft', 'ro.lucene', 'ru.microsoft', - 'ru.lucene', 'sr-cyrillic.microsoft', 'sr-latin.microsoft', 'sk.microsoft', 'sl.microsoft', - 'es.microsoft', 'es.lucene', 'sv.microsoft', 'sv.lucene', 'ta.microsoft', 'te.microsoft', - 'th.microsoft', 'th.lucene', 'tr.microsoft', 'tr.lucene', 'uk.microsoft', 'ur.microsoft', - 'vi.microsoft', 'standard.lucene', 'standardasciifolding.lucene', 'keyword', 'pattern', - 'simple', 'stop', 'whitespace'. - :type analyzer: str or ~search_service_client.models.AnalyzerName + mutually exclusive. 
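# A sketch of the AnalyzeRequest model below. `analyzer` and `tokenizer` are
# mutually exclusive; the string values come from the name lists quoted in the
# docstring, and plain strings are accepted in place of the enum members.
from azure.search.documents.models import AnalyzeRequest

by_analyzer = AnalyzeRequest(text="The quick brown fox", analyzer="standard.lucene")

# Tokenizer-based variant; token_filters may only be set alongside `tokenizer`:
by_tokenizer = AnalyzeRequest(
    text="The quick brown fox",
    tokenizer="whitespace",
    token_filters=["lowercase"],
)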
Possible values include: "ar.microsoft", "ar.lucene", "hy.lucene", + "bn.microsoft", "eu.lucene", "bg.microsoft", "bg.lucene", "ca.microsoft", "ca.lucene", "zh- + Hans.microsoft", "zh-Hans.lucene", "zh-Hant.microsoft", "zh-Hant.lucene", "hr.microsoft", + "cs.microsoft", "cs.lucene", "da.microsoft", "da.lucene", "nl.microsoft", "nl.lucene", + "en.microsoft", "en.lucene", "et.microsoft", "fi.microsoft", "fi.lucene", "fr.microsoft", + "fr.lucene", "gl.lucene", "de.microsoft", "de.lucene", "el.microsoft", "el.lucene", + "gu.microsoft", "he.microsoft", "hi.microsoft", "hi.lucene", "hu.microsoft", "hu.lucene", + "is.microsoft", "id.microsoft", "id.lucene", "ga.lucene", "it.microsoft", "it.lucene", + "ja.microsoft", "ja.lucene", "kn.microsoft", "ko.microsoft", "ko.lucene", "lv.microsoft", + "lv.lucene", "lt.microsoft", "ml.microsoft", "ms.microsoft", "mr.microsoft", "nb.microsoft", + "no.lucene", "fa.lucene", "pl.microsoft", "pl.lucene", "pt-BR.microsoft", "pt-BR.lucene", "pt- + PT.microsoft", "pt-PT.lucene", "pa.microsoft", "ro.microsoft", "ro.lucene", "ru.microsoft", + "ru.lucene", "sr-cyrillic.microsoft", "sr-latin.microsoft", "sk.microsoft", "sl.microsoft", + "es.microsoft", "es.lucene", "sv.microsoft", "sv.lucene", "ta.microsoft", "te.microsoft", + "th.microsoft", "th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", + "vi.microsoft", "standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", + "simple", "stop", "whitespace". + :type analyzer: str or ~azure.search.documents.models.LexicalAnalyzerName :param tokenizer: The name of the tokenizer to use to break the given text. If this parameter is not specified, you must specify an analyzer instead. The tokenizer and analyzer parameters - are mutually exclusive. Possible values include: 'classic', 'edgeNGram', 'keyword_v2', - 'letter', 'lowercase', 'microsoft_language_tokenizer', 'microsoft_language_stemming_tokenizer', - 'nGram', 'path_hierarchy_v2', 'pattern', 'standard_v2', 'uax_url_email', 'whitespace'. - :type tokenizer: str or ~search_service_client.models.TokenizerName + are mutually exclusive. Possible values include: "classic", "edgeNGram", "keyword_v2", + "letter", "lowercase", "microsoft_language_tokenizer", "microsoft_language_stemming_tokenizer", + "nGram", "path_hierarchy_v2", "pattern", "standard_v2", "uax_url_email", "whitespace". + :type tokenizer: str or ~azure.search.documents.models.LexicalTokenizerName :param token_filters: An optional list of token filters to use when breaking the given text. This parameter can only be set when using the tokenizer parameter. - :type token_filters: list[str or ~search_service_client.models.TokenFilterName] + :type token_filters: list[str or ~azure.search.documents.models.TokenFilterName] :param char_filters: An optional list of character filters to use when breaking the given text. This parameter can only be set when using the tokenizer parameter. :type char_filters: list[str] @@ -138,8 +117,8 @@ def __init__( self, *, text: str, - analyzer: Optional[Union[str, "AnalyzerName"]] = None, - tokenizer: Optional[Union[str, "TokenizerName"]] = None, + analyzer: Optional[Union[str, "LexicalAnalyzerName"]] = None, + tokenizer: Optional[Union[str, "LexicalTokenizerName"]] = None, token_filters: Optional[List[Union[str, "TokenFilterName"]]] = None, char_filters: Optional[List[str]] = None, **kwargs @@ -158,7 +137,7 @@ class AnalyzeResult(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. :param tokens: Required. 
The list of tokens returned by the analyzer specified in the request. - :type tokens: list[~search_service_client.models.TokenInfo] + :type tokens: list[~azure.search.documents.models.AnalyzedTokenInfo] """ _validation = { @@ -166,13 +145,13 @@ class AnalyzeResult(msrest.serialization.Model): } _attribute_map = { - 'tokens': {'key': 'tokens', 'type': '[TokenInfo]'}, + 'tokens': {'key': 'tokens', 'type': '[AnalyzedTokenInfo]'}, } def __init__( self, *, - tokens: List["TokenInfo"], + tokens: List["AnalyzedTokenInfo"], **kwargs ): super(AnalyzeResult, self).__init__(**kwargs) @@ -217,7 +196,7 @@ def __init__( **kwargs ): super(TokenFilter, self).__init__(**kwargs) - self.odata_type = None + self.odata_type: Optional[str] = None self.name = name @@ -257,7 +236,7 @@ def __init__( **kwargs ): super(AsciiFoldingTokenFilter, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.AsciiFoldingTokenFilter' + self.odata_type: str = '#Microsoft.Azure.Search.AsciiFoldingTokenFilter' self.preserve_original = preserve_original @@ -295,6 +274,78 @@ def __init__( self.application_secret = application_secret +class Similarity(msrest.serialization.Model): + """Base type for similarity algorithms. Similarity algorithms are used to calculate scores that tie queries to documents. The higher the score, the more relevant the document is to that specific query. Those scores are used to rank the search results. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: BM25Similarity, ClassicSimilarity. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + """ + + _validation = { + 'odata_type': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + } + + _subtype_map = { + 'odata_type': {'#Microsoft.Azure.Search.BM25Similarity': 'BM25Similarity', '#Microsoft.Azure.Search.ClassicSimilarity': 'ClassicSimilarity'} + } + + def __init__( + self, + **kwargs + ): + super(Similarity, self).__init__(**kwargs) + self.odata_type: Optional[str] = None + + +class BM25Similarity(Similarity): + """Ranking function based on the Okapi BM25 similarity algorithm. BM25 is a TF-IDF-like algorithm that includes length normalization (controlled by the 'b' parameter) as well as term frequency saturation (controlled by the 'k1' parameter). + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + :param k1: This property controls the scaling function between the term frequency of each + matching terms and the final relevance score of a document-query pair. By default, a value of + 1.2 is used. A value of 0.0 means the score does not scale with an increase in term frequency. + :type k1: float + :param b: This property controls how the length of a document affects the relevance score. By + default, a value of 0.75 is used. A value of 0.0 means no length normalization is applied, + while a value of 1.0 means the score is fully normalized by the length of the document. 
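# A sketch of the new BM25 similarity model. Per the docstring, omitting k1 and
# b keeps the service defaults (1.2 and 0.75 respectively).
from azure.search.documents.models import BM25Similarity

bm25 = BM25Similarity(k1=1.2, b=0.75)  # explicit defaults: TF saturation, length norm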
+ :type b: float + """ + + _validation = { + 'odata_type': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'k1': {'key': 'k1', 'type': 'float'}, + 'b': {'key': 'b', 'type': 'float'}, + } + + def __init__( + self, + *, + k1: Optional[float] = None, + b: Optional[float] = None, + **kwargs + ): + super(BM25Similarity, self).__init__(**kwargs) + self.odata_type: str = '#Microsoft.Azure.Search.BM25Similarity' + self.k1 = k1 + self.b = b + + class CharFilter(msrest.serialization.Model): """Base type for character filters. @@ -333,12 +384,12 @@ def __init__( **kwargs ): super(CharFilter, self).__init__(**kwargs) - self.odata_type = None + self.odata_type: Optional[str] = None self.name = name class CjkBigramTokenFilter(TokenFilter): - """Forms bigrams of CJK terms that are generated from StandardTokenizer. This token filter is implemented using Apache Lucene. + """Forms bigrams of CJK terms that are generated from the standard tokenizer. This token filter is implemented using Apache Lucene. All required parameters must be populated in order to send to Azure. @@ -350,7 +401,7 @@ class CjkBigramTokenFilter(TokenFilter): limited to 128 characters. :type name: str :param ignore_scripts: The scripts to ignore. - :type ignore_scripts: list[str or ~search_service_client.models.CjkBigramTokenFilterScripts] + :type ignore_scripts: list[str or ~azure.search.documents.models.CjkBigramTokenFilterScripts] :param output_unigrams: A value indicating whether to output both unigrams and bigrams (if true), or just bigrams (if false). Default is false. :type output_unigrams: bool @@ -377,16 +428,41 @@ def __init__( **kwargs ): super(CjkBigramTokenFilter, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.CjkBigramTokenFilter' + self.odata_type: str = '#Microsoft.Azure.Search.CjkBigramTokenFilter' self.ignore_scripts = ignore_scripts self.output_unigrams = output_unigrams -class Tokenizer(msrest.serialization.Model): +class ClassicSimilarity(Similarity): + """Legacy similarity algorithm which uses the Lucene TFIDFSimilarity implementation of TF-IDF. This variation of TF-IDF introduces static document length normalization as well as coordinating factors that penalize documents that only partially match the searched queries. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Constant filled by server. + :type odata_type: str + """ + + _validation = { + 'odata_type': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + super(ClassicSimilarity, self).__init__(**kwargs) + self.odata_type: str = '#Microsoft.Azure.Search.ClassicSimilarity' + + +class LexicalTokenizer(msrest.serialization.Model): """Base type for tokenizers. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: ClassicTokenizer, EdgeNGramTokenizer, KeywordTokenizer, KeywordTokenizerV2, MicrosoftLanguageStemmingTokenizer, MicrosoftLanguageTokenizer, NGramTokenizer, PathHierarchyTokenizerV2, PatternTokenizer, StandardTokenizer, StandardTokenizerV2, UaxUrlEmailTokenizer. + sub-classes are: ClassicTokenizer, EdgeNGramTokenizer, KeywordTokenizer, KeywordTokenizerV2, MicrosoftLanguageStemmingTokenizer, MicrosoftLanguageTokenizer, NGramTokenizer, PathHierarchyTokenizerV2, PatternTokenizer, LuceneStandardTokenizer, LuceneStandardTokenizerV2, UaxUrlEmailTokenizer. 
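# A sketch of polymorphic handling through the renamed LexicalTokenizer base:
# msrest dispatches on the '@odata.type' discriminator via _subtype_map, so a
# wire payload naming StandardTokenizerV2 deserializes into the renamed class.
from azure.search.documents.models import LexicalTokenizer

tokenizer = LexicalTokenizer.deserialize({
    '@odata.type': '#Microsoft.Azure.Search.StandardTokenizerV2',
    'name': 'my_tokenizer',
})
print(type(tokenizer).__name__)  # LuceneStandardTokenizerV2, per the subtype map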
All required parameters must be populated in order to send to Azure. @@ -410,7 +486,7 @@ class Tokenizer(msrest.serialization.Model): } _subtype_map = { - 'odata_type': {'#Microsoft.Azure.Search.ClassicTokenizer': 'ClassicTokenizer', '#Microsoft.Azure.Search.EdgeNGramTokenizer': 'EdgeNGramTokenizer', '#Microsoft.Azure.Search.KeywordTokenizer': 'KeywordTokenizer', '#Microsoft.Azure.Search.KeywordTokenizerV2': 'KeywordTokenizerV2', '#Microsoft.Azure.Search.MicrosoftLanguageStemmingTokenizer': 'MicrosoftLanguageStemmingTokenizer', '#Microsoft.Azure.Search.MicrosoftLanguageTokenizer': 'MicrosoftLanguageTokenizer', '#Microsoft.Azure.Search.NGramTokenizer': 'NGramTokenizer', '#Microsoft.Azure.Search.PathHierarchyTokenizerV2': 'PathHierarchyTokenizerV2', '#Microsoft.Azure.Search.PatternTokenizer': 'PatternTokenizer', '#Microsoft.Azure.Search.StandardTokenizer': 'StandardTokenizer', '#Microsoft.Azure.Search.StandardTokenizerV2': 'StandardTokenizerV2', '#Microsoft.Azure.Search.UaxUrlEmailTokenizer': 'UaxUrlEmailTokenizer'} + 'odata_type': {'#Microsoft.Azure.Search.ClassicTokenizer': 'ClassicTokenizer', '#Microsoft.Azure.Search.EdgeNGramTokenizer': 'EdgeNGramTokenizer', '#Microsoft.Azure.Search.KeywordTokenizer': 'KeywordTokenizer', '#Microsoft.Azure.Search.KeywordTokenizerV2': 'KeywordTokenizerV2', '#Microsoft.Azure.Search.MicrosoftLanguageStemmingTokenizer': 'MicrosoftLanguageStemmingTokenizer', '#Microsoft.Azure.Search.MicrosoftLanguageTokenizer': 'MicrosoftLanguageTokenizer', '#Microsoft.Azure.Search.NGramTokenizer': 'NGramTokenizer', '#Microsoft.Azure.Search.PathHierarchyTokenizerV2': 'PathHierarchyTokenizerV2', '#Microsoft.Azure.Search.PatternTokenizer': 'PatternTokenizer', '#Microsoft.Azure.Search.StandardTokenizer': 'LuceneStandardTokenizer', '#Microsoft.Azure.Search.StandardTokenizerV2': 'LuceneStandardTokenizerV2', '#Microsoft.Azure.Search.UaxUrlEmailTokenizer': 'UaxUrlEmailTokenizer'} } def __init__( @@ -419,12 +495,12 @@ def __init__( name: str, **kwargs ): - super(Tokenizer, self).__init__(**kwargs) - self.odata_type = None + super(LexicalTokenizer, self).__init__(**kwargs) + self.odata_type: Optional[str] = None self.name = name -class ClassicTokenizer(Tokenizer): +class ClassicTokenizer(LexicalTokenizer): """Grammar-based tokenizer that is suitable for processing most European-language documents. This tokenizer is implemented using Apache Lucene. All required parameters must be populated in order to send to Azure. 
@@ -461,7 +537,7 @@ def __init__( **kwargs ): super(ClassicTokenizer, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.ClassicTokenizer' + self.odata_type: str = '#Microsoft.Azure.Search.ClassicTokenizer' self.max_token_length = max_token_length @@ -500,7 +576,7 @@ def __init__( **kwargs ): super(CognitiveServicesAccount, self).__init__(**kwargs) - self.odata_type = None + self.odata_type: Optional[str] = None self.description = description @@ -538,7 +614,7 @@ def __init__( **kwargs ): super(CognitiveServicesAccountKey, self).__init__(description=description, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.CognitiveServicesByKey' + self.odata_type: str = '#Microsoft.Azure.Search.CognitiveServicesByKey' self.key = key @@ -589,13 +665,13 @@ def __init__( **kwargs ): super(CommonGramTokenFilter, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.CommonGramTokenFilter' + self.odata_type: str = '#Microsoft.Azure.Search.CommonGramTokenFilter' self.common_words = common_words self.ignore_case = ignore_case self.use_query_mode = use_query_mode -class Skill(msrest.serialization.Model): +class SearchIndexerSkill(msrest.serialization.Model): """Base type for skills. You probably want to use the sub-classes and not this class directly. Known @@ -618,10 +694,10 @@ class Skill(msrest.serialization.Model): :type context: str :param inputs: Required. Inputs of the skills could be a column in the source data set, or the output of an upstream skill. - :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :type inputs: list[~azure.search.documents.models.InputFieldMappingEntry] :param outputs: Required. The output of a skill is either a field in a search index, or a value that can be consumed as an input by another skill. - :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + :type outputs: list[~azure.search.documents.models.OutputFieldMappingEntry] """ _validation = { @@ -653,8 +729,8 @@ def __init__( context: Optional[str] = None, **kwargs ): - super(Skill, self).__init__(**kwargs) - self.odata_type = None + super(SearchIndexerSkill, self).__init__(**kwargs) + self.odata_type: Optional[str] = None self.name = name self.description = description self.context = context @@ -662,7 +738,7 @@ def __init__( self.outputs = outputs -class ConditionalSkill(Skill): +class ConditionalSkill(SearchIndexerSkill): """A skill that enables scenarios that require a Boolean operation to determine the data to assign to an output. All required parameters must be populated in order to send to Azure. @@ -682,10 +758,10 @@ class ConditionalSkill(Skill): :type context: str :param inputs: Required. Inputs of the skills could be a column in the source data set, or the output of an upstream skill. - :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :type inputs: list[~azure.search.documents.models.InputFieldMappingEntry] :param outputs: Required. The output of a skill is either a field in a search index, or a value that can be consumed as an input by another skill. 
- :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + :type outputs: list[~azure.search.documents.models.OutputFieldMappingEntry] """ _validation = { @@ -714,7 +790,7 @@ def __init__( **kwargs ): super(ConditionalSkill, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) - self.odata_type = '#Microsoft.Skills.Util.ConditionalSkill' + self.odata_type: str = '#Microsoft.Skills.Util.ConditionalSkill' class CorsOptions(msrest.serialization.Model): @@ -752,7 +828,49 @@ def __init__( self.max_age_in_seconds = max_age_in_seconds -class CustomAnalyzer(Analyzer): +class LexicalAnalyzer(msrest.serialization.Model): + """Base type for analyzers. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: CustomAnalyzer, PatternAnalyzer, LuceneStandardAnalyzer, StopAnalyzer. + + All required parameters must be populated in order to send to Azure. + + :param odata_type: Required. Identifies the concrete type of the analyzer.Constant filled by + server. + :type odata_type: str + :param name: Required. The name of the analyzer. It must only contain letters, digits, spaces, + dashes or underscores, can only start and end with alphanumeric characters, and is limited to + 128 characters. + :type name: str + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + } + + _subtype_map = { + 'odata_type': {'#Microsoft.Azure.Search.CustomAnalyzer': 'CustomAnalyzer', '#Microsoft.Azure.Search.PatternAnalyzer': 'PatternAnalyzer', '#Microsoft.Azure.Search.StandardAnalyzer': 'LuceneStandardAnalyzer', '#Microsoft.Azure.Search.StopAnalyzer': 'StopAnalyzer'} + } + + def __init__( + self, + *, + name: str, + **kwargs + ): + super(LexicalAnalyzer, self).__init__(**kwargs) + self.odata_type: Optional[str] = None + self.name = name + + +class CustomAnalyzer(LexicalAnalyzer): """Allows you to take control over the process of converting text into indexable/searchable tokens. It's a user-defined configuration consisting of a single predefined tokenizer and one or more filters. The tokenizer is responsible for breaking text into tokens, and the filters for modifying tokens emitted by the tokenizer. All required parameters must be populated in order to send to Azure. @@ -765,15 +883,15 @@ class CustomAnalyzer(Analyzer): 128 characters. :type name: str :param tokenizer: Required. The name of the tokenizer to use to divide continuous text into a - sequence of tokens, such as breaking a sentence into words. Possible values include: 'classic', - 'edgeNGram', 'keyword_v2', 'letter', 'lowercase', 'microsoft_language_tokenizer', - 'microsoft_language_stemming_tokenizer', 'nGram', 'path_hierarchy_v2', 'pattern', - 'standard_v2', 'uax_url_email', 'whitespace'. - :type tokenizer: str or ~search_service_client.models.TokenizerName + sequence of tokens, such as breaking a sentence into words. Possible values include: "classic", + "edgeNGram", "keyword_v2", "letter", "lowercase", "microsoft_language_tokenizer", + "microsoft_language_stemming_tokenizer", "nGram", "path_hierarchy_v2", "pattern", + "standard_v2", "uax_url_email", "whitespace". + :type tokenizer: str or ~azure.search.documents.models.LexicalTokenizerName :param token_filters: A list of token filters used to filter out or modify the tokens generated by a tokenizer. 
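# A sketch of a skill built on the renamed SearchIndexerSkill base. The
# condition/whenTrue/whenFalse input names and the "output" output name are an
# assumption drawn from the service's documented ConditionalSkill contract, and
# the entry constructors are assumed from the models the docstrings cite.
from azure.search.documents.models import (
    ConditionalSkill,
    InputFieldMappingEntry,
    OutputFieldMappingEntry,
)

conditional = ConditionalSkill(
    inputs=[
        InputFieldMappingEntry(name="condition", source="= $(/document/language) == 'fr'"),
        InputFieldMappingEntry(name="whenTrue", source="/document/frenchText"),
        InputFieldMappingEntry(name="whenFalse", source="/document/text"),
    ],
    outputs=[OutputFieldMappingEntry(name="output", target_name="chosenText")],
)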
For example, you can specify a lowercase filter that converts all characters to lowercase. The filters are run in the order in which they are listed. - :type token_filters: list[str or ~search_service_client.models.TokenFilterName] + :type token_filters: list[str or ~azure.search.documents.models.TokenFilterName] :param char_filters: A list of character filters used to prepare input text before it is processed by the tokenizer. For instance, they can replace certain characters or symbols. The filters are run in the order in which they are listed. @@ -798,13 +916,13 @@ def __init__( self, *, name: str, - tokenizer: Union[str, "TokenizerName"], + tokenizer: Union[str, "LexicalTokenizerName"], token_filters: Optional[List[Union[str, "TokenFilterName"]]] = None, char_filters: Optional[List[str]] = None, **kwargs ): super(CustomAnalyzer, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.CustomAnalyzer' + self.odata_type: str = '#Microsoft.Azure.Search.CustomAnalyzer' self.tokenizer = tokenizer self.token_filters = token_filters self.char_filters = char_filters @@ -840,41 +958,7 @@ def __init__( **kwargs ): super(DataChangeDetectionPolicy, self).__init__(**kwargs) - self.odata_type = None - - -class DataContainer(msrest.serialization.Model): - """Represents information about the entity (such as Azure SQL table or CosmosDB collection) that will be indexed. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. The name of the table or view (for Azure SQL data source) or collection - (for CosmosDB data source) that will be indexed. - :type name: str - :param query: A query that is applied to this data container. The syntax and meaning of this - parameter is datasource-specific. Not supported by Azure SQL datasources. - :type query: str - """ - - _validation = { - 'name': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'query': {'key': 'query', 'type': 'str'}, - } - - def __init__( - self, - *, - name: str, - query: Optional[str] = None, - **kwargs - ): - super(DataContainer, self).__init__(**kwargs) - self.name = name - self.query = query + self.odata_type: Optional[str] = None class DataDeletionDetectionPolicy(msrest.serialization.Model): @@ -907,73 +991,7 @@ def __init__( **kwargs ): super(DataDeletionDetectionPolicy, self).__init__(**kwargs) - self.odata_type = None - - -class DataSource(msrest.serialization.Model): - """Represents a datasource definition, which can be used to configure an indexer. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. The name of the datasource. - :type name: str - :param description: The description of the datasource. - :type description: str - :param type: Required. The type of the datasource. Possible values include: 'azuresql', - 'cosmosdb', 'azureblob', 'azuretable', 'mysql'. - :type type: str or ~search_service_client.models.DataSourceType - :param credentials: Required. Credentials for the datasource. - :type credentials: ~search_service_client.models.DataSourceCredentials - :param container: Required. The data container for the datasource. - :type container: ~search_service_client.models.DataContainer - :param data_change_detection_policy: The data change detection policy for the datasource. - :type data_change_detection_policy: ~search_service_client.models.DataChangeDetectionPolicy - :param data_deletion_detection_policy: The data deletion detection policy for the datasource. 
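# A sketch of the analyzer above using the renamed name enums; "standard_v2"
# and "lowercase" are values quoted in the LexicalTokenizerName and
# TokenFilterName lists, and plain strings are accepted in place of the enums.
from azure.search.documents.models import CustomAnalyzer

analyzer = CustomAnalyzer(
    name="my_analyzer",
    tokenizer="standard_v2",      # required: divides text into tokens
    token_filters=["lowercase"],  # applied in order after the tokenizer
)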
- :type data_deletion_detection_policy: ~search_service_client.models.DataDeletionDetectionPolicy - :param e_tag: The ETag of the DataSource. - :type e_tag: str - """ - - _validation = { - 'name': {'required': True}, - 'type': {'required': True}, - 'credentials': {'required': True}, - 'container': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'description': {'key': 'description', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'str'}, - 'credentials': {'key': 'credentials', 'type': 'DataSourceCredentials'}, - 'container': {'key': 'container', 'type': 'DataContainer'}, - 'data_change_detection_policy': {'key': 'dataChangeDetectionPolicy', 'type': 'DataChangeDetectionPolicy'}, - 'data_deletion_detection_policy': {'key': 'dataDeletionDetectionPolicy', 'type': 'DataDeletionDetectionPolicy'}, - 'e_tag': {'key': '@odata\\.etag', 'type': 'str'}, - } - - def __init__( - self, - *, - name: str, - type: Union[str, "DataSourceType"], - credentials: "DataSourceCredentials", - container: "DataContainer", - description: Optional[str] = None, - data_change_detection_policy: Optional["DataChangeDetectionPolicy"] = None, - data_deletion_detection_policy: Optional["DataDeletionDetectionPolicy"] = None, - e_tag: Optional[str] = None, - **kwargs - ): - super(DataSource, self).__init__(**kwargs) - self.name = name - self.description = description - self.type = type - self.credentials = credentials - self.container = container - self.data_change_detection_policy = data_change_detection_policy - self.data_deletion_detection_policy = data_deletion_detection_policy - self.e_tag = e_tag + self.odata_type: Optional[str] = None class DataSourceCredentials(msrest.serialization.Model): @@ -1025,7 +1043,7 @@ def __init__( **kwargs ): super(DefaultCognitiveServicesAccount, self).__init__(description=description, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.DefaultCognitiveServices' + self.odata_type: str = '#Microsoft.Azure.Search.DefaultCognitiveServices' class DictionaryDecompounderTokenFilter(TokenFilter): @@ -1087,7 +1105,7 @@ def __init__( **kwargs ): super(DictionaryDecompounderTokenFilter, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.DictionaryDecompounderTokenFilter' + self.odata_type: str = '#Microsoft.Azure.Search.DictionaryDecompounderTokenFilter' self.word_list = word_list self.min_word_size = min_word_size self.min_subword_size = min_subword_size @@ -1112,9 +1130,9 @@ class ScoringFunction(msrest.serialization.Model): 1.0. :type boost: float :param interpolation: A value indicating how boosting will be interpolated across document - scores; defaults to "Linear". Possible values include: 'linear', 'constant', 'quadratic', - 'logarithmic'. - :type interpolation: str or ~search_service_client.models.ScoringFunctionInterpolation + scores; defaults to "Linear". Possible values include: "linear", "constant", "quadratic", + "logarithmic". + :type interpolation: str or ~azure.search.documents.models.ScoringFunctionInterpolation """ _validation = { @@ -1143,7 +1161,7 @@ def __init__( **kwargs ): super(ScoringFunction, self).__init__(**kwargs) - self.type = None + self.type: Optional[str] = None self.field_name = field_name self.boost = boost self.interpolation = interpolation @@ -1163,11 +1181,11 @@ class DistanceScoringFunction(ScoringFunction): 1.0. :type boost: float :param interpolation: A value indicating how boosting will be interpolated across document - scores; defaults to "Linear". 
Possible values include: 'linear', 'constant', 'quadratic', - 'logarithmic'. - :type interpolation: str or ~search_service_client.models.ScoringFunctionInterpolation + scores; defaults to "Linear". Possible values include: "linear", "constant", "quadratic", + "logarithmic". + :type interpolation: str or ~azure.search.documents.models.ScoringFunctionInterpolation :param parameters: Required. Parameter values for the distance scoring function. - :type parameters: ~search_service_client.models.DistanceScoringParameters + :type parameters: ~azure.search.documents.models.DistanceScoringParameters """ _validation = { @@ -1195,7 +1213,7 @@ def __init__( **kwargs ): super(DistanceScoringFunction, self).__init__(field_name=field_name, boost=boost, interpolation=interpolation, **kwargs) - self.type = 'distance' + self.type: str = 'distance' self.parameters = parameters @@ -1252,8 +1270,8 @@ class EdgeNGramTokenFilter(TokenFilter): :param max_gram: The maximum n-gram length. Default is 2. :type max_gram: int :param side: Specifies which side of the input the n-gram should be generated from. Default is - "front". Possible values include: 'front', 'back'. - :type side: str or ~search_service_client.models.EdgeNGramTokenFilterSide + "front". Possible values include: "front", "back". + :type side: str or ~azure.search.documents.models.EdgeNGramTokenFilterSide """ _validation = { @@ -1279,7 +1297,7 @@ def __init__( **kwargs ): super(EdgeNGramTokenFilter, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.EdgeNGramTokenFilter' + self.odata_type: str = '#Microsoft.Azure.Search.EdgeNGramTokenFilter' self.min_gram = min_gram self.max_gram = max_gram self.side = side @@ -1303,8 +1321,8 @@ class EdgeNGramTokenFilterV2(TokenFilter): :param max_gram: The maximum n-gram length. Default is 2. Maximum is 300. :type max_gram: int :param side: Specifies which side of the input the n-gram should be generated from. Default is - "front". Possible values include: 'front', 'back'. - :type side: str or ~search_service_client.models.EdgeNGramTokenFilterSide + "front". Possible values include: "front", "back". + :type side: str or ~azure.search.documents.models.EdgeNGramTokenFilterSide """ _validation = { @@ -1332,13 +1350,13 @@ def __init__( **kwargs ): super(EdgeNGramTokenFilterV2, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.EdgeNGramTokenFilterV2' + self.odata_type: str = '#Microsoft.Azure.Search.EdgeNGramTokenFilterV2' self.min_gram = min_gram self.max_gram = max_gram self.side = side -class EdgeNGramTokenizer(Tokenizer): +class EdgeNGramTokenizer(LexicalTokenizer): """Tokenizes the input from an edge into n-grams of the given size(s). This tokenizer is implemented using Apache Lucene. All required parameters must be populated in order to send to Azure. @@ -1356,7 +1374,7 @@ class EdgeNGramTokenizer(Tokenizer): :param max_gram: The maximum n-gram length. Default is 2. Maximum is 300. :type max_gram: int :param token_chars: Character classes to keep in the tokens. 
- :type token_chars: list[str or ~search_service_client.models.TokenCharacterKind] + :type token_chars: list[str or ~azure.search.documents.models.TokenCharacterKind] """ _validation = { @@ -1384,7 +1402,7 @@ def __init__( **kwargs ): super(EdgeNGramTokenizer, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.EdgeNGramTokenizer' + self.odata_type: str = '#Microsoft.Azure.Search.EdgeNGramTokenizer' self.min_gram = min_gram self.max_gram = max_gram self.token_chars = token_chars @@ -1425,62 +1443,12 @@ def __init__( **kwargs ): super(ElisionTokenFilter, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.ElisionTokenFilter' + self.odata_type: str = '#Microsoft.Azure.Search.ElisionTokenFilter' self.articles = articles -class EncryptionKey(msrest.serialization.Model): - """A customer-managed encryption key in Azure Key Vault. Keys that you create and manage can be used to encrypt or decrypt data-at-rest in Azure Cognitive Search, such as indexes and synonym maps. - - All required parameters must be populated in order to send to Azure. - - :param key_vault_key_name: Required. The name of your Azure Key Vault key to be used to encrypt - your data at rest. - :type key_vault_key_name: str - :param key_vault_key_version: Required. The version of your Azure Key Vault key to be used to - encrypt your data at rest. - :type key_vault_key_version: str - :param key_vault_uri: Required. The URI of your Azure Key Vault, also referred to as DNS name, - that contains the key to be used to encrypt your data at rest. An example URI might be - https://my-keyvault-name.vault.azure.net. - :type key_vault_uri: str - :param access_credentials: Optional Azure Active Directory credentials used for accessing your - Azure Key Vault. Not required if using managed identity instead. - :type access_credentials: - ~search_service_client.models.AzureActiveDirectoryApplicationCredentials - """ - - _validation = { - 'key_vault_key_name': {'required': True}, - 'key_vault_key_version': {'required': True}, - 'key_vault_uri': {'required': True}, - } - - _attribute_map = { - 'key_vault_key_name': {'key': 'keyVaultKeyName', 'type': 'str'}, - 'key_vault_key_version': {'key': 'keyVaultKeyVersion', 'type': 'str'}, - 'key_vault_uri': {'key': 'keyVaultUri', 'type': 'str'}, - 'access_credentials': {'key': 'accessCredentials', 'type': 'AzureActiveDirectoryApplicationCredentials'}, - } - - def __init__( - self, - *, - key_vault_key_name: str, - key_vault_key_version: str, - key_vault_uri: str, - access_credentials: Optional["AzureActiveDirectoryApplicationCredentials"] = None, - **kwargs - ): - super(EncryptionKey, self).__init__(**kwargs) - self.key_vault_key_name = key_vault_key_name - self.key_vault_key_version = key_vault_key_version - self.key_vault_uri = key_vault_uri - self.access_credentials = access_credentials - - -class EntityRecognitionSkill(Skill): - """Text analytics entity recognition. +class EntityRecognitionSkill(SearchIndexerSkill): + """Text analytics entity recognition. All required parameters must be populated in order to send to Azure. @@ -1499,17 +1467,17 @@ class EntityRecognitionSkill(Skill): :type context: str :param inputs: Required. Inputs of the skills could be a column in the source data set, or the output of an upstream skill. - :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :type inputs: list[~azure.search.documents.models.InputFieldMappingEntry] :param outputs: Required. 
The output of a skill is either a field in a search index, or a value that can be consumed as an input by another skill. - :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + :type outputs: list[~azure.search.documents.models.OutputFieldMappingEntry] :param categories: A list of entity categories that should be extracted. - :type categories: list[str or ~search_service_client.models.EntityCategory] + :type categories: list[str or ~azure.search.documents.models.EntityCategory] :param default_language_code: A value indicating which language code to use. Default is en. - Possible values include: 'ar', 'cs', 'zh-Hans', 'zh-Hant', 'da', 'nl', 'en', 'fi', 'fr', 'de', - 'el', 'hu', 'it', 'ja', 'ko', 'no', 'pl', 'pt-PT', 'pt-BR', 'ru', 'es', 'sv', 'tr'. + Possible values include: "ar", "cs", "zh-Hans", "zh-Hant", "da", "nl", "en", "fi", "fr", "de", + "el", "hu", "it", "ja", "ko", "no", "pl", "pt-PT", "pt-BR", "ru", "es", "sv", "tr". :type default_language_code: str or - ~search_service_client.models.EntityRecognitionSkillLanguage + ~azure.search.documents.models.EntityRecognitionSkillLanguage :param include_typeless_entities: Determines whether or not to include entities which are well known but don't conform to a pre-defined type. If this configuration is not set (default), set to null or set to false, entities which don't conform to one of the pre-defined types will not @@ -1555,205 +1523,13 @@ def __init__( **kwargs ): super(EntityRecognitionSkill, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) - self.odata_type = '#Microsoft.Skills.Text.EntityRecognitionSkill' + self.odata_type: str = '#Microsoft.Skills.Text.EntityRecognitionSkill' self.categories = categories self.default_language_code = default_language_code self.include_typeless_entities = include_typeless_entities self.minimum_precision = minimum_precision -class Field(msrest.serialization.Model): - """Represents a field in an index definition, which describes the name, data type, and search behavior of a field. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. The name of the field, which must be unique within the fields collection - of the index or parent field. - :type name: str - :param type: Required. The data type of the field. Possible values include: 'Edm.String', - 'Edm.Int32', 'Edm.Int64', 'Edm.Double', 'Edm.Boolean', 'Edm.DateTimeOffset', - 'Edm.GeographyPoint', 'Edm.ComplexType'. - :type type: str or ~search_service_client.models.DataType - :param key: A value indicating whether the field uniquely identifies documents in the index. - Exactly one top-level field in each index must be chosen as the key field and it must be of - type Edm.String. Key fields can be used to look up documents directly and update or delete - specific documents. Default is false for simple fields and null for complex fields. - :type key: bool - :param retrievable: A value indicating whether the field can be returned in a search result. - You can disable this option if you want to use a field (for example, margin) as a filter, - sorting, or scoring mechanism but do not want the field to be visible to the end user. This - property must be true for key fields, and it must be null for complex fields. This property can - be changed on existing fields. Enabling this property does not cause any increase in index - storage requirements. Default is true for simple fields and null for complex fields. 
- :type retrievable: bool - :param searchable: A value indicating whether the field is full-text searchable. This means it - will undergo analysis such as word-breaking during indexing. If you set a searchable field to a - value like "sunny day", internally it will be split into the individual tokens "sunny" and - "day". This enables full-text searches for these terms. Fields of type Edm.String or - Collection(Edm.String) are searchable by default. This property must be false for simple fields - of other non-string data types, and it must be null for complex fields. Note: searchable fields - consume extra space in your index since Azure Cognitive Search will store an additional - tokenized version of the field value for full-text searches. If you want to save space in your - index and you don't need a field to be included in searches, set searchable to false. - :type searchable: bool - :param filterable: A value indicating whether to enable the field to be referenced in $filter - queries. filterable differs from searchable in how strings are handled. Fields of type - Edm.String or Collection(Edm.String) that are filterable do not undergo word-breaking, so - comparisons are for exact matches only. For example, if you set such a field f to "sunny day", - $filter=f eq 'sunny' will find no matches, but $filter=f eq 'sunny day' will. This property - must be null for complex fields. Default is true for simple fields and null for complex fields. - :type filterable: bool - :param sortable: A value indicating whether to enable the field to be referenced in $orderby - expressions. By default Azure Cognitive Search sorts results by score, but in many experiences - users will want to sort by fields in the documents. A simple field can be sortable only if it - is single-valued (it has a single value in the scope of the parent document). Simple collection - fields cannot be sortable, since they are multi-valued. Simple sub-fields of complex - collections are also multi-valued, and therefore cannot be sortable. This is true whether it's - an immediate parent field, or an ancestor field, that's the complex collection. Complex fields - cannot be sortable and the sortable property must be null for such fields. The default for - sortable is true for single-valued simple fields, false for multi-valued simple fields, and - null for complex fields. - :type sortable: bool - :param facetable: A value indicating whether to enable the field to be referenced in facet - queries. Typically used in a presentation of search results that includes hit count by category - (for example, search for digital cameras and see hits by brand, by megapixels, by price, and so - on). This property must be null for complex fields. Fields of type Edm.GeographyPoint or - Collection(Edm.GeographyPoint) cannot be facetable. Default is true for all other simple - fields. - :type facetable: bool - :param analyzer: The name of the analyzer to use for the field. This option can be used only - with searchable fields and it can't be set together with either searchAnalyzer or - indexAnalyzer. Once the analyzer is chosen, it cannot be changed for the field. Must be null - for complex fields. 
Possible values include: 'ar.microsoft', 'ar.lucene', 'hy.lucene', - 'bn.microsoft', 'eu.lucene', 'bg.microsoft', 'bg.lucene', 'ca.microsoft', 'ca.lucene', 'zh- - Hans.microsoft', 'zh-Hans.lucene', 'zh-Hant.microsoft', 'zh-Hant.lucene', 'hr.microsoft', - 'cs.microsoft', 'cs.lucene', 'da.microsoft', 'da.lucene', 'nl.microsoft', 'nl.lucene', - 'en.microsoft', 'en.lucene', 'et.microsoft', 'fi.microsoft', 'fi.lucene', 'fr.microsoft', - 'fr.lucene', 'gl.lucene', 'de.microsoft', 'de.lucene', 'el.microsoft', 'el.lucene', - 'gu.microsoft', 'he.microsoft', 'hi.microsoft', 'hi.lucene', 'hu.microsoft', 'hu.lucene', - 'is.microsoft', 'id.microsoft', 'id.lucene', 'ga.lucene', 'it.microsoft', 'it.lucene', - 'ja.microsoft', 'ja.lucene', 'kn.microsoft', 'ko.microsoft', 'ko.lucene', 'lv.microsoft', - 'lv.lucene', 'lt.microsoft', 'ml.microsoft', 'ms.microsoft', 'mr.microsoft', 'nb.microsoft', - 'no.lucene', 'fa.lucene', 'pl.microsoft', 'pl.lucene', 'pt-BR.microsoft', 'pt-BR.lucene', 'pt- - PT.microsoft', 'pt-PT.lucene', 'pa.microsoft', 'ro.microsoft', 'ro.lucene', 'ru.microsoft', - 'ru.lucene', 'sr-cyrillic.microsoft', 'sr-latin.microsoft', 'sk.microsoft', 'sl.microsoft', - 'es.microsoft', 'es.lucene', 'sv.microsoft', 'sv.lucene', 'ta.microsoft', 'te.microsoft', - 'th.microsoft', 'th.lucene', 'tr.microsoft', 'tr.lucene', 'uk.microsoft', 'ur.microsoft', - 'vi.microsoft', 'standard.lucene', 'standardasciifolding.lucene', 'keyword', 'pattern', - 'simple', 'stop', 'whitespace'. - :type analyzer: str or ~search_service_client.models.AnalyzerName - :param search_analyzer: The name of the analyzer used at search time for the field. This option - can be used only with searchable fields. It must be set together with indexAnalyzer and it - cannot be set together with the analyzer option. This property cannot be set to the name of a - language analyzer; use the analyzer property instead if you need a language analyzer. This - analyzer can be updated on an existing field. Must be null for complex fields. Possible values - include: 'ar.microsoft', 'ar.lucene', 'hy.lucene', 'bn.microsoft', 'eu.lucene', 'bg.microsoft', - 'bg.lucene', 'ca.microsoft', 'ca.lucene', 'zh-Hans.microsoft', 'zh-Hans.lucene', 'zh- - Hant.microsoft', 'zh-Hant.lucene', 'hr.microsoft', 'cs.microsoft', 'cs.lucene', 'da.microsoft', - 'da.lucene', 'nl.microsoft', 'nl.lucene', 'en.microsoft', 'en.lucene', 'et.microsoft', - 'fi.microsoft', 'fi.lucene', 'fr.microsoft', 'fr.lucene', 'gl.lucene', 'de.microsoft', - 'de.lucene', 'el.microsoft', 'el.lucene', 'gu.microsoft', 'he.microsoft', 'hi.microsoft', - 'hi.lucene', 'hu.microsoft', 'hu.lucene', 'is.microsoft', 'id.microsoft', 'id.lucene', - 'ga.lucene', 'it.microsoft', 'it.lucene', 'ja.microsoft', 'ja.lucene', 'kn.microsoft', - 'ko.microsoft', 'ko.lucene', 'lv.microsoft', 'lv.lucene', 'lt.microsoft', 'ml.microsoft', - 'ms.microsoft', 'mr.microsoft', 'nb.microsoft', 'no.lucene', 'fa.lucene', 'pl.microsoft', - 'pl.lucene', 'pt-BR.microsoft', 'pt-BR.lucene', 'pt-PT.microsoft', 'pt-PT.lucene', - 'pa.microsoft', 'ro.microsoft', 'ro.lucene', 'ru.microsoft', 'ru.lucene', 'sr- - cyrillic.microsoft', 'sr-latin.microsoft', 'sk.microsoft', 'sl.microsoft', 'es.microsoft', - 'es.lucene', 'sv.microsoft', 'sv.lucene', 'ta.microsoft', 'te.microsoft', 'th.microsoft', - 'th.lucene', 'tr.microsoft', 'tr.lucene', 'uk.microsoft', 'ur.microsoft', 'vi.microsoft', - 'standard.lucene', 'standardasciifolding.lucene', 'keyword', 'pattern', 'simple', 'stop', - 'whitespace'. 
- :type search_analyzer: str or ~search_service_client.models.AnalyzerName - :param index_analyzer: The name of the analyzer used at indexing time for the field. This - option can be used only with searchable fields. It must be set together with searchAnalyzer and - it cannot be set together with the analyzer option. This property cannot be set to the name of - a language analyzer; use the analyzer property instead if you need a language analyzer. Once - the analyzer is chosen, it cannot be changed for the field. Must be null for complex fields. - Possible values include: 'ar.microsoft', 'ar.lucene', 'hy.lucene', 'bn.microsoft', 'eu.lucene', - 'bg.microsoft', 'bg.lucene', 'ca.microsoft', 'ca.lucene', 'zh-Hans.microsoft', 'zh- - Hans.lucene', 'zh-Hant.microsoft', 'zh-Hant.lucene', 'hr.microsoft', 'cs.microsoft', - 'cs.lucene', 'da.microsoft', 'da.lucene', 'nl.microsoft', 'nl.lucene', 'en.microsoft', - 'en.lucene', 'et.microsoft', 'fi.microsoft', 'fi.lucene', 'fr.microsoft', 'fr.lucene', - 'gl.lucene', 'de.microsoft', 'de.lucene', 'el.microsoft', 'el.lucene', 'gu.microsoft', - 'he.microsoft', 'hi.microsoft', 'hi.lucene', 'hu.microsoft', 'hu.lucene', 'is.microsoft', - 'id.microsoft', 'id.lucene', 'ga.lucene', 'it.microsoft', 'it.lucene', 'ja.microsoft', - 'ja.lucene', 'kn.microsoft', 'ko.microsoft', 'ko.lucene', 'lv.microsoft', 'lv.lucene', - 'lt.microsoft', 'ml.microsoft', 'ms.microsoft', 'mr.microsoft', 'nb.microsoft', 'no.lucene', - 'fa.lucene', 'pl.microsoft', 'pl.lucene', 'pt-BR.microsoft', 'pt-BR.lucene', 'pt-PT.microsoft', - 'pt-PT.lucene', 'pa.microsoft', 'ro.microsoft', 'ro.lucene', 'ru.microsoft', 'ru.lucene', 'sr- - cyrillic.microsoft', 'sr-latin.microsoft', 'sk.microsoft', 'sl.microsoft', 'es.microsoft', - 'es.lucene', 'sv.microsoft', 'sv.lucene', 'ta.microsoft', 'te.microsoft', 'th.microsoft', - 'th.lucene', 'tr.microsoft', 'tr.lucene', 'uk.microsoft', 'ur.microsoft', 'vi.microsoft', - 'standard.lucene', 'standardasciifolding.lucene', 'keyword', 'pattern', 'simple', 'stop', - 'whitespace'. - :type index_analyzer: str or ~search_service_client.models.AnalyzerName - :param synonym_maps: A list of the names of synonym maps to associate with this field. This - option can be used only with searchable fields. Currently only one synonym map per field is - supported. Assigning a synonym map to a field ensures that query terms targeting that field are - expanded at query-time using the rules in the synonym map. This attribute can be changed on - existing fields. Must be null or an empty collection for complex fields. - :type synonym_maps: list[str] - :param fields: A list of sub-fields if this is a field of type Edm.ComplexType or - Collection(Edm.ComplexType). Must be null or empty for simple fields. 
- :type fields: list[~search_service_client.models.Field] - """ - - _validation = { - 'name': {'required': True}, - 'type': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'type': {'key': 'type', 'type': 'str'}, - 'key': {'key': 'key', 'type': 'bool'}, - 'retrievable': {'key': 'retrievable', 'type': 'bool'}, - 'searchable': {'key': 'searchable', 'type': 'bool'}, - 'filterable': {'key': 'filterable', 'type': 'bool'}, - 'sortable': {'key': 'sortable', 'type': 'bool'}, - 'facetable': {'key': 'facetable', 'type': 'bool'}, - 'analyzer': {'key': 'analyzer', 'type': 'str'}, - 'search_analyzer': {'key': 'searchAnalyzer', 'type': 'str'}, - 'index_analyzer': {'key': 'indexAnalyzer', 'type': 'str'}, - 'synonym_maps': {'key': 'synonymMaps', 'type': '[str]'}, - 'fields': {'key': 'fields', 'type': '[Field]'}, - } - - def __init__( - self, - *, - name: str, - type: Union[str, "DataType"], - key: Optional[bool] = None, - retrievable: Optional[bool] = None, - searchable: Optional[bool] = None, - filterable: Optional[bool] = None, - sortable: Optional[bool] = None, - facetable: Optional[bool] = None, - analyzer: Optional[Union[str, "AnalyzerName"]] = None, - search_analyzer: Optional[Union[str, "AnalyzerName"]] = None, - index_analyzer: Optional[Union[str, "AnalyzerName"]] = None, - synonym_maps: Optional[List[str]] = None, - fields: Optional[List["Field"]] = None, - **kwargs - ): - super(Field, self).__init__(**kwargs) - self.name = name - self.type = type - self.key = key - self.retrievable = retrievable - self.searchable = searchable - self.filterable = filterable - self.sortable = sortable - self.facetable = facetable - self.analyzer = analyzer - self.search_analyzer = search_analyzer - self.index_analyzer = index_analyzer - self.synonym_maps = synonym_maps - self.fields = fields - - class FieldMapping(msrest.serialization.Model): """Defines a mapping between a field in a data source and a target field in an index. @@ -1765,7 +1541,7 @@ class FieldMapping(msrest.serialization.Model): name by default. :type target_field_name: str :param mapping_function: A function to apply to each source field value before indexing. - :type mapping_function: ~search_service_client.models.FieldMappingFunction + :type mapping_function: ~azure.search.documents.models.FieldMappingFunction """ _validation = { @@ -1839,11 +1615,11 @@ class FreshnessScoringFunction(ScoringFunction): 1.0. :type boost: float :param interpolation: A value indicating how boosting will be interpolated across document - scores; defaults to "Linear". Possible values include: 'linear', 'constant', 'quadratic', - 'logarithmic'. - :type interpolation: str or ~search_service_client.models.ScoringFunctionInterpolation + scores; defaults to "Linear". Possible values include: "linear", "constant", "quadratic", + "logarithmic". + :type interpolation: str or ~azure.search.documents.models.ScoringFunctionInterpolation :param parameters: Required. Parameter values for the freshness scoring function. 
- :type parameters: ~search_service_client.models.FreshnessScoringParameters + :type parameters: ~azure.search.documents.models.FreshnessScoringParameters """ _validation = { @@ -1871,7 +1647,7 @@ def __init__( **kwargs ): super(FreshnessScoringFunction, self).__init__(field_name=field_name, boost=boost, interpolation=interpolation, **kwargs) - self.type = 'freshness' + self.type: str = 'freshness' self.parameters = parameters @@ -1964,11 +1740,11 @@ def __init__( **kwargs ): super(HighWaterMarkChangeDetectionPolicy, self).__init__(**kwargs) - self.odata_type = '#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy' + self.odata_type: str = '#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy' self.high_water_mark_column_name = high_water_mark_column_name -class ImageAnalysisSkill(Skill): +class ImageAnalysisSkill(SearchIndexerSkill): """A skill that analyzes image files. It extracts a rich set of visual features based on the image content. All required parameters must be populated in order to send to Azure. @@ -1988,17 +1764,17 @@ class ImageAnalysisSkill(Skill): :type context: str :param inputs: Required. Inputs of the skills could be a column in the source data set, or the output of an upstream skill. - :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :type inputs: list[~azure.search.documents.models.InputFieldMappingEntry] :param outputs: Required. The output of a skill is either a field in a search index, or a value that can be consumed as an input by another skill. - :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + :type outputs: list[~azure.search.documents.models.OutputFieldMappingEntry] :param default_language_code: A value indicating which language code to use. Default is en. - Possible values include: 'en', 'es', 'ja', 'pt', 'zh'. - :type default_language_code: str or ~search_service_client.models.ImageAnalysisSkillLanguage + Possible values include: "en", "es", "ja", "pt", "zh". + :type default_language_code: str or ~azure.search.documents.models.ImageAnalysisSkillLanguage :param visual_features: A list of visual features. - :type visual_features: list[str or ~search_service_client.models.VisualFeature] + :type visual_features: list[str or ~azure.search.documents.models.VisualFeature] :param details: A string indicating which domain-specific details to return. - :type details: list[str or ~search_service_client.models.ImageDetail] + :type details: list[str or ~azure.search.documents.models.ImageDetail] """ _validation = { @@ -2033,346 +1809,90 @@ def __init__( **kwargs ): super(ImageAnalysisSkill, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) - self.odata_type = '#Microsoft.Skills.Vision.ImageAnalysisSkill' + self.odata_type: str = '#Microsoft.Skills.Vision.ImageAnalysisSkill' self.default_language_code = default_language_code self.visual_features = visual_features self.details = details -class Index(msrest.serialization.Model): - """Represents a search index definition, which describes the fields and search behavior of an index. +class IndexerExecutionResult(msrest.serialization.Model): + """Represents the result of an individual indexer execution. + + Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. - :param name: Required. The name of the index. - :type name: str - :param fields: Required. The fields of the index. 
- :type fields: list[~search_service_client.models.Field] - :param scoring_profiles: The scoring profiles for the index. - :type scoring_profiles: list[~search_service_client.models.ScoringProfile] - :param default_scoring_profile: The name of the scoring profile to use if none is specified in - the query. If this property is not set and no scoring profile is specified in the query, then - default scoring (tf-idf) will be used. - :type default_scoring_profile: str - :param cors_options: Options to control Cross-Origin Resource Sharing (CORS) for the index. - :type cors_options: ~search_service_client.models.CorsOptions - :param suggesters: The suggesters for the index. - :type suggesters: list[~search_service_client.models.Suggester] - :param analyzers: The analyzers for the index. - :type analyzers: list[~search_service_client.models.Analyzer] - :param tokenizers: The tokenizers for the index. - :type tokenizers: list[~search_service_client.models.Tokenizer] - :param token_filters: The token filters for the index. - :type token_filters: list[~search_service_client.models.TokenFilter] - :param char_filters: The character filters for the index. - :type char_filters: list[~search_service_client.models.CharFilter] - :param encryption_key: A description of an encryption key that you create in Azure Key Vault. - This key is used to provide an additional level of encryption-at-rest for your data when you - want full assurance that no one, not even Microsoft, can decrypt your data in Azure Cognitive - Search. Once you have encrypted your data, it will always remain encrypted. Azure Cognitive - Search will ignore attempts to set this property to null. You can change this property as - needed if you want to rotate your encryption key; Your data will be unaffected. Encryption with - customer-managed keys is not available for free search services, and is only available for paid - services created on or after January 1, 2019. - :type encryption_key: ~search_service_client.models.EncryptionKey - :param e_tag: The ETag of the index. - :type e_tag: str + :ivar status: Required. The outcome of this indexer execution. Possible values include: + "transientFailure", "success", "inProgress", "reset". + :vartype status: str or ~azure.search.documents.models.IndexerExecutionStatus + :ivar error_message: The error message indicating the top-level error, if any. + :vartype error_message: str + :ivar start_time: The start time of this indexer execution. + :vartype start_time: ~datetime.datetime + :ivar end_time: The end time of this indexer execution, if the execution has already completed. + :vartype end_time: ~datetime.datetime + :ivar errors: Required. The item-level indexing errors. + :vartype errors: list[~azure.search.documents.models.SearchIndexerError] + :ivar warnings: Required. The item-level indexing warnings. + :vartype warnings: list[~azure.search.documents.models.SearchIndexerWarning] + :ivar item_count: Required. The number of items that were processed during this indexer + execution. This includes both successfully processed items and items where indexing was + attempted but failed. + :vartype item_count: int + :ivar failed_item_count: Required. The number of items that failed to be indexed during this + indexer execution. + :vartype failed_item_count: int + :ivar initial_tracking_state: Change tracking state with which an indexer execution started. + :vartype initial_tracking_state: str + :ivar final_tracking_state: Change tracking state with which an indexer execution finished. 
+ :vartype final_tracking_state: str """ _validation = { - 'name': {'required': True}, - 'fields': {'required': True}, + 'status': {'required': True, 'readonly': True}, + 'error_message': {'readonly': True}, + 'start_time': {'readonly': True}, + 'end_time': {'readonly': True}, + 'errors': {'required': True, 'readonly': True}, + 'warnings': {'required': True, 'readonly': True}, + 'item_count': {'required': True, 'readonly': True}, + 'failed_item_count': {'required': True, 'readonly': True}, + 'initial_tracking_state': {'readonly': True}, + 'final_tracking_state': {'readonly': True}, } _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'fields': {'key': 'fields', 'type': '[Field]'}, - 'scoring_profiles': {'key': 'scoringProfiles', 'type': '[ScoringProfile]'}, - 'default_scoring_profile': {'key': 'defaultScoringProfile', 'type': 'str'}, - 'cors_options': {'key': 'corsOptions', 'type': 'CorsOptions'}, - 'suggesters': {'key': 'suggesters', 'type': '[Suggester]'}, - 'analyzers': {'key': 'analyzers', 'type': '[Analyzer]'}, - 'tokenizers': {'key': 'tokenizers', 'type': '[Tokenizer]'}, - 'token_filters': {'key': 'tokenFilters', 'type': '[TokenFilter]'}, - 'char_filters': {'key': 'charFilters', 'type': '[CharFilter]'}, - 'encryption_key': {'key': 'encryptionKey', 'type': 'EncryptionKey'}, - 'e_tag': {'key': '@odata\\.etag', 'type': 'str'}, + 'status': {'key': 'status', 'type': 'str'}, + 'error_message': {'key': 'errorMessage', 'type': 'str'}, + 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, + 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, + 'errors': {'key': 'errors', 'type': '[SearchIndexerError]'}, + 'warnings': {'key': 'warnings', 'type': '[SearchIndexerWarning]'}, + 'item_count': {'key': 'itemsProcessed', 'type': 'int'}, + 'failed_item_count': {'key': 'itemsFailed', 'type': 'int'}, + 'initial_tracking_state': {'key': 'initialTrackingState', 'type': 'str'}, + 'final_tracking_state': {'key': 'finalTrackingState', 'type': 'str'}, } def __init__( self, - *, - name: str, - fields: List["Field"], - scoring_profiles: Optional[List["ScoringProfile"]] = None, - default_scoring_profile: Optional[str] = None, - cors_options: Optional["CorsOptions"] = None, - suggesters: Optional[List["Suggester"]] = None, - analyzers: Optional[List["Analyzer"]] = None, - tokenizers: Optional[List["Tokenizer"]] = None, - token_filters: Optional[List["TokenFilter"]] = None, - char_filters: Optional[List["CharFilter"]] = None, - encryption_key: Optional["EncryptionKey"] = None, - e_tag: Optional[str] = None, **kwargs ): - super(Index, self).__init__(**kwargs) - self.name = name - self.fields = fields - self.scoring_profiles = scoring_profiles - self.default_scoring_profile = default_scoring_profile - self.cors_options = cors_options - self.suggesters = suggesters - self.analyzers = analyzers - self.tokenizers = tokenizers - self.token_filters = token_filters - self.char_filters = char_filters - self.encryption_key = encryption_key - self.e_tag = e_tag - + super(IndexerExecutionResult, self).__init__(**kwargs) + self.status = None + self.error_message = None + self.start_time = None + self.end_time = None + self.errors = None + self.warnings = None + self.item_count = None + self.failed_item_count = None + self.initial_tracking_state = None + self.final_tracking_state = None -class Indexer(msrest.serialization.Model): - """Represents an indexer. - All required parameters must be populated in order to send to Azure. - - :param name: Required. The name of the indexer. 
- :type name: str - :param description: The description of the indexer. - :type description: str - :param data_source_name: Required. The name of the datasource from which this indexer reads - data. - :type data_source_name: str - :param skillset_name: The name of the skillset executing with this indexer. - :type skillset_name: str - :param target_index_name: Required. The name of the index to which this indexer writes data. - :type target_index_name: str - :param schedule: The schedule for this indexer. - :type schedule: ~search_service_client.models.IndexingSchedule - :param parameters: Parameters for indexer execution. - :type parameters: ~search_service_client.models.IndexingParameters - :param field_mappings: Defines mappings between fields in the data source and corresponding - target fields in the index. - :type field_mappings: list[~search_service_client.models.FieldMapping] - :param output_field_mappings: Output field mappings are applied after enrichment and - immediately before indexing. - :type output_field_mappings: list[~search_service_client.models.FieldMapping] - :param is_disabled: A value indicating whether the indexer is disabled. Default is false. - :type is_disabled: bool - :param e_tag: The ETag of the Indexer. - :type e_tag: str - """ - - _validation = { - 'name': {'required': True}, - 'data_source_name': {'required': True}, - 'target_index_name': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'description': {'key': 'description', 'type': 'str'}, - 'data_source_name': {'key': 'dataSourceName', 'type': 'str'}, - 'skillset_name': {'key': 'skillsetName', 'type': 'str'}, - 'target_index_name': {'key': 'targetIndexName', 'type': 'str'}, - 'schedule': {'key': 'schedule', 'type': 'IndexingSchedule'}, - 'parameters': {'key': 'parameters', 'type': 'IndexingParameters'}, - 'field_mappings': {'key': 'fieldMappings', 'type': '[FieldMapping]'}, - 'output_field_mappings': {'key': 'outputFieldMappings', 'type': '[FieldMapping]'}, - 'is_disabled': {'key': 'disabled', 'type': 'bool'}, - 'e_tag': {'key': '@odata\\.etag', 'type': 'str'}, - } - - def __init__( - self, - *, - name: str, - data_source_name: str, - target_index_name: str, - description: Optional[str] = None, - skillset_name: Optional[str] = None, - schedule: Optional["IndexingSchedule"] = None, - parameters: Optional["IndexingParameters"] = None, - field_mappings: Optional[List["FieldMapping"]] = None, - output_field_mappings: Optional[List["FieldMapping"]] = None, - is_disabled: Optional[bool] = False, - e_tag: Optional[str] = None, - **kwargs - ): - super(Indexer, self).__init__(**kwargs) - self.name = name - self.description = description - self.data_source_name = data_source_name - self.skillset_name = skillset_name - self.target_index_name = target_index_name - self.schedule = schedule - self.parameters = parameters - self.field_mappings = field_mappings - self.output_field_mappings = output_field_mappings - self.is_disabled = is_disabled - self.e_tag = e_tag - - -class IndexerExecutionInfo(msrest.serialization.Model): - """Represents the current status and execution history of an indexer. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar status: Required. Overall indexer status. Possible values include: 'unknown', 'error', - 'running'. 
- :vartype status: str or ~search_service_client.models.IndexerStatus - :ivar last_result: The result of the most recent or an in-progress indexer execution. - :vartype last_result: ~search_service_client.models.IndexerExecutionResult - :ivar execution_history: Required. History of the recent indexer executions, sorted in reverse - chronological order. - :vartype execution_history: list[~search_service_client.models.IndexerExecutionResult] - :ivar limits: Required. The execution limits for the indexer. - :vartype limits: ~search_service_client.models.IndexerLimits - """ - - _validation = { - 'status': {'required': True, 'readonly': True}, - 'last_result': {'readonly': True}, - 'execution_history': {'required': True, 'readonly': True}, - 'limits': {'required': True, 'readonly': True}, - } - - _attribute_map = { - 'status': {'key': 'status', 'type': 'str'}, - 'last_result': {'key': 'lastResult', 'type': 'IndexerExecutionResult'}, - 'execution_history': {'key': 'executionHistory', 'type': '[IndexerExecutionResult]'}, - 'limits': {'key': 'limits', 'type': 'IndexerLimits'}, - } - - def __init__( - self, - **kwargs - ): - super(IndexerExecutionInfo, self).__init__(**kwargs) - self.status = None - self.last_result = None - self.execution_history = None - self.limits = None - - -class IndexerExecutionResult(msrest.serialization.Model): - """Represents the result of an individual indexer execution. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar status: Required. The outcome of this indexer execution. Possible values include: - 'transientFailure', 'success', 'inProgress', 'reset'. - :vartype status: str or ~search_service_client.models.IndexerExecutionStatus - :ivar error_message: The error message indicating the top-level error, if any. - :vartype error_message: str - :ivar start_time: The start time of this indexer execution. - :vartype start_time: ~datetime.datetime - :ivar end_time: The end time of this indexer execution, if the execution has already completed. - :vartype end_time: ~datetime.datetime - :ivar errors: Required. The item-level indexing errors. - :vartype errors: list[~search_service_client.models.ItemError] - :ivar warnings: Required. The item-level indexing warnings. - :vartype warnings: list[~search_service_client.models.ItemWarning] - :ivar item_count: Required. The number of items that were processed during this indexer - execution. This includes both successfully processed items and items where indexing was - attempted but failed. - :vartype item_count: int - :ivar failed_item_count: Required. The number of items that failed to be indexed during this - indexer execution. - :vartype failed_item_count: int - :ivar initial_tracking_state: Change tracking state with which an indexer execution started. - :vartype initial_tracking_state: str - :ivar final_tracking_state: Change tracking state with which an indexer execution finished. 
- :vartype final_tracking_state: str - """ - - _validation = { - 'status': {'required': True, 'readonly': True}, - 'error_message': {'readonly': True}, - 'start_time': {'readonly': True}, - 'end_time': {'readonly': True}, - 'errors': {'required': True, 'readonly': True}, - 'warnings': {'required': True, 'readonly': True}, - 'item_count': {'required': True, 'readonly': True}, - 'failed_item_count': {'required': True, 'readonly': True}, - 'initial_tracking_state': {'readonly': True}, - 'final_tracking_state': {'readonly': True}, - } - - _attribute_map = { - 'status': {'key': 'status', 'type': 'str'}, - 'error_message': {'key': 'errorMessage', 'type': 'str'}, - 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, - 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, - 'errors': {'key': 'errors', 'type': '[ItemError]'}, - 'warnings': {'key': 'warnings', 'type': '[ItemWarning]'}, - 'item_count': {'key': 'itemsProcessed', 'type': 'int'}, - 'failed_item_count': {'key': 'itemsFailed', 'type': 'int'}, - 'initial_tracking_state': {'key': 'initialTrackingState', 'type': 'str'}, - 'final_tracking_state': {'key': 'finalTrackingState', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(IndexerExecutionResult, self).__init__(**kwargs) - self.status = None - self.error_message = None - self.start_time = None - self.end_time = None - self.errors = None - self.warnings = None - self.item_count = None - self.failed_item_count = None - self.initial_tracking_state = None - self.final_tracking_state = None - - -class IndexerLimits(msrest.serialization.Model): - """IndexerLimits. - - Variables are only populated by the server, and will be ignored when sending a request. - - :ivar max_run_time: The maximum duration that the indexer is permitted to run for one - execution. - :vartype max_run_time: ~datetime.timedelta - :ivar max_document_extraction_size: The maximum size of a document, in bytes, which will be - considered valid for indexing. - :vartype max_document_extraction_size: long - :ivar max_document_content_characters_to_extract: The maximum number of characters that will be - extracted from a document picked up for indexing. - :vartype max_document_content_characters_to_extract: long - """ - - _validation = { - 'max_run_time': {'readonly': True}, - 'max_document_extraction_size': {'readonly': True}, - 'max_document_content_characters_to_extract': {'readonly': True}, - } - - _attribute_map = { - 'max_run_time': {'key': 'maxRunTime', 'type': 'duration'}, - 'max_document_extraction_size': {'key': 'maxDocumentExtractionSize', 'type': 'long'}, - 'max_document_content_characters_to_extract': {'key': 'maxDocumentContentCharactersToExtract', 'type': 'long'}, - } - - def __init__( - self, - **kwargs - ): - super(IndexerLimits, self).__init__(**kwargs) - self.max_run_time = None - self.max_document_extraction_size = None - self.max_document_content_characters_to_extract = None - - -class IndexingParameters(msrest.serialization.Model): - """Represents parameters for indexer execution. +class IndexingParameters(msrest.serialization.Model): + """Represents parameters for indexer execution. :param batch_size: The number of items that are read from the data source and indexed as a single batch in order to improve performance. The default depends on the data source type. @@ -2455,7 +1975,7 @@ class InputFieldMappingEntry(msrest.serialization.Model): :param source_context: The source context used for selecting recursive inputs. 
:type source_context: str :param inputs: The recursive inputs used when creating a complex type. - :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :type inputs: list[~azure.search.documents.models.InputFieldMappingEntry] """ _validation = { @@ -2485,133 +2005,23 @@ def __init__( self.inputs = inputs -class ItemError(msrest.serialization.Model): - """Represents an item- or document-level indexing error. - - Variables are only populated by the server, and will be ignored when sending a request. +class KeepTokenFilter(TokenFilter): + """A token filter that only keeps tokens with text contained in a specified list of words. This token filter is implemented using Apache Lucene. All required parameters must be populated in order to send to Azure. - :ivar key: The key of the item for which indexing failed. - :vartype key: str - :ivar error_message: Required. The message describing the error that occurred while processing - the item. - :vartype error_message: str - :ivar status_code: Required. The status code indicating why the indexing operation failed. - Possible values include: 400 for a malformed input document, 404 for document not found, 409 - for a version conflict, 422 when the index is temporarily unavailable, or 503 for when the - service is too busy. - :vartype status_code: int - :ivar name: The name of the source at which the error originated. For example, this could refer - to a particular skill in the attached skillset. This may not be always available. - :vartype name: str - :ivar details: Additional, verbose details about the error to assist in debugging the indexer. - This may not be always available. - :vartype details: str - :ivar documentation_link: A link to a troubleshooting guide for these classes of errors. This - may not be always available. - :vartype documentation_link: str - """ - - _validation = { - 'key': {'readonly': True}, - 'error_message': {'required': True, 'readonly': True}, - 'status_code': {'required': True, 'readonly': True}, - 'name': {'readonly': True}, - 'details': {'readonly': True}, - 'documentation_link': {'readonly': True}, - } - - _attribute_map = { - 'key': {'key': 'key', 'type': 'str'}, - 'error_message': {'key': 'errorMessage', 'type': 'str'}, - 'status_code': {'key': 'statusCode', 'type': 'int'}, - 'name': {'key': 'name', 'type': 'str'}, - 'details': {'key': 'details', 'type': 'str'}, - 'documentation_link': {'key': 'documentationLink', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(ItemError, self).__init__(**kwargs) - self.key = None - self.error_message = None - self.status_code = None - self.name = None - self.details = None - self.documentation_link = None - - -class ItemWarning(msrest.serialization.Model): - """Represents an item-level warning. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar key: The key of the item which generated a warning. - :vartype key: str - :ivar message: Required. The message describing the warning that occurred while processing the - item. - :vartype message: str - :ivar name: The name of the source at which the warning originated. For example, this could - refer to a particular skill in the attached skillset. This may not be always available. - :vartype name: str - :ivar details: Additional, verbose details about the warning to assist in debugging the - indexer. This may not be always available. 
- :vartype details: str - :ivar documentation_link: A link to a troubleshooting guide for these classes of warnings. This - may not be always available. - :vartype documentation_link: str - """ - - _validation = { - 'key': {'readonly': True}, - 'message': {'required': True, 'readonly': True}, - 'name': {'readonly': True}, - 'details': {'readonly': True}, - 'documentation_link': {'readonly': True}, - } - - _attribute_map = { - 'key': {'key': 'key', 'type': 'str'}, - 'message': {'key': 'message', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'details': {'key': 'details', 'type': 'str'}, - 'documentation_link': {'key': 'documentationLink', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - super(ItemWarning, self).__init__(**kwargs) - self.key = None - self.message = None - self.name = None - self.details = None - self.documentation_link = None - - -class KeepTokenFilter(TokenFilter): - """A token filter that only keeps tokens with text contained in a specified list of words. This token filter is implemented using Apache Lucene. - - All required parameters must be populated in order to send to Azure. - - :param odata_type: Required. Identifies the concrete type of the token filter.Constant filled - by server. - :type odata_type: str - :param name: Required. The name of the token filter. It must only contain letters, digits, - spaces, dashes or underscores, can only start and end with alphanumeric characters, and is - limited to 128 characters. - :type name: str - :param keep_words: Required. The list of words to keep. - :type keep_words: list[str] - :param lower_case_keep_words: A value indicating whether to lower case all words first. Default - is false. - :type lower_case_keep_words: bool + :param odata_type: Required. Identifies the concrete type of the token filter.Constant filled + by server. + :type odata_type: str + :param name: Required. The name of the token filter. It must only contain letters, digits, + spaces, dashes or underscores, can only start and end with alphanumeric characters, and is + limited to 128 characters. + :type name: str + :param keep_words: Required. The list of words to keep. + :type keep_words: list[str] + :param lower_case_keep_words: A value indicating whether to lower case all words first. Default + is false. + :type lower_case_keep_words: bool """ _validation = { @@ -2636,12 +2046,12 @@ def __init__( **kwargs ): super(KeepTokenFilter, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.KeepTokenFilter' + self.odata_type: str = '#Microsoft.Azure.Search.KeepTokenFilter' self.keep_words = keep_words self.lower_case_keep_words = lower_case_keep_words -class KeyPhraseExtractionSkill(Skill): +class KeyPhraseExtractionSkill(SearchIndexerSkill): """A skill that uses text analytics for key phrase extraction. All required parameters must be populated in order to send to Azure. @@ -2661,15 +2071,15 @@ class KeyPhraseExtractionSkill(Skill): :type context: str :param inputs: Required. Inputs of the skills could be a column in the source data set, or the output of an upstream skill. - :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :type inputs: list[~azure.search.documents.models.InputFieldMappingEntry] :param outputs: Required. The output of a skill is either a field in a search index, or a value that can be consumed as an input by another skill. 
- :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + :type outputs: list[~azure.search.documents.models.OutputFieldMappingEntry] :param default_language_code: A value indicating which language code to use. Default is en. - Possible values include: 'da', 'nl', 'en', 'fi', 'fr', 'de', 'it', 'ja', 'ko', 'no', 'pl', 'pt- - PT', 'pt-BR', 'ru', 'es', 'sv'. + Possible values include: "da", "nl", "en", "fi", "fr", "de", "it", "ja", "ko", "no", "pl", "pt- + PT", "pt-BR", "ru", "es", "sv". :type default_language_code: str or - ~search_service_client.models.KeyPhraseExtractionSkillLanguage + ~azure.search.documents.models.KeyPhraseExtractionSkillLanguage :param max_key_phrase_count: A number indicating how many key phrases to return. If absent, all identified key phrases will be returned. :type max_key_phrase_count: int @@ -2705,7 +2115,7 @@ def __init__( **kwargs ): super(KeyPhraseExtractionSkill, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) - self.odata_type = '#Microsoft.Skills.Text.KeyPhraseExtractionSkill' + self.odata_type: str = '#Microsoft.Skills.Text.KeyPhraseExtractionSkill' self.default_language_code = default_language_code self.max_key_phrase_count = max_key_phrase_count @@ -2751,12 +2161,12 @@ def __init__( **kwargs ): super(KeywordMarkerTokenFilter, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.KeywordMarkerTokenFilter' + self.odata_type: str = '#Microsoft.Azure.Search.KeywordMarkerTokenFilter' self.keywords = keywords self.ignore_case = ignore_case -class KeywordTokenizer(Tokenizer): +class KeywordTokenizer(LexicalTokenizer): """Emits the entire input as a single token. This tokenizer is implemented using Apache Lucene. All required parameters must be populated in order to send to Azure. @@ -2791,11 +2201,11 @@ def __init__( **kwargs ): super(KeywordTokenizer, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.KeywordTokenizer' + self.odata_type: str = '#Microsoft.Azure.Search.KeywordTokenizer' self.buffer_size = buffer_size -class KeywordTokenizerV2(Tokenizer): +class KeywordTokenizerV2(LexicalTokenizer): """Emits the entire input as a single token. This tokenizer is implemented using Apache Lucene. All required parameters must be populated in order to send to Azure. @@ -2832,11 +2242,11 @@ def __init__( **kwargs ): super(KeywordTokenizerV2, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.KeywordTokenizerV2' + self.odata_type: str = '#Microsoft.Azure.Search.KeywordTokenizerV2' self.max_token_length = max_token_length -class LanguageDetectionSkill(Skill): +class LanguageDetectionSkill(SearchIndexerSkill): """A skill that detects the language of input text and reports a single language code for every document submitted on the request. The language code is paired with a score indicating the confidence of the analysis. All required parameters must be populated in order to send to Azure. @@ -2856,10 +2266,10 @@ class LanguageDetectionSkill(Skill): :type context: str :param inputs: Required. Inputs of the skills could be a column in the source data set, or the output of an upstream skill. - :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :type inputs: list[~azure.search.documents.models.InputFieldMappingEntry] :param outputs: Required. The output of a skill is either a field in a search index, or a value that can be consumed as an input by another skill. 
- :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + :type outputs: list[~azure.search.documents.models.OutputFieldMappingEntry] """ _validation = { @@ -2888,7 +2298,7 @@ def __init__( **kwargs ): super(LanguageDetectionSkill, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) - self.odata_type = '#Microsoft.Skills.Text.LanguageDetectionSkill' + self.odata_type: str = '#Microsoft.Skills.Text.LanguageDetectionSkill' class LengthTokenFilter(TokenFilter): @@ -2903,39 +2313,39 @@ class LengthTokenFilter(TokenFilter): spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. :type name: str - :param min: The minimum length in characters. Default is 0. Maximum is 300. Must be less than - the value of max. - :type min: int - :param max: The maximum length in characters. Default and maximum is 300. - :type max: int + :param min_length: The minimum length in characters. Default is 0. Maximum is 300. Must be less + than the value of max. + :type min_length: int + :param max_length: The maximum length in characters. Default and maximum is 300. + :type max_length: int """ _validation = { 'odata_type': {'required': True}, 'name': {'required': True}, - 'min': {'maximum': 300}, - 'max': {'maximum': 300}, + 'min_length': {'maximum': 300}, + 'max_length': {'maximum': 300}, } _attribute_map = { 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, - 'min': {'key': 'min', 'type': 'int'}, - 'max': {'key': 'max', 'type': 'int'}, + 'min_length': {'key': 'min', 'type': 'int'}, + 'max_length': {'key': 'max', 'type': 'int'}, } def __init__( self, *, name: str, - min: Optional[int] = 0, - max: Optional[int] = 300, + min_length: Optional[int] = 0, + max_length: Optional[int] = 300, **kwargs ): super(LengthTokenFilter, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.LengthTokenFilter' - self.min = min - self.max = max + self.odata_type: str = '#Microsoft.Azure.Search.LengthTokenFilter' + self.min_length = min_length + self.max_length = max_length class LimitTokenFilter(TokenFilter): @@ -2978,7 +2388,7 @@ def __init__( **kwargs ): super(LimitTokenFilter, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.LimitTokenFilter' + self.odata_type: str = '#Microsoft.Azure.Search.LimitTokenFilter' self.max_token_count = max_token_count self.consume_all_tokens = consume_all_tokens @@ -2991,7 +2401,7 @@ class ListDataSourcesResult(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. :ivar data_sources: Required. The datasources in the Search service. - :vartype data_sources: list[~search_service_client.models.DataSource] + :vartype data_sources: list[~azure.search.documents.models.SearchIndexerDataSource] """ _validation = { @@ -2999,7 +2409,7 @@ class ListDataSourcesResult(msrest.serialization.Model): } _attribute_map = { - 'data_sources': {'key': 'value', 'type': '[DataSource]'}, + 'data_sources': {'key': 'value', 'type': '[SearchIndexerDataSource]'}, } def __init__( @@ -3018,7 +2428,7 @@ class ListIndexersResult(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. :ivar indexers: Required. The indexers in the Search service. 
- :vartype indexers: list[~search_service_client.models.Indexer]
+ :vartype indexers: list[~azure.search.documents.models.SearchIndexer]
 """

 _validation = {
@@ -3026,7 +2436,7 @@
 }

 _attribute_map = {
- 'indexers': {'key': 'value', 'type': '[Indexer]'},
+ 'indexers': {'key': 'value', 'type': '[SearchIndexer]'},
 }

 def __init__(
@@ -3045,7 +2455,7 @@ class ListIndexesResult(msrest.serialization.Model):

 All required parameters must be populated in order to send to Azure.

 :ivar indexes: Required. The indexes in the Search service.
- :vartype indexes: list[~search_service_client.models.Index]
+ :vartype indexes: list[~azure.search.documents.models.SearchIndex]
 """

 _validation = {
@@ -3053,7 +2463,7 @@
 }

 _attribute_map = {
- 'indexes': {'key': 'value', 'type': '[Index]'},
+ 'indexes': {'key': 'value', 'type': '[SearchIndex]'},
 }

 def __init__(
@@ -3065,14 +2475,14 @@


 class ListSkillsetsResult(msrest.serialization.Model):
- """Response from a list Skillset request. If successful, it includes the full definitions of all skillsets.
+ """Response from a list skillset request. If successful, it includes the full definitions of all skillsets.

 Variables are only populated by the server, and will be ignored when sending a request.

 All required parameters must be populated in order to send to Azure.

 :ivar skillsets: Required. The skillsets defined in the Search service.
- :vartype skillsets: list[~search_service_client.models.Skillset]
+ :vartype skillsets: list[~azure.search.documents.models.SearchIndexerSkillset]
 """

 _validation = {
@@ -3080,7 +2490,7 @@
 }

 _attribute_map = {
- 'skillsets': {'key': 'value', 'type': '[Skillset]'},
+ 'skillsets': {'key': 'value', 'type': '[SearchIndexerSkillset]'},
 }

 def __init__(
@@ -3099,7 +2509,7 @@ class ListSynonymMapsResult(msrest.serialization.Model):

 All required parameters must be populated in order to send to Azure.

 :ivar synonym_maps: Required. The synonym maps in the Search service.
- :vartype synonym_maps: list[~search_service_client.models.SynonymMap]
+ :vartype synonym_maps: list[~azure.search.documents.models.SynonymMap]
 """

 _validation = {
@@ -3118,6 +2528,133 @@
 self.synonym_maps = None


+class LuceneStandardAnalyzer(LexicalAnalyzer):
+ """Standard Apache Lucene analyzer; composed of the standard tokenizer, lowercase filter and stop filter.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param odata_type: Required. Identifies the concrete type of the analyzer. Constant filled by
+ server.
+ :type odata_type: str
+ :param name: Required. The name of the analyzer. It must only contain letters, digits, spaces,
+ dashes or underscores, can only start and end with alphanumeric characters, and is limited to
+ 128 characters.
+ :type name: str
+ :param max_token_length: The maximum token length. Default is 255. Tokens longer than the
+ maximum length are split. The maximum token length that can be used is 300 characters.
+ :type max_token_length: int
+ :param stopwords: A list of stopwords.
+ :type stopwords: list[str]
+ """
+
+ _validation = {
+ 'odata_type': {'required': True},
+ 'name': {'required': True},
+ 'max_token_length': {'maximum': 300},
+ }
+
+ _attribute_map = {
+ 'odata_type': {'key': '@odata\\.type', 'type': 'str'},
+ 'name': {'key': 'name', 'type': 'str'},
+ 'max_token_length': {'key': 'maxTokenLength', 'type': 'int'},
+ 'stopwords': {'key': 'stopwords', 'type': '[str]'},
+ }
+
+ def __init__(
+ self,
+ *,
+ name: str,
+ max_token_length: Optional[int] = 255,
+ stopwords: Optional[List[str]] = None,
+ **kwargs
+ ):
+ super(LuceneStandardAnalyzer, self).__init__(name=name, **kwargs)
+ self.odata_type: str = '#Microsoft.Azure.Search.StandardAnalyzer'
+ self.max_token_length = max_token_length
+ self.stopwords = stopwords
+
+
+class LuceneStandardTokenizer(LexicalTokenizer):
+ """Breaks text following the Unicode Text Segmentation rules. This tokenizer is implemented using Apache Lucene.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param odata_type: Required. Identifies the concrete type of the tokenizer. Constant filled by
+ server.
+ :type odata_type: str
+ :param name: Required. The name of the tokenizer. It must only contain letters, digits, spaces,
+ dashes or underscores, can only start and end with alphanumeric characters, and is limited to
+ 128 characters.
+ :type name: str
+ :param max_token_length: The maximum token length. Default is 255. Tokens longer than the
+ maximum length are split.
+ :type max_token_length: int
+ """
+
+ _validation = {
+ 'odata_type': {'required': True},
+ 'name': {'required': True},
+ }
+
+ _attribute_map = {
+ 'odata_type': {'key': '@odata\\.type', 'type': 'str'},
+ 'name': {'key': 'name', 'type': 'str'},
+ 'max_token_length': {'key': 'maxTokenLength', 'type': 'int'},
+ }
+
+ def __init__(
+ self,
+ *,
+ name: str,
+ max_token_length: Optional[int] = 255,
+ **kwargs
+ ):
+ super(LuceneStandardTokenizer, self).__init__(name=name, **kwargs)
+ self.odata_type: str = '#Microsoft.Azure.Search.StandardTokenizer'
+ self.max_token_length = max_token_length
+
+
+class LuceneStandardTokenizerV2(LexicalTokenizer):
+ """Breaks text following the Unicode Text Segmentation rules. This tokenizer is implemented using Apache Lucene.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :param odata_type: Required. Identifies the concrete type of the tokenizer. Constant filled by
+ server.
+ :type odata_type: str
+ :param name: Required. The name of the tokenizer. It must only contain letters, digits, spaces,
+ dashes or underscores, can only start and end with alphanumeric characters, and is limited to
+ 128 characters.
+ :type name: str
+ :param max_token_length: The maximum token length. Default is 255. Tokens longer than the
+ maximum length are split. The maximum token length that can be used is 300 characters.
+ :type max_token_length: int + """ + + _validation = { + 'odata_type': {'required': True}, + 'name': {'required': True}, + 'max_token_length': {'maximum': 300}, + } + + _attribute_map = { + 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'max_token_length': {'key': 'maxTokenLength', 'type': 'int'}, + } + + def __init__( + self, + *, + name: str, + max_token_length: Optional[int] = 255, + **kwargs + ): + super(LuceneStandardTokenizerV2, self).__init__(name=name, **kwargs) + self.odata_type: str = '#Microsoft.Azure.Search.StandardTokenizerV2' + self.max_token_length = max_token_length + + class MagnitudeScoringFunction(ScoringFunction): """Defines a function that boosts scores based on the magnitude of a numeric field. @@ -3132,11 +2669,11 @@ class MagnitudeScoringFunction(ScoringFunction): 1.0. :type boost: float :param interpolation: A value indicating how boosting will be interpolated across document - scores; defaults to "Linear". Possible values include: 'linear', 'constant', 'quadratic', - 'logarithmic'. - :type interpolation: str or ~search_service_client.models.ScoringFunctionInterpolation + scores; defaults to "Linear". Possible values include: "linear", "constant", "quadratic", + "logarithmic". + :type interpolation: str or ~azure.search.documents.models.ScoringFunctionInterpolation :param parameters: Required. Parameter values for the magnitude scoring function. - :type parameters: ~search_service_client.models.MagnitudeScoringParameters + :type parameters: ~azure.search.documents.models.MagnitudeScoringParameters """ _validation = { @@ -3164,7 +2701,7 @@ def __init__( **kwargs ): super(MagnitudeScoringFunction, self).__init__(field_name=field_name, boost=boost, interpolation=interpolation, **kwargs) - self.type = 'magnitude' + self.type: str = 'magnitude' self.parameters = parameters @@ -3244,11 +2781,11 @@ def __init__( **kwargs ): super(MappingCharFilter, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.MappingCharFilter' + self.odata_type: str = '#Microsoft.Azure.Search.MappingCharFilter' self.mappings = mappings -class MergeSkill(Skill): +class MergeSkill(SearchIndexerSkill): """A skill for merging two or more strings into a single unified string, with an optional user-defined delimiter separating each component part. All required parameters must be populated in order to send to Azure. @@ -3268,10 +2805,10 @@ class MergeSkill(Skill): :type context: str :param inputs: Required. Inputs of the skills could be a column in the source data set, or the output of an upstream skill. - :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :type inputs: list[~azure.search.documents.models.InputFieldMappingEntry] :param outputs: Required. The output of a skill is either a field in a search index, or a value that can be consumed as an input by another skill. - :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + :type outputs: list[~azure.search.documents.models.OutputFieldMappingEntry] :param insert_pre_tag: The tag indicates the start of the merged text. By default, the tag is an empty space. 
:type insert_pre_tag: str @@ -3310,12 +2847,12 @@ def __init__( **kwargs ): super(MergeSkill, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) - self.odata_type = '#Microsoft.Skills.Text.MergeSkill' + self.odata_type: str = '#Microsoft.Skills.Text.MergeSkill' self.insert_pre_tag = insert_pre_tag self.insert_post_tag = insert_post_tag -class MicrosoftLanguageStemmingTokenizer(Tokenizer): +class MicrosoftLanguageStemmingTokenizer(LexicalTokenizer): """Divides text using language-specific rules and reduces words to their base forms. All required parameters must be populated in order to send to Azure. @@ -3336,13 +2873,13 @@ class MicrosoftLanguageStemmingTokenizer(Tokenizer): as the search tokenizer, set to false if used as the indexing tokenizer. Default is false. :type is_search_tokenizer: bool :param language: The language to use. The default is English. Possible values include: - 'arabic', 'bangla', 'bulgarian', 'catalan', 'croatian', 'czech', 'danish', 'dutch', 'english', - 'estonian', 'finnish', 'french', 'german', 'greek', 'gujarati', 'hebrew', 'hindi', 'hungarian', - 'icelandic', 'indonesian', 'italian', 'kannada', 'latvian', 'lithuanian', 'malay', 'malayalam', - 'marathi', 'norwegianBokmaal', 'polish', 'portuguese', 'portugueseBrazilian', 'punjabi', - 'romanian', 'russian', 'serbianCyrillic', 'serbianLatin', 'slovak', 'slovenian', 'spanish', - 'swedish', 'tamil', 'telugu', 'turkish', 'ukrainian', 'urdu'. - :type language: str or ~search_service_client.models.MicrosoftStemmingTokenizerLanguage + "arabic", "bangla", "bulgarian", "catalan", "croatian", "czech", "danish", "dutch", "english", + "estonian", "finnish", "french", "german", "greek", "gujarati", "hebrew", "hindi", "hungarian", + "icelandic", "indonesian", "italian", "kannada", "latvian", "lithuanian", "malay", "malayalam", + "marathi", "norwegianBokmaal", "polish", "portuguese", "portugueseBrazilian", "punjabi", + "romanian", "russian", "serbianCyrillic", "serbianLatin", "slovak", "slovenian", "spanish", + "swedish", "tamil", "telugu", "turkish", "ukrainian", "urdu". + :type language: str or ~azure.search.documents.models.MicrosoftStemmingTokenizerLanguage """ _validation = { @@ -3369,13 +2906,13 @@ def __init__( **kwargs ): super(MicrosoftLanguageStemmingTokenizer, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.MicrosoftLanguageStemmingTokenizer' + self.odata_type: str = '#Microsoft.Azure.Search.MicrosoftLanguageStemmingTokenizer' self.max_token_length = max_token_length self.is_search_tokenizer = is_search_tokenizer self.language = language -class MicrosoftLanguageTokenizer(Tokenizer): +class MicrosoftLanguageTokenizer(LexicalTokenizer): """Divides text using language-specific rules. All required parameters must be populated in order to send to Azure. @@ -3396,13 +2933,13 @@ class MicrosoftLanguageTokenizer(Tokenizer): as the search tokenizer, set to false if used as the indexing tokenizer. Default is false. :type is_search_tokenizer: bool :param language: The language to use. The default is English. 
Possible values include: - 'bangla', 'bulgarian', 'catalan', 'chineseSimplified', 'chineseTraditional', 'croatian', - 'czech', 'danish', 'dutch', 'english', 'french', 'german', 'greek', 'gujarati', 'hindi', - 'icelandic', 'indonesian', 'italian', 'japanese', 'kannada', 'korean', 'malay', 'malayalam', - 'marathi', 'norwegianBokmaal', 'polish', 'portuguese', 'portugueseBrazilian', 'punjabi', - 'romanian', 'russian', 'serbianCyrillic', 'serbianLatin', 'slovenian', 'spanish', 'swedish', - 'tamil', 'telugu', 'thai', 'ukrainian', 'urdu', 'vietnamese'. - :type language: str or ~search_service_client.models.MicrosoftTokenizerLanguage + "bangla", "bulgarian", "catalan", "chineseSimplified", "chineseTraditional", "croatian", + "czech", "danish", "dutch", "english", "french", "german", "greek", "gujarati", "hindi", + "icelandic", "indonesian", "italian", "japanese", "kannada", "korean", "malay", "malayalam", + "marathi", "norwegianBokmaal", "polish", "portuguese", "portugueseBrazilian", "punjabi", + "romanian", "russian", "serbianCyrillic", "serbianLatin", "slovenian", "spanish", "swedish", + "tamil", "telugu", "thai", "ukrainian", "urdu", "vietnamese". + :type language: str or ~azure.search.documents.models.MicrosoftTokenizerLanguage """ _validation = { @@ -3429,7 +2966,7 @@ def __init__( **kwargs ): super(MicrosoftLanguageTokenizer, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.MicrosoftLanguageTokenizer' + self.odata_type: str = '#Microsoft.Azure.Search.MicrosoftLanguageTokenizer' self.max_token_length = max_token_length self.is_search_tokenizer = is_search_tokenizer self.language = language @@ -3475,7 +3012,7 @@ def __init__( **kwargs ): super(NGramTokenFilter, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.NGramTokenFilter' + self.odata_type: str = '#Microsoft.Azure.Search.NGramTokenFilter' self.min_gram = min_gram self.max_gram = max_gram @@ -3522,12 +3059,12 @@ def __init__( **kwargs ): super(NGramTokenFilterV2, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.NGramTokenFilterV2' + self.odata_type: str = '#Microsoft.Azure.Search.NGramTokenFilterV2' self.min_gram = min_gram self.max_gram = max_gram -class NGramTokenizer(Tokenizer): +class NGramTokenizer(LexicalTokenizer): """Tokenizes the input into n-grams of the given size(s). This tokenizer is implemented using Apache Lucene. All required parameters must be populated in order to send to Azure. @@ -3545,7 +3082,7 @@ class NGramTokenizer(Tokenizer): :param max_gram: The maximum n-gram length. Default is 2. Maximum is 300. :type max_gram: int :param token_chars: Character classes to keep in the tokens. - :type token_chars: list[str or ~search_service_client.models.TokenCharacterKind] + :type token_chars: list[str or ~azure.search.documents.models.TokenCharacterKind] """ _validation = { @@ -3573,13 +3110,13 @@ def __init__( **kwargs ): super(NGramTokenizer, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.NGramTokenizer' + self.odata_type: str = '#Microsoft.Azure.Search.NGramTokenizer' self.min_gram = min_gram self.max_gram = max_gram self.token_chars = token_chars -class OcrSkill(Skill): +class OcrSkill(SearchIndexerSkill): """A skill that extracts text from image files. All required parameters must be populated in order to send to Azure. @@ -3599,18 +3136,18 @@ class OcrSkill(Skill): :type context: str :param inputs: Required. 
Inputs of the skills could be a column in the source data set, or the output of an upstream skill. - :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :type inputs: list[~azure.search.documents.models.InputFieldMappingEntry] :param outputs: Required. The output of a skill is either a field in a search index, or a value that can be consumed as an input by another skill. - :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + :type outputs: list[~azure.search.documents.models.OutputFieldMappingEntry] :param text_extraction_algorithm: A value indicating which algorithm to use for extracting - text. Default is printed. Possible values include: 'printed', 'handwritten'. - :type text_extraction_algorithm: str or ~search_service_client.models.TextExtractionAlgorithm + text. Default is printed. Possible values include: "printed", "handwritten". + :type text_extraction_algorithm: str or ~azure.search.documents.models.TextExtractionAlgorithm :param default_language_code: A value indicating which language code to use. Default is en. - Possible values include: 'zh-Hans', 'zh-Hant', 'cs', 'da', 'nl', 'en', 'fi', 'fr', 'de', 'el', - 'hu', 'it', 'ja', 'ko', 'nb', 'pl', 'pt', 'ru', 'es', 'sv', 'tr', 'ar', 'ro', 'sr-Cyrl', 'sr- - Latn', 'sk'. - :type default_language_code: str or ~search_service_client.models.OcrSkillLanguage + Possible values include: "zh-Hans", "zh-Hant", "cs", "da", "nl", "en", "fi", "fr", "de", "el", + "hu", "it", "ja", "ko", "nb", "pl", "pt", "ru", "es", "sv", "tr", "ar", "ro", "sr-Cyrl", "sr- + Latn", "sk". + :type default_language_code: str or ~azure.search.documents.models.OcrSkillLanguage :param should_detect_orientation: A value indicating to turn orientation detection on or not. Default is false. :type should_detect_orientation: bool @@ -3648,7 +3185,7 @@ def __init__( **kwargs ): super(OcrSkill, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) - self.odata_type = '#Microsoft.Skills.Vision.OcrSkill' + self.odata_type: str = '#Microsoft.Skills.Vision.OcrSkill' self.text_extraction_algorithm = text_extraction_algorithm self.default_language_code = default_language_code self.should_detect_orientation = should_detect_orientation @@ -3686,7 +3223,7 @@ def __init__( self.target_name = target_name -class PathHierarchyTokenizerV2(Tokenizer): +class PathHierarchyTokenizerV2(LexicalTokenizer): """Tokenizer for path-like hierarchies. This tokenizer is implemented using Apache Lucene. All required parameters must be populated in order to send to Azure. @@ -3739,7 +3276,7 @@ def __init__( **kwargs ): super(PathHierarchyTokenizerV2, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.PathHierarchyTokenizerV2' + self.odata_type: str = '#Microsoft.Azure.Search.PathHierarchyTokenizerV2' self.delimiter = delimiter self.replacement = replacement self.max_token_length = max_token_length @@ -3747,7 +3284,7 @@ def __init__( self.number_of_tokens_to_skip = number_of_tokens_to_skip -class PatternAnalyzer(Analyzer): +class PatternAnalyzer(LexicalAnalyzer): """Flexibly separates text into terms via a regular expression pattern. This analyzer is implemented using Apache Lucene. All required parameters must be populated in order to send to Azure. @@ -3763,11 +3300,11 @@ class PatternAnalyzer(Analyzer): true. :type lower_case_terms: bool :param pattern: A regular expression pattern to match token separators. 
Default is an - expression that matches one or more whitespace characters. + expression that matches one or more non-word characters. :type pattern: str - :param flags: Regular expression flags. Possible values include: 'CANON_EQ', - 'CASE_INSENSITIVE', 'COMMENTS', 'DOTALL', 'LITERAL', 'MULTILINE', 'UNICODE_CASE', 'UNIX_LINES'. - :type flags: str or ~search_service_client.models.RegexFlags + :param flags: Regular expression flags. Possible values include: "CANON_EQ", + "CASE_INSENSITIVE", "COMMENTS", "DOTALL", "LITERAL", "MULTILINE", "UNICODE_CASE", "UNIX_LINES". + :type flags: str or ~azure.search.documents.models.RegexFlags :param stopwords: A list of stopwords. :type stopwords: list[str] """ @@ -3797,7 +3334,7 @@ def __init__( **kwargs ): super(PatternAnalyzer, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.PatternAnalyzer' + self.odata_type: str = '#Microsoft.Azure.Search.PatternAnalyzer' self.lower_case_terms = lower_case_terms self.pattern = pattern self.flags = flags @@ -3845,7 +3382,7 @@ def __init__( **kwargs ): super(PatternCaptureTokenFilter, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.PatternCaptureTokenFilter' + self.odata_type: str = '#Microsoft.Azure.Search.PatternCaptureTokenFilter' self.patterns = patterns self.preserve_original = preserve_original @@ -3891,7 +3428,7 @@ def __init__( **kwargs ): super(PatternReplaceCharFilter, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.PatternReplaceCharFilter' + self.odata_type: str = '#Microsoft.Azure.Search.PatternReplaceCharFilter' self.pattern = pattern self.replacement = replacement @@ -3937,12 +3474,12 @@ def __init__( **kwargs ): super(PatternReplaceTokenFilter, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.PatternReplaceTokenFilter' + self.odata_type: str = '#Microsoft.Azure.Search.PatternReplaceTokenFilter' self.pattern = pattern self.replacement = replacement -class PatternTokenizer(Tokenizer): +class PatternTokenizer(LexicalTokenizer): """Tokenizer that uses regex pattern matching to construct distinct tokens. This tokenizer is implemented using Apache Lucene. All required parameters must be populated in order to send to Azure. @@ -3955,11 +3492,11 @@ class PatternTokenizer(Tokenizer): 128 characters. :type name: str :param pattern: A regular expression pattern to match token separators. Default is an - expression that matches one or more whitespace characters. + expression that matches one or more non-word characters. :type pattern: str - :param flags: Regular expression flags. Possible values include: 'CANON_EQ', - 'CASE_INSENSITIVE', 'COMMENTS', 'DOTALL', 'LITERAL', 'MULTILINE', 'UNICODE_CASE', 'UNIX_LINES'. - :type flags: str or ~search_service_client.models.RegexFlags + :param flags: Regular expression flags. Possible values include: "CANON_EQ", + "CASE_INSENSITIVE", "COMMENTS", "DOTALL", "LITERAL", "MULTILINE", "UNICODE_CASE", "UNIX_LINES". + :type flags: str or ~azure.search.documents.models.RegexFlags :param group: The zero-based ordinal of the matching group in the regular expression pattern to extract into tokens. Use -1 if you want to use the entire pattern to split the input into tokens, irrespective of matching groups. Default is -1. 
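For reference, a minimal usage sketch of the pattern-based models above (a hedged sketch: it assumes, per the updated docstring references, that these classes are exported from azure.search.documents.models; the names and values are illustrative):

    # Analyzer that splits terms on commas; "CASE_INSENSITIVE" is one of the
    # RegexFlags values listed in the docstrings above.
    from azure.search.documents.models import PatternAnalyzer, PatternTokenizer

    comma_analyzer = PatternAnalyzer(
        name="comma-analyzer",
        lower_case_terms=True,
        pattern=r",",
        flags="CASE_INSENSITIVE",
        stopwords=["the", "and"],
    )

    # group=-1 (the default) splits the input on the pattern instead of
    # extracting a single matching group as the token.
    comma_tokenizer = PatternTokenizer(name="comma-tokenizer", pattern=r",", group=-1)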
@@ -3989,7 +3526,7 @@ def __init__( **kwargs ): super(PatternTokenizer, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.PatternTokenizer' + self.odata_type: str = '#Microsoft.Azure.Search.PatternTokenizer' self.pattern = pattern self.flags = flags self.group = group @@ -4008,9 +3545,9 @@ class PhoneticTokenFilter(TokenFilter): limited to 128 characters. :type name: str :param encoder: The phonetic encoder to use. Default is "metaphone". Possible values include: - 'metaphone', 'doubleMetaphone', 'soundex', 'refinedSoundex', 'caverphone1', 'caverphone2', - 'cologne', 'nysiis', 'koelnerPhonetik', 'haasePhonetik', 'beiderMorse'. - :type encoder: str or ~search_service_client.models.PhoneticEncoder + "metaphone", "doubleMetaphone", "soundex", "refinedSoundex", "caverphone1", "caverphone2", + "cologne", "nysiis", "koelnerPhonetik", "haasePhonetik", "beiderMorse". + :type encoder: str or ~azure.search.documents.models.PhoneticEncoder :param replace_original_tokens: A value indicating whether encoded tokens should replace original tokens. If false, encoded tokens are added as synonyms. Default is true. :type replace_original_tokens: bool @@ -4037,7 +3574,7 @@ def __init__( **kwargs ): super(PhoneticTokenFilter, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.PhoneticTokenFilter' + self.odata_type: str = '#Microsoft.Azure.Search.PhoneticTokenFilter' self.encoder = encoder self.replace_original_tokens = replace_original_tokens @@ -4104,13 +3641,13 @@ class ScoringProfile(msrest.serialization.Model): :type name: str :param text_weights: Parameters that boost scoring based on text matches in certain index fields. - :type text_weights: ~search_service_client.models.TextWeights + :type text_weights: ~azure.search.documents.models.TextWeights :param functions: The collection of functions that influence the scoring of documents. - :type functions: list[~search_service_client.models.ScoringFunction] + :type functions: list[~azure.search.documents.models.ScoringFunction] :param function_aggregation: A value indicating how the results of individual scoring functions should be combined. Defaults to "Sum". Ignored if there are no scoring functions. Possible - values include: 'sum', 'average', 'minimum', 'maximum', 'firstMatching'. - :type function_aggregation: str or ~search_service_client.models.ScoringFunctionAggregation + values include: "sum", "average", "minimum", "maximum", "firstMatching". 
+ :type function_aggregation: str or ~azure.search.documents.models.ScoringFunctionAggregation """ _validation = { @@ -4119,65 +3656,832 @@ class ScoringProfile(msrest.serialization.Model): _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, - 'text_weights': {'key': 'text', 'type': 'TextWeights'}, - 'functions': {'key': 'functions', 'type': '[ScoringFunction]'}, - 'function_aggregation': {'key': 'functionAggregation', 'type': 'str'}, + 'text_weights': {'key': 'text', 'type': 'TextWeights'}, + 'functions': {'key': 'functions', 'type': '[ScoringFunction]'}, + 'function_aggregation': {'key': 'functionAggregation', 'type': 'str'}, + } + + def __init__( + self, + *, + name: str, + text_weights: Optional["TextWeights"] = None, + functions: Optional[List["ScoringFunction"]] = None, + function_aggregation: Optional[Union[str, "ScoringFunctionAggregation"]] = None, + **kwargs + ): + super(ScoringProfile, self).__init__(**kwargs) + self.name = name + self.text_weights = text_weights + self.functions = functions + self.function_aggregation = function_aggregation + + +class SearchError(msrest.serialization.Model): + """Describes an error condition for the Azure Cognitive Search API. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar code: One of a server-defined set of error codes. + :vartype code: str + :ivar message: Required. A human-readable representation of the error. + :vartype message: str + :ivar details: An array of details about specific errors that led to this reported error. + :vartype details: list[~azure.search.documents.models.SearchError] + """ + + _validation = { + 'code': {'readonly': True}, + 'message': {'required': True, 'readonly': True}, + 'details': {'readonly': True}, + } + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + 'details': {'key': 'details', 'type': '[SearchError]'}, + } + + def __init__( + self, + **kwargs + ): + super(SearchError, self).__init__(**kwargs) + self.code = None + self.message = None + self.details = None + + +class SearchField(msrest.serialization.Model): + """Represents a field in an index definition, which describes the name, data type, and search behavior of a field. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the field, which must be unique within the fields collection + of the index or parent field. + :type name: str + :param type: Required. The data type of the field. Possible values include: "Edm.String", + "Edm.Int32", "Edm.Int64", "Edm.Double", "Edm.Boolean", "Edm.DateTimeOffset", + "Edm.GeographyPoint", "Edm.ComplexType". + :type type: str or ~azure.search.documents.models.SearchFieldDataType + :param key: A value indicating whether the field uniquely identifies documents in the index. + Exactly one top-level field in each index must be chosen as the key field and it must be of + type Edm.String. Key fields can be used to look up documents directly and update or delete + specific documents. Default is false for simple fields and null for complex fields. + :type key: bool + :param retrievable: A value indicating whether the field can be returned in a search result. + You can disable this option if you want to use a field (for example, margin) as a filter, + sorting, or scoring mechanism but do not want the field to be visible to the end user. 
This + property must be true for key fields, and it must be null for complex fields. This property can + be changed on existing fields. Enabling this property does not cause any increase in index + storage requirements. Default is true for simple fields and null for complex fields. + :type retrievable: bool + :param searchable: A value indicating whether the field is full-text searchable. This means it + will undergo analysis such as word-breaking during indexing. If you set a searchable field to a + value like "sunny day", internally it will be split into the individual tokens "sunny" and + "day". This enables full-text searches for these terms. Fields of type Edm.String or + Collection(Edm.String) are searchable by default. This property must be false for simple fields + of other non-string data types, and it must be null for complex fields. Note: searchable fields + consume extra space in your index since Azure Cognitive Search will store an additional + tokenized version of the field value for full-text searches. If you want to save space in your + index and you don't need a field to be included in searches, set searchable to false. + :type searchable: bool + :param filterable: A value indicating whether to enable the field to be referenced in $filter + queries. filterable differs from searchable in how strings are handled. Fields of type + Edm.String or Collection(Edm.String) that are filterable do not undergo word-breaking, so + comparisons are for exact matches only. For example, if you set such a field f to "sunny day", + $filter=f eq 'sunny' will find no matches, but $filter=f eq 'sunny day' will. This property + must be null for complex fields. Default is true for simple fields and null for complex fields. + :type filterable: bool + :param sortable: A value indicating whether to enable the field to be referenced in $orderby + expressions. By default Azure Cognitive Search sorts results by score, but in many experiences + users will want to sort by fields in the documents. A simple field can be sortable only if it + is single-valued (it has a single value in the scope of the parent document). Simple collection + fields cannot be sortable, since they are multi-valued. Simple sub-fields of complex + collections are also multi-valued, and therefore cannot be sortable. This is true whether it's + an immediate parent field, or an ancestor field, that's the complex collection. Complex fields + cannot be sortable and the sortable property must be null for such fields. The default for + sortable is true for single-valued simple fields, false for multi-valued simple fields, and + null for complex fields. + :type sortable: bool + :param facetable: A value indicating whether to enable the field to be referenced in facet + queries. Typically used in a presentation of search results that includes hit count by category + (for example, search for digital cameras and see hits by brand, by megapixels, by price, and so + on). This property must be null for complex fields. Fields of type Edm.GeographyPoint or + Collection(Edm.GeographyPoint) cannot be facetable. Default is true for all other simple + fields. + :type facetable: bool + :param analyzer: The name of the analyzer to use for the field. This option can be used only + with searchable fields and it can't be set together with either searchAnalyzer or + indexAnalyzer. Once the analyzer is chosen, it cannot be changed for the field. Must be null + for complex fields. 
Possible values include: "ar.microsoft", "ar.lucene", "hy.lucene", + "bn.microsoft", "eu.lucene", "bg.microsoft", "bg.lucene", "ca.microsoft", "ca.lucene", "zh- + Hans.microsoft", "zh-Hans.lucene", "zh-Hant.microsoft", "zh-Hant.lucene", "hr.microsoft", + "cs.microsoft", "cs.lucene", "da.microsoft", "da.lucene", "nl.microsoft", "nl.lucene", + "en.microsoft", "en.lucene", "et.microsoft", "fi.microsoft", "fi.lucene", "fr.microsoft", + "fr.lucene", "gl.lucene", "de.microsoft", "de.lucene", "el.microsoft", "el.lucene", + "gu.microsoft", "he.microsoft", "hi.microsoft", "hi.lucene", "hu.microsoft", "hu.lucene", + "is.microsoft", "id.microsoft", "id.lucene", "ga.lucene", "it.microsoft", "it.lucene", + "ja.microsoft", "ja.lucene", "kn.microsoft", "ko.microsoft", "ko.lucene", "lv.microsoft", + "lv.lucene", "lt.microsoft", "ml.microsoft", "ms.microsoft", "mr.microsoft", "nb.microsoft", + "no.lucene", "fa.lucene", "pl.microsoft", "pl.lucene", "pt-BR.microsoft", "pt-BR.lucene", "pt- + PT.microsoft", "pt-PT.lucene", "pa.microsoft", "ro.microsoft", "ro.lucene", "ru.microsoft", + "ru.lucene", "sr-cyrillic.microsoft", "sr-latin.microsoft", "sk.microsoft", "sl.microsoft", + "es.microsoft", "es.lucene", "sv.microsoft", "sv.lucene", "ta.microsoft", "te.microsoft", + "th.microsoft", "th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", + "vi.microsoft", "standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", + "simple", "stop", "whitespace". + :type analyzer: str or ~azure.search.documents.models.LexicalAnalyzerName + :param search_analyzer: The name of the analyzer used at search time for the field. This option + can be used only with searchable fields. It must be set together with indexAnalyzer and it + cannot be set together with the analyzer option. This property cannot be set to the name of a + language analyzer; use the analyzer property instead if you need a language analyzer. This + analyzer can be updated on an existing field. Must be null for complex fields. Possible values + include: "ar.microsoft", "ar.lucene", "hy.lucene", "bn.microsoft", "eu.lucene", "bg.microsoft", + "bg.lucene", "ca.microsoft", "ca.lucene", "zh-Hans.microsoft", "zh-Hans.lucene", "zh- + Hant.microsoft", "zh-Hant.lucene", "hr.microsoft", "cs.microsoft", "cs.lucene", "da.microsoft", + "da.lucene", "nl.microsoft", "nl.lucene", "en.microsoft", "en.lucene", "et.microsoft", + "fi.microsoft", "fi.lucene", "fr.microsoft", "fr.lucene", "gl.lucene", "de.microsoft", + "de.lucene", "el.microsoft", "el.lucene", "gu.microsoft", "he.microsoft", "hi.microsoft", + "hi.lucene", "hu.microsoft", "hu.lucene", "is.microsoft", "id.microsoft", "id.lucene", + "ga.lucene", "it.microsoft", "it.lucene", "ja.microsoft", "ja.lucene", "kn.microsoft", + "ko.microsoft", "ko.lucene", "lv.microsoft", "lv.lucene", "lt.microsoft", "ml.microsoft", + "ms.microsoft", "mr.microsoft", "nb.microsoft", "no.lucene", "fa.lucene", "pl.microsoft", + "pl.lucene", "pt-BR.microsoft", "pt-BR.lucene", "pt-PT.microsoft", "pt-PT.lucene", + "pa.microsoft", "ro.microsoft", "ro.lucene", "ru.microsoft", "ru.lucene", "sr- + cyrillic.microsoft", "sr-latin.microsoft", "sk.microsoft", "sl.microsoft", "es.microsoft", + "es.lucene", "sv.microsoft", "sv.lucene", "ta.microsoft", "te.microsoft", "th.microsoft", + "th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", "vi.microsoft", + "standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", "simple", "stop", + "whitespace". 
+ :type search_analyzer: str or ~azure.search.documents.models.LexicalAnalyzerName + :param index_analyzer: The name of the analyzer used at indexing time for the field. This + option can be used only with searchable fields. It must be set together with searchAnalyzer and + it cannot be set together with the analyzer option. This property cannot be set to the name of + a language analyzer; use the analyzer property instead if you need a language analyzer. Once + the analyzer is chosen, it cannot be changed for the field. Must be null for complex fields. + Possible values include: "ar.microsoft", "ar.lucene", "hy.lucene", "bn.microsoft", "eu.lucene", + "bg.microsoft", "bg.lucene", "ca.microsoft", "ca.lucene", "zh-Hans.microsoft", "zh- + Hans.lucene", "zh-Hant.microsoft", "zh-Hant.lucene", "hr.microsoft", "cs.microsoft", + "cs.lucene", "da.microsoft", "da.lucene", "nl.microsoft", "nl.lucene", "en.microsoft", + "en.lucene", "et.microsoft", "fi.microsoft", "fi.lucene", "fr.microsoft", "fr.lucene", + "gl.lucene", "de.microsoft", "de.lucene", "el.microsoft", "el.lucene", "gu.microsoft", + "he.microsoft", "hi.microsoft", "hi.lucene", "hu.microsoft", "hu.lucene", "is.microsoft", + "id.microsoft", "id.lucene", "ga.lucene", "it.microsoft", "it.lucene", "ja.microsoft", + "ja.lucene", "kn.microsoft", "ko.microsoft", "ko.lucene", "lv.microsoft", "lv.lucene", + "lt.microsoft", "ml.microsoft", "ms.microsoft", "mr.microsoft", "nb.microsoft", "no.lucene", + "fa.lucene", "pl.microsoft", "pl.lucene", "pt-BR.microsoft", "pt-BR.lucene", "pt-PT.microsoft", + "pt-PT.lucene", "pa.microsoft", "ro.microsoft", "ro.lucene", "ru.microsoft", "ru.lucene", "sr- + cyrillic.microsoft", "sr-latin.microsoft", "sk.microsoft", "sl.microsoft", "es.microsoft", + "es.lucene", "sv.microsoft", "sv.lucene", "ta.microsoft", "te.microsoft", "th.microsoft", + "th.lucene", "tr.microsoft", "tr.lucene", "uk.microsoft", "ur.microsoft", "vi.microsoft", + "standard.lucene", "standardasciifolding.lucene", "keyword", "pattern", "simple", "stop", + "whitespace". + :type index_analyzer: str or ~azure.search.documents.models.LexicalAnalyzerName + :param synonym_maps: A list of the names of synonym maps to associate with this field. This + option can be used only with searchable fields. Currently only one synonym map per field is + supported. Assigning a synonym map to a field ensures that query terms targeting that field are + expanded at query-time using the rules in the synonym map. This attribute can be changed on + existing fields. Must be null or an empty collection for complex fields. + :type synonym_maps: list[str] + :param fields: A list of sub-fields if this is a field of type Edm.ComplexType or + Collection(Edm.ComplexType). Must be null or empty for simple fields. 
+ :type fields: list[~azure.search.documents.models.SearchField] + """ + + _validation = { + 'name': {'required': True}, + 'type': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'key': {'key': 'key', 'type': 'bool'}, + 'retrievable': {'key': 'retrievable', 'type': 'bool'}, + 'searchable': {'key': 'searchable', 'type': 'bool'}, + 'filterable': {'key': 'filterable', 'type': 'bool'}, + 'sortable': {'key': 'sortable', 'type': 'bool'}, + 'facetable': {'key': 'facetable', 'type': 'bool'}, + 'analyzer': {'key': 'analyzer', 'type': 'str'}, + 'search_analyzer': {'key': 'searchAnalyzer', 'type': 'str'}, + 'index_analyzer': {'key': 'indexAnalyzer', 'type': 'str'}, + 'synonym_maps': {'key': 'synonymMaps', 'type': '[str]'}, + 'fields': {'key': 'fields', 'type': '[SearchField]'}, + } + + def __init__( + self, + *, + name: str, + type: Union[str, "SearchFieldDataType"], + key: Optional[bool] = None, + retrievable: Optional[bool] = None, + searchable: Optional[bool] = None, + filterable: Optional[bool] = None, + sortable: Optional[bool] = None, + facetable: Optional[bool] = None, + analyzer: Optional[Union[str, "LexicalAnalyzerName"]] = None, + search_analyzer: Optional[Union[str, "LexicalAnalyzerName"]] = None, + index_analyzer: Optional[Union[str, "LexicalAnalyzerName"]] = None, + synonym_maps: Optional[List[str]] = None, + fields: Optional[List["SearchField"]] = None, + **kwargs + ): + super(SearchField, self).__init__(**kwargs) + self.name = name + self.type = type + self.key = key + self.retrievable = retrievable + self.searchable = searchable + self.filterable = filterable + self.sortable = sortable + self.facetable = facetable + self.analyzer = analyzer + self.search_analyzer = search_analyzer + self.index_analyzer = index_analyzer + self.synonym_maps = synonym_maps + self.fields = fields + + +class SearchIndex(msrest.serialization.Model): + """Represents a search index definition, which describes the fields and search behavior of an index. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the index. + :type name: str + :param fields: Required. The fields of the index. + :type fields: list[~azure.search.documents.models.SearchField] + :param scoring_profiles: The scoring profiles for the index. + :type scoring_profiles: list[~azure.search.documents.models.ScoringProfile] + :param default_scoring_profile: The name of the scoring profile to use if none is specified in + the query. If this property is not set and no scoring profile is specified in the query, then + default scoring (tf-idf) will be used. + :type default_scoring_profile: str + :param cors_options: Options to control Cross-Origin Resource Sharing (CORS) for the index. + :type cors_options: ~azure.search.documents.models.CorsOptions + :param suggesters: The suggesters for the index. + :type suggesters: list[~azure.search.documents.models.Suggester] + :param analyzers: The analyzers for the index. + :type analyzers: list[~azure.search.documents.models.LexicalAnalyzer] + :param tokenizers: The tokenizers for the index. + :type tokenizers: list[~azure.search.documents.models.LexicalTokenizer] + :param token_filters: The token filters for the index. + :type token_filters: list[~azure.search.documents.models.TokenFilter] + :param char_filters: The character filters for the index. 
+ :type char_filters: list[~azure.search.documents.models.CharFilter] + :param encryption_key: A description of an encryption key that you create in Azure Key Vault. + This key is used to provide an additional level of encryption-at-rest for your data when you + want full assurance that no one, not even Microsoft, can decrypt your data in Azure Cognitive + Search. Once you have encrypted your data, it will always remain encrypted. Azure Cognitive + Search will ignore attempts to set this property to null. You can change this property as + needed if you want to rotate your encryption key; Your data will be unaffected. Encryption with + customer-managed keys is not available for free search services, and is only available for paid + services created on or after January 1, 2019. + :type encryption_key: ~azure.search.documents.models.SearchResourceEncryptionKey + :param similarity: The type of similarity algorithm to be used when scoring and ranking the + documents matching a search query. The similarity algorithm can only be defined at index + creation time and cannot be modified on existing indexes. If null, the ClassicSimilarity + algorithm is used. + :type similarity: ~azure.search.documents.models.Similarity + :param e_tag: The ETag of the index. + :type e_tag: str + """ + + _validation = { + 'name': {'required': True}, + 'fields': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'fields': {'key': 'fields', 'type': '[SearchField]'}, + 'scoring_profiles': {'key': 'scoringProfiles', 'type': '[ScoringProfile]'}, + 'default_scoring_profile': {'key': 'defaultScoringProfile', 'type': 'str'}, + 'cors_options': {'key': 'corsOptions', 'type': 'CorsOptions'}, + 'suggesters': {'key': 'suggesters', 'type': '[Suggester]'}, + 'analyzers': {'key': 'analyzers', 'type': '[LexicalAnalyzer]'}, + 'tokenizers': {'key': 'tokenizers', 'type': '[LexicalTokenizer]'}, + 'token_filters': {'key': 'tokenFilters', 'type': '[TokenFilter]'}, + 'char_filters': {'key': 'charFilters', 'type': '[CharFilter]'}, + 'encryption_key': {'key': 'encryptionKey', 'type': 'SearchResourceEncryptionKey'}, + 'similarity': {'key': 'similarity', 'type': 'Similarity'}, + 'e_tag': {'key': '@odata\\.etag', 'type': 'str'}, + } + + def __init__( + self, + *, + name: str, + fields: List["SearchField"], + scoring_profiles: Optional[List["ScoringProfile"]] = None, + default_scoring_profile: Optional[str] = None, + cors_options: Optional["CorsOptions"] = None, + suggesters: Optional[List["Suggester"]] = None, + analyzers: Optional[List["LexicalAnalyzer"]] = None, + tokenizers: Optional[List["LexicalTokenizer"]] = None, + token_filters: Optional[List["TokenFilter"]] = None, + char_filters: Optional[List["CharFilter"]] = None, + encryption_key: Optional["SearchResourceEncryptionKey"] = None, + similarity: Optional["Similarity"] = None, + e_tag: Optional[str] = None, + **kwargs + ): + super(SearchIndex, self).__init__(**kwargs) + self.name = name + self.fields = fields + self.scoring_profiles = scoring_profiles + self.default_scoring_profile = default_scoring_profile + self.cors_options = cors_options + self.suggesters = suggesters + self.analyzers = analyzers + self.tokenizers = tokenizers + self.token_filters = token_filters + self.char_filters = char_filters + self.encryption_key = encryption_key + self.similarity = similarity + self.e_tag = e_tag + + +class SearchIndexer(msrest.serialization.Model): + """Represents an indexer. + + All required parameters must be populated in order to send to Azure. 
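For reference, a minimal sketch of assembling an index from the models defined above (a hedged sketch: the import path is assumed from the updated docstring references, and the field names and analyzer choice are illustrative):

    from azure.search.documents.models import (
        LengthTokenFilter,
        SearchField,
        SearchIndex,
    )

    # The renamed keyword arguments min_length/max_length still serialize to
    # the REST keys 'min' and 'max' via the attribute map shown earlier.
    short_words = LengthTokenFilter(name="short-words", min_length=2, max_length=50)

    index = SearchIndex(
        name="hotels",
        fields=[
            SearchField(name="id", type="Edm.String", key=True),
            SearchField(name="description", type="Edm.String", searchable=True,
                        analyzer="en.lucene"),
        ],
        token_filters=[short_words],
    )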
+ + :param name: Required. The name of the indexer. + :type name: str + :param description: The description of the indexer. + :type description: str + :param data_source_name: Required. The name of the datasource from which this indexer reads + data. + :type data_source_name: str + :param skillset_name: The name of the skillset executing with this indexer. + :type skillset_name: str + :param target_index_name: Required. The name of the index to which this indexer writes data. + :type target_index_name: str + :param schedule: The schedule for this indexer. + :type schedule: ~azure.search.documents.models.IndexingSchedule + :param parameters: Parameters for indexer execution. + :type parameters: ~azure.search.documents.models.IndexingParameters + :param field_mappings: Defines mappings between fields in the data source and corresponding + target fields in the index. + :type field_mappings: list[~azure.search.documents.models.FieldMapping] + :param output_field_mappings: Output field mappings are applied after enrichment and + immediately before indexing. + :type output_field_mappings: list[~azure.search.documents.models.FieldMapping] + :param is_disabled: A value indicating whether the indexer is disabled. Default is false. + :type is_disabled: bool + :param e_tag: The ETag of the indexer. + :type e_tag: str + """ + + _validation = { + 'name': {'required': True}, + 'data_source_name': {'required': True}, + 'target_index_name': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'description': {'key': 'description', 'type': 'str'}, + 'data_source_name': {'key': 'dataSourceName', 'type': 'str'}, + 'skillset_name': {'key': 'skillsetName', 'type': 'str'}, + 'target_index_name': {'key': 'targetIndexName', 'type': 'str'}, + 'schedule': {'key': 'schedule', 'type': 'IndexingSchedule'}, + 'parameters': {'key': 'parameters', 'type': 'IndexingParameters'}, + 'field_mappings': {'key': 'fieldMappings', 'type': '[FieldMapping]'}, + 'output_field_mappings': {'key': 'outputFieldMappings', 'type': '[FieldMapping]'}, + 'is_disabled': {'key': 'disabled', 'type': 'bool'}, + 'e_tag': {'key': '@odata\\.etag', 'type': 'str'}, + } + + def __init__( + self, + *, + name: str, + data_source_name: str, + target_index_name: str, + description: Optional[str] = None, + skillset_name: Optional[str] = None, + schedule: Optional["IndexingSchedule"] = None, + parameters: Optional["IndexingParameters"] = None, + field_mappings: Optional[List["FieldMapping"]] = None, + output_field_mappings: Optional[List["FieldMapping"]] = None, + is_disabled: Optional[bool] = False, + e_tag: Optional[str] = None, + **kwargs + ): + super(SearchIndexer, self).__init__(**kwargs) + self.name = name + self.description = description + self.data_source_name = data_source_name + self.skillset_name = skillset_name + self.target_index_name = target_index_name + self.schedule = schedule + self.parameters = parameters + self.field_mappings = field_mappings + self.output_field_mappings = output_field_mappings + self.is_disabled = is_disabled + self.e_tag = e_tag + + +class SearchIndexerDataContainer(msrest.serialization.Model): + """Represents information about the entity (such as Azure SQL table or CosmosDB collection) that will be indexed. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the table or view (for Azure SQL data source) or collection + (for CosmosDB data source) that will be indexed. 
+ :type name: str + :param query: A query that is applied to this data container. The syntax and meaning of this + parameter is datasource-specific. Not supported by Azure SQL datasources. + :type query: str + """ + + _validation = { + 'name': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'query': {'key': 'query', 'type': 'str'}, + } + + def __init__( + self, + *, + name: str, + query: Optional[str] = None, + **kwargs + ): + super(SearchIndexerDataContainer, self).__init__(**kwargs) + self.name = name + self.query = query + + +class SearchIndexerDataSource(msrest.serialization.Model): + """Represents a datasource definition, which can be used to configure an indexer. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the datasource. + :type name: str + :param description: The description of the datasource. + :type description: str + :param type: Required. The type of the datasource. Possible values include: "azuresql", + "cosmosdb", "azureblob", "azuretable", "mysql". + :type type: str or ~azure.search.documents.models.SearchIndexerDataSourceType + :param credentials: Required. Credentials for the datasource. + :type credentials: ~azure.search.documents.models.DataSourceCredentials + :param container: Required. The data container for the datasource. + :type container: ~azure.search.documents.models.SearchIndexerDataContainer + :param data_change_detection_policy: The data change detection policy for the datasource. + :type data_change_detection_policy: ~azure.search.documents.models.DataChangeDetectionPolicy + :param data_deletion_detection_policy: The data deletion detection policy for the datasource. + :type data_deletion_detection_policy: + ~azure.search.documents.models.DataDeletionDetectionPolicy + :param e_tag: The ETag of the data source. + :type e_tag: str + """ + + _validation = { + 'name': {'required': True}, + 'type': {'required': True}, + 'credentials': {'required': True}, + 'container': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'description': {'key': 'description', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'credentials': {'key': 'credentials', 'type': 'DataSourceCredentials'}, + 'container': {'key': 'container', 'type': 'SearchIndexerDataContainer'}, + 'data_change_detection_policy': {'key': 'dataChangeDetectionPolicy', 'type': 'DataChangeDetectionPolicy'}, + 'data_deletion_detection_policy': {'key': 'dataDeletionDetectionPolicy', 'type': 'DataDeletionDetectionPolicy'}, + 'e_tag': {'key': '@odata\\.etag', 'type': 'str'}, + } + + def __init__( + self, + *, + name: str, + type: Union[str, "SearchIndexerDataSourceType"], + credentials: "DataSourceCredentials", + container: "SearchIndexerDataContainer", + description: Optional[str] = None, + data_change_detection_policy: Optional["DataChangeDetectionPolicy"] = None, + data_deletion_detection_policy: Optional["DataDeletionDetectionPolicy"] = None, + e_tag: Optional[str] = None, + **kwargs + ): + super(SearchIndexerDataSource, self).__init__(**kwargs) + self.name = name + self.description = description + self.type = type + self.credentials = credentials + self.container = container + self.data_change_detection_policy = data_change_detection_policy + self.data_deletion_detection_policy = data_deletion_detection_policy + self.e_tag = e_tag + + +class SearchIndexerError(msrest.serialization.Model): + """Represents an item- or document-level indexing error. 
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ All required parameters must be populated in order to send to Azure.
+
+ :ivar key: The key of the item for which indexing failed.
+ :vartype key: str
+ :ivar error_message: Required. The message describing the error that occurred while processing
+ the item.
+ :vartype error_message: str
+ :ivar status_code: Required. The status code indicating why the indexing operation failed.
+ Possible values include: 400 for a malformed input document, 404 for document not found, 409
+ for a version conflict, 422 when the index is temporarily unavailable, or 503 when the
+ service is too busy.
+ :vartype status_code: int
+ :ivar name: The name of the source at which the error originated. For example, this could refer
+ to a particular skill in the attached skillset. This may not always be available.
+ :vartype name: str
+ :ivar details: Additional, verbose details about the error to assist in debugging the indexer.
+ This may not always be available.
+ :vartype details: str
+ :ivar documentation_link: A link to a troubleshooting guide for these classes of errors. This
+ may not always be available.
+ :vartype documentation_link: str
+ """
+
+ _validation = {
+ 'key': {'readonly': True},
+ 'error_message': {'required': True, 'readonly': True},
+ 'status_code': {'required': True, 'readonly': True},
+ 'name': {'readonly': True},
+ 'details': {'readonly': True},
+ 'documentation_link': {'readonly': True},
+ }
+
+ _attribute_map = {
+ 'key': {'key': 'key', 'type': 'str'},
+ 'error_message': {'key': 'errorMessage', 'type': 'str'},
+ 'status_code': {'key': 'statusCode', 'type': 'int'},
+ 'name': {'key': 'name', 'type': 'str'},
+ 'details': {'key': 'details', 'type': 'str'},
+ 'documentation_link': {'key': 'documentationLink', 'type': 'str'},
+ }
+
+ def __init__(
+ self,
+ **kwargs
+ ):
+ super(SearchIndexerError, self).__init__(**kwargs)
+ self.key = None
+ self.error_message = None
+ self.status_code = None
+ self.name = None
+ self.details = None
+ self.documentation_link = None
+
+
+class SearchIndexerLimits(msrest.serialization.Model):
+ """SearchIndexerLimits.
+
+ Variables are only populated by the server, and will be ignored when sending a request.
+
+ :ivar max_run_time: The maximum duration that the indexer is permitted to run for one
+ execution.
+ :vartype max_run_time: ~datetime.timedelta
+ :ivar max_document_extraction_size: The maximum size of a document, in bytes, which will be
+ considered valid for indexing.
+ :vartype max_document_extraction_size: long
+ :ivar max_document_content_characters_to_extract: The maximum number of characters that will be
+ extracted from a document picked up for indexing.
+ :vartype max_document_content_characters_to_extract: long + """ + + _validation = { + 'max_run_time': {'readonly': True}, + 'max_document_extraction_size': {'readonly': True}, + 'max_document_content_characters_to_extract': {'readonly': True}, + } + + _attribute_map = { + 'max_run_time': {'key': 'maxRunTime', 'type': 'duration'}, + 'max_document_extraction_size': {'key': 'maxDocumentExtractionSize', 'type': 'long'}, + 'max_document_content_characters_to_extract': {'key': 'maxDocumentContentCharactersToExtract', 'type': 'long'}, + } + + def __init__( + self, + **kwargs + ): + super(SearchIndexerLimits, self).__init__(**kwargs) + self.max_run_time = None + self.max_document_extraction_size = None + self.max_document_content_characters_to_extract = None + + +class SearchIndexerSkillset(msrest.serialization.Model): + """A list of skills. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the skillset. + :type name: str + :param description: Required. The description of the skillset. + :type description: str + :param skills: Required. A list of skills in the skillset. + :type skills: list[~azure.search.documents.models.SearchIndexerSkill] + :param cognitive_services_account: Details about cognitive services to be used when running + skills. + :type cognitive_services_account: ~azure.search.documents.models.CognitiveServicesAccount + :param e_tag: The ETag of the skillset. + :type e_tag: str + """ + + _validation = { + 'name': {'required': True}, + 'description': {'required': True}, + 'skills': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'description': {'key': 'description', 'type': 'str'}, + 'skills': {'key': 'skills', 'type': '[SearchIndexerSkill]'}, + 'cognitive_services_account': {'key': 'cognitiveServices', 'type': 'CognitiveServicesAccount'}, + 'e_tag': {'key': '@odata\\.etag', 'type': 'str'}, + } + + def __init__( + self, + *, + name: str, + description: str, + skills: List["SearchIndexerSkill"], + cognitive_services_account: Optional["CognitiveServicesAccount"] = None, + e_tag: Optional[str] = None, + **kwargs + ): + super(SearchIndexerSkillset, self).__init__(**kwargs) + self.name = name + self.description = description + self.skills = skills + self.cognitive_services_account = cognitive_services_account + self.e_tag = e_tag + + +class SearchIndexerStatus(msrest.serialization.Model): + """Represents the current status and execution history of an indexer. + + Variables are only populated by the server, and will be ignored when sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar status: Required. Overall indexer status. Possible values include: "unknown", "error", + "running". + :vartype status: str or ~azure.search.documents.models.IndexerStatus + :ivar last_result: The result of the most recent or an in-progress indexer execution. + :vartype last_result: ~azure.search.documents.models.IndexerExecutionResult + :ivar execution_history: Required. History of the recent indexer executions, sorted in reverse + chronological order. + :vartype execution_history: list[~azure.search.documents.models.IndexerExecutionResult] + :ivar limits: Required. The execution limits for the indexer. 
+    :vartype limits: ~azure.search.documents.models.SearchIndexerLimits
+    """
+
+    _validation = {
+        'status': {'required': True, 'readonly': True},
+        'last_result': {'readonly': True},
+        'execution_history': {'required': True, 'readonly': True},
+        'limits': {'required': True, 'readonly': True},
+    }
+
+    _attribute_map = {
+        'status': {'key': 'status', 'type': 'str'},
+        'last_result': {'key': 'lastResult', 'type': 'IndexerExecutionResult'},
+        'execution_history': {'key': 'executionHistory', 'type': '[IndexerExecutionResult]'},
+        'limits': {'key': 'limits', 'type': 'SearchIndexerLimits'},
+    }
+
+    def __init__(
+        self,
+        **kwargs
+    ):
+        super(SearchIndexerStatus, self).__init__(**kwargs)
+        self.status = None
+        self.last_result = None
+        self.execution_history = None
+        self.limits = None
+
+
+class SearchIndexerWarning(msrest.serialization.Model):
+    """Represents an item-level warning.
+
+    Variables are only populated by the server, and will be ignored when sending a request.
+
+    All required parameters must be populated in order to send to Azure.
+
+    :ivar key: The key of the item which generated a warning.
+    :vartype key: str
+    :ivar message: Required. The message describing the warning that occurred while processing the
+     item.
+    :vartype message: str
+    :ivar name: The name of the source at which the warning originated. For example, this could
+     refer to a particular skill in the attached skillset. This may not always be available.
+    :vartype name: str
+    :ivar details: Additional, verbose details about the warning to assist in debugging the
+     indexer. This may not always be available.
+    :vartype details: str
+    :ivar documentation_link: A link to a troubleshooting guide for these classes of warnings.
+     This may not always be available.
+    :vartype documentation_link: str
+    """
+
+    _validation = {
+        'key': {'readonly': True},
+        'message': {'required': True, 'readonly': True},
+        'name': {'readonly': True},
+        'details': {'readonly': True},
+        'documentation_link': {'readonly': True},
+    }
+
+    _attribute_map = {
+        'key': {'key': 'key', 'type': 'str'},
+        'message': {'key': 'message', 'type': 'str'},
+        'name': {'key': 'name', 'type': 'str'},
+        'details': {'key': 'details', 'type': 'str'},
+        'documentation_link': {'key': 'documentationLink', 'type': 'str'},
     }

     def __init__(
         self,
-        *,
-        name: str,
-        text_weights: Optional["TextWeights"] = None,
-        functions: Optional[List["ScoringFunction"]] = None,
-        function_aggregation: Optional[Union[str, "ScoringFunctionAggregation"]] = None,
         **kwargs
     ):
-        super(ScoringProfile, self).__init__(**kwargs)
-        self.name = name
-        self.text_weights = text_weights
-        self.functions = functions
-        self.function_aggregation = function_aggregation
-
+        super(SearchIndexerWarning, self).__init__(**kwargs)
+        self.key = None
+        self.message = None
+        self.name = None
+        self.details = None
+        self.documentation_link = None

-class SearchError(msrest.serialization.Model):
-    """Describes an error condition for the Azure Cognitive Search API.

-    Variables are only populated by the server, and will be ignored when sending a request.
+class SearchResourceEncryptionKey(msrest.serialization.Model):
+    """A customer-managed encryption key in Azure Key Vault. Keys that you create and manage can be used to encrypt or decrypt data-at-rest in Azure Cognitive Search, such as indexes and synonym maps.

     All required parameters must be populated in order to send to Azure.

-    :ivar code: One of a server-defined set of error codes.
-    :vartype code: str
-    :ivar message: Required.
A human-readable representation of the error. - :vartype message: str - :ivar details: An array of details about specific errors that led to this reported error. - :vartype details: list[~search_service_client.models.SearchError] + :param key_name: Required. The name of your Azure Key Vault key to be used to encrypt your data + at rest. + :type key_name: str + :param key_version: Required. The version of your Azure Key Vault key to be used to encrypt + your data at rest. + :type key_version: str + :param vault_uri: Required. The URI of your Azure Key Vault, also referred to as DNS name, that + contains the key to be used to encrypt your data at rest. An example URI might be https://my- + keyvault-name.vault.azure.net. + :type vault_uri: str + :param access_credentials: Optional Azure Active Directory credentials used for accessing your + Azure Key Vault. Not required if using managed identity instead. + :type access_credentials: + ~azure.search.documents.models.AzureActiveDirectoryApplicationCredentials """ _validation = { - 'code': {'readonly': True}, - 'message': {'required': True, 'readonly': True}, - 'details': {'readonly': True}, + 'key_name': {'required': True}, + 'key_version': {'required': True}, + 'vault_uri': {'required': True}, } _attribute_map = { - 'code': {'key': 'code', 'type': 'str'}, - 'message': {'key': 'message', 'type': 'str'}, - 'details': {'key': 'details', 'type': '[SearchError]'}, + 'key_name': {'key': 'keyVaultKeyName', 'type': 'str'}, + 'key_version': {'key': 'keyVaultKeyVersion', 'type': 'str'}, + 'vault_uri': {'key': 'keyVaultUri', 'type': 'str'}, + 'access_credentials': {'key': 'accessCredentials', 'type': 'AzureActiveDirectoryApplicationCredentials'}, } def __init__( self, + *, + key_name: str, + key_version: str, + vault_uri: str, + access_credentials: Optional["AzureActiveDirectoryApplicationCredentials"] = None, **kwargs ): - super(SearchError, self).__init__(**kwargs) - self.code = None - self.message = None - self.details = None + super(SearchResourceEncryptionKey, self).__init__(**kwargs) + self.key_name = key_name + self.key_version = key_version + self.vault_uri = vault_uri + self.access_credentials = access_credentials -class SentimentSkill(Skill): +class SentimentSkill(SearchIndexerSkill): """Text analytics positive-negative sentiment analysis, scored as a floating point value in a range of zero to 1. All required parameters must be populated in order to send to Azure. @@ -4197,14 +4501,14 @@ class SentimentSkill(Skill): :type context: str :param inputs: Required. Inputs of the skills could be a column in the source data set, or the output of an upstream skill. - :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :type inputs: list[~azure.search.documents.models.InputFieldMappingEntry] :param outputs: Required. The output of a skill is either a field in a search index, or a value that can be consumed as an input by another skill. - :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + :type outputs: list[~azure.search.documents.models.OutputFieldMappingEntry] :param default_language_code: A value indicating which language code to use. Default is en. - Possible values include: 'da', 'nl', 'en', 'fi', 'fr', 'de', 'el', 'it', 'no', 'pl', 'pt-PT', - 'ru', 'es', 'sv', 'tr'. - :type default_language_code: str or ~search_service_client.models.SentimentSkillLanguage + Possible values include: "da", "nl", "en", "fi", "fr", "de", "el", "it", "no", "pl", "pt-PT", + "ru", "es", "sv", "tr". 
+ :type default_language_code: str or ~azure.search.documents.models.SentimentSkillLanguage """ _validation = { @@ -4235,7 +4539,7 @@ def __init__( **kwargs ): super(SentimentSkill, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) - self.odata_type = '#Microsoft.Skills.Text.SentimentSkill' + self.odata_type: str = '#Microsoft.Skills.Text.SentimentSkill' self.default_language_code = default_language_code @@ -4245,19 +4549,19 @@ class ServiceCounters(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. :param document_counter: Required. Total number of documents across all indexes in the service. - :type document_counter: ~search_service_client.models.ResourceCounter + :type document_counter: ~azure.search.documents.models.ResourceCounter :param index_counter: Required. Total number of indexes. - :type index_counter: ~search_service_client.models.ResourceCounter + :type index_counter: ~azure.search.documents.models.ResourceCounter :param indexer_counter: Required. Total number of indexers. - :type indexer_counter: ~search_service_client.models.ResourceCounter + :type indexer_counter: ~azure.search.documents.models.ResourceCounter :param data_source_counter: Required. Total number of data sources. - :type data_source_counter: ~search_service_client.models.ResourceCounter + :type data_source_counter: ~azure.search.documents.models.ResourceCounter :param storage_size_counter: Required. Total size of used storage in bytes. - :type storage_size_counter: ~search_service_client.models.ResourceCounter + :type storage_size_counter: ~azure.search.documents.models.ResourceCounter :param synonym_map_counter: Required. Total number of synonym maps. - :type synonym_map_counter: ~search_service_client.models.ResourceCounter + :type synonym_map_counter: ~azure.search.documents.models.ResourceCounter :param skillset_counter: Required. Total number of skillsets. - :type skillset_counter: ~search_service_client.models.ResourceCounter + :type skillset_counter: ~azure.search.documents.models.ResourceCounter """ _validation = { @@ -4347,9 +4651,9 @@ class ServiceStatistics(msrest.serialization.Model): All required parameters must be populated in order to send to Azure. :param counters: Required. Service level resource counters. - :type counters: ~search_service_client.models.ServiceCounters + :type counters: ~azure.search.documents.models.ServiceCounters :param limits: Required. Service level general limits. - :type limits: ~search_service_client.models.ServiceLimits + :type limits: ~azure.search.documents.models.ServiceLimits """ _validation = { @@ -4374,7 +4678,7 @@ def __init__( self.limits = limits -class ShaperSkill(Skill): +class ShaperSkill(SearchIndexerSkill): """A skill for reshaping the outputs. It creates a complex type to support composite fields (also known as multipart fields). All required parameters must be populated in order to send to Azure. @@ -4394,10 +4698,10 @@ class ShaperSkill(Skill): :type context: str :param inputs: Required. Inputs of the skills could be a column in the source data set, or the output of an upstream skill. - :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :type inputs: list[~azure.search.documents.models.InputFieldMappingEntry] :param outputs: Required. The output of a skill is either a field in a search index, or a value that can be consumed as an input by another skill. 
- :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + :type outputs: list[~azure.search.documents.models.OutputFieldMappingEntry] """ _validation = { @@ -4426,7 +4730,7 @@ def __init__( **kwargs ): super(ShaperSkill, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) - self.odata_type = '#Microsoft.Skills.Util.ShaperSkill' + self.odata_type: str = '#Microsoft.Skills.Util.ShaperSkill' class ShingleTokenFilter(TokenFilter): @@ -4492,7 +4796,7 @@ def __init__( **kwargs ): super(ShingleTokenFilter, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.ShingleTokenFilter' + self.odata_type: str = '#Microsoft.Azure.Search.ShingleTokenFilter' self.max_shingle_size = max_shingle_size self.min_shingle_size = min_shingle_size self.output_unigrams = output_unigrams @@ -4501,56 +4805,6 @@ def __init__( self.filter_token = filter_token -class Skillset(msrest.serialization.Model): - """A list of skills. - - All required parameters must be populated in order to send to Azure. - - :param name: Required. The name of the skillset. - :type name: str - :param description: Required. The description of the skillset. - :type description: str - :param skills: Required. A list of skills in the skillset. - :type skills: list[~search_service_client.models.Skill] - :param cognitive_services_account: Details about cognitive services to be used when running - skills. - :type cognitive_services_account: ~search_service_client.models.CognitiveServicesAccount - :param e_tag: The ETag of the skillset. - :type e_tag: str - """ - - _validation = { - 'name': {'required': True}, - 'description': {'required': True}, - 'skills': {'required': True}, - } - - _attribute_map = { - 'name': {'key': 'name', 'type': 'str'}, - 'description': {'key': 'description', 'type': 'str'}, - 'skills': {'key': 'skills', 'type': '[Skill]'}, - 'cognitive_services_account': {'key': 'cognitiveServices', 'type': 'CognitiveServicesAccount'}, - 'e_tag': {'key': '@odata\\.etag', 'type': 'str'}, - } - - def __init__( - self, - *, - name: str, - description: str, - skills: List["Skill"], - cognitive_services_account: Optional["CognitiveServicesAccount"] = None, - e_tag: Optional[str] = None, - **kwargs - ): - super(Skillset, self).__init__(**kwargs) - self.name = name - self.description = description - self.skills = skills - self.cognitive_services_account = cognitive_services_account - self.e_tag = e_tag - - class SnowballTokenFilter(TokenFilter): """A filter that stems words using a Snowball-generated stemmer. This token filter is implemented using Apache Lucene. @@ -4563,11 +4817,11 @@ class SnowballTokenFilter(TokenFilter): spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. :type name: str - :param language: Required. The language to use. Possible values include: 'armenian', 'basque', - 'catalan', 'danish', 'dutch', 'english', 'finnish', 'french', 'german', 'german2', 'hungarian', - 'italian', 'kp', 'lovins', 'norwegian', 'porter', 'portuguese', 'romanian', 'russian', - 'spanish', 'swedish', 'turkish'. - :type language: str or ~search_service_client.models.SnowballTokenFilterLanguage + :param language: Required. The language to use. 
Possible values include: "armenian", "basque", + "catalan", "danish", "dutch", "english", "finnish", "french", "german", "german2", "hungarian", + "italian", "kp", "lovins", "norwegian", "porter", "portuguese", "romanian", "russian", + "spanish", "swedish", "turkish". + :type language: str or ~azure.search.documents.models.SnowballTokenFilterLanguage """ _validation = { @@ -4590,7 +4844,7 @@ def __init__( **kwargs ): super(SnowballTokenFilter, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.SnowballTokenFilter' + self.odata_type: str = '#Microsoft.Azure.Search.SnowballTokenFilter' self.language = language @@ -4626,12 +4880,12 @@ def __init__( **kwargs ): super(SoftDeleteColumnDeletionDetectionPolicy, self).__init__(**kwargs) - self.odata_type = '#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy' + self.odata_type: str = '#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy' self.soft_delete_column_name = soft_delete_column_name self.soft_delete_marker_value = soft_delete_marker_value -class SplitSkill(Skill): +class SplitSkill(SearchIndexerSkill): """A skill to split a string into chunks of text. All required parameters must be populated in order to send to Azure. @@ -4651,16 +4905,16 @@ class SplitSkill(Skill): :type context: str :param inputs: Required. Inputs of the skills could be a column in the source data set, or the output of an upstream skill. - :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :type inputs: list[~azure.search.documents.models.InputFieldMappingEntry] :param outputs: Required. The output of a skill is either a field in a search index, or a value that can be consumed as an input by another skill. - :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + :type outputs: list[~azure.search.documents.models.OutputFieldMappingEntry] :param default_language_code: A value indicating which language code to use. Default is en. - Possible values include: 'da', 'de', 'en', 'es', 'fi', 'fr', 'it', 'ko', 'pt'. - :type default_language_code: str or ~search_service_client.models.SplitSkillLanguage + Possible values include: "da", "de", "en", "es", "fi", "fr", "it", "ko", "pt". + :type default_language_code: str or ~azure.search.documents.models.SplitSkillLanguage :param text_split_mode: A value indicating which split mode to perform. Possible values - include: 'pages', 'sentences'. - :type text_split_mode: str or ~search_service_client.models.TextSplitMode + include: "pages", "sentences". + :type text_split_mode: str or ~azure.search.documents.models.TextSplitMode :param maximum_page_length: The desired maximum page length. Default is 10000. :type maximum_page_length: int """ @@ -4697,7 +4951,7 @@ def __init__( **kwargs ): super(SplitSkill, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) - self.odata_type = '#Microsoft.Skills.Text.SplitSkill' + self.odata_type: str = '#Microsoft.Skills.Text.SplitSkill' self.default_language_code = default_language_code self.text_split_mode = text_split_mode self.maximum_page_length = maximum_page_length @@ -4726,134 +4980,7 @@ def __init__( **kwargs ): super(SqlIntegratedChangeTrackingPolicy, self).__init__(**kwargs) - self.odata_type = '#Microsoft.Azure.Search.SqlIntegratedChangeTrackingPolicy' - - -class StandardAnalyzer(Analyzer): - """Standard Apache Lucene analyzer; Composed of the standard tokenizer, lowercase filter and stop filter. 
- - All required parameters must be populated in order to send to Azure. - - :param odata_type: Required. Identifies the concrete type of the analyzer.Constant filled by - server. - :type odata_type: str - :param name: Required. The name of the analyzer. It must only contain letters, digits, spaces, - dashes or underscores, can only start and end with alphanumeric characters, and is limited to - 128 characters. - :type name: str - :param max_token_length: The maximum token length. Default is 255. Tokens longer than the - maximum length are split. The maximum token length that can be used is 300 characters. - :type max_token_length: int - :param stopwords: A list of stopwords. - :type stopwords: list[str] - """ - - _validation = { - 'odata_type': {'required': True}, - 'name': {'required': True}, - 'max_token_length': {'maximum': 300}, - } - - _attribute_map = { - 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'max_token_length': {'key': 'maxTokenLength', 'type': 'int'}, - 'stopwords': {'key': 'stopwords', 'type': '[str]'}, - } - - def __init__( - self, - *, - name: str, - max_token_length: Optional[int] = 255, - stopwords: Optional[List[str]] = None, - **kwargs - ): - super(StandardAnalyzer, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.StandardAnalyzer' - self.max_token_length = max_token_length - self.stopwords = stopwords - - -class StandardTokenizer(Tokenizer): - """Breaks text following the Unicode Text Segmentation rules. This tokenizer is implemented using Apache Lucene. - - All required parameters must be populated in order to send to Azure. - - :param odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by - server. - :type odata_type: str - :param name: Required. The name of the tokenizer. It must only contain letters, digits, spaces, - dashes or underscores, can only start and end with alphanumeric characters, and is limited to - 128 characters. - :type name: str - :param max_token_length: The maximum token length. Default is 255. Tokens longer than the - maximum length are split. - :type max_token_length: int - """ - - _validation = { - 'odata_type': {'required': True}, - 'name': {'required': True}, - } - - _attribute_map = { - 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'max_token_length': {'key': 'maxTokenLength', 'type': 'int'}, - } - - def __init__( - self, - *, - name: str, - max_token_length: Optional[int] = 255, - **kwargs - ): - super(StandardTokenizer, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.StandardTokenizer' - self.max_token_length = max_token_length - - -class StandardTokenizerV2(Tokenizer): - """Breaks text following the Unicode Text Segmentation rules. This tokenizer is implemented using Apache Lucene. - - All required parameters must be populated in order to send to Azure. - - :param odata_type: Required. Identifies the concrete type of the tokenizer.Constant filled by - server. - :type odata_type: str - :param name: Required. The name of the tokenizer. It must only contain letters, digits, spaces, - dashes or underscores, can only start and end with alphanumeric characters, and is limited to - 128 characters. - :type name: str - :param max_token_length: The maximum token length. Default is 255. Tokens longer than the - maximum length are split. The maximum token length that can be used is 300 characters. 
- :type max_token_length: int - """ - - _validation = { - 'odata_type': {'required': True}, - 'name': {'required': True}, - 'max_token_length': {'maximum': 300}, - } - - _attribute_map = { - 'odata_type': {'key': '@odata\\.type', 'type': 'str'}, - 'name': {'key': 'name', 'type': 'str'}, - 'max_token_length': {'key': 'maxTokenLength', 'type': 'int'}, - } - - def __init__( - self, - *, - name: str, - max_token_length: Optional[int] = 255, - **kwargs - ): - super(StandardTokenizerV2, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.StandardTokenizerV2' - self.max_token_length = max_token_length + self.odata_type: str = '#Microsoft.Azure.Search.SqlIntegratedChangeTrackingPolicy' class StemmerOverrideTokenFilter(TokenFilter): @@ -4893,7 +5020,7 @@ def __init__( **kwargs ): super(StemmerOverrideTokenFilter, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.StemmerOverrideTokenFilter' + self.odata_type: str = '#Microsoft.Azure.Search.StemmerOverrideTokenFilter' self.rules = rules @@ -4909,16 +5036,16 @@ class StemmerTokenFilter(TokenFilter): spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. :type name: str - :param language: Required. The language to use. Possible values include: 'arabic', 'armenian', - 'basque', 'brazilian', 'bulgarian', 'catalan', 'czech', 'danish', 'dutch', 'dutchKp', - 'english', 'lightEnglish', 'minimalEnglish', 'possessiveEnglish', 'porter2', 'lovins', - 'finnish', 'lightFinnish', 'french', 'lightFrench', 'minimalFrench', 'galician', - 'minimalGalician', 'german', 'german2', 'lightGerman', 'minimalGerman', 'greek', 'hindi', - 'hungarian', 'lightHungarian', 'indonesian', 'irish', 'italian', 'lightItalian', 'sorani', - 'latvian', 'norwegian', 'lightNorwegian', 'minimalNorwegian', 'lightNynorsk', 'minimalNynorsk', - 'portuguese', 'lightPortuguese', 'minimalPortuguese', 'portugueseRslp', 'romanian', 'russian', - 'lightRussian', 'spanish', 'lightSpanish', 'swedish', 'lightSwedish', 'turkish'. - :type language: str or ~search_service_client.models.StemmerTokenFilterLanguage + :param language: Required. The language to use. Possible values include: "arabic", "armenian", + "basque", "brazilian", "bulgarian", "catalan", "czech", "danish", "dutch", "dutchKp", + "english", "lightEnglish", "minimalEnglish", "possessiveEnglish", "porter2", "lovins", + "finnish", "lightFinnish", "french", "lightFrench", "minimalFrench", "galician", + "minimalGalician", "german", "german2", "lightGerman", "minimalGerman", "greek", "hindi", + "hungarian", "lightHungarian", "indonesian", "irish", "italian", "lightItalian", "sorani", + "latvian", "norwegian", "lightNorwegian", "minimalNorwegian", "lightNynorsk", "minimalNynorsk", + "portuguese", "lightPortuguese", "minimalPortuguese", "portugueseRslp", "romanian", "russian", + "lightRussian", "spanish", "lightSpanish", "swedish", "lightSwedish", "turkish". + :type language: str or ~azure.search.documents.models.StemmerTokenFilterLanguage """ _validation = { @@ -4941,11 +5068,11 @@ def __init__( **kwargs ): super(StemmerTokenFilter, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.StemmerTokenFilter' + self.odata_type: str = '#Microsoft.Azure.Search.StemmerTokenFilter' self.language = language -class StopAnalyzer(Analyzer): +class StopAnalyzer(LexicalAnalyzer): """Divides text at non-letters; Applies the lowercase and stopword token filters. This analyzer is implemented using Apache Lucene. 
All required parameters must be populated in order to send to Azure. @@ -4980,7 +5107,7 @@ def __init__( **kwargs ): super(StopAnalyzer, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.StopAnalyzer' + self.odata_type: str = '#Microsoft.Azure.Search.StopAnalyzer' self.stopwords = stopwords @@ -5000,12 +5127,12 @@ class StopwordsTokenFilter(TokenFilter): both be set. :type stopwords: list[str] :param stopwords_list: A predefined list of stopwords to use. This property and the stopwords - property cannot both be set. Default is English. Possible values include: 'arabic', 'armenian', - 'basque', 'brazilian', 'bulgarian', 'catalan', 'czech', 'danish', 'dutch', 'english', - 'finnish', 'french', 'galician', 'german', 'greek', 'hindi', 'hungarian', 'indonesian', - 'irish', 'italian', 'latvian', 'norwegian', 'persian', 'portuguese', 'romanian', 'russian', - 'sorani', 'spanish', 'swedish', 'thai', 'turkish'. - :type stopwords_list: str or ~search_service_client.models.StopwordsList + property cannot both be set. Default is English. Possible values include: "arabic", "armenian", + "basque", "brazilian", "bulgarian", "catalan", "czech", "danish", "dutch", "english", + "finnish", "french", "galician", "german", "greek", "hindi", "hungarian", "indonesian", + "irish", "italian", "latvian", "norwegian", "persian", "portuguese", "romanian", "russian", + "sorani", "spanish", "swedish", "thai", "turkish". + :type stopwords_list: str or ~azure.search.documents.models.StopwordsList :param ignore_case: A value indicating whether to ignore case. If true, all words are converted to lower case first. Default is false. :type ignore_case: bool @@ -5039,7 +5166,7 @@ def __init__( **kwargs ): super(StopwordsTokenFilter, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.StopwordsTokenFilter' + self.odata_type: str = '#Microsoft.Azure.Search.StopwordsTokenFilter' self.stopwords = stopwords self.stopwords_list = stopwords_list self.ignore_case = ignore_case @@ -5112,7 +5239,7 @@ class SynonymMap(msrest.serialization.Model): needed if you want to rotate your encryption key; Your data will be unaffected. Encryption with customer-managed keys is not available for free search services, and is only available for paid services created on or after January 1, 2019. - :type encryption_key: ~search_service_client.models.EncryptionKey + :type encryption_key: ~azure.search.documents.models.SearchResourceEncryptionKey :param e_tag: The ETag of the synonym map. 
:type e_tag: str """ @@ -5127,7 +5254,7 @@ class SynonymMap(msrest.serialization.Model): 'name': {'key': 'name', 'type': 'str'}, 'format': {'key': 'format', 'type': 'str'}, 'synonyms': {'key': 'synonyms', 'type': 'str'}, - 'encryption_key': {'key': 'encryptionKey', 'type': 'EncryptionKey'}, + 'encryption_key': {'key': 'encryptionKey', 'type': 'SearchResourceEncryptionKey'}, 'e_tag': {'key': '@odata\\.etag', 'type': 'str'}, } @@ -5138,7 +5265,7 @@ def __init__( *, name: str, synonyms: str, - encryption_key: Optional["EncryptionKey"] = None, + encryption_key: Optional["SearchResourceEncryptionKey"] = None, e_tag: Optional[str] = None, **kwargs ): @@ -5204,7 +5331,7 @@ def __init__( **kwargs ): super(SynonymTokenFilter, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.SynonymTokenFilter' + self.odata_type: str = '#Microsoft.Azure.Search.SynonymTokenFilter' self.synonyms = synonyms self.ignore_case = ignore_case self.expand = expand @@ -5224,11 +5351,11 @@ class TagScoringFunction(ScoringFunction): 1.0. :type boost: float :param interpolation: A value indicating how boosting will be interpolated across document - scores; defaults to "Linear". Possible values include: 'linear', 'constant', 'quadratic', - 'logarithmic'. - :type interpolation: str or ~search_service_client.models.ScoringFunctionInterpolation + scores; defaults to "Linear". Possible values include: "linear", "constant", "quadratic", + "logarithmic". + :type interpolation: str or ~azure.search.documents.models.ScoringFunctionInterpolation :param parameters: Required. Parameter values for the tag scoring function. - :type parameters: ~search_service_client.models.TagScoringParameters + :type parameters: ~azure.search.documents.models.TagScoringParameters """ _validation = { @@ -5256,7 +5383,7 @@ def __init__( **kwargs ): super(TagScoringFunction, self).__init__(field_name=field_name, boost=boost, interpolation=interpolation, **kwargs) - self.type = 'tag' + self.type: str = 'tag' self.parameters = parameters @@ -5288,7 +5415,7 @@ def __init__( self.tags_parameter = tags_parameter -class TextTranslationSkill(Skill): +class TextTranslationSkill(SearchIndexerSkill): """A skill to translate text from one language to another. All required parameters must be populated in order to send to Azure. @@ -5308,37 +5435,37 @@ class TextTranslationSkill(Skill): :type context: str :param inputs: Required. Inputs of the skills could be a column in the source data set, or the output of an upstream skill. - :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :type inputs: list[~azure.search.documents.models.InputFieldMappingEntry] :param outputs: Required. The output of a skill is either a field in a search index, or a value that can be consumed as an input by another skill. - :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + :type outputs: list[~azure.search.documents.models.OutputFieldMappingEntry] :param default_to_language_code: Required. The language code to translate documents into for - documents that don't specify the to language explicitly. 
Possible values include: 'af', 'ar', - 'bn', 'bs', 'bg', 'yue', 'ca', 'zh-Hans', 'zh-Hant', 'hr', 'cs', 'da', 'nl', 'en', 'et', 'fj', - 'fil', 'fi', 'fr', 'de', 'el', 'ht', 'he', 'hi', 'mww', 'hu', 'is', 'id', 'it', 'ja', 'sw', - 'tlh', 'ko', 'lv', 'lt', 'mg', 'ms', 'mt', 'nb', 'fa', 'pl', 'pt', 'otq', 'ro', 'ru', 'sm', - 'sr-Cyrl', 'sr-Latn', 'sk', 'sl', 'es', 'sv', 'ty', 'ta', 'te', 'th', 'to', 'tr', 'uk', 'ur', - 'vi', 'cy', 'yua'. + documents that don't specify the to language explicitly. Possible values include: "af", "ar", + "bn", "bs", "bg", "yue", "ca", "zh-Hans", "zh-Hant", "hr", "cs", "da", "nl", "en", "et", "fj", + "fil", "fi", "fr", "de", "el", "ht", "he", "hi", "mww", "hu", "is", "id", "it", "ja", "sw", + "tlh", "ko", "lv", "lt", "mg", "ms", "mt", "nb", "fa", "pl", "pt", "otq", "ro", "ru", "sm", + "sr-Cyrl", "sr-Latn", "sk", "sl", "es", "sv", "ty", "ta", "te", "th", "to", "tr", "uk", "ur", + "vi", "cy", "yua". :type default_to_language_code: str or - ~search_service_client.models.TextTranslationSkillLanguage + ~azure.search.documents.models.TextTranslationSkillLanguage :param default_from_language_code: The language code to translate documents from for documents - that don't specify the from language explicitly. Possible values include: 'af', 'ar', 'bn', - 'bs', 'bg', 'yue', 'ca', 'zh-Hans', 'zh-Hant', 'hr', 'cs', 'da', 'nl', 'en', 'et', 'fj', 'fil', - 'fi', 'fr', 'de', 'el', 'ht', 'he', 'hi', 'mww', 'hu', 'is', 'id', 'it', 'ja', 'sw', 'tlh', - 'ko', 'lv', 'lt', 'mg', 'ms', 'mt', 'nb', 'fa', 'pl', 'pt', 'otq', 'ro', 'ru', 'sm', 'sr-Cyrl', - 'sr-Latn', 'sk', 'sl', 'es', 'sv', 'ty', 'ta', 'te', 'th', 'to', 'tr', 'uk', 'ur', 'vi', 'cy', - 'yua'. + that don't specify the from language explicitly. Possible values include: "af", "ar", "bn", + "bs", "bg", "yue", "ca", "zh-Hans", "zh-Hant", "hr", "cs", "da", "nl", "en", "et", "fj", "fil", + "fi", "fr", "de", "el", "ht", "he", "hi", "mww", "hu", "is", "id", "it", "ja", "sw", "tlh", + "ko", "lv", "lt", "mg", "ms", "mt", "nb", "fa", "pl", "pt", "otq", "ro", "ru", "sm", "sr-Cyrl", + "sr-Latn", "sk", "sl", "es", "sv", "ty", "ta", "te", "th", "to", "tr", "uk", "ur", "vi", "cy", + "yua". :type default_from_language_code: str or - ~search_service_client.models.TextTranslationSkillLanguage + ~azure.search.documents.models.TextTranslationSkillLanguage :param suggested_from: The language code to translate documents from when neither the fromLanguageCode input nor the defaultFromLanguageCode parameter are provided, and the - automatic language detection is unsuccessful. Default is en. Possible values include: 'af', - 'ar', 'bn', 'bs', 'bg', 'yue', 'ca', 'zh-Hans', 'zh-Hant', 'hr', 'cs', 'da', 'nl', 'en', 'et', - 'fj', 'fil', 'fi', 'fr', 'de', 'el', 'ht', 'he', 'hi', 'mww', 'hu', 'is', 'id', 'it', 'ja', - 'sw', 'tlh', 'ko', 'lv', 'lt', 'mg', 'ms', 'mt', 'nb', 'fa', 'pl', 'pt', 'otq', 'ro', 'ru', - 'sm', 'sr-Cyrl', 'sr-Latn', 'sk', 'sl', 'es', 'sv', 'ty', 'ta', 'te', 'th', 'to', 'tr', 'uk', - 'ur', 'vi', 'cy', 'yua'. - :type suggested_from: str or ~search_service_client.models.TextTranslationSkillLanguage + automatic language detection is unsuccessful. Default is en. 
Possible values include: "af", + "ar", "bn", "bs", "bg", "yue", "ca", "zh-Hans", "zh-Hant", "hr", "cs", "da", "nl", "en", "et", + "fj", "fil", "fi", "fr", "de", "el", "ht", "he", "hi", "mww", "hu", "is", "id", "it", "ja", + "sw", "tlh", "ko", "lv", "lt", "mg", "ms", "mt", "nb", "fa", "pl", "pt", "otq", "ro", "ru", + "sm", "sr-Cyrl", "sr-Latn", "sk", "sl", "es", "sv", "ty", "ta", "te", "th", "to", "tr", "uk", + "ur", "vi", "cy", "yua". + :type suggested_from: str or ~azure.search.documents.models.TextTranslationSkillLanguage """ _validation = { @@ -5374,7 +5501,7 @@ def __init__( **kwargs ): super(TextTranslationSkill, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) - self.odata_type = '#Microsoft.Skills.Text.TranslationSkill' + self.odata_type: str = '#Microsoft.Skills.Text.TranslationSkill' self.default_to_language_code = default_to_language_code self.default_from_language_code = default_from_language_code self.suggested_from = suggested_from @@ -5408,51 +5535,6 @@ def __init__( self.weights = weights -class TokenInfo(msrest.serialization.Model): - """Information about a token returned by an analyzer. - - Variables are only populated by the server, and will be ignored when sending a request. - - All required parameters must be populated in order to send to Azure. - - :ivar token: Required. The token returned by the analyzer. - :vartype token: str - :ivar start_offset: Required. The index of the first character of the token in the input text. - :vartype start_offset: int - :ivar end_offset: Required. The index of the last character of the token in the input text. - :vartype end_offset: int - :ivar position: Required. The position of the token in the input text relative to other tokens. - The first token in the input text has position 0, the next has position 1, and so on. Depending - on the analyzer used, some tokens might have the same position, for example if they are - synonyms of each other. - :vartype position: int - """ - - _validation = { - 'token': {'required': True, 'readonly': True}, - 'start_offset': {'required': True, 'readonly': True}, - 'end_offset': {'required': True, 'readonly': True}, - 'position': {'required': True, 'readonly': True}, - } - - _attribute_map = { - 'token': {'key': 'token', 'type': 'str'}, - 'start_offset': {'key': 'startOffset', 'type': 'int'}, - 'end_offset': {'key': 'endOffset', 'type': 'int'}, - 'position': {'key': 'position', 'type': 'int'}, - } - - def __init__( - self, - **kwargs - ): - super(TokenInfo, self).__init__(**kwargs) - self.token = None - self.start_offset = None - self.end_offset = None - self.position = None - - class TruncateTokenFilter(TokenFilter): """Truncates the terms to a specific length. This token filter is implemented using Apache Lucene. @@ -5489,11 +5571,11 @@ def __init__( **kwargs ): super(TruncateTokenFilter, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.TruncateTokenFilter' + self.odata_type: str = '#Microsoft.Azure.Search.TruncateTokenFilter' self.length = length -class UaxUrlEmailTokenizer(Tokenizer): +class UaxUrlEmailTokenizer(LexicalTokenizer): """Tokenizes urls and emails as one token. This tokenizer is implemented using Apache Lucene. All required parameters must be populated in order to send to Azure. 
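The hunks above rename the skillset and encryption-key models without changing their constructor signatures, so callers only need to swap class names. Below is a minimal sketch of the renamed surface, assuming the classes are importable from `azure.search.documents.models` as the updated docstring references suggest; the key vault values and resource names are placeholders, not real resources:

    from azure.search.documents.models import (
        SearchIndexerSkillset,        # formerly Skillset
        SearchResourceEncryptionKey,  # formerly EncryptionKey
        SynonymMap,
    )

    # Customer-managed key; key_name, key_version, and vault_uri are the three
    # required fields per the SearchResourceEncryptionKey._validation map above.
    key = SearchResourceEncryptionKey(
        key_name="example-key",                       # placeholder
        key_version="0123456789abcdef",               # placeholder
        vault_uri="https://example.vault.azure.net",  # placeholder
    )

    # A synonym map encrypted with the customer-managed key, matching the
    # SynonymMap constructor shown later in this diff.
    synonym_map = SynonymMap(
        name="example-synonyms",
        synonyms="USA, United States, United States of America",
        encryption_key=key,
    )

    # A skillset; `skills` takes the renamed SearchIndexerSkill subclasses
    # (SplitSkill, ShaperSkill, SentimentSkill, ...).
    skillset = SearchIndexerSkillset(
        name="example-skillset",
        description="Demo skillset",
        skills=[],  # populate with SearchIndexerSkill instances
    )
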
@@ -5530,7 +5612,7 @@ def __init__( **kwargs ): super(UaxUrlEmailTokenizer, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.UaxUrlEmailTokenizer' + self.odata_type: str = '#Microsoft.Azure.Search.UaxUrlEmailTokenizer' self.max_token_length = max_token_length @@ -5570,11 +5652,11 @@ def __init__( **kwargs ): super(UniqueTokenFilter, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.UniqueTokenFilter' + self.odata_type: str = '#Microsoft.Azure.Search.UniqueTokenFilter' self.only_on_same_position = only_on_same_position -class WebApiSkill(Skill): +class WebApiSkill(SearchIndexerSkill): """A skill that can call a Web API endpoint, allowing you to extend a skillset by having it call your custom code. All required parameters must be populated in order to send to Azure. @@ -5594,10 +5676,10 @@ class WebApiSkill(Skill): :type context: str :param inputs: Required. Inputs of the skills could be a column in the source data set, or the output of an upstream skill. - :type inputs: list[~search_service_client.models.InputFieldMappingEntry] + :type inputs: list[~azure.search.documents.models.InputFieldMappingEntry] :param outputs: Required. The output of a skill is either a field in a search index, or a value that can be consumed as an input by another skill. - :type outputs: list[~search_service_client.models.OutputFieldMappingEntry] + :type outputs: list[~azure.search.documents.models.OutputFieldMappingEntry] :param uri: Required. The url for the Web API. :type uri: str :param http_headers: The headers required to make the http request. @@ -5652,7 +5734,7 @@ def __init__( **kwargs ): super(WebApiSkill, self).__init__(name=name, description=description, context=context, inputs=inputs, outputs=outputs, **kwargs) - self.odata_type = '#Microsoft.Skills.Custom.WebApiSkill' + self.odata_type: str = '#Microsoft.Skills.Custom.WebApiSkill' self.uri = uri self.http_headers = http_headers self.http_method = http_method @@ -5742,7 +5824,7 @@ def __init__( **kwargs ): super(WordDelimiterTokenFilter, self).__init__(name=name, **kwargs) - self.odata_type = '#Microsoft.Azure.Search.WordDelimiterTokenFilter' + self.odata_type: str = '#Microsoft.Azure.Search.WordDelimiterTokenFilter' self.generate_word_parts = generate_word_parts self.generate_number_parts = generate_number_parts self.catenate_words = catenate_words diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/models/_search_service_client_enums.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/models/_search_service_client_enums.py index 2b17813411e2..ee355eec0897 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/models/_search_service_client_enums.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/models/_search_service_client_enums.py @@ -1,44 +1,125 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator}) +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- from enum import Enum -class DataSourceType(str, Enum): - """Defines the type of a datasource. +class CjkBigramTokenFilterScripts(str, Enum): + """Scripts that can be ignored by CjkBigramTokenFilter. + """ + + han = "han" #: Ignore Han script when forming bigrams of CJK terms. + hiragana = "hiragana" #: Ignore Hiragana script when forming bigrams of CJK terms. + katakana = "katakana" #: Ignore Katakana script when forming bigrams of CJK terms. + hangul = "hangul" #: Ignore Hangul script when forming bigrams of CJK terms. + +class EdgeNGramTokenFilterSide(str, Enum): + """Specifies which side of the input an n-gram should be generated from. + """ + + front = "front" #: Specifies that the n-gram should be generated from the front of the input. + back = "back" #: Specifies that the n-gram should be generated from the back of the input. + +class EntityCategory(str, Enum): + """A string indicating what entity categories to return. """ - azure_sql = "azuresql" - cosmos_db = "cosmosdb" - azure_blob = "azureblob" - azure_table = "azuretable" - my_sql = "mysql" + location = "location" #: Entities describing a physical location. + organization = "organization" #: Entities describing an organization. + person = "person" #: Entities describing a person. + quantity = "quantity" #: Entities describing a quantity. + datetime = "datetime" #: Entities describing a date and time. + url = "url" #: Entities describing a URL. + email = "email" #: Entities describing an email address. + +class EntityRecognitionSkillLanguage(str, Enum): + """The language codes supported for input text by EntityRecognitionSkill. + """ + + ar = "ar" #: Arabic. + cs = "cs" #: Czech. + zh_hans = "zh-Hans" #: Chinese-Simplified. + zh_hant = "zh-Hant" #: Chinese-Traditional. + da = "da" #: Danish. + nl = "nl" #: Dutch. + en = "en" #: English. + fi = "fi" #: Finnish. + fr = "fr" #: French. + de = "de" #: German. + el = "el" #: Greek. + hu = "hu" #: Hungarian. + it = "it" #: Italian. + ja = "ja" #: Japanese. + ko = "ko" #: Korean. + no = "no" #: Norwegian (Bokmaal). + pl = "pl" #: Polish. + pt = "pt-PT" #: Portuguese (Portugal). + pt_br = "pt-BR" #: Portuguese (Brazil). + ru = "ru" #: Russian. + es = "es" #: Spanish. + sv = "sv" #: Swedish. + tr = "tr" #: Turkish. + +class ImageAnalysisSkillLanguage(str, Enum): + """The language codes supported for input by ImageAnalysisSkill. + """ + + en = "en" #: English. + es = "es" #: Spanish. + ja = "ja" #: Japanese. + pt = "pt" #: Portuguese. + zh = "zh" #: Chinese. + +class ImageDetail(str, Enum): + """A string indicating which domain-specific details to return. + """ + + celebrities = "celebrities" #: Details recognized as celebrities. + landmarks = "landmarks" #: Details recognized as landmarks. class IndexerExecutionStatus(str, Enum): """Represents the status of an individual indexer execution. """ - transient_failure = "transientFailure" - success = "success" - in_progress = "inProgress" - reset = "reset" + transient_failure = "transientFailure" #: An indexer invocation has failed, but the failure may be transient. Indexer invocations will continue per schedule. + success = "success" #: Indexer execution completed successfully. + in_progress = "inProgress" #: Indexer execution is in progress. + reset = "reset" #: Indexer has been reset. -class DataType(str, Enum): - """Defines the data type of a field in a search index. +class IndexerStatus(str, Enum): + """Represents the overall indexer status. 
+ """ + + unknown = "unknown" #: Indicates that the indexer is in an unknown state. + error = "error" #: Indicates that the indexer experienced an error that cannot be corrected without human intervention. + running = "running" #: Indicates that the indexer is running normally. + +class KeyPhraseExtractionSkillLanguage(str, Enum): + """The language codes supported for input text by KeyPhraseExtractionSkill. """ - edm_string = "Edm.String" - edm_int32 = "Edm.Int32" - edm_int64 = "Edm.Int64" - edm_double = "Edm.Double" - edm_boolean = "Edm.Boolean" - edm_date_time_offset = "Edm.DateTimeOffset" - edm_geography_point = "Edm.GeographyPoint" - edm_complex_type = "Edm.ComplexType" + da = "da" #: Danish. + nl = "nl" #: Dutch. + en = "en" #: English. + fi = "fi" #: Finnish. + fr = "fr" #: French. + de = "de" #: German. + it = "it" #: Italian. + ja = "ja" #: Japanese. + ko = "ko" #: Korean. + no = "no" #: Norwegian (Bokmaal). + pl = "pl" #: Polish. + pt = "pt-PT" #: Portuguese (Portugal). + pt_br = "pt-BR" #: Portuguese (Brazil). + ru = "ru" #: Russian. + es = "es" #: Spanish. + sv = "sv" #: Swedish. -class AnalyzerName(str, Enum): +class LexicalAnalyzerName(str, Enum): """Defines the names of all text analyzers supported by Azure Cognitive Search. """ @@ -136,116 +217,7 @@ class AnalyzerName(str, Enum): stop = "stop" #: Divides text at non-letters; Applies the lowercase and stopword token filters. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/StopAnalyzer.html. whitespace = "whitespace" #: An analyzer that uses the whitespace tokenizer. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/WhitespaceAnalyzer.html. -class ScoringFunctionInterpolation(str, Enum): - """Defines the function used to interpolate score boosting across a range of documents. - """ - - linear = "linear" - constant = "constant" - quadratic = "quadratic" - logarithmic = "logarithmic" - -class ScoringFunctionAggregation(str, Enum): - """Defines the aggregation function used to combine the results of all the scoring functions in a - scoring profile. - """ - - sum = "sum" - average = "average" - minimum = "minimum" - maximum = "maximum" - first_matching = "firstMatching" - -class TokenFilterName(str, Enum): - """Defines the names of all token filters supported by Azure Cognitive Search. - """ - - arabic_normalization = "arabic_normalization" #: A token filter that applies the Arabic normalizer to normalize the orthography. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ar/ArabicNormalizationFilter.html. - apostrophe = "apostrophe" #: Strips all characters after an apostrophe (including the apostrophe itself). See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/tr/ApostropheFilter.html. - ascii_folding = "asciifolding" #: Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ASCIIFoldingFilter.html. - cjk_bigram = "cjk_bigram" #: Forms bigrams of CJK terms that are generated from StandardTokenizer. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/cjk/CJKBigramFilter.html. - cjk_width = "cjk_width" #: Normalizes CJK width differences. 
Folds fullwidth ASCII variants into the equivalent basic Latin, and half-width Katakana variants into the equivalent Kana. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/cjk/CJKWidthFilter.html. - classic = "classic" #: Removes English possessives, and dots from acronyms. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/ClassicFilter.html. - common_gram = "common_grams" #: Construct bigrams for frequently occurring terms while indexing. Single terms are still indexed too, with bigrams overlaid. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/commongrams/CommonGramsFilter.html. - edge_n_gram = "edgeNGram_v2" #: Generates n-grams of the given size(s) starting from the front or the back of an input token. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilter.html. - elision = "elision" #: Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/util/ElisionFilter.html. - german_normalization = "german_normalization" #: Normalizes German characters according to the heuristics of the German2 snowball algorithm. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/de/GermanNormalizationFilter.html. - hindi_normalization = "hindi_normalization" #: Normalizes text in Hindi to remove some differences in spelling variations. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/hi/HindiNormalizationFilter.html. - indic_normalization = "indic_normalization" #: Normalizes the Unicode representation of text in Indian languages. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/in/IndicNormalizationFilter.html. - keyword_repeat = "keyword_repeat" #: Emits each incoming token twice, once as keyword and once as non-keyword. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/KeywordRepeatFilter.html. - k_stem = "kstem" #: A high-performance kstem filter for English. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/en/KStemFilter.html. - length = "length" #: Removes words that are too long or too short. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/LengthFilter.html. - limit = "limit" #: Limits the number of tokens while indexing. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/LimitTokenCountFilter.html. - lowercase = "lowercase" #: Normalizes token text to lower case. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/LowerCaseFilter.htm. - n_gram = "nGram_v2" #: Generates n-grams of the given size(s). See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/NGramTokenFilter.html. - persian_normalization = "persian_normalization" #: Applies normalization for Persian. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/fa/PersianNormalizationFilter.html. - phonetic = "phonetic" #: Create tokens for phonetic matches. See https://lucene.apache.org/core/4_10_3/analyzers-phonetic/org/apache/lucene/analysis/phonetic/package-tree.html. - porter_stem = "porter_stem" #: Uses the Porter stemming algorithm to transform the token stream. 
See http://tartarus.org/~martin/PorterStemmer. - reverse = "reverse" #: Reverses the token string. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/reverse/ReverseStringFilter.html. - scandinavian_normalization = "scandinavian_normalization" #: Normalizes use of the interchangeable Scandinavian characters. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ScandinavianNormalizationFilter.html. - scandinavian_folding_normalization = "scandinavian_folding" #: Folds Scandinavian characters åÅäæÄÆ->a and öÖøØ->o. It also discriminates against use of double vowels aa, ae, ao, oe and oo, leaving just the first one. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ScandinavianFoldingFilter.html. - shingle = "shingle" #: Creates combinations of tokens as a single token. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/shingle/ShingleFilter.html. - snowball = "snowball" #: A filter that stems words using a Snowball-generated stemmer. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/snowball/SnowballFilter.html. - sorani_normalization = "sorani_normalization" #: Normalizes the Unicode representation of Sorani text. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ckb/SoraniNormalizationFilter.html. - stemmer = "stemmer" #: Language specific stemming filter. See https://docs.microsoft.com/rest/api/searchservice/Custom-analyzers-in-Azure-Search#TokenFilters. - stopwords = "stopwords" #: Removes stop words from a token stream. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/StopFilter.html. - trim = "trim" #: Trims leading and trailing whitespace from tokens. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/TrimFilter.html. - truncate = "truncate" #: Truncates the terms to a specific length. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/TruncateTokenFilter.html. - unique = "unique" #: Filters out tokens with same text as the previous token. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/RemoveDuplicatesTokenFilter.html. - uppercase = "uppercase" #: Normalizes token text to upper case. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/UpperCaseFilter.html. - word_delimiter = "word_delimiter" #: Splits words into subwords and performs optional transformations on subword groups. - -class TokenCharacterKind(str, Enum): - """Represents classes of characters on which a token filter can operate. - """ - - letter = "letter" - digit = "digit" - whitespace = "whitespace" - punctuation = "punctuation" - symbol = "symbol" - -class CjkBigramTokenFilterScripts(str, Enum): - """Scripts that can be ignored by CjkBigramTokenFilter. - """ - - han = "han" - hiragana = "hiragana" - katakana = "katakana" - hangul = "hangul" - -class VisualFeature(str, Enum): - """The strings indicating what visual feature types to return. - """ - - adult = "adult" - brands = "brands" - categories = "categories" - description = "description" - faces = "faces" - objects = "objects" - tags = "tags" - -class ImageDetail(str, Enum): - """A string indicating which domain-specific details to return. 
- """ - - celebrities = "celebrities" - landmarks = "landmarks" - -class EntityCategory(str, Enum): - """A string indicating what entity categories to return. - """ - - location = "location" - organization = "organization" - person = "person" - quantity = "quantity" - datetime = "datetime" - url = "url" - email = "email" - -class TokenizerName(str, Enum): +class LexicalTokenizerName(str, Enum): """Defines the names of all tokenizers supported by Azure Cognitive Search. """ @@ -263,40 +235,102 @@ class TokenizerName(str, Enum): uax_url_email = "uax_url_email" #: Tokenizes urls and emails as one token. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/UAX29URLEmailTokenizer.html. whitespace = "whitespace" #: Divides text at whitespace. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/WhitespaceTokenizer.html. -class RegexFlags(str, Enum): - """Defines flags that can be combined to control how regular expressions are used in the pattern - analyzer and pattern tokenizer. +class MicrosoftStemmingTokenizerLanguage(str, Enum): + """Lists the languages supported by the Microsoft language stemming tokenizer. """ - canon_eq = "CANON_EQ" - case_insensitive = "CASE_INSENSITIVE" - comments = "COMMENTS" - dotall = "DOTALL" - literal = "LITERAL" - multiline = "MULTILINE" - unicode_case = "UNICODE_CASE" - unix_lines = "UNIX_LINES" + arabic = "arabic" #: Selects the Microsoft stemming tokenizer for Arabic. + bangla = "bangla" #: Selects the Microsoft stemming tokenizer for Bangla. + bulgarian = "bulgarian" #: Selects the Microsoft stemming tokenizer for Bulgarian. + catalan = "catalan" #: Selects the Microsoft stemming tokenizer for Catalan. + croatian = "croatian" #: Selects the Microsoft stemming tokenizer for Croatian. + czech = "czech" #: Selects the Microsoft stemming tokenizer for Czech. + danish = "danish" #: Selects the Microsoft stemming tokenizer for Danish. + dutch = "dutch" #: Selects the Microsoft stemming tokenizer for Dutch. + english = "english" #: Selects the Microsoft stemming tokenizer for English. + estonian = "estonian" #: Selects the Microsoft stemming tokenizer for Estonian. + finnish = "finnish" #: Selects the Microsoft stemming tokenizer for Finnish. + french = "french" #: Selects the Microsoft stemming tokenizer for French. + german = "german" #: Selects the Microsoft stemming tokenizer for German. + greek = "greek" #: Selects the Microsoft stemming tokenizer for Greek. + gujarati = "gujarati" #: Selects the Microsoft stemming tokenizer for Gujarati. + hebrew = "hebrew" #: Selects the Microsoft stemming tokenizer for Hebrew. + hindi = "hindi" #: Selects the Microsoft stemming tokenizer for Hindi. + hungarian = "hungarian" #: Selects the Microsoft stemming tokenizer for Hungarian. + icelandic = "icelandic" #: Selects the Microsoft stemming tokenizer for Icelandic. + indonesian = "indonesian" #: Selects the Microsoft stemming tokenizer for Indonesian. + italian = "italian" #: Selects the Microsoft stemming tokenizer for Italian. + kannada = "kannada" #: Selects the Microsoft stemming tokenizer for Kannada. + latvian = "latvian" #: Selects the Microsoft stemming tokenizer for Latvian. + lithuanian = "lithuanian" #: Selects the Microsoft stemming tokenizer for Lithuanian. + malay = "malay" #: Selects the Microsoft stemming tokenizer for Malay. + malayalam = "malayalam" #: Selects the Microsoft stemming tokenizer for Malayalam. 
+ marathi = "marathi" #: Selects the Microsoft stemming tokenizer for Marathi. + norwegian_bokmaal = "norwegianBokmaal" #: Selects the Microsoft stemming tokenizer for Norwegian (Bokmål). + polish = "polish" #: Selects the Microsoft stemming tokenizer for Polish. + portuguese = "portuguese" #: Selects the Microsoft stemming tokenizer for Portuguese. + portuguese_brazilian = "portugueseBrazilian" #: Selects the Microsoft stemming tokenizer for Portuguese (Brazil). + punjabi = "punjabi" #: Selects the Microsoft stemming tokenizer for Punjabi. + romanian = "romanian" #: Selects the Microsoft stemming tokenizer for Romanian. + russian = "russian" #: Selects the Microsoft stemming tokenizer for Russian. + serbian_cyrillic = "serbianCyrillic" #: Selects the Microsoft stemming tokenizer for Serbian (Cyrillic). + serbian_latin = "serbianLatin" #: Selects the Microsoft stemming tokenizer for Serbian (Latin). + slovak = "slovak" #: Selects the Microsoft stemming tokenizer for Slovak. + slovenian = "slovenian" #: Selects the Microsoft stemming tokenizer for Slovenian. + spanish = "spanish" #: Selects the Microsoft stemming tokenizer for Spanish. + swedish = "swedish" #: Selects the Microsoft stemming tokenizer for Swedish. + tamil = "tamil" #: Selects the Microsoft stemming tokenizer for Tamil. + telugu = "telugu" #: Selects the Microsoft stemming tokenizer for Telugu. + turkish = "turkish" #: Selects the Microsoft stemming tokenizer for Turkish. + ukrainian = "ukrainian" #: Selects the Microsoft stemming tokenizer for Ukrainian. + urdu = "urdu" #: Selects the Microsoft stemming tokenizer for Urdu. -class KeyPhraseExtractionSkillLanguage(str, Enum): - """The language codes supported for input text by KeyPhraseExtractionSkill. +class MicrosoftTokenizerLanguage(str, Enum): + """Lists the languages supported by the Microsoft language tokenizer. """ - da = "da" #: Danish. - nl = "nl" #: Dutch. - en = "en" #: English. - fi = "fi" #: Finnish. - fr = "fr" #: French. - de = "de" #: German. - it = "it" #: Italian. - ja = "ja" #: Japanese. - ko = "ko" #: Korean. - no = "no" #: Norwegian (Bokmaal). - pl = "pl" #: Polish. - pt = "pt-PT" #: Portuguese (Portugal). - pt_br = "pt-BR" #: Portuguese (Brazil). - ru = "ru" #: Russian. - es = "es" #: Spanish. - sv = "sv" #: Swedish. + bangla = "bangla" #: Selects the Microsoft tokenizer for Bangla. + bulgarian = "bulgarian" #: Selects the Microsoft tokenizer for Bulgarian. + catalan = "catalan" #: Selects the Microsoft tokenizer for Catalan. + chinese_simplified = "chineseSimplified" #: Selects the Microsoft tokenizer for Chinese (Simplified). + chinese_traditional = "chineseTraditional" #: Selects the Microsoft tokenizer for Chinese (Traditional). + croatian = "croatian" #: Selects the Microsoft tokenizer for Croatian. + czech = "czech" #: Selects the Microsoft tokenizer for Czech. + danish = "danish" #: Selects the Microsoft tokenizer for Danish. + dutch = "dutch" #: Selects the Microsoft tokenizer for Dutch. + english = "english" #: Selects the Microsoft tokenizer for English. + french = "french" #: Selects the Microsoft tokenizer for French. + german = "german" #: Selects the Microsoft tokenizer for German. + greek = "greek" #: Selects the Microsoft tokenizer for Greek. + gujarati = "gujarati" #: Selects the Microsoft tokenizer for Gujarati. + hindi = "hindi" #: Selects the Microsoft tokenizer for Hindi. + icelandic = "icelandic" #: Selects the Microsoft tokenizer for Icelandic. + indonesian = "indonesian" #: Selects the Microsoft tokenizer for Indonesian. 
+ italian = "italian" #: Selects the Microsoft tokenizer for Italian. + japanese = "japanese" #: Selects the Microsoft tokenizer for Japanese. + kannada = "kannada" #: Selects the Microsoft tokenizer for Kannada. + korean = "korean" #: Selects the Microsoft tokenizer for Korean. + malay = "malay" #: Selects the Microsoft tokenizer for Malay. + malayalam = "malayalam" #: Selects the Microsoft tokenizer for Malayalam. + marathi = "marathi" #: Selects the Microsoft tokenizer for Marathi. + norwegian_bokmaal = "norwegianBokmaal" #: Selects the Microsoft tokenizer for Norwegian (Bokmål). + polish = "polish" #: Selects the Microsoft tokenizer for Polish. + portuguese = "portuguese" #: Selects the Microsoft tokenizer for Portuguese. + portuguese_brazilian = "portugueseBrazilian" #: Selects the Microsoft tokenizer for Portuguese (Brazil). + punjabi = "punjabi" #: Selects the Microsoft tokenizer for Punjabi. + romanian = "romanian" #: Selects the Microsoft tokenizer for Romanian. + russian = "russian" #: Selects the Microsoft tokenizer for Russian. + serbian_cyrillic = "serbianCyrillic" #: Selects the Microsoft tokenizer for Serbian (Cyrillic). + serbian_latin = "serbianLatin" #: Selects the Microsoft tokenizer for Serbian (Latin). + slovenian = "slovenian" #: Selects the Microsoft tokenizer for Slovenian. + spanish = "spanish" #: Selects the Microsoft tokenizer for Spanish. + swedish = "swedish" #: Selects the Microsoft tokenizer for Swedish. + tamil = "tamil" #: Selects the Microsoft tokenizer for Tamil. + telugu = "telugu" #: Selects the Microsoft tokenizer for Telugu. + thai = "thai" #: Selects the Microsoft tokenizer for Thai. + ukrainian = "ukrainian" #: Selects the Microsoft tokenizer for Ukrainian. + urdu = "urdu" #: Selects the Microsoft tokenizer for Urdu. + vietnamese = "vietnamese" #: Selects the Microsoft tokenizer for Vietnamese. class OcrSkillLanguage(str, Enum): """The language codes supported for input by OcrSkill. @@ -329,43 +363,78 @@ class OcrSkillLanguage(str, Enum): sr_latn = "sr-Latn" #: Serbian (Latin, Serbia). sk = "sk" #: Slovak. -class ImageAnalysisSkillLanguage(str, Enum): - """The language codes supported for input by ImageAnalysisSkill. +class PhoneticEncoder(str, Enum): + """Identifies the type of phonetic encoder to use with a PhoneticTokenFilter. """ - en = "en" #: English. - es = "es" #: Spanish. - ja = "ja" #: Japanese. - pt = "pt" #: Portuguese. - zh = "zh" #: Chinese. + metaphone = "metaphone" #: Encodes a token into a Metaphone value. + double_metaphone = "doubleMetaphone" #: Encodes a token into a double metaphone value. + soundex = "soundex" #: Encodes a token into a Soundex value. + refined_soundex = "refinedSoundex" #: Encodes a token into a Refined Soundex value. + caverphone1 = "caverphone1" #: Encodes a token into a Caverphone 1.0 value. + caverphone2 = "caverphone2" #: Encodes a token into a Caverphone 2.0 value. + cologne = "cologne" #: Encodes a token into a Cologne Phonetic value. + nysiis = "nysiis" #: Encodes a token into a NYSIIS value. + koelner_phonetik = "koelnerPhonetik" #: Encodes a token using the Kölner Phonetik algorithm. + haase_phonetik = "haasePhonetik" #: Encodes a token using the Haase refinement of the Kölner Phonetik algorithm. + beider_morse = "beiderMorse" #: Encodes a token into a Beider-Morse value. -class EntityRecognitionSkillLanguage(str, Enum): - """The language codes supported for input text by EntityRecognitionSkill. 
+class RegexFlags(str, Enum):
+ """Defines flags that can be combined to control how regular expressions are used in the pattern
+ analyzer and pattern tokenizer.
 """

- ar = "ar" #: Arabic.
- cs = "cs" #: Czech.
- zh_hans = "zh-Hans" #: Chinese-Simplified.
- zh_hant = "zh-Hant" #: Chinese-Traditional.
- da = "da" #: Danish.
- nl = "nl" #: Dutch.
- en = "en" #: English.
- fi = "fi" #: Finnish.
- fr = "fr" #: French.
- de = "de" #: German.
- el = "el" #: Greek.
- hu = "hu" #: Hungarian.
- it = "it" #: Italian.
- ja = "ja" #: Japanese.
- ko = "ko" #: Korean.
- no = "no" #: Norwegian (Bokmaal).
- pl = "pl" #: Polish.
- pt = "pt-PT" #: Portuguese (Portugal).
- pt_br = "pt-BR" #: Portuguese (Brazil).
- ru = "ru" #: Russian.
- es = "es" #: Spanish.
- sv = "sv" #: Swedish.
- tr = "tr" #: Turkish.
+ canon_eq = "CANON_EQ" #: Enables canonical equivalence.
+ case_insensitive = "CASE_INSENSITIVE" #: Enables case-insensitive matching.
+ comments = "COMMENTS" #: Permits whitespace and comments in the pattern.
+ dot_all = "DOTALL" #: Enables dotall mode.
+ literal = "LITERAL" #: Enables literal parsing of the pattern.
+ multiline = "MULTILINE" #: Enables multiline mode.
+ unicode_case = "UNICODE_CASE" #: Enables Unicode-aware case folding.
+ unix_lines = "UNIX_LINES" #: Enables Unix lines mode.
+
+class ScoringFunctionAggregation(str, Enum):
+ """Defines the aggregation function used to combine the results of all the scoring functions in a
+ scoring profile.
+ """
+
+ sum = "sum" #: Boosts scores by the sum of all scoring function results.
+ average = "average" #: Boosts scores by the average of all scoring function results.
+ minimum = "minimum" #: Boosts scores by the minimum of all scoring function results.
+ maximum = "maximum" #: Boosts scores by the maximum of all scoring function results.
+ first_matching = "firstMatching" #: Boosts scores using the first applicable scoring function in the scoring profile.
+
+class ScoringFunctionInterpolation(str, Enum):
+ """Defines the function used to interpolate score boosting across a range of documents.
+ """
+
+ linear = "linear" #: Boosts scores by a linearly decreasing amount. This is the default interpolation for scoring functions.
+ constant = "constant" #: Boosts scores by a constant factor.
+ quadratic = "quadratic" #: Boosts scores by an amount that decreases quadratically. Boosts decrease slowly for higher scores, and more quickly as the scores decrease. This interpolation option is not allowed in tag scoring functions.
+ logarithmic = "logarithmic" #: Boosts scores by an amount that decreases logarithmically. Boosts decrease quickly for higher scores, and more slowly as the scores decrease. This interpolation option is not allowed in tag scoring functions.
+
+class SearchFieldDataType(str, Enum):
+ """Defines the data type of a field in a search index.
+ """
+
+ string = "Edm.String" #: Indicates that a field contains a string.
+ int32 = "Edm.Int32" #: Indicates that a field contains a 32-bit signed integer.
+ int64 = "Edm.Int64" #: Indicates that a field contains a 64-bit signed integer.
+ double = "Edm.Double" #: Indicates that a field contains an IEEE double-precision floating point number.
+ boolean = "Edm.Boolean" #: Indicates that a field contains a Boolean value (true or false).
+ date_time_offset = "Edm.DateTimeOffset" #: Indicates that a field contains a date/time value, including timezone information.
+ geography_point = "Edm.GeographyPoint" #: Indicates that a field contains a geo-location in terms of longitude and latitude.
+ complex = "Edm.ComplexType" #: Indicates that a field contains one or more complex objects that in turn have sub-fields of other types. + +class SearchIndexerDataSourceType(str, Enum): + """Defines the type of a datasource. + """ + + azure_sql = "azuresql" #: Indicates an Azure SQL datasource. + cosmos_db = "cosmosdb" #: Indicates a CosmosDB datasource. + azure_blob = "azureblob" #: Indicates a Azure Blob datasource. + azure_table = "azuretable" #: Indicates a Azure Table datasource. + my_sql = "mysql" #: Indicates a MySql datasource. class SentimentSkillLanguage(str, Enum): """The language codes supported for input text by SentimentSkill. @@ -387,6 +456,33 @@ class SentimentSkillLanguage(str, Enum): sv = "sv" #: Swedish. tr = "tr" #: Turkish. +class SnowballTokenFilterLanguage(str, Enum): + """The language to use for a Snowball token filter. + """ + + armenian = "armenian" #: Selects the Lucene Snowball stemming tokenizer for Armenian. + basque = "basque" #: Selects the Lucene Snowball stemming tokenizer for Basque. + catalan = "catalan" #: Selects the Lucene Snowball stemming tokenizer for Catalan. + danish = "danish" #: Selects the Lucene Snowball stemming tokenizer for Danish. + dutch = "dutch" #: Selects the Lucene Snowball stemming tokenizer for Dutch. + english = "english" #: Selects the Lucene Snowball stemming tokenizer for English. + finnish = "finnish" #: Selects the Lucene Snowball stemming tokenizer for Finnish. + french = "french" #: Selects the Lucene Snowball stemming tokenizer for French. + german = "german" #: Selects the Lucene Snowball stemming tokenizer for German. + german2 = "german2" #: Selects the Lucene Snowball stemming tokenizer that uses the German variant algorithm. + hungarian = "hungarian" #: Selects the Lucene Snowball stemming tokenizer for Hungarian. + italian = "italian" #: Selects the Lucene Snowball stemming tokenizer for Italian. + kp = "kp" #: Selects the Lucene Snowball stemming tokenizer for Dutch that uses the Kraaij-Pohlmann stemming algorithm. + lovins = "lovins" #: Selects the Lucene Snowball stemming tokenizer for English that uses the Lovins stemming algorithm. + norwegian = "norwegian" #: Selects the Lucene Snowball stemming tokenizer for Norwegian. + porter = "porter" #: Selects the Lucene Snowball stemming tokenizer for English that uses the Porter stemming algorithm. + portuguese = "portuguese" #: Selects the Lucene Snowball stemming tokenizer for Portuguese. + romanian = "romanian" #: Selects the Lucene Snowball stemming tokenizer for Romanian. + russian = "russian" #: Selects the Lucene Snowball stemming tokenizer for Russian. + spanish = "spanish" #: Selects the Lucene Snowball stemming tokenizer for Spanish. + swedish = "swedish" #: Selects the Lucene Snowball stemming tokenizer for Swedish. + turkish = "turkish" #: Selects the Lucene Snowball stemming tokenizer for Turkish. + class SplitSkillLanguage(str, Enum): """The language codes supported for input text by SplitSkill. """ @@ -401,6 +497,115 @@ class SplitSkillLanguage(str, Enum): ko = "ko" #: Korean. pt = "pt" #: Portuguese. +class StemmerTokenFilterLanguage(str, Enum): + """The language to use for a stemmer token filter. + """ + + arabic = "arabic" #: Selects the Lucene stemming tokenizer for Arabic. + armenian = "armenian" #: Selects the Lucene stemming tokenizer for Armenian. + basque = "basque" #: Selects the Lucene stemming tokenizer for Basque. + brazilian = "brazilian" #: Selects the Lucene stemming tokenizer for Portuguese (Brazil). 
+ bulgarian = "bulgarian" #: Selects the Lucene stemming tokenizer for Bulgarian. + catalan = "catalan" #: Selects the Lucene stemming tokenizer for Catalan. + czech = "czech" #: Selects the Lucene stemming tokenizer for Czech. + danish = "danish" #: Selects the Lucene stemming tokenizer for Danish. + dutch = "dutch" #: Selects the Lucene stemming tokenizer for Dutch. + dutch_kp = "dutchKp" #: Selects the Lucene stemming tokenizer for Dutch that uses the Kraaij-Pohlmann stemming algorithm. + english = "english" #: Selects the Lucene stemming tokenizer for English. + light_english = "lightEnglish" #: Selects the Lucene stemming tokenizer for English that does light stemming. + minimal_english = "minimalEnglish" #: Selects the Lucene stemming tokenizer for English that does minimal stemming. + possessive_english = "possessiveEnglish" #: Selects the Lucene stemming tokenizer for English that removes trailing possessives from words. + porter2 = "porter2" #: Selects the Lucene stemming tokenizer for English that uses the Porter2 stemming algorithm. + lovins = "lovins" #: Selects the Lucene stemming tokenizer for English that uses the Lovins stemming algorithm. + finnish = "finnish" #: Selects the Lucene stemming tokenizer for Finnish. + light_finnish = "lightFinnish" #: Selects the Lucene stemming tokenizer for Finnish that does light stemming. + french = "french" #: Selects the Lucene stemming tokenizer for French. + light_french = "lightFrench" #: Selects the Lucene stemming tokenizer for French that does light stemming. + minimal_french = "minimalFrench" #: Selects the Lucene stemming tokenizer for French that does minimal stemming. + galician = "galician" #: Selects the Lucene stemming tokenizer for Galician. + minimal_galician = "minimalGalician" #: Selects the Lucene stemming tokenizer for Galician that does minimal stemming. + german = "german" #: Selects the Lucene stemming tokenizer for German. + german2 = "german2" #: Selects the Lucene stemming tokenizer that uses the German variant algorithm. + light_german = "lightGerman" #: Selects the Lucene stemming tokenizer for German that does light stemming. + minimal_german = "minimalGerman" #: Selects the Lucene stemming tokenizer for German that does minimal stemming. + greek = "greek" #: Selects the Lucene stemming tokenizer for Greek. + hindi = "hindi" #: Selects the Lucene stemming tokenizer for Hindi. + hungarian = "hungarian" #: Selects the Lucene stemming tokenizer for Hungarian. + light_hungarian = "lightHungarian" #: Selects the Lucene stemming tokenizer for Hungarian that does light stemming. + indonesian = "indonesian" #: Selects the Lucene stemming tokenizer for Indonesian. + irish = "irish" #: Selects the Lucene stemming tokenizer for Irish. + italian = "italian" #: Selects the Lucene stemming tokenizer for Italian. + light_italian = "lightItalian" #: Selects the Lucene stemming tokenizer for Italian that does light stemming. + sorani = "sorani" #: Selects the Lucene stemming tokenizer for Sorani. + latvian = "latvian" #: Selects the Lucene stemming tokenizer for Latvian. + norwegian = "norwegian" #: Selects the Lucene stemming tokenizer for Norwegian (Bokmål). + light_norwegian = "lightNorwegian" #: Selects the Lucene stemming tokenizer for Norwegian (Bokmål) that does light stemming. + minimal_norwegian = "minimalNorwegian" #: Selects the Lucene stemming tokenizer for Norwegian (Bokmål) that does minimal stemming. 
+ light_nynorsk = "lightNynorsk" #: Selects the Lucene stemming tokenizer for Norwegian (Nynorsk) that does light stemming. + minimal_nynorsk = "minimalNynorsk" #: Selects the Lucene stemming tokenizer for Norwegian (Nynorsk) that does minimal stemming. + portuguese = "portuguese" #: Selects the Lucene stemming tokenizer for Portuguese. + light_portuguese = "lightPortuguese" #: Selects the Lucene stemming tokenizer for Portuguese that does light stemming. + minimal_portuguese = "minimalPortuguese" #: Selects the Lucene stemming tokenizer for Portuguese that does minimal stemming. + portuguese_rslp = "portugueseRslp" #: Selects the Lucene stemming tokenizer for Portuguese that uses the RSLP stemming algorithm. + romanian = "romanian" #: Selects the Lucene stemming tokenizer for Romanian. + russian = "russian" #: Selects the Lucene stemming tokenizer for Russian. + light_russian = "lightRussian" #: Selects the Lucene stemming tokenizer for Russian that does light stemming. + spanish = "spanish" #: Selects the Lucene stemming tokenizer for Spanish. + light_spanish = "lightSpanish" #: Selects the Lucene stemming tokenizer for Spanish that does light stemming. + swedish = "swedish" #: Selects the Lucene stemming tokenizer for Swedish. + light_swedish = "lightSwedish" #: Selects the Lucene stemming tokenizer for Swedish that does light stemming. + turkish = "turkish" #: Selects the Lucene stemming tokenizer for Turkish. + +class StopwordsList(str, Enum): + """Identifies a predefined list of language-specific stopwords. + """ + + arabic = "arabic" #: Selects the stopword list for Arabic. + armenian = "armenian" #: Selects the stopword list for Armenian. + basque = "basque" #: Selects the stopword list for Basque. + brazilian = "brazilian" #: Selects the stopword list for Portuguese (Brazil). + bulgarian = "bulgarian" #: Selects the stopword list for Bulgarian. + catalan = "catalan" #: Selects the stopword list for Catalan. + czech = "czech" #: Selects the stopword list for Czech. + danish = "danish" #: Selects the stopword list for Danish. + dutch = "dutch" #: Selects the stopword list for Dutch. + english = "english" #: Selects the stopword list for English. + finnish = "finnish" #: Selects the stopword list for Finnish. + french = "french" #: Selects the stopword list for French. + galician = "galician" #: Selects the stopword list for Galician. + german = "german" #: Selects the stopword list for German. + greek = "greek" #: Selects the stopword list for Greek. + hindi = "hindi" #: Selects the stopword list for Hindi. + hungarian = "hungarian" #: Selects the stopword list for Hungarian. + indonesian = "indonesian" #: Selects the stopword list for Indonesian. + irish = "irish" #: Selects the stopword list for Irish. + italian = "italian" #: Selects the stopword list for Italian. + latvian = "latvian" #: Selects the stopword list for Latvian. + norwegian = "norwegian" #: Selects the stopword list for Norwegian. + persian = "persian" #: Selects the stopword list for Persian. + portuguese = "portuguese" #: Selects the stopword list for Portuguese. + romanian = "romanian" #: Selects the stopword list for Romanian. + russian = "russian" #: Selects the stopword list for Russian. + sorani = "sorani" #: Selects the stopword list for Sorani. + spanish = "spanish" #: Selects the stopword list for Spanish. + swedish = "swedish" #: Selects the stopword list for Swedish. + thai = "thai" #: Selects the stopword list for Thai. + turkish = "turkish" #: Selects the stopword list for Turkish. 
+ +class TextExtractionAlgorithm(str, Enum): + """A value indicating which algorithm to use. Default is printed. + """ + + printed = "printed" #: An algorithm suitable for printed text. + handwritten = "handwritten" #: An algorithm suitable for handwritten text. + +class TextSplitMode(str, Enum): + """A value indicating which split mode to perform. + """ + + pages = "pages" #: Split the text into individual pages. + sentences = "sentences" #: Split the text into individual sentences. + class TextTranslationSkillLanguage(str, Enum): """The language codes supported for input text by TextTranslationSkill. """ @@ -469,266 +674,63 @@ class TextTranslationSkillLanguage(str, Enum): cy = "cy" #: Welsh. yua = "yua" #: Yucatec Maya. -class IndexerStatus(str, Enum): - """Represents the overall indexer status. - """ - - unknown = "unknown" - error = "error" - running = "running" - -class MicrosoftTokenizerLanguage(str, Enum): - """Lists the languages supported by the Microsoft language tokenizer. - """ - - bangla = "bangla" - bulgarian = "bulgarian" - catalan = "catalan" - chinese_simplified = "chineseSimplified" - chinese_traditional = "chineseTraditional" - croatian = "croatian" - czech = "czech" - danish = "danish" - dutch = "dutch" - english = "english" - french = "french" - german = "german" - greek = "greek" - gujarati = "gujarati" - hindi = "hindi" - icelandic = "icelandic" - indonesian = "indonesian" - italian = "italian" - japanese = "japanese" - kannada = "kannada" - korean = "korean" - malay = "malay" - malayalam = "malayalam" - marathi = "marathi" - norwegian_bokmaal = "norwegianBokmaal" - polish = "polish" - portuguese = "portuguese" - portuguese_brazilian = "portugueseBrazilian" - punjabi = "punjabi" - romanian = "romanian" - russian = "russian" - serbian_cyrillic = "serbianCyrillic" - serbian_latin = "serbianLatin" - slovenian = "slovenian" - spanish = "spanish" - swedish = "swedish" - tamil = "tamil" - telugu = "telugu" - thai = "thai" - ukrainian = "ukrainian" - urdu = "urdu" - vietnamese = "vietnamese" - -class MicrosoftStemmingTokenizerLanguage(str, Enum): - """Lists the languages supported by the Microsoft language stemming tokenizer. - """ - - arabic = "arabic" - bangla = "bangla" - bulgarian = "bulgarian" - catalan = "catalan" - croatian = "croatian" - czech = "czech" - danish = "danish" - dutch = "dutch" - english = "english" - estonian = "estonian" - finnish = "finnish" - french = "french" - german = "german" - greek = "greek" - gujarati = "gujarati" - hebrew = "hebrew" - hindi = "hindi" - hungarian = "hungarian" - icelandic = "icelandic" - indonesian = "indonesian" - italian = "italian" - kannada = "kannada" - latvian = "latvian" - lithuanian = "lithuanian" - malay = "malay" - malayalam = "malayalam" - marathi = "marathi" - norwegian_bokmaal = "norwegianBokmaal" - polish = "polish" - portuguese = "portuguese" - portuguese_brazilian = "portugueseBrazilian" - punjabi = "punjabi" - romanian = "romanian" - russian = "russian" - serbian_cyrillic = "serbianCyrillic" - serbian_latin = "serbianLatin" - slovak = "slovak" - slovenian = "slovenian" - spanish = "spanish" - swedish = "swedish" - tamil = "tamil" - telugu = "telugu" - turkish = "turkish" - ukrainian = "ukrainian" - urdu = "urdu" - -class EdgeNGramTokenFilterSide(str, Enum): - """Specifies which side of the input an n-gram should be generated from. - """ - - front = "front" - back = "back" - -class PhoneticEncoder(str, Enum): - """Identifies the type of phonetic encoder to use with a PhoneticTokenFilter. 
- """ - - metaphone = "metaphone" - double_metaphone = "doubleMetaphone" - soundex = "soundex" - refined_soundex = "refinedSoundex" - caverphone1 = "caverphone1" - caverphone2 = "caverphone2" - cologne = "cologne" - nysiis = "nysiis" - koelner_phonetik = "koelnerPhonetik" - haase_phonetik = "haasePhonetik" - beider_morse = "beiderMorse" - -class SnowballTokenFilterLanguage(str, Enum): - """The language to use for a Snowball token filter. - """ - - armenian = "armenian" - basque = "basque" - catalan = "catalan" - danish = "danish" - dutch = "dutch" - english = "english" - finnish = "finnish" - french = "french" - german = "german" - german2 = "german2" - hungarian = "hungarian" - italian = "italian" - kp = "kp" - lovins = "lovins" - norwegian = "norwegian" - porter = "porter" - portuguese = "portuguese" - romanian = "romanian" - russian = "russian" - spanish = "spanish" - swedish = "swedish" - turkish = "turkish" - -class StemmerTokenFilterLanguage(str, Enum): - """The language to use for a stemmer token filter. - """ - - arabic = "arabic" - armenian = "armenian" - basque = "basque" - brazilian = "brazilian" - bulgarian = "bulgarian" - catalan = "catalan" - czech = "czech" - danish = "danish" - dutch = "dutch" - dutch_kp = "dutchKp" - english = "english" - light_english = "lightEnglish" - minimal_english = "minimalEnglish" - possessive_english = "possessiveEnglish" - porter2 = "porter2" - lovins = "lovins" - finnish = "finnish" - light_finnish = "lightFinnish" - french = "french" - light_french = "lightFrench" - minimal_french = "minimalFrench" - galician = "galician" - minimal_galician = "minimalGalician" - german = "german" - german2 = "german2" - light_german = "lightGerman" - minimal_german = "minimalGerman" - greek = "greek" - hindi = "hindi" - hungarian = "hungarian" - light_hungarian = "lightHungarian" - indonesian = "indonesian" - irish = "irish" - italian = "italian" - light_italian = "lightItalian" - sorani = "sorani" - latvian = "latvian" - norwegian = "norwegian" - light_norwegian = "lightNorwegian" - minimal_norwegian = "minimalNorwegian" - light_nynorsk = "lightNynorsk" - minimal_nynorsk = "minimalNynorsk" - portuguese = "portuguese" - light_portuguese = "lightPortuguese" - minimal_portuguese = "minimalPortuguese" - portuguese_rslp = "portugueseRslp" - romanian = "romanian" - russian = "russian" - light_russian = "lightRussian" - spanish = "spanish" - light_spanish = "lightSpanish" - swedish = "swedish" - light_swedish = "lightSwedish" - turkish = "turkish" - -class StopwordsList(str, Enum): - """Identifies a predefined list of language-specific stopwords. +class TokenCharacterKind(str, Enum): + """Represents classes of characters on which a token filter can operate. """ - arabic = "arabic" - armenian = "armenian" - basque = "basque" - brazilian = "brazilian" - bulgarian = "bulgarian" - catalan = "catalan" - czech = "czech" - danish = "danish" - dutch = "dutch" - english = "english" - finnish = "finnish" - french = "french" - galician = "galician" - german = "german" - greek = "greek" - hindi = "hindi" - hungarian = "hungarian" - indonesian = "indonesian" - irish = "irish" - italian = "italian" - latvian = "latvian" - norwegian = "norwegian" - persian = "persian" - portuguese = "portuguese" - romanian = "romanian" - russian = "russian" - sorani = "sorani" - spanish = "spanish" - swedish = "swedish" - thai = "thai" - turkish = "turkish" + letter = "letter" #: Keeps letters in tokens. + digit = "digit" #: Keeps digits in tokens. 
+ whitespace = "whitespace" #: Keeps whitespace in tokens. + punctuation = "punctuation" #: Keeps punctuation in tokens. + symbol = "symbol" #: Keeps symbols in tokens. -class TextExtractionAlgorithm(str, Enum): - """A value indicating which algorithm to use. Default is printed. +class TokenFilterName(str, Enum): + """Defines the names of all token filters supported by Azure Cognitive Search. """ - printed = "printed" - handwritten = "handwritten" + arabic_normalization = "arabic_normalization" #: A token filter that applies the Arabic normalizer to normalize the orthography. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ar/ArabicNormalizationFilter.html. + apostrophe = "apostrophe" #: Strips all characters after an apostrophe (including the apostrophe itself). See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/tr/ApostropheFilter.html. + ascii_folding = "asciifolding" #: Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ASCIIFoldingFilter.html. + cjk_bigram = "cjk_bigram" #: Forms bigrams of CJK terms that are generated from the standard tokenizer. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/cjk/CJKBigramFilter.html. + cjk_width = "cjk_width" #: Normalizes CJK width differences. Folds fullwidth ASCII variants into the equivalent basic Latin, and half-width Katakana variants into the equivalent Kana. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/cjk/CJKWidthFilter.html. + classic = "classic" #: Removes English possessives, and dots from acronyms. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/standard/ClassicFilter.html. + common_gram = "common_grams" #: Construct bigrams for frequently occurring terms while indexing. Single terms are still indexed too, with bigrams overlaid. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/commongrams/CommonGramsFilter.html. + edge_n_gram = "edgeNGram_v2" #: Generates n-grams of the given size(s) starting from the front or the back of an input token. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/EdgeNGramTokenFilter.html. + elision = "elision" #: Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/util/ElisionFilter.html. + german_normalization = "german_normalization" #: Normalizes German characters according to the heuristics of the German2 snowball algorithm. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/de/GermanNormalizationFilter.html. + hindi_normalization = "hindi_normalization" #: Normalizes text in Hindi to remove some differences in spelling variations. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/hi/HindiNormalizationFilter.html. + indic_normalization = "indic_normalization" #: Normalizes the Unicode representation of text in Indian languages. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/in/IndicNormalizationFilter.html. 
+ keyword_repeat = "keyword_repeat" #: Emits each incoming token twice, once as keyword and once as non-keyword. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/KeywordRepeatFilter.html. + k_stem = "kstem" #: A high-performance kstem filter for English. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/en/KStemFilter.html. + length = "length" #: Removes words that are too long or too short. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/LengthFilter.html. + limit = "limit" #: Limits the number of tokens while indexing. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/LimitTokenCountFilter.html. + lowercase = "lowercase" #: Normalizes token text to lower case. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/LowerCaseFilter.htm. + n_gram = "nGram_v2" #: Generates n-grams of the given size(s). See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ngram/NGramTokenFilter.html. + persian_normalization = "persian_normalization" #: Applies normalization for Persian. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/fa/PersianNormalizationFilter.html. + phonetic = "phonetic" #: Create tokens for phonetic matches. See https://lucene.apache.org/core/4_10_3/analyzers-phonetic/org/apache/lucene/analysis/phonetic/package-tree.html. + porter_stem = "porter_stem" #: Uses the Porter stemming algorithm to transform the token stream. See http://tartarus.org/~martin/PorterStemmer. + reverse = "reverse" #: Reverses the token string. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/reverse/ReverseStringFilter.html. + scandinavian_normalization = "scandinavian_normalization" #: Normalizes use of the interchangeable Scandinavian characters. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ScandinavianNormalizationFilter.html. + scandinavian_folding_normalization = "scandinavian_folding" #: Folds Scandinavian characters åÅäæÄÆ->a and öÖøØ->o. It also discriminates against use of double vowels aa, ae, ao, oe and oo, leaving just the first one. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/ScandinavianFoldingFilter.html. + shingle = "shingle" #: Creates combinations of tokens as a single token. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/shingle/ShingleFilter.html. + snowball = "snowball" #: A filter that stems words using a Snowball-generated stemmer. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/snowball/SnowballFilter.html. + sorani_normalization = "sorani_normalization" #: Normalizes the Unicode representation of Sorani text. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/ckb/SoraniNormalizationFilter.html. + stemmer = "stemmer" #: Language specific stemming filter. See https://docs.microsoft.com/rest/api/searchservice/Custom-analyzers-in-Azure-Search#TokenFilters. + stopwords = "stopwords" #: Removes stop words from a token stream. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/StopFilter.html. + trim = "trim" #: Trims leading and trailing whitespace from tokens. 
See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/TrimFilter.html. + truncate = "truncate" #: Truncates the terms to a specific length. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/TruncateTokenFilter.html. + unique = "unique" #: Filters out tokens with same text as the previous token. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/miscellaneous/RemoveDuplicatesTokenFilter.html. + uppercase = "uppercase" #: Normalizes token text to upper case. See http://lucene.apache.org/core/4_10_3/analyzers-common/org/apache/lucene/analysis/core/UpperCaseFilter.html. + word_delimiter = "word_delimiter" #: Splits words into subwords and performs optional transformations on subword groups. -class TextSplitMode(str, Enum): - """A value indicating which split mode to perform. +class VisualFeature(str, Enum): + """The strings indicating what visual feature types to return. """ - pages = "pages" - sentences = "sentences" + adult = "adult" #: Visual features recognized as adult persons. + brands = "brands" #: Visual features recognized as commercial brands. + categories = "categories" #: Categories. + description = "description" #: Description. + faces = "faces" #: Visual features recognized as people faces. + objects = "objects" #: Visual features recognized as objects. + tags = "tags" #: Tags. diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/__init__.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/__init__.py index d87e3cc4debb..83a82e8a47f0 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/__init__.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/__init__.py @@ -1,6 +1,8 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator}) +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_data_sources_operations.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_data_sources_operations.py index 7ba6982701cc..f4c91d48c7af 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_data_sources_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_data_sources_operations.py @@ -1,9 +1,11 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator}) +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- -from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union +from typing import TYPE_CHECKING import warnings from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error @@ -12,8 +14,12 @@ from .. import models -T = TypeVar('T') -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union + + T = TypeVar('T') + ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] class DataSourcesOperations(object): """DataSourcesOperations operations. @@ -22,7 +28,7 @@ class DataSourcesOperations(object): instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. - :type models: ~search_service_client.models + :type models: ~azure.search.documents.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. @@ -40,43 +46,45 @@ def __init__(self, client, config, serializer, deserializer): def create_or_update( self, data_source_name, # type: str - data_source, # type: "models.DataSource" + data_source, # type: "models.SearchIndexerDataSource" + if_match=None, # type: Optional[str] + if_none_match=None, # type: Optional[str] request_options=None, # type: Optional["models.RequestOptions"] - access_condition=None, # type: Optional["models.AccessCondition"] **kwargs # type: Any ): - # type: (...) -> "models.DataSource" + # type: (...) -> "models.SearchIndexerDataSource" """Creates a new datasource or updates a datasource if it already exists. :param data_source_name: The name of the datasource to create or update. :type data_source_name: str :param data_source: The definition of the datasource to create or update. - :type data_source: ~search_service_client.models.DataSource + :type data_source: ~azure.search.documents.models.SearchIndexerDataSource + :param if_match: Defines the If-Match condition. The operation will be performed only if the + ETag on the server matches this value. + :type if_match: str + :param if_none_match: Defines the If-None-Match condition. The operation will be performed only + if the ETag on the server does not match this value. + :type if_none_match: str :param request_options: Parameter group. - :type request_options: ~search_service_client.models.RequestOptions - :param access_condition: Parameter group. 
- :type access_condition: ~search_service_client.models.AccessCondition + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response - :return: DataSource or the result of cls(response) - :rtype: ~search_service_client.models.DataSource or ~search_service_client.models.DataSource + :return: SearchIndexerDataSource or the result of cls(response) + :rtype: ~azure.search.documents.models.SearchIndexerDataSource :raises: ~azure.core.exceptions.HttpResponseError """ - cls = kwargs.pop('cls', None) # type: ClsType["models.DataSource"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + cls = kwargs.pop('cls', None) # type: ClsType["models.SearchIndexerDataSource"] + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None - _if_match = None - _if_none_match = None - if access_condition is not None: - _if_match = access_condition.if_match - _if_none_match = access_condition.if_none_match if request_options is not None: _x_ms_client_request_id = request_options.x_ms_client_request_id prefer = "return=representation" api_version = "2019-05-06-Preview" + content_type = kwargs.pop("content_type", "application/json") # Construct URL - url = self.create_or_update.metadata['url'] + url = self.create_or_update.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'dataSourceName': self._serialize.url("data_source_name", data_source_name, 'str'), @@ -91,17 +99,17 @@ def create_or_update( header_parameters = {} # type: Dict[str, Any] if _x_ms_client_request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') header_parameters['Prefer'] = self._serialize.header("prefer", prefer, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = 'application/json' - header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json') # Construct and send request body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(data_source, 'DataSource') + body_content = self._serialize.body(data_source, 'SearchIndexerDataSource') body_content_kwargs['content'] = body_content request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) @@ -115,22 +123,23 @@ def create_or_update( deserialized = None if response.status_code == 200: - deserialized = self._deserialize('DataSource', pipeline_response) + deserialized = self._deserialize('SearchIndexerDataSource', pipeline_response) if response.status_code == 201: - deserialized = self._deserialize('DataSource', pipeline_response) + deserialized = self._deserialize('SearchIndexerDataSource', pipeline_response) if cls: return 
cls(pipeline_response, deserialized, {}) return deserialized - create_or_update.metadata = {'url': '/datasources(\'{dataSourceName}\')'} + create_or_update.metadata = {'url': '/datasources(\'{dataSourceName}\')'} # type: ignore def delete( self, data_source_name, # type: str + if_match=None, # type: Optional[str] + if_none_match=None, # type: Optional[str] request_options=None, # type: Optional["models.RequestOptions"] - access_condition=None, # type: Optional["models.AccessCondition"] **kwargs # type: Any ): # type: (...) -> None @@ -138,30 +147,30 @@ def delete( :param data_source_name: The name of the datasource to delete. :type data_source_name: str + :param if_match: Defines the If-Match condition. The operation will be performed only if the + ETag on the server matches this value. + :type if_match: str + :param if_none_match: Defines the If-None-Match condition. The operation will be performed only + if the ETag on the server does not match this value. + :type if_none_match: str :param request_options: Parameter group. - :type request_options: ~search_service_client.models.RequestOptions - :param access_condition: Parameter group. - :type access_condition: ~search_service_client.models.AccessCondition + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: None or the result of cls(response) :rtype: None :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None - _if_match = None - _if_none_match = None - if access_condition is not None: - _if_match = access_condition.if_match - _if_none_match = access_condition.if_none_match if request_options is not None: _x_ms_client_request_id = request_options.x_ms_client_request_id api_version = "2019-05-06-Preview" # Construct URL - url = self.delete.metadata['url'] + url = self.delete.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'dataSourceName': self._serialize.url("data_source_name", data_source_name, 'str'), @@ -176,10 +185,10 @@ def delete( header_parameters = {} # type: Dict[str, Any] if _x_ms_client_request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') # Construct and send request request = self._client.delete(url, query_parameters, header_parameters) @@ -194,7 +203,7 @@ def delete( if cls: return cls(pipeline_response, None, {}) - delete.metadata = {'url': '/datasources(\'{dataSourceName}\')'} + delete.metadata = {'url': '/datasources(\'{dataSourceName}\')'} # type: ignore def get( self, @@ -202,20 +211,21 @@ def get( request_options=None, # 
type: Optional["models.RequestOptions"] **kwargs # type: Any ): - # type: (...) -> "models.DataSource" + # type: (...) -> "models.SearchIndexerDataSource" """Retrieves a datasource definition. :param data_source_name: The name of the datasource to retrieve. :type data_source_name: str :param request_options: Parameter group. - :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response - :return: DataSource or the result of cls(response) - :rtype: ~search_service_client.models.DataSource + :return: SearchIndexerDataSource or the result of cls(response) + :rtype: ~azure.search.documents.models.SearchIndexerDataSource :raises: ~azure.core.exceptions.HttpResponseError """ - cls = kwargs.pop('cls', None) # type: ClsType["models.DataSource"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + cls = kwargs.pop('cls', None) # type: ClsType["models.SearchIndexerDataSource"] + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: @@ -223,7 +233,7 @@ def get( api_version = "2019-05-06-Preview" # Construct URL - url = self.get.metadata['url'] + url = self.get.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'dataSourceName': self._serialize.url("data_source_name", data_source_name, 'str'), @@ -250,13 +260,13 @@ def get( error = self._deserialize(models.SearchError, response) raise HttpResponseError(response=response, model=error) - deserialized = self._deserialize('DataSource', pipeline_response) + deserialized = self._deserialize('SearchIndexerDataSource', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized - get.metadata = {'url': '/datasources(\'{dataSourceName}\')'} + get.metadata = {'url': '/datasources(\'{dataSourceName}\')'} # type: ignore def list( self, @@ -272,14 +282,15 @@ def list( properties. :type select: str :param request_options: Parameter group. 
- :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: ListDataSourcesResult or the result of cls(response) - :rtype: ~search_service_client.models.ListDataSourcesResult + :rtype: ~azure.search.documents.models.ListDataSourcesResult :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.ListDataSourcesResult"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: @@ -287,7 +298,7 @@ def list( api_version = "2019-05-06-Preview" # Construct URL - url = self.list.metadata['url'] + url = self.list.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -321,36 +332,38 @@ def list( return cls(pipeline_response, deserialized, {}) return deserialized - list.metadata = {'url': '/datasources'} + list.metadata = {'url': '/datasources'} # type: ignore def create( self, - data_source, # type: "models.DataSource" + data_source, # type: "models.SearchIndexerDataSource" request_options=None, # type: Optional["models.RequestOptions"] **kwargs # type: Any ): - # type: (...) -> "models.DataSource" + # type: (...) -> "models.SearchIndexerDataSource" """Creates a new datasource. :param data_source: The definition of the datasource to create. - :type data_source: ~search_service_client.models.DataSource + :type data_source: ~azure.search.documents.models.SearchIndexerDataSource :param request_options: Parameter group. 
- :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response - :return: DataSource or the result of cls(response) - :rtype: ~search_service_client.models.DataSource + :return: SearchIndexerDataSource or the result of cls(response) + :rtype: ~azure.search.documents.models.SearchIndexerDataSource :raises: ~azure.core.exceptions.HttpResponseError """ - cls = kwargs.pop('cls', None) # type: ClsType["models.DataSource"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + cls = kwargs.pop('cls', None) # type: ClsType["models.SearchIndexerDataSource"] + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: _x_ms_client_request_id = request_options.x_ms_client_request_id api_version = "2019-05-06-Preview" + content_type = kwargs.pop("content_type", "application/json") # Construct URL - url = self.create.metadata['url'] + url = self.create.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -364,12 +377,12 @@ def create( header_parameters = {} # type: Dict[str, Any] if _x_ms_client_request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = 'application/json' - header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json') # Construct and send request body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(data_source, 'DataSource') + body_content = self._serialize.body(data_source, 'SearchIndexerDataSource') body_content_kwargs['content'] = body_content request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) @@ -381,10 +394,10 @@ def create( error = self._deserialize(models.SearchError, response) raise HttpResponseError(response=response, model=error) - deserialized = self._deserialize('DataSource', pipeline_response) + deserialized = self._deserialize('SearchIndexerDataSource', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized - create.metadata = {'url': '/datasources'} + create.metadata = {'url': '/datasources'} # type: ignore diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_indexers_operations.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_indexers_operations.py index 38ed69e0fb8d..5e2392a2806f 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_indexers_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_indexers_operations.py @@ -1,9 +1,11 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator}) +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union +from typing import TYPE_CHECKING import warnings from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error @@ -12,8 +14,12 @@ from .. import models -T = TypeVar('T') -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union + + T = TypeVar('T') + ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] class IndexersOperations(object): """IndexersOperations operations. @@ -22,7 +28,7 @@ class IndexersOperations(object): instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. - :type models: ~search_service_client.models + :type models: ~azure.search.documents.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. @@ -49,14 +55,15 @@ def reset( :param indexer_name: The name of the indexer to reset. :type indexer_name: str :param request_options: Parameter group. - :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: None or the result of cls(response) :rtype: None :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: @@ -64,7 +71,7 @@ def reset( api_version = "2019-05-06-Preview" # Construct URL - url = self.reset.metadata['url'] + url = self.reset.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'indexerName': self._serialize.url("indexer_name", indexer_name, 'str'), @@ -93,7 +100,7 @@ def reset( if cls: return cls(pipeline_response, None, {}) - reset.metadata = {'url': '/indexers(\'{indexerName}\')/search.reset'} + reset.metadata = {'url': '/indexers(\'{indexerName}\')/search.reset'} # type: ignore def run( self, @@ -107,14 +114,15 @@ def run( :param indexer_name: The name of the indexer to run. :type indexer_name: str :param request_options: Parameter group. 
- :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: None or the result of cls(response) :rtype: None :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: @@ -122,7 +130,7 @@ def run( api_version = "2019-05-06-Preview" # Construct URL - url = self.run.metadata['url'] + url = self.run.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'indexerName': self._serialize.url("indexer_name", indexer_name, 'str'), @@ -151,48 +159,50 @@ def run( if cls: return cls(pipeline_response, None, {}) - run.metadata = {'url': '/indexers(\'{indexerName}\')/search.run'} + run.metadata = {'url': '/indexers(\'{indexerName}\')/search.run'} # type: ignore def create_or_update( self, indexer_name, # type: str - indexer, # type: "models.Indexer" + indexer, # type: "models.SearchIndexer" + if_match=None, # type: Optional[str] + if_none_match=None, # type: Optional[str] request_options=None, # type: Optional["models.RequestOptions"] - access_condition=None, # type: Optional["models.AccessCondition"] **kwargs # type: Any ): - # type: (...) -> "models.Indexer" + # type: (...) -> "models.SearchIndexer" """Creates a new indexer or updates an indexer if it already exists. :param indexer_name: The name of the indexer to create or update. :type indexer_name: str :param indexer: The definition of the indexer to create or update. - :type indexer: ~search_service_client.models.Indexer + :type indexer: ~azure.search.documents.models.SearchIndexer + :param if_match: Defines the If-Match condition. The operation will be performed only if the + ETag on the server matches this value. + :type if_match: str + :param if_none_match: Defines the If-None-Match condition. The operation will be performed only + if the ETag on the server does not match this value. + :type if_none_match: str :param request_options: Parameter group. - :type request_options: ~search_service_client.models.RequestOptions - :param access_condition: Parameter group. 
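The `if_match`/`if_none_match` keywords documented above replace the old `AccessCondition` parameter group (its removal continues just below). A sketch of the optimistic-concurrency pattern they enable; `indexers_client` is an assumed `IndexersOperations` instance, the indexer name is hypothetical, and `e_tag` is the model attribute the generated serializer maps to `@odata.etag`:

from azure.core.exceptions import HttpResponseError

indexer = indexers_client.get("hotels-indexer")
indexer.description = "nightly hotel sync"
try:
    indexers_client.create_or_update(
        "hotels-indexer",
        indexer,
        if_match=indexer.e_tag,  # apply only if the server copy is unchanged
    )
except HttpResponseError:
    # Precondition failed: someone updated the indexer after we read it.
    pass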
- :type access_condition: ~search_service_client.models.AccessCondition + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response - :return: Indexer or the result of cls(response) - :rtype: ~search_service_client.models.Indexer or ~search_service_client.models.Indexer + :return: SearchIndexer or the result of cls(response) + :rtype: ~azure.search.documents.models.SearchIndexer :raises: ~azure.core.exceptions.HttpResponseError """ - cls = kwargs.pop('cls', None) # type: ClsType["models.Indexer"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + cls = kwargs.pop('cls', None) # type: ClsType["models.SearchIndexer"] + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None - _if_match = None - _if_none_match = None - if access_condition is not None: - _if_match = access_condition.if_match - _if_none_match = access_condition.if_none_match if request_options is not None: _x_ms_client_request_id = request_options.x_ms_client_request_id prefer = "return=representation" api_version = "2019-05-06-Preview" + content_type = kwargs.pop("content_type", "application/json") # Construct URL - url = self.create_or_update.metadata['url'] + url = self.create_or_update.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'indexerName': self._serialize.url("indexer_name", indexer_name, 'str'), @@ -207,17 +217,17 @@ def create_or_update( header_parameters = {} # type: Dict[str, Any] if _x_ms_client_request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') header_parameters['Prefer'] = self._serialize.header("prefer", prefer, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = 'application/json' - header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json') # Construct and send request body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(indexer, 'Indexer') + body_content = self._serialize.body(indexer, 'SearchIndexer') body_content_kwargs['content'] = body_content request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) @@ -231,22 +241,23 @@ def create_or_update( deserialized = None if response.status_code == 200: - deserialized = self._deserialize('Indexer', pipeline_response) + deserialized = self._deserialize('SearchIndexer', pipeline_response) if response.status_code == 201: - deserialized = self._deserialize('Indexer', pipeline_response) + deserialized = self._deserialize('SearchIndexer', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized - create_or_update.metadata = {'url': 
'/indexers(\'{indexerName}\')'} + create_or_update.metadata = {'url': '/indexers(\'{indexerName}\')'} # type: ignore def delete( self, indexer_name, # type: str + if_match=None, # type: Optional[str] + if_none_match=None, # type: Optional[str] request_options=None, # type: Optional["models.RequestOptions"] - access_condition=None, # type: Optional["models.AccessCondition"] **kwargs # type: Any ): # type: (...) -> None @@ -254,30 +265,30 @@ def delete( :param indexer_name: The name of the indexer to delete. :type indexer_name: str + :param if_match: Defines the If-Match condition. The operation will be performed only if the + ETag on the server matches this value. + :type if_match: str + :param if_none_match: Defines the If-None-Match condition. The operation will be performed only + if the ETag on the server does not match this value. + :type if_none_match: str :param request_options: Parameter group. - :type request_options: ~search_service_client.models.RequestOptions - :param access_condition: Parameter group. - :type access_condition: ~search_service_client.models.AccessCondition + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: None or the result of cls(response) :rtype: None :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None - _if_match = None - _if_none_match = None - if access_condition is not None: - _if_match = access_condition.if_match - _if_none_match = access_condition.if_none_match if request_options is not None: _x_ms_client_request_id = request_options.x_ms_client_request_id api_version = "2019-05-06-Preview" # Construct URL - url = self.delete.metadata['url'] + url = self.delete.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'indexerName': self._serialize.url("indexer_name", indexer_name, 'str'), @@ -292,10 +303,10 @@ def delete( header_parameters = {} # type: Dict[str, Any] if _x_ms_client_request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') # Construct and send request request = self._client.delete(url, query_parameters, header_parameters) @@ -310,7 +321,7 @@ def delete( if cls: return cls(pipeline_response, None, {}) - delete.metadata = {'url': '/indexers(\'{indexerName}\')'} + delete.metadata = {'url': '/indexers(\'{indexerName}\')'} # type: ignore def get( self, @@ -318,20 +329,21 @@ def get( request_options=None, # type: Optional["models.RequestOptions"] **kwargs # type: Any ): - # type: (...) -> "models.Indexer" + # type: (...) 
-> "models.SearchIndexer" """Retrieves an indexer definition. :param indexer_name: The name of the indexer to retrieve. :type indexer_name: str :param request_options: Parameter group. - :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response - :return: Indexer or the result of cls(response) - :rtype: ~search_service_client.models.Indexer + :return: SearchIndexer or the result of cls(response) + :rtype: ~azure.search.documents.models.SearchIndexer :raises: ~azure.core.exceptions.HttpResponseError """ - cls = kwargs.pop('cls', None) # type: ClsType["models.Indexer"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + cls = kwargs.pop('cls', None) # type: ClsType["models.SearchIndexer"] + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: @@ -339,7 +351,7 @@ def get( api_version = "2019-05-06-Preview" # Construct URL - url = self.get.metadata['url'] + url = self.get.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'indexerName': self._serialize.url("indexer_name", indexer_name, 'str'), @@ -366,13 +378,13 @@ def get( error = self._deserialize(models.SearchError, response) raise HttpResponseError(response=response, model=error) - deserialized = self._deserialize('Indexer', pipeline_response) + deserialized = self._deserialize('SearchIndexer', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized - get.metadata = {'url': '/indexers(\'{indexerName}\')'} + get.metadata = {'url': '/indexers(\'{indexerName}\')'} # type: ignore def list( self, @@ -388,14 +400,15 @@ def list( properties. :type select: str :param request_options: Parameter group. 
- :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: ListIndexersResult or the result of cls(response) - :rtype: ~search_service_client.models.ListIndexersResult + :rtype: ~azure.search.documents.models.ListIndexersResult :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.ListIndexersResult"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: @@ -403,7 +416,7 @@ def list( api_version = "2019-05-06-Preview" # Construct URL - url = self.list.metadata['url'] + url = self.list.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -437,36 +450,38 @@ def list( return cls(pipeline_response, deserialized, {}) return deserialized - list.metadata = {'url': '/indexers'} + list.metadata = {'url': '/indexers'} # type: ignore def create( self, - indexer, # type: "models.Indexer" + indexer, # type: "models.SearchIndexer" request_options=None, # type: Optional["models.RequestOptions"] **kwargs # type: Any ): - # type: (...) -> "models.Indexer" + # type: (...) -> "models.SearchIndexer" """Creates a new indexer. :param indexer: The definition of the indexer to create. - :type indexer: ~search_service_client.models.Indexer + :type indexer: ~azure.search.documents.models.SearchIndexer :param request_options: Parameter group. 
- :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response - :return: Indexer or the result of cls(response) - :rtype: ~search_service_client.models.Indexer + :return: SearchIndexer or the result of cls(response) + :rtype: ~azure.search.documents.models.SearchIndexer :raises: ~azure.core.exceptions.HttpResponseError """ - cls = kwargs.pop('cls', None) # type: ClsType["models.Indexer"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + cls = kwargs.pop('cls', None) # type: ClsType["models.SearchIndexer"] + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: _x_ms_client_request_id = request_options.x_ms_client_request_id api_version = "2019-05-06-Preview" + content_type = kwargs.pop("content_type", "application/json") # Construct URL - url = self.create.metadata['url'] + url = self.create.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -480,12 +495,12 @@ def create( header_parameters = {} # type: Dict[str, Any] if _x_ms_client_request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = 'application/json' - header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json') # Construct and send request body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(indexer, 'Indexer') + body_content = self._serialize.body(indexer, 'SearchIndexer') body_content_kwargs['content'] = body_content request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) @@ -497,13 +512,13 @@ def create( error = self._deserialize(models.SearchError, response) raise HttpResponseError(response=response, model=error) - deserialized = self._deserialize('Indexer', pipeline_response) + deserialized = self._deserialize('SearchIndexer', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized - create.metadata = {'url': '/indexers'} + create.metadata = {'url': '/indexers'} # type: ignore def get_status( self, @@ -511,20 +526,21 @@ def get_status( request_options=None, # type: Optional["models.RequestOptions"] **kwargs # type: Any ): - # type: (...) -> "models.IndexerExecutionInfo" + # type: (...) -> "models.SearchIndexerStatus" """Returns the current status and execution history of an indexer. :param indexer_name: The name of the indexer for which to retrieve status. :type indexer_name: str :param request_options: Parameter group. 
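With `IndexerExecutionInfo` renamed to `SearchIndexerStatus` in the `get_status` return type above, a quick usage sketch; the attribute names (`status`, `last_result`, `execution_history`) follow the generated model, and `indexers_client` is again an assumed operations instance:

indexers_client.run("hotels-indexer")

status = indexers_client.get_status("hotels-indexer")  # SearchIndexerStatus
print("overall:", status.status)
if status.last_result is not None:
    print("last run finished with:", status.last_result.status)
for execution in status.execution_history:
    print(execution.start_time, execution.status)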
- :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response - :return: IndexerExecutionInfo or the result of cls(response) - :rtype: ~search_service_client.models.IndexerExecutionInfo + :return: SearchIndexerStatus or the result of cls(response) + :rtype: ~azure.search.documents.models.SearchIndexerStatus :raises: ~azure.core.exceptions.HttpResponseError """ - cls = kwargs.pop('cls', None) # type: ClsType["models.IndexerExecutionInfo"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + cls = kwargs.pop('cls', None) # type: ClsType["models.SearchIndexerStatus"] + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: @@ -532,7 +548,7 @@ def get_status( api_version = "2019-05-06-Preview" # Construct URL - url = self.get_status.metadata['url'] + url = self.get_status.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'indexerName': self._serialize.url("indexer_name", indexer_name, 'str'), @@ -559,10 +575,10 @@ def get_status( error = self._deserialize(models.SearchError, response) raise HttpResponseError(response=response, model=error) - deserialized = self._deserialize('IndexerExecutionInfo', pipeline_response) + deserialized = self._deserialize('SearchIndexerStatus', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized - get_status.metadata = {'url': '/indexers(\'{indexerName}\')/search.status'} + get_status.metadata = {'url': '/indexers(\'{indexerName}\')/search.status'} # type: ignore diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_indexes_operations.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_indexes_operations.py index 8fc85d56b781..159a57239391 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_indexes_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_indexes_operations.py @@ -1,9 +1,11 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator}) +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union +from typing import TYPE_CHECKING import warnings from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error @@ -12,8 +14,12 @@ from .. 
import models -T = TypeVar('T') -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union + + T = TypeVar('T') + ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] class IndexesOperations(object): """IndexesOperations operations. @@ -22,7 +28,7 @@ class IndexesOperations(object): instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. - :type models: ~search_service_client.models + :type models: ~azure.search.documents.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. @@ -39,32 +45,34 @@ def __init__(self, client, config, serializer, deserializer): def create( self, - index, # type: "models.Index" + index, # type: "models.SearchIndex" request_options=None, # type: Optional["models.RequestOptions"] **kwargs # type: Any ): - # type: (...) -> "models.Index" + # type: (...) -> "models.SearchIndex" """Creates a new search index. :param index: The definition of the index to create. - :type index: ~search_service_client.models.Index + :type index: ~azure.search.documents.models.SearchIndex :param request_options: Parameter group. - :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response - :return: Index or the result of cls(response) - :rtype: ~search_service_client.models.Index + :return: SearchIndex or the result of cls(response) + :rtype: ~azure.search.documents.models.SearchIndex :raises: ~azure.core.exceptions.HttpResponseError """ - cls = kwargs.pop('cls', None) # type: ClsType["models.Index"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + cls = kwargs.pop('cls', None) # type: ClsType["models.SearchIndex"] + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: _x_ms_client_request_id = request_options.x_ms_client_request_id api_version = "2019-05-06-Preview" + content_type = kwargs.pop("content_type", "application/json") # Construct URL - url = self.create.metadata['url'] + url = self.create.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -78,12 +86,12 @@ def create( header_parameters = {} # type: Dict[str, Any] if _x_ms_client_request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = 'application/json' - header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json') # Construct and send request body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(index, 'Index') + body_content = self._serialize.body(index, 'SearchIndex') body_content_kwargs['content'] = body_content request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) @@ -95,13 
+103,13 @@ def create( error = self._deserialize(models.SearchError, response) raise HttpResponseError(response=response, model=error) - deserialized = self._deserialize('Index', pipeline_response) + deserialized = self._deserialize('SearchIndex', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized - create.metadata = {'url': '/indexes'} + create.metadata = {'url': '/indexes'} # type: ignore def list( self, @@ -117,14 +125,15 @@ def list( default is all properties. :type select: str :param request_options: Parameter group. - :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: ListIndexesResult or the result of cls(response) - :rtype: ~search_service_client.models.ListIndexesResult + :rtype: ~azure.search.documents.models.ListIndexesResult :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.ListIndexesResult"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: @@ -132,7 +141,7 @@ def list( api_version = "2019-05-06-Preview" # Construct URL - url = self.list.metadata['url'] + url = self.list.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -166,55 +175,57 @@ def list( return cls(pipeline_response, deserialized, {}) return deserialized - list.metadata = {'url': '/indexes'} + list.metadata = {'url': '/indexes'} # type: ignore def create_or_update( self, index_name, # type: str - index, # type: "models.Index" + index, # type: "models.SearchIndex" allow_index_downtime=None, # type: Optional[bool] + if_match=None, # type: Optional[str] + if_none_match=None, # type: Optional[str] request_options=None, # type: Optional["models.RequestOptions"] - access_condition=None, # type: Optional["models.AccessCondition"] **kwargs # type: Any ): - # type: (...) -> "models.Index" + # type: (...) -> "models.SearchIndex" """Creates a new search index or updates an index if it already exists. :param index_name: The definition of the index to create or update. :type index_name: str :param index: The definition of the index to create or update. - :type index: ~search_service_client.models.Index + :type index: ~azure.search.documents.models.SearchIndex :param allow_index_downtime: Allows new analyzers, tokenizers, token filters, or char filters to be added to an index by taking the index offline for at least a few seconds. This temporarily causes indexing and query requests to fail. Performance and write availability of the index can be impaired for several minutes after the index is updated, or longer for very large indexes. :type allow_index_downtime: bool + :param if_match: Defines the If-Match condition. The operation will be performed only if the + ETag on the server matches this value. + :type if_match: str + :param if_none_match: Defines the If-None-Match condition. The operation will be performed only + if the ETag on the server does not match this value. + :type if_none_match: str :param request_options: Parameter group. 
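Also visible earlier in this file: `Content-Type` is no longer popped inline while the headers are assembled; it is popped once up front and pushed through `self._serialize.header` like every other header, so a caller override takes the same validation path. Sketched below with an assumed `indexes_client` and a `SearchIndex` instance `index` built elsewhere:

# Default and explicit content types now both pass through the serializer.
indexes_client.create(index)  # defaults to "application/json"
indexes_client.create(index, content_type="application/json; charset=utf-8")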
- :type request_options: ~search_service_client.models.RequestOptions - :param access_condition: Parameter group. - :type access_condition: ~search_service_client.models.AccessCondition + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response - :return: Index or the result of cls(response) - :rtype: ~search_service_client.models.Index or ~search_service_client.models.Index + :return: SearchIndex or the result of cls(response) + :rtype: ~azure.search.documents.models.SearchIndex :raises: ~azure.core.exceptions.HttpResponseError """ - cls = kwargs.pop('cls', None) # type: ClsType["models.Index"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + cls = kwargs.pop('cls', None) # type: ClsType["models.SearchIndex"] + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None - _if_match = None - _if_none_match = None - if access_condition is not None: - _if_match = access_condition.if_match - _if_none_match = access_condition.if_none_match if request_options is not None: _x_ms_client_request_id = request_options.x_ms_client_request_id prefer = "return=representation" api_version = "2019-05-06-Preview" + content_type = kwargs.pop("content_type", "application/json") # Construct URL - url = self.create_or_update.metadata['url'] + url = self.create_or_update.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'indexName': self._serialize.url("index_name", index_name, 'str'), @@ -231,17 +242,17 @@ def create_or_update( header_parameters = {} # type: Dict[str, Any] if _x_ms_client_request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') header_parameters['Prefer'] = self._serialize.header("prefer", prefer, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = 'application/json' - header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json') # Construct and send request body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(index, 'Index') + body_content = self._serialize.body(index, 'SearchIndex') body_content_kwargs['content'] = body_content request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) @@ -255,53 +266,54 @@ def create_or_update( deserialized = None if response.status_code == 200: - deserialized = self._deserialize('Index', pipeline_response) + deserialized = self._deserialize('SearchIndex', pipeline_response) if response.status_code == 201: - deserialized = self._deserialize('Index', pipeline_response) + deserialized = self._deserialize('SearchIndex', pipeline_response) if cls: return 
cls(pipeline_response, deserialized, {}) return deserialized - create_or_update.metadata = {'url': '/indexes(\'{indexName}\')'} + create_or_update.metadata = {'url': '/indexes(\'{indexName}\')'} # type: ignore def delete( self, index_name, # type: str + if_match=None, # type: Optional[str] + if_none_match=None, # type: Optional[str] request_options=None, # type: Optional["models.RequestOptions"] - access_condition=None, # type: Optional["models.AccessCondition"] **kwargs # type: Any ): # type: (...) -> None - """Deletes a search index and all the documents it contains. + """Deletes a search index and all the documents it contains. This operation is permanent, with no recovery option. Make sure you have a master copy of your index definition, data ingestion code, and a backup of the primary data source in case you need to re-build the index. :param index_name: The name of the index to delete. :type index_name: str + :param if_match: Defines the If-Match condition. The operation will be performed only if the + ETag on the server matches this value. + :type if_match: str + :param if_none_match: Defines the If-None-Match condition. The operation will be performed only + if the ETag on the server does not match this value. + :type if_none_match: str :param request_options: Parameter group. - :type request_options: ~search_service_client.models.RequestOptions - :param access_condition: Parameter group. - :type access_condition: ~search_service_client.models.AccessCondition + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: None or the result of cls(response) :rtype: None :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None - _if_match = None - _if_none_match = None - if access_condition is not None: - _if_match = access_condition.if_match - _if_none_match = access_condition.if_none_match if request_options is not None: _x_ms_client_request_id = request_options.x_ms_client_request_id api_version = "2019-05-06-Preview" # Construct URL - url = self.delete.metadata['url'] + url = self.delete.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'indexName': self._serialize.url("index_name", index_name, 'str'), @@ -316,10 +328,10 @@ def delete( header_parameters = {} # type: Dict[str, Any] if _x_ms_client_request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') # Construct and send request request = self._client.delete(url, query_parameters, header_parameters) @@ -334,7 +346,7 @@ def delete( if cls: return 
cls(pipeline_response, None, {}) - delete.metadata = {'url': '/indexes(\'{indexName}\')'} + delete.metadata = {'url': '/indexes(\'{indexName}\')'} # type: ignore def get( self, @@ -342,20 +354,21 @@ def get( request_options=None, # type: Optional["models.RequestOptions"] **kwargs # type: Any ): - # type: (...) -> "models.Index" + # type: (...) -> "models.SearchIndex" """Retrieves an index definition. :param index_name: The name of the index to retrieve. :type index_name: str :param request_options: Parameter group. - :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response - :return: Index or the result of cls(response) - :rtype: ~search_service_client.models.Index + :return: SearchIndex or the result of cls(response) + :rtype: ~azure.search.documents.models.SearchIndex :raises: ~azure.core.exceptions.HttpResponseError """ - cls = kwargs.pop('cls', None) # type: ClsType["models.Index"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + cls = kwargs.pop('cls', None) # type: ClsType["models.SearchIndex"] + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: @@ -363,7 +376,7 @@ def get( api_version = "2019-05-06-Preview" # Construct URL - url = self.get.metadata['url'] + url = self.get.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'indexName': self._serialize.url("index_name", index_name, 'str'), @@ -390,13 +403,13 @@ def get( error = self._deserialize(models.SearchError, response) raise HttpResponseError(response=response, model=error) - deserialized = self._deserialize('Index', pipeline_response) + deserialized = self._deserialize('SearchIndex', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized - get.metadata = {'url': '/indexes(\'{indexName}\')'} + get.metadata = {'url': '/indexes(\'{indexName}\')'} # type: ignore def get_statistics( self, @@ -410,14 +423,15 @@ def get_statistics( :param index_name: The name of the index for which to retrieve statistics. :type index_name: str :param request_options: Parameter group. 
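Given the new warning on `delete` above that the operation is permanent, the flattened ETag parameters make a cheap guard: pass `if_match` so the delete only lands on the exact version you inspected. A sketch, with the same assumed `indexes_client` and a hypothetical index name:

index = indexes_client.get("hotels-index")
# Permanent operation: guard it so a concurrently modified index survives.
indexes_client.delete("hotels-index", if_match=index.e_tag)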
- :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: GetIndexStatisticsResult or the result of cls(response) - :rtype: ~search_service_client.models.GetIndexStatisticsResult + :rtype: ~azure.search.documents.models.GetIndexStatisticsResult :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.GetIndexStatisticsResult"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: @@ -425,7 +439,7 @@ def get_statistics( api_version = "2019-05-06-Preview" # Construct URL - url = self.get_statistics.metadata['url'] + url = self.get_statistics.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'indexName': self._serialize.url("index_name", index_name, 'str'), @@ -458,7 +472,7 @@ def get_statistics( return cls(pipeline_response, deserialized, {}) return deserialized - get_statistics.metadata = {'url': '/indexes(\'{indexName}\')/search.stats'} + get_statistics.metadata = {'url': '/indexes(\'{indexName}\')/search.stats'} # type: ignore def analyze( self, @@ -473,24 +487,26 @@ def analyze( :param index_name: The name of the index for which to test an analyzer. :type index_name: str :param request: The text and analyzer or analysis components to test. - :type request: ~search_service_client.models.AnalyzeRequest + :type request: ~azure.search.documents.models.AnalyzeRequest :param request_options: Parameter group. 
- :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: AnalyzeResult or the result of cls(response) - :rtype: ~search_service_client.models.AnalyzeResult + :rtype: ~azure.search.documents.models.AnalyzeResult :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.AnalyzeResult"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: _x_ms_client_request_id = request_options.x_ms_client_request_id api_version = "2019-05-06-Preview" + content_type = kwargs.pop("content_type", "application/json") # Construct URL - url = self.analyze.metadata['url'] + url = self.analyze.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'indexName': self._serialize.url("index_name", index_name, 'str'), @@ -505,8 +521,8 @@ def analyze( header_parameters = {} # type: Dict[str, Any] if _x_ms_client_request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = 'application/json' - header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json') # Construct and send request body_content_kwargs = {} # type: Dict[str, Any] @@ -528,4 +544,4 @@ def analyze( return cls(pipeline_response, deserialized, {}) return deserialized - analyze.metadata = {'url': '/indexes(\'{indexName}\')/search.analyze'} + analyze.metadata = {'url': '/indexes(\'{indexName}\')/search.analyze'} # type: ignore diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_search_service_client_operations.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_search_service_client_operations.py index ce9beea5f278..f9dd44bd7b50 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_search_service_client_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_search_service_client_operations.py @@ -1,9 +1,11 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator}) +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -from typing import Any, Callable, Dict, Generic, Optional, TypeVar +from typing import TYPE_CHECKING import warnings from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error @@ -12,8 +14,12 @@ from .. 
import models -T = TypeVar('T') -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any, Callable, Dict, Generic, Optional, TypeVar + + T = TypeVar('T') + ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] class SearchServiceClientOperationsMixin(object): @@ -26,14 +32,15 @@ def get_service_statistics( """Gets service level statistics for a search service. :param request_options: Parameter group. - :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: ServiceStatistics or the result of cls(response) - :rtype: ~search_service_client.models.ServiceStatistics + :rtype: ~azure.search.documents.models.ServiceStatistics :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.ServiceStatistics"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: @@ -41,7 +48,7 @@ def get_service_statistics( api_version = "2019-05-06-Preview" # Construct URL - url = self.get_service_statistics.metadata['url'] + url = self.get_service_statistics.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -73,4 +80,4 @@ def get_service_statistics( return cls(pipeline_response, deserialized, {}) return deserialized - get_service_statistics.metadata = {'url': '/servicestats'} + get_service_statistics.metadata = {'url': '/servicestats'} # type: ignore diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_skillsets_operations.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_skillsets_operations.py index 37ab90d2bb9d..73c19892c504 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_skillsets_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_skillsets_operations.py @@ -1,9 +1,11 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator}) +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union +from typing import TYPE_CHECKING import warnings from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error @@ -12,8 +14,12 @@ from .. 
import models -T = TypeVar('T') -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union + + T = TypeVar('T') + ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] class SkillsetsOperations(object): """SkillsetsOperations operations. @@ -22,7 +28,7 @@ class SkillsetsOperations(object): instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. - :type models: ~search_service_client.models + :type models: ~azure.search.documents.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. @@ -40,44 +46,46 @@ def __init__(self, client, config, serializer, deserializer): def create_or_update( self, skillset_name, # type: str - skillset, # type: "models.Skillset" + skillset, # type: "models.SearchIndexerSkillset" + if_match=None, # type: Optional[str] + if_none_match=None, # type: Optional[str] request_options=None, # type: Optional["models.RequestOptions"] - access_condition=None, # type: Optional["models.AccessCondition"] **kwargs # type: Any ): - # type: (...) -> "models.Skillset" + # type: (...) -> "models.SearchIndexerSkillset" """Creates a new skillset in a search service or updates the skillset if it already exists. :param skillset_name: The name of the skillset to create or update. :type skillset_name: str :param skillset: The skillset containing one or more skills to create or update in a search service. - :type skillset: ~search_service_client.models.Skillset + :type skillset: ~azure.search.documents.models.SearchIndexerSkillset + :param if_match: Defines the If-Match condition. The operation will be performed only if the + ETag on the server matches this value. + :type if_match: str + :param if_none_match: Defines the If-None-Match condition. The operation will be performed only + if the ETag on the server does not match this value. + :type if_none_match: str :param request_options: Parameter group. - :type request_options: ~search_service_client.models.RequestOptions - :param access_condition: Parameter group. 
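Stepping back to the import reshuffle at the top of this module (the same edit lands in every file here): the concrete `typing` imports move under a `TYPE_CHECKING` guard because the annotations are comment-style and never evaluated at runtime, which keeps the modules importable on Python 2. A standalone sketch of the idiom:

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Seen only by static type checkers; never executed at runtime.
    from typing import Dict, Optional

def count_headers(headers=None):
    # type: (Optional[Dict[str, str]]) -> int
    return len(headers or {})

assert count_headers({"Accept": "application/json"}) == 1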
- :type access_condition: ~search_service_client.models.AccessCondition + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response - :return: Skillset or the result of cls(response) - :rtype: ~search_service_client.models.Skillset or ~search_service_client.models.Skillset + :return: SearchIndexerSkillset or the result of cls(response) + :rtype: ~azure.search.documents.models.SearchIndexerSkillset :raises: ~azure.core.exceptions.HttpResponseError """ - cls = kwargs.pop('cls', None) # type: ClsType["models.Skillset"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + cls = kwargs.pop('cls', None) # type: ClsType["models.SearchIndexerSkillset"] + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None - _if_match = None - _if_none_match = None - if access_condition is not None: - _if_match = access_condition.if_match - _if_none_match = access_condition.if_none_match if request_options is not None: _x_ms_client_request_id = request_options.x_ms_client_request_id prefer = "return=representation" api_version = "2019-05-06-Preview" + content_type = kwargs.pop("content_type", "application/json") # Construct URL - url = self.create_or_update.metadata['url'] + url = self.create_or_update.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'skillsetName': self._serialize.url("skillset_name", skillset_name, 'str'), @@ -92,17 +100,17 @@ def create_or_update( header_parameters = {} # type: Dict[str, Any] if _x_ms_client_request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') header_parameters['Prefer'] = self._serialize.header("prefer", prefer, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = 'application/json' - header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json') # Construct and send request body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(skillset, 'Skillset') + body_content = self._serialize.body(skillset, 'SearchIndexerSkillset') body_content_kwargs['content'] = body_content request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) @@ -116,22 +124,23 @@ def create_or_update( deserialized = None if response.status_code == 200: - deserialized = self._deserialize('Skillset', pipeline_response) + deserialized = self._deserialize('SearchIndexerSkillset', pipeline_response) if response.status_code == 201: - deserialized = self._deserialize('Skillset', pipeline_response) + deserialized = self._deserialize('SearchIndexerSkillset', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) 
return deserialized - create_or_update.metadata = {'url': '/skillsets(\'{skillsetName}\')'} + create_or_update.metadata = {'url': '/skillsets(\'{skillsetName}\')'} # type: ignore def delete( self, skillset_name, # type: str + if_match=None, # type: Optional[str] + if_none_match=None, # type: Optional[str] request_options=None, # type: Optional["models.RequestOptions"] - access_condition=None, # type: Optional["models.AccessCondition"] **kwargs # type: Any ): # type: (...) -> None @@ -139,30 +148,30 @@ def delete( :param skillset_name: The name of the skillset to delete. :type skillset_name: str + :param if_match: Defines the If-Match condition. The operation will be performed only if the + ETag on the server matches this value. + :type if_match: str + :param if_none_match: Defines the If-None-Match condition. The operation will be performed only + if the ETag on the server does not match this value. + :type if_none_match: str :param request_options: Parameter group. - :type request_options: ~search_service_client.models.RequestOptions - :param access_condition: Parameter group. - :type access_condition: ~search_service_client.models.AccessCondition + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: None or the result of cls(response) :rtype: None :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None - _if_match = None - _if_none_match = None - if access_condition is not None: - _if_match = access_condition.if_match - _if_none_match = access_condition.if_none_match if request_options is not None: _x_ms_client_request_id = request_options.x_ms_client_request_id api_version = "2019-05-06-Preview" # Construct URL - url = self.delete.metadata['url'] + url = self.delete.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'skillsetName': self._serialize.url("skillset_name", skillset_name, 'str'), @@ -177,10 +186,10 @@ def delete( header_parameters = {} # type: Dict[str, Any] if _x_ms_client_request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') # Construct and send request request = self._client.delete(url, query_parameters, header_parameters) @@ -195,7 +204,7 @@ def delete( if cls: return cls(pipeline_response, None, {}) - delete.metadata = {'url': '/skillsets(\'{skillsetName}\')'} + delete.metadata = {'url': '/skillsets(\'{skillsetName}\')'} # type: ignore def get( self, @@ -203,20 +212,21 @@ def get( request_options=None, # type: Optional["models.RequestOptions"] **kwargs # type: Any ): - # type: 
(...) -> "models.Skillset" + # type: (...) -> "models.SearchIndexerSkillset" """Retrieves a skillset in a search service. :param skillset_name: The name of the skillset to retrieve. :type skillset_name: str :param request_options: Parameter group. - :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response - :return: Skillset or the result of cls(response) - :rtype: ~search_service_client.models.Skillset + :return: SearchIndexerSkillset or the result of cls(response) + :rtype: ~azure.search.documents.models.SearchIndexerSkillset :raises: ~azure.core.exceptions.HttpResponseError """ - cls = kwargs.pop('cls', None) # type: ClsType["models.Skillset"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + cls = kwargs.pop('cls', None) # type: ClsType["models.SearchIndexerSkillset"] + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: @@ -224,7 +234,7 @@ def get( api_version = "2019-05-06-Preview" # Construct URL - url = self.get.metadata['url'] + url = self.get.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'skillsetName': self._serialize.url("skillset_name", skillset_name, 'str'), @@ -251,13 +261,13 @@ def get( error = self._deserialize(models.SearchError, response) raise HttpResponseError(response=response, model=error) - deserialized = self._deserialize('Skillset', pipeline_response) + deserialized = self._deserialize('SearchIndexerSkillset', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized - get.metadata = {'url': '/skillsets(\'{skillsetName}\')'} + get.metadata = {'url': '/skillsets(\'{skillsetName}\')'} # type: ignore def list( self, @@ -273,14 +283,15 @@ def list( properties. :type select: str :param request_options: Parameter group. 
- :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: ListSkillsetsResult or the result of cls(response) - :rtype: ~search_service_client.models.ListSkillsetsResult + :rtype: ~azure.search.documents.models.ListSkillsetsResult :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.ListSkillsetsResult"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: @@ -288,7 +299,7 @@ def list( api_version = "2019-05-06-Preview" # Construct URL - url = self.list.metadata['url'] + url = self.list.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -322,36 +333,38 @@ def list( return cls(pipeline_response, deserialized, {}) return deserialized - list.metadata = {'url': '/skillsets'} + list.metadata = {'url': '/skillsets'} # type: ignore def create( self, - skillset, # type: "models.Skillset" + skillset, # type: "models.SearchIndexerSkillset" request_options=None, # type: Optional["models.RequestOptions"] **kwargs # type: Any ): - # type: (...) -> "models.Skillset" + # type: (...) -> "models.SearchIndexerSkillset" """Creates a new skillset in a search service. :param skillset: The skillset containing one or more skills to create in a search service. - :type skillset: ~search_service_client.models.Skillset + :type skillset: ~azure.search.documents.models.SearchIndexerSkillset :param request_options: Parameter group. 
- :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response - :return: Skillset or the result of cls(response) - :rtype: ~search_service_client.models.Skillset + :return: SearchIndexerSkillset or the result of cls(response) + :rtype: ~azure.search.documents.models.SearchIndexerSkillset :raises: ~azure.core.exceptions.HttpResponseError """ - cls = kwargs.pop('cls', None) # type: ClsType["models.Skillset"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + cls = kwargs.pop('cls', None) # type: ClsType["models.SearchIndexerSkillset"] + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: _x_ms_client_request_id = request_options.x_ms_client_request_id api_version = "2019-05-06-Preview" + content_type = kwargs.pop("content_type", "application/json") # Construct URL - url = self.create.metadata['url'] + url = self.create.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -365,12 +378,12 @@ def create( header_parameters = {} # type: Dict[str, Any] if _x_ms_client_request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = 'application/json' - header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json') # Construct and send request body_content_kwargs = {} # type: Dict[str, Any] - body_content = self._serialize.body(skillset, 'Skillset') + body_content = self._serialize.body(skillset, 'SearchIndexerSkillset') body_content_kwargs['content'] = body_content request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) @@ -382,10 +395,10 @@ def create( error = self._deserialize(models.SearchError, response) raise HttpResponseError(response=response, model=error) - deserialized = self._deserialize('Skillset', pipeline_response) + deserialized = self._deserialize('SearchIndexerSkillset', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized - create.metadata = {'url': '/skillsets'} + create.metadata = {'url': '/skillsets'} # type: ignore diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_synonym_maps_operations.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_synonym_maps_operations.py index ff4e69ec5420..c8e7b99b4e88 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_synonym_maps_operations.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_generated/operations/_synonym_maps_operations.py @@ -1,9 +1,11 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6257, generator: {generator}) +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. 
+# Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union +from typing import TYPE_CHECKING import warnings from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error @@ -12,8 +14,12 @@ from .. import models -T = TypeVar('T') -ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] +if TYPE_CHECKING: + # pylint: disable=unused-import,ungrouped-imports + from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union + + T = TypeVar('T') + ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] class SynonymMapsOperations(object): """SynonymMapsOperations operations. @@ -22,7 +28,7 @@ class SynonymMapsOperations(object): instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. - :type models: ~search_service_client.models + :type models: ~azure.search.documents.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. @@ -41,8 +47,9 @@ def create_or_update( self, synonym_map_name, # type: str synonym_map, # type: "models.SynonymMap" + if_match=None, # type: Optional[str] + if_none_match=None, # type: Optional[str] request_options=None, # type: Optional["models.RequestOptions"] - access_condition=None, # type: Optional["models.AccessCondition"] **kwargs # type: Any ): # type: (...) -> "models.SynonymMap" @@ -51,32 +58,33 @@ def create_or_update( :param synonym_map_name: The name of the synonym map to create or update. :type synonym_map_name: str :param synonym_map: The definition of the synonym map to create or update. - :type synonym_map: ~search_service_client.models.SynonymMap + :type synonym_map: ~azure.search.documents.models.SynonymMap + :param if_match: Defines the If-Match condition. The operation will be performed only if the + ETag on the server matches this value. + :type if_match: str + :param if_none_match: Defines the If-None-Match condition. The operation will be performed only + if the ETag on the server does not match this value. + :type if_none_match: str :param request_options: Parameter group. - :type request_options: ~search_service_client.models.RequestOptions - :param access_condition: Parameter group. 
- :type access_condition: ~search_service_client.models.AccessCondition + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: SynonymMap or the result of cls(response) - :rtype: ~search_service_client.models.SynonymMap or ~search_service_client.models.SynonymMap + :rtype: ~azure.search.documents.models.SynonymMap :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.SynonymMap"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None - _if_match = None - _if_none_match = None - if access_condition is not None: - _if_match = access_condition.if_match - _if_none_match = access_condition.if_none_match if request_options is not None: _x_ms_client_request_id = request_options.x_ms_client_request_id prefer = "return=representation" api_version = "2019-05-06-Preview" + content_type = kwargs.pop("content_type", "application/json") # Construct URL - url = self.create_or_update.metadata['url'] + url = self.create_or_update.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'synonymMapName': self._serialize.url("synonym_map_name", synonym_map_name, 'str'), @@ -91,13 +99,13 @@ def create_or_update( header_parameters = {} # type: Dict[str, Any] if _x_ms_client_request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') header_parameters['Prefer'] = self._serialize.header("prefer", prefer, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = 'application/json' - header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json') # Construct and send request body_content_kwargs = {} # type: Dict[str, Any] @@ -124,13 +132,14 @@ def create_or_update( return cls(pipeline_response, deserialized, {}) return deserialized - create_or_update.metadata = {'url': '/synonymmaps(\'{synonymMapName}\')'} + create_or_update.metadata = {'url': '/synonymmaps(\'{synonymMapName}\')'} # type: ignore def delete( self, synonym_map_name, # type: str + if_match=None, # type: Optional[str] + if_none_match=None, # type: Optional[str] request_options=None, # type: Optional["models.RequestOptions"] - access_condition=None, # type: Optional["models.AccessCondition"] **kwargs # type: Any ): # type: (...) -> None @@ -138,30 +147,30 @@ def delete( :param synonym_map_name: The name of the synonym map to delete. :type synonym_map_name: str + :param if_match: Defines the If-Match condition. The operation will be performed only if the + ETag on the server matches this value. 
+ :type if_match: str + :param if_none_match: Defines the If-None-Match condition. The operation will be performed only + if the ETag on the server does not match this value. + :type if_none_match: str :param request_options: Parameter group. - :type request_options: ~search_service_client.models.RequestOptions - :param access_condition: Parameter group. - :type access_condition: ~search_service_client.models.AccessCondition + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: None or the result of cls(response) :rtype: None :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None - _if_match = None - _if_none_match = None - if access_condition is not None: - _if_match = access_condition.if_match - _if_none_match = access_condition.if_none_match if request_options is not None: _x_ms_client_request_id = request_options.x_ms_client_request_id api_version = "2019-05-06-Preview" # Construct URL - url = self.delete.metadata['url'] + url = self.delete.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'synonymMapName': self._serialize.url("synonym_map_name", synonym_map_name, 'str'), @@ -176,10 +185,10 @@ def delete( header_parameters = {} # type: Dict[str, Any] if _x_ms_client_request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') - if _if_match is not None: - header_parameters['If-Match'] = self._serialize.header("if_match", _if_match, 'str') - if _if_none_match is not None: - header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') # Construct and send request request = self._client.delete(url, query_parameters, header_parameters) @@ -194,7 +203,7 @@ def delete( if cls: return cls(pipeline_response, None, {}) - delete.metadata = {'url': '/synonymmaps(\'{synonymMapName}\')'} + delete.metadata = {'url': '/synonymmaps(\'{synonymMapName}\')'} # type: ignore def get( self, @@ -208,14 +217,15 @@ def get( :param synonym_map_name: The name of the synonym map to retrieve. :type synonym_map_name: str :param request_options: Parameter group. 
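# [editor's note] With the AccessCondition parameter group flattened into
# ``if_match``/``if_none_match`` (hunks above), ETag preconditions become plain
# keyword arguments on the generated operations. A hedged usage sketch,
# assuming ``client`` is an already-constructed generated service client:
fetched = client.synonym_maps.get("my-synonym-map")
client.synonym_maps.delete(
    "my-synonym-map",
    if_match=fetched.e_tag,  # delete only while the server copy matches this ETag
)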
- :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: SynonymMap or the result of cls(response) - :rtype: ~search_service_client.models.SynonymMap + :rtype: ~azure.search.documents.models.SynonymMap :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.SynonymMap"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: @@ -223,7 +233,7 @@ def get( api_version = "2019-05-06-Preview" # Construct URL - url = self.get.metadata['url'] + url = self.get.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), 'synonymMapName': self._serialize.url("synonym_map_name", synonym_map_name, 'str'), @@ -256,7 +266,7 @@ def get( return cls(pipeline_response, deserialized, {}) return deserialized - get.metadata = {'url': '/synonymmaps(\'{synonymMapName}\')'} + get.metadata = {'url': '/synonymmaps(\'{synonymMapName}\')'} # type: ignore def list( self, @@ -272,14 +282,15 @@ def list( properties. :type select: str :param request_options: Parameter group. - :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: ListSynonymMapsResult or the result of cls(response) - :rtype: ~search_service_client.models.ListSynonymMapsResult + :rtype: ~azure.search.documents.models.ListSynonymMapsResult :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.ListSynonymMapsResult"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: @@ -287,7 +298,7 @@ def list( api_version = "2019-05-06-Preview" # Construct URL - url = self.list.metadata['url'] + url = self.list.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -321,7 +332,7 @@ def list( return cls(pipeline_response, deserialized, {}) return deserialized - list.metadata = {'url': '/synonymmaps'} + list.metadata = {'url': '/synonymmaps'} # type: ignore def create( self, @@ -333,24 +344,26 @@ def create( """Creates a new synonym map. :param synonym_map: The definition of the synonym map to create. - :type synonym_map: ~search_service_client.models.SynonymMap + :type synonym_map: ~azure.search.documents.models.SynonymMap :param request_options: Parameter group. 
- :type request_options: ~search_service_client.models.RequestOptions + :type request_options: ~azure.search.documents.models.RequestOptions :keyword callable cls: A custom type or function that will be passed the direct response :return: SynonymMap or the result of cls(response) - :rtype: ~search_service_client.models.SynonymMap + :rtype: ~azure.search.documents.models.SynonymMap :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.SynonymMap"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) _x_ms_client_request_id = None if request_options is not None: _x_ms_client_request_id = request_options.x_ms_client_request_id api_version = "2019-05-06-Preview" + content_type = kwargs.pop("content_type", "application/json") # Construct URL - url = self.create.metadata['url'] + url = self.create.metadata['url'] # type: ignore path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } @@ -364,8 +377,8 @@ def create( header_parameters = {} # type: Dict[str, Any] if _x_ms_client_request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("x_ms_client_request_id", _x_ms_client_request_id, 'str') + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = 'application/json' - header_parameters['Content-Type'] = kwargs.pop('content_type', 'application/json') # Construct and send request body_content_kwargs = {} # type: Dict[str, Any] @@ -387,4 +400,4 @@ def create( return cls(pipeline_response, deserialized, {}) return deserialized - create.metadata = {'url': '/synonymmaps'} + create.metadata = {'url': '/synonymmaps'} # type: ignore diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_index.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_index.py index a76d1c1976cb..e8601794c933 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_index.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_index.py @@ -6,7 +6,7 @@ from typing import TYPE_CHECKING from .edm import Collection, ComplexType -from ._generated.models import Field +from ._generated.models import SearchField if TYPE_CHECKING: from typing import Any, Dict, List @@ -15,7 +15,7 @@ def SimpleField(**kw): - # type: (**Any) -> Dict[str, Any] + # type: (**Any) -> SearchField """Configure a simple field for an Azure Search Index :param name: Required. The name of the field, which must be unique within the fields collection @@ -67,11 +67,11 @@ def SimpleField(**kw): result["facetable"] = kw.get("facetable", False) result["sortable"] = kw.get("sortable", False) result["retrievable"] = not kw.get("hidden", False) - return Field(**result) + return SearchField(**result) def SearchableField(**kw): - # type: (**Any) -> Dict[str, Any] + # type: (**Any) -> SearchField """Configure a searchable text field for an Azure Search Index :param name: Required. 
The name of the field, which must be unique within the fields collection
@@ -204,11 +204,11 @@ def SearchableField(**kw):
         result["index_analyzer"] = kw["index_analyzer"]
     if "synonym_maps" in kw:
         result["synonym_maps"] = kw["synonym_maps"]
-    return Field(**result)
+    return SearchField(**result)
 
 
 def ComplexField(**kw):
-    # type: (**Any) -> Dict[str, Any]
+    # type: (**Any) -> SearchField
     """Configure a Complex or Complex collection field for an Azure Search
     Index
 
@@ -225,4 +225,4 @@ def ComplexField(**kw):
     typ = Collection(ComplexType) if kw.get("collection", False) else ComplexType
     result = {"name": kw.get("name"), "type": typ}  # type: Dict[str, Any]
     result["fields"] = kw.get("fields")
-    return Field(**result)
+    return SearchField(**result)
diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_indexers_client.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_indexers_client.py
index 0d6a6259f4fd..38d1d81ff7bb 100644
--- a/sdk/search/azure-search-documents/azure/search/documents/_service/_indexers_client.py
+++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_indexers_client.py
@@ -15,7 +15,7 @@
 
 if TYPE_CHECKING:
     # pylint:disable=unused-import,ungrouped-imports
-    from ._generated.models import Indexer, IndexerExecutionInfo
+    from ._generated.models import SearchIndexer, SearchIndexerStatus
     from typing import Any, Dict, Optional, Sequence
 
     from azure.core.credentials import AzureKeyCredential
@@ -57,13 +57,13 @@ def close(self):
 
     @distributed_trace
     def create_indexer(self, indexer, **kwargs):
-        # type: (Indexer, **Any) -> Indexer
-        """Creates a new Indexers.
+        # type: (SearchIndexer, **Any) -> SearchIndexer
+        """Creates a new SearchIndexer.
 
         :param indexer: The definition of the indexer to create.
-        :type indexer: ~~azure.search.documents.Indexer
-        :return: The created Indexer
-        :rtype: ~azure.search.documents.Indexer
+        :type indexer: ~azure.search.documents.SearchIndexer
+        :return: The created SearchIndexer
+        :rtype: ~azure.search.documents.SearchIndexer
 
         .. admonition:: Example:
 
@@ -72,7 +72,7 @@ def create_indexer(self, indexer, **kwargs):
             :end-before: [END create_indexer]
             :language: python
             :dedent: 4
-            :caption: Create an Indexer
+            :caption: Create a SearchIndexer
         """
         kwargs["headers"] = self._merge_client_headers(kwargs.get("headers"))
         result = self._client.indexers.create(indexer, **kwargs)
@@ -80,42 +80,37 @@ def create_or_update_indexer(self, indexer, name=None, **kwargs):
-        # type: (Indexer, Optional[str], **Any) -> Indexer
+        # type: (SearchIndexer, Optional[str], **Any) -> SearchIndexer
         """Creates a new indexer or updates a indexer if it already exists.
 
         :param name: The name of the indexer to create or update.
         :type name: str
         :param indexer: The definition of the indexer to create or update.
-        :type indexer: ~azure.search.documents.Indexer
-        :return: The created Indexer
-        :rtype: ~azure.search.documents.Indexer
+        :type indexer: ~azure.search.documents.SearchIndexer
+        :return: The created SearchIndexer
+        :rtype: ~azure.search.documents.SearchIndexer
         """
+        kwargs["headers"] = self._merge_client_headers(kwargs.get("headers"))
         error_map, access_condition = get_access_conditions(
-            indexer,
-            kwargs.pop('match_condition', MatchConditions.Unconditionally)
+            indexer, kwargs.pop("match_condition", MatchConditions.Unconditionally)
         )
-        kwargs["headers"] = self._merge_client_headers(kwargs.get("headers"))
-
+        kwargs.update(access_condition)
         if not name:
             name = indexer.name
         result = self._client.indexers.create_or_update(
-            indexer_name=name,
-            indexer=indexer,
-            access_condition=access_condition,
-            error_map=error_map,
-            **kwargs
+            indexer_name=name, indexer=indexer, error_map=error_map, **kwargs
         )
         return result
 
     @distributed_trace
     def get_indexer(self, name, **kwargs):
-        # type: (str, **Any) -> Indexer
+        # type: (str, **Any) -> SearchIndexer
         """Retrieves a indexer definition.
 
         :param name: The name of the indexer to retrieve.
         :type name: str
-        :return: The Indexer that is fetched.
-        :rtype: ~azure.search.documents.Indexer
+        :return: The SearchIndexer that is fetched.
+        :rtype: ~azure.search.documents.SearchIndexer
 
         .. admonition:: Example:
 
         .. literalinclude:: ../samples/sample_indexers_operations.py
             :start-after: [START get_indexer]
             :end-before: [END get_indexer]
             :language: python
             :dedent: 4
-            :caption: Retrieve an Indexer
+            :caption: Retrieve a SearchIndexer
         """
         kwargs["headers"] = self._merge_client_headers(kwargs.get("headers"))
         result = self._client.indexers.get(name, **kwargs)
@@ -132,10 +127,10 @@ def get_indexer(self, name, **kwargs):
 
     @distributed_trace
     def get_indexers(self, **kwargs):
-        # type: (**Any) -> Sequence[Indexer]
+        # type: (**Any) -> Sequence[SearchIndexer]
        """Lists all indexers available for a search service.
 
-        :return: List of all the Indexers.
+        :return: List of all the SearchIndexers.
         :rtype: `list[dict]`
 
         .. admonition:: Example:
 
         .. literalinclude:: ../samples/sample_indexers_operations.py
             :start-after: [START list_indexer]
             :end-before: [END list_indexer]
             :language: python
             :dedent: 4
-            :caption: List all the Indexers
+            :caption: List all the SearchIndexers
         """
         kwargs["headers"] = self._merge_client_headers(kwargs.get("headers"))
         result = self._client.indexers.list(**kwargs)
@@ -153,13 +148,13 @@ def get_indexers(self, **kwargs):
 
     @distributed_trace
     def delete_indexer(self, indexer, **kwargs):
-        # type: (Union[str, Indexer], **Any) -> None
-        """Deletes an indexer. To use access conditions, the Indexer model
+        # type: (Union[str, SearchIndexer], **Any) -> None
+        """Deletes an indexer. To use access conditions, the SearchIndexer model
         must be provided instead of the name. It is enough to provide
         the name of the indexer to delete unconditionally.
 
         :param indexer: The indexer to delete.
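# [editor's note] A hedged sketch of the access-condition flow above: pass the
# model (not just its name) plus a MatchConditions value, and the client turns
# the model's e_tag into if_match/if_none_match kwargs via get_access_conditions.
# ``indexers_client`` stands in for an instance of this module's client class.
from azure.core import MatchConditions

indexer = indexers_client.get_indexer("my-indexer")
indexers_client.delete_indexer(
    indexer,  # the model carries the e_tag used for the If-Match precondition
    match_condition=MatchConditions.IfNotModified,
)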
- :type indexer: str or ~azure.search.documents.Indexer + :type indexer: str or ~azure.search.documents.SearchIndexer :keyword match_condition: The match condition to use upon the etag :type match_condition: ~azure.core.MatchConditions @@ -173,18 +168,18 @@ def delete_indexer(self, indexer, **kwargs): :end-before: [END delete_indexer] :language: python :dedent: 4 - :caption: Delete an Indexer + :caption: Delete a SearchIndexer """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) error_map, access_condition = get_access_conditions( - indexer, - kwargs.pop('match_condition', MatchConditions.Unconditionally) + indexer, kwargs.pop("match_condition", MatchConditions.Unconditionally) ) + kwargs.update(access_condition) try: name = indexer.name except AttributeError: name = indexer - self._client.indexers.delete(name, access_condition=access_condition, error_map=error_map, **kwargs) + self._client.indexers.delete(name, error_map=error_map, **kwargs) @distributed_trace def run_indexer(self, name, **kwargs): @@ -204,7 +199,7 @@ def run_indexer(self, name, **kwargs): :end-before: [END run_indexer] :language: python :dedent: 4 - :caption: Run an Indexer + :caption: Run a SearchIndexer """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) self._client.indexers.run(name, **kwargs) @@ -227,21 +222,21 @@ def reset_indexer(self, name, **kwargs): :end-before: [END reset_indexer] :language: python :dedent: 4 - :caption: Reset an Indexer's change tracking state + :caption: Reset a SearchIndexer's change tracking state """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) self._client.indexers.reset(name, **kwargs) @distributed_trace def get_indexer_status(self, name, **kwargs): - # type: (str, **Any) -> IndexerExecutionInfo + # type: (str, **Any) -> SearchIndexerStatus """Get the status of the indexer. :param name: The name of the indexer to fetch the status. :type name: str - :return: IndexerExecutionInfo - :rtype: IndexerExecutionInfo + :return: SearchIndexerStatus + :rtype: SearchIndexerStatus .. 
admonition:: Example: @@ -250,7 +245,7 @@ def get_indexer_status(self, name, **kwargs): :end-before: [END get_indexer_status] :language: python :dedent: 4 - :caption: Get an Indexer's status + :caption: Get a SearchIndexer's status """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) return self._client.indexers.get_status(name, **kwargs) diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_indexes_client.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_indexes_client.py index 355cffc065d9..bd65a30a3b87 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_indexes_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_indexes_client.py @@ -13,14 +13,14 @@ from ._utils import ( delistize_flags_for_index, listize_flags_for_index, - get_access_conditions + get_access_conditions, ) from .._headers_mixin import HeadersMixin from .._version import SDK_MONIKER if TYPE_CHECKING: # pylint:disable=unused-import,ungrouped-imports - from ._generated.models import AnalyzeRequest, AnalyzeResult, Index + from ._generated.models import AnalyzeRequest, AnalyzeResult, SearchIndex from typing import Any, Dict, List, Union from azure.core.credentials import AzureKeyCredential @@ -62,11 +62,11 @@ def close(self): @distributed_trace def list_indexes(self, **kwargs): - # type: (**Any) -> ItemPaged[Index] + # type: (**Any) -> ItemPaged[SearchIndex] """List the indexes in an Azure Search service. :return: List of indexes - :rtype: list[~azure.search.documents.Index] + :rtype: list[~azure.search.documents.SearchIndex] :raises: ~azure.core.exceptions.HttpResponseError """ @@ -82,13 +82,13 @@ def extract_data(response): @distributed_trace def get_index(self, index_name, **kwargs): - # type: (str, **Any) -> Index + # type: (str, **Any) -> SearchIndex """ :param index_name: The name of the index to retrieve. :type index_name: str - :return: Index object - :rtype: ~azure.search.documents.Index + :return: SearchIndex object + :rtype: ~azure.search.documents.SearchIndex :raises: ~azure.core.exceptions.HttpResponseError .. admonition:: Example: @@ -123,12 +123,12 @@ def get_index_statistics(self, index_name, **kwargs): @distributed_trace def delete_index(self, index, **kwargs): - # type: (Union[str, Index], **Any) -> None + # type: (Union[str, SearchIndex], **Any) -> None """Deletes a search index and all the documents it contains. The model must be provided instead of the name to use the access conditions. :param index: The index to retrieve. 
- :type index: str or ~search.models.Index + :type index: str or ~search.models.SearchIndex :keyword match_condition: The match condition to use upon the etag :type match_condition: ~azure.core.MatchConditions :raises: ~azure.core.exceptions.HttpResponseError @@ -144,29 +144,26 @@ def delete_index(self, index, **kwargs): """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) error_map, access_condition = get_access_conditions( - index, - kwargs.pop('match_condition', MatchConditions.Unconditionally) + index, kwargs.pop("match_condition", MatchConditions.Unconditionally) ) + kwargs.update(access_condition) try: index_name = index.name except AttributeError: index_name = index self._client.indexes.delete( - index_name=index_name, - access_condition=access_condition, - error_map=error_map, - **kwargs + index_name=index_name, error_map=error_map, **kwargs ) @distributed_trace def create_index(self, index, **kwargs): - # type: (Index, **Any) -> Index + # type: (SearchIndex, **Any) -> SearchIndex """Creates a new search index. :param index: The index object. - :type index: ~azure.search.documents.Index + :type index: ~azure.search.documents.SearchIndex :return: The index created - :rtype: ~azure.search.documents.Index + :rtype: ~azure.search.documents.SearchIndex :raises: ~azure.core.exceptions.HttpResponseError .. admonition:: Example: @@ -185,19 +182,15 @@ def create_index(self, index, **kwargs): @distributed_trace def create_or_update_index( - self, - index_name, - index, - allow_index_downtime=None, - **kwargs + self, index_name, index, allow_index_downtime=None, **kwargs ): - # type: (str, Index, bool, **Any) -> Index + # type: (str, SearchIndex, bool, **Any) -> SearchIndex """Creates a new search index or updates an index if it already exists. :param index_name: The name of the index. :type index_name: str :param index: The index object. - :type index: ~azure.search.documents.Index + :type index: ~azure.search.documents.SearchIndex :param allow_index_downtime: Allows new analyzers, tokenizers, token filters, or char filters to be added to an index by taking the index offline for at least a few seconds. This temporarily causes indexing and query requests to fail. Performance and write availability of @@ -207,7 +200,7 @@ def create_or_update_index( :keyword match_condition: The match condition to use upon the etag :type match_condition: ~azure.core.MatchConditions :return: The index created or updated - :rtype: :class:`~azure.search.documents.Index` + :rtype: :class:`~azure.search.documents.SearchIndex` :raises: :class:`~azure.core.exceptions.ResourceNotFoundError`, \ :class:`~azure.core.exceptions.ResourceModifiedError`, \ :class:`~azure.core.exceptions.ResourceNotModifiedError`, \ @@ -223,17 +216,16 @@ def create_or_update_index( :dedent: 4 :caption: Update an index. 
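# [editor's note] A hedged end-to-end sketch of the optimistic-concurrency flow
# for create_or_update_index; the docstring above documents that a failed
# precondition surfaces as ResourceModifiedError. Client and index names are
# illustrative:
from azure.core import MatchConditions
from azure.core.exceptions import ResourceModifiedError

index = indexes_client.get_index("hotels")
try:
    indexes_client.create_or_update_index(
        index_name=index.name,
        index=index,
        match_condition=MatchConditions.IfNotModified,  # sends If-Match: <e_tag>
    )
except ResourceModifiedError:
    # the index changed server-side since it was fetched; re-fetch and retry
    pass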
""" + kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) error_map, access_condition = get_access_conditions( - index, - kwargs.pop('match_condition', MatchConditions.Unconditionally) + index, kwargs.pop("match_condition", MatchConditions.Unconditionally) ) - kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) + kwargs.update(access_condition) patched_index = delistize_flags_for_index(index) result = self._client.indexes.create_or_update( index_name=index_name, index=patched_index, allow_index_downtime=allow_index_downtime, - access_condition=access_condition, error_map=error_map, **kwargs ) diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_models.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_models.py index 529af151836f..6b2fd3491b35 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_models.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_models.py @@ -3,10 +3,10 @@ # Licensed under the MIT License. See License.txt in the project root for # license information. # -------------------------------------------------------------------------- -from ._generated.models import Analyzer, Tokenizer +from ._generated.models import LexicalAnalyzer, LexicalTokenizer -class PatternAnalyzer(Analyzer): +class PatternAnalyzer(LexicalAnalyzer): """Flexibly separates text into terms via a regular expression. This analyzer is implemented using Apache Lucene. @@ -49,7 +49,7 @@ def __init__(self, **kwargs): self.stopwords = kwargs.get("stopwords", None) -class PatternTokenizer(Tokenizer): +class PatternTokenizer(LexicalTokenizer): """Tokenizer that uses regex pattern matching to construct distinct tokens. This tokenizer is implemented using Apache Lucene. diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_skillsets_client.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_skillsets_client.py index 5556aa26db03..c02a78bfe99b 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_skillsets_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_skillsets_client.py @@ -10,14 +10,14 @@ from azure.core.exceptions import ClientAuthenticationError, ResourceNotFoundError from ._generated import SearchServiceClient as _SearchServiceClient -from ._generated.models import Skillset +from ._generated.models import SearchIndexerSkillset from ._utils import get_access_conditions from .._headers_mixin import HeadersMixin from .._version import SDK_MONIKER if TYPE_CHECKING: # pylint:disable=unused-import,ungrouped-imports - from ._generated.models import Skill + from ._generated.models import SearchIndexerSkill from typing import Any, List, Sequence, Union from azure.core.credentials import AzureKeyCredential @@ -59,10 +59,10 @@ def close(self): @distributed_trace def get_skillsets(self, **kwargs): - # type: (**Any) -> List[Skillset] - """List the Skillsets in an Azure Search service. + # type: (**Any) -> List[SearchIndexerSkillset] + """List the SearchIndexerSkillsets in an Azure Search service. 
- :return: List of Skillsets + :return: List of SearchIndexerSkillsets :rtype: list[dict] :raises: ~azure.core.exceptions.HttpResponseError @@ -73,7 +73,7 @@ def get_skillsets(self, **kwargs): :end-before: [END get_skillsets] :language: python :dedent: 4 - :caption: List Skillsets + :caption: List SearchIndexerSkillsets """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) @@ -82,12 +82,12 @@ def get_skillsets(self, **kwargs): @distributed_trace def get_skillset(self, name, **kwargs): - # type: (str, **Any) -> Skillset - """Retrieve a named Skillset in an Azure Search service + # type: (str, **Any) -> SearchIndexerSkillset + """Retrieve a named SearchIndexerSkillset in an Azure Search service - :param name: The name of the Skillset to get + :param name: The name of the SearchIndexerSkillset to get :type name: str - :return: The retrieved Skillset + :return: The retrieved SearchIndexerSkillset :rtype: dict :raises: :class:`~azure.core.exceptions.ResourceNotFoundError` @@ -98,7 +98,7 @@ def get_skillset(self, name, **kwargs): :end-before: [END get_skillset] :language: python :dedent: 4 - :caption: Get a Skillset + :caption: Get a SearchIndexerSkillset """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) @@ -106,13 +106,13 @@ def get_skillset(self, name, **kwargs): @distributed_trace def delete_skillset(self, skillset, **kwargs): - # type: (Union[str, Skillset], **Any) -> None - """Delete a named Skillset in an Azure Search service. To use access conditions, - the Skillset model must be provided instead of the name. It is enough to provide + # type: (Union[str, SearchIndexerSkillset], **Any) -> None + """Delete a named SearchIndexerSkillset in an Azure Search service. To use access conditions, + the SearchIndexerSkillset model must be provided instead of the name. 
It is enough to provide
        the name of the skillset to delete unconditionally
 
-        :param name: The Skillset to delete
-        :type name: str or ~search.models.Skillset
+        :param skillset: The SearchIndexerSkillset to delete
+        :type skillset: str or ~search.models.SearchIndexerSkillset
         :keyword match_condition: The match condition to use upon the etag
         :type match_condition: ~azure.core.MatchConditions
 
         .. admonition:: Example:
 
         .. literalinclude:: ../samples/sample_skillset_operations.py
             :start-after: [START delete_skillset]
             :end-before: [END delete_skillset]
             :language: python
             :dedent: 4
-            :caption: Delete a Skillset
+            :caption: Delete a SearchIndexerSkillset
         """
         kwargs["headers"] = self._merge_client_headers(kwargs.get("headers"))
         error_map, access_condition = get_access_conditions(
-            skillset,
-            kwargs.pop('match_condition', MatchConditions.Unconditionally)
+            skillset, kwargs.pop("match_condition", MatchConditions.Unconditionally)
         )
+        kwargs.update(access_condition)
         try:
             name = skillset.name
         except AttributeError:
             name = skillset
-        self._client.skillsets.delete(name, access_condition=access_condition, error_map=error_map, **kwargs)
+        self._client.skillsets.delete(name, error_map=error_map, **kwargs)
 
     @distributed_trace
     def create_skillset(self, name, skills, description, **kwargs):
-        # type: (str, Sequence[Skill], str, **Any) -> Skillset
-        """Create a new Skillset in an Azure Search service
+        # type: (str, Sequence[SearchIndexerSkill], str, **Any) -> SearchIndexerSkillset
+        """Create a new SearchIndexerSkillset in an Azure Search service
 
-        :param name: The name of the Skillset to create
+        :param name: The name of the SearchIndexerSkillset to create
         :type name: str
-        :param skills: A list of Skill objects to include in the Skillset
-        :type skills: List[Skill]]
-        :param description: A description for the Skillset
+        :param skills: A list of SearchIndexerSkill objects to include in the SearchIndexerSkillset
+        :type skills: List[SearchIndexerSkill]
+        :param description: A description for the SearchIndexerSkillset
         :type description: Optional[str]
-        :return: The created Skillset
+        :return: The created SearchIndexerSkillset
         :rtype: dict
 
         .. admonition:: Example:
 
         .. literalinclude:: ../samples/sample_skillset_operations.py
             :start-after: [START create_skillset]
             :end-before: [END create_skillset]
             :language: python
             :dedent: 4
-            :caption: Create a Skillset
+            :caption: Create a SearchIndexerSkillset
         """
         kwargs["headers"] = self._merge_client_headers(kwargs.get("headers"))
-        skillset = Skillset(name=name, skills=list(skills), description=description)
+        skillset = SearchIndexerSkillset(
+            name=name, skills=list(skills), description=description
+        )
         return self._client.skillsets.create(skillset, **kwargs)
 
     @distributed_trace
     def create_or_update_skillset(self, name, **kwargs):
-        # type: (str, **Any) -> Skillset
-        """Create a new Skillset in an Azure Search service, or update an
+        # type: (str, **Any) -> SearchIndexerSkillset
+        """Create a new SearchIndexerSkillset in an Azure Search service, or update an
         existing one. The skillset param must be provided to perform the
         operation with access conditions.
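# [editor's note] Two hedged call shapes for create_or_update_skillset, per the
# keyword arguments documented below; the client instance and skill object are
# illustrative:
from azure.core import MatchConditions

# 1) Assemble from parts -- name, skills and description build a fresh skillset:
skillsets_client.create_or_update_skillset(
    "my-skillset", skills=[ocr_skill], description="demo skillset"
)

# 2) Pass a model -- required when ETag access conditions should apply:
skillset = skillsets_client.get_skillset("my-skillset")
skillsets_client.create_or_update_skillset(
    "my-skillset",
    skillset=skillset,
    match_condition=MatchConditions.IfNotModified,
)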
- :param name: The name of the Skillset to create or update + :param name: The name of the SearchIndexerSkillset to create or update :type name: str - :keyword skills: A list of Skill objects to include in the Skillset - :type skills: List[Skill] - :keyword description: A description for the Skillset + :keyword skills: A list of Skill objects to include in the SearchIndexerSkillset + :type skills: List[SearchIndexerSkill] + :keyword description: A description for the SearchIndexerSkillset :type description: Optional[str] - :keyword skillset: A Skillset to create or update. - :type skillset: :class:`~azure.search.documents.Skillset` + :keyword skillset: A SearchIndexerSkillset to create or update. + :type skillset: :class:`~azure.search.documents.SearchIndexerSkillset` :keyword match_condition: The match condition to use upon the etag :type match_condition: ~azure.core.MatchConditions - :return: The created or updated Skillset + :return: The created or updated SearchIndexerSkillset :rtype: dict If a `skillset` is passed in, any optional `skills`, or @@ -192,35 +194,28 @@ def create_or_update_skillset(self, name, **kwargs): """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - error_map = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError - } + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError} access_condition = None if "skillset" in kwargs: skillset = kwargs.pop("skillset") error_map, access_condition = get_access_conditions( - skillset, - kwargs.pop('match_condition', MatchConditions.Unconditionally) + skillset, kwargs.pop("match_condition", MatchConditions.Unconditionally) ) - skillset = Skillset.deserialize(skillset.serialize()) + kwargs.update(access_condition) + skillset = SearchIndexerSkillset.deserialize(skillset.serialize()) skillset.name = name for param in ("description", "skills"): if param in kwargs: setattr(skillset, param, kwargs.pop(param)) else: - skillset = Skillset( + skillset = SearchIndexerSkillset( name=name, description=kwargs.pop("description", None), skills=kwargs.pop("skills", None), ) return self._client.skillsets.create_or_update( - skillset_name=name, - skillset=skillset, - access_condition=access_condition, - error_map=error_map, - **kwargs + skillset_name=name, skillset=skillset, error_map=error_map, **kwargs ) diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_synonym_maps_client.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_synonym_maps_client.py index d5fe5ab9082a..965f173c666a 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_synonym_maps_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_synonym_maps_client.py @@ -16,7 +16,6 @@ if TYPE_CHECKING: # pylint:disable=unused-import,ungrouped-imports - from ._generated.models import Skill from typing import Any, Dict, List, Sequence, Union, Optional from azure.core.credentials import AzureKeyCredential @@ -130,18 +129,15 @@ def delete_synonym_map(self, synonym_map, **kwargs): """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) error_map, access_condition = get_access_conditions( - synonym_map, - kwargs.pop('match_condition', MatchConditions.Unconditionally) + synonym_map, kwargs.pop("match_condition", MatchConditions.Unconditionally) ) + kwargs.update(access_condition) try: name = synonym_map.name except AttributeError: name = synonym_map self._client.synonym_maps.delete( - synonym_map_name=name, - 
access_condition=access_condition, - error_map=error_map, - **kwargs + synonym_map_name=name, error_map=error_map, **kwargs ) @distributed_trace @@ -190,9 +186,9 @@ def create_or_update_synonym_map(self, synonym_map, synonyms=None, **kwargs): """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) error_map, access_condition = get_access_conditions( - synonym_map, - kwargs.pop('match_condition', MatchConditions.Unconditionally) + synonym_map, kwargs.pop("match_condition", MatchConditions.Unconditionally) ) + kwargs.update(access_condition) try: name = synonym_map.name if synonyms: @@ -204,7 +200,6 @@ def create_or_update_synonym_map(self, synonym_map, synonyms=None, **kwargs): result = self._client.synonym_maps.create_or_update( synonym_map_name=name, synonym_map=synonym_map, - access_condition=access_condition, error_map=error_map, **kwargs ) diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/_utils.py b/sdk/search/azure-search-documents/azure/search/documents/_service/_utils.py index 0b0f6e6d2605..b19457bf6c9e 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/_utils.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/_utils.py @@ -14,17 +14,15 @@ ResourceNotModifiedError, ) from ._generated.models import ( - Index, + SearchIndex, PatternAnalyzer as _PatternAnalyzer, PatternTokenizer as _PatternTokenizer, - AccessCondition ) from ._models import PatternAnalyzer, PatternTokenizer if TYPE_CHECKING: # pylint:disable=unused-import,ungrouped-imports from typing import Optional - from ._generated.models import Skillset DELIMITER = "|" @@ -118,7 +116,7 @@ def listize_flags_for_pattern_tokenizer(pattern_tokenizer): def delistize_flags_for_index(index): - # type: (Index) -> Index + # type: (SearchIndex) -> SearchIndex if index.analyzers: index.analyzers = [ delistize_flags_for_pattern_analyzer(x) # type: ignore @@ -137,7 +135,7 @@ def delistize_flags_for_index(index): def listize_flags_for_index(index): - # type: (Index) -> Index + # type: (SearchIndex) -> SearchIndex if index.analyzers: index.analyzers = [ listize_flags_for_pattern_analyzer(x) # type: ignore @@ -160,17 +158,15 @@ def listize_synonyms(synonym_map): synonym_map["synonyms"] = synonym_map["synonyms"].split("\n") return synonym_map + def get_access_conditions(model, match_condition=MatchConditions.Unconditionally): - # type: (Any, MatchConditions) -> Tuple[Dict[int, Any], AccessCondition] - error_map = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError - } + # type: (Any, MatchConditions) -> Tuple[Dict[int, Any], Dict[str, bool]] + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError} if isinstance(model, six.string_types): if match_condition is not MatchConditions.Unconditionally: raise ValueError("A model must be passed to use access conditions") - return (error_map, None) + return (error_map, {}) try: if_match = prep_if_match(model.e_tag, match_condition) @@ -184,7 +180,7 @@ def get_access_conditions(model, match_condition=MatchConditions.Unconditionally error_map[412] = ResourceNotFoundError if match_condition == MatchConditions.IfMissing: error_map[412] = ResourceExistsError - return (error_map, AccessCondition(if_match=if_match, if_none_match=if_none_match)) + return (error_map, dict(if_match=if_match, if_none_match=if_none_match)) except AttributeError: raise ValueError("Unable to get e_tag from the model") diff --git 
a/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_datasources_client.py b/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_datasources_client.py index 65d5dd8ef88a..25b6ccf36c40 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_datasources_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_datasources_client.py @@ -15,7 +15,7 @@ if TYPE_CHECKING: # pylint:disable=unused-import,ungrouped-imports - from .._generated.models import DataSource + from .._generated.models import SearchIndexerDataSource from typing import Any, Dict, Optional, Sequence, Union from azure.core.credentials import AzureKeyCredential @@ -57,11 +57,11 @@ async def close(self): @distributed_trace_async async def create_datasource(self, data_source, **kwargs): - # type: (DataSource, **Any) -> Dict[str, Any] + # type: (SearchIndexerDataSource, **Any) -> Dict[str, Any] """Creates a new datasource. :param data_source: The definition of the datasource to create. - :type data_source: ~search.models.DataSource - :return: The created DataSource + :type data_source: ~search.models.SearchIndexerDataSource + :return: The created SearchIndexerDataSource :rtype: dict .. admonition:: Example: @@ -71,7 +71,7 @@ async def create_datasource(self, data_source, **kwargs): :end-before: [END create_data_source_async] :language: python :dedent: 4 - :caption: Create a DataSource + :caption: Create a SearchIndexerDataSource """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) result = await self._client.data_sources.create(data_source, **kwargs) @@ -79,28 +79,27 @@ async def create_datasource(self, data_source, **kwargs): @distributed_trace_async async def create_or_update_datasource(self, data_source, name=None, **kwargs): - # type: (DataSource, Optional[str], **Any) -> Dict[str, Any] + # type: (SearchIndexerDataSource, Optional[str], **Any) -> Dict[str, Any] """Creates a new datasource or updates a datasource if it already exists. :param name: The name of the datasource to create or update. :type name: str :param data_source: The definition of the datasource to create or update. - :type data_source: ~search.models.DataSource + :type data_source: ~search.models.SearchIndexerDataSource :keyword match_condition: The match condition to use upon the etag :type match_condition: ~azure.core.MatchConditions - :return: The created DataSource + :return: The created SearchIndexerDataSource :rtype: dict """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) error_map, access_condition = get_access_conditions( - data_source, - kwargs.pop('match_condition', MatchConditions.Unconditionally) + data_source, kwargs.pop("match_condition", MatchConditions.Unconditionally) ) + kwargs.update(access_condition) if not name: name = data_source.name result = await self._client.data_sources.create_or_update( data_source_name=name, data_source=data_source, - access_condition=access_condition, error_map=error_map, **kwargs ) @@ -108,13 +107,13 @@ async def create_or_update_datasource(self, data_source, name=None, **kwargs): @distributed_trace_async async def delete_datasource(self, data_source, **kwargs): - # type: (Union[str, DataSource], **Any) -> None + # type: (Union[str, SearchIndexerDataSource], **Any) -> None """Deletes a datasource. To use access conditions, the Datasource model must be provided instead of the name. 
It is enough to provide the name of the datasource to delete unconditionally :param data_source: The datasource to delete. - :type data_source: str or ~search.models.DataSource + :type data_source: str or ~search.models.SearchIndexerDataSource :keyword match_condition: The match condition to use upon the etag :type match_condition: ~azure.core.MatchConditions :return: None @@ -127,22 +126,19 @@ async def delete_datasource(self, data_source, **kwargs): :end-before: [END delete_data_source_async] :language: python :dedent: 4 - :caption: Delete a DataSource + :caption: Delete a SearchIndexerDataSource """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) error_map, access_condition = get_access_conditions( - data_source, - kwargs.pop('match_condition', MatchConditions.Unconditionally) + data_source, kwargs.pop("match_condition", MatchConditions.Unconditionally) ) + kwargs.update(access_condition) try: name = data_source.name except AttributeError: name = data_source await self._client.data_sources.delete( - data_source_name=name, - access_condition=access_condition, - error_map=error_map, - **kwargs + data_source_name=name, error_map=error_map, **kwargs ) @distributed_trace_async @@ -152,14 +148,14 @@ async def get_datasource(self, name, **kwargs): :param name: The name of the datasource to retrieve. :type name: str - :return: The DataSource that is fetched. + :return: The SearchIndexerDataSource that is fetched. .. literalinclude:: ../samples/async_samples/sample_data_source_operations_async.py :start-after: [START get_data_source_async] :end-before: [END get_data_source_async] :language: python :dedent: 4 - :caption: Retrieve a DataSource + :caption: Retrieve a SearchIndexerDataSource """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) result = await self._client.data_sources.get(name, **kwargs) @@ -167,7 +163,7 @@ async def get_datasource(self, name, **kwargs): @distributed_trace_async async def get_datasources(self, **kwargs): - # type: (**Any) -> Sequence[DataSource] + # type: (**Any) -> Sequence[SearchIndexerDataSource] """Lists all datasources available for a search service. :return: List of all the data sources. @@ -180,7 +176,7 @@ async def get_datasources(self, **kwargs): :end-before: [END list_data_source_async] :language: python :dedent: 4 - :caption: List all DataSources + :caption: List all SearchIndexerDataSources """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) result = await self._client.data_sources.list(**kwargs) diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_indexers_client.py b/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_indexers_client.py index 90b74f62dbd8..cba6743bffcf 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_indexers_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_indexers_client.py @@ -15,7 +15,7 @@ if TYPE_CHECKING: # pylint:disable=unused-import,ungrouped-imports - from .._generated.models import Indexer, IndexerExecutionInfo + from .._generated.models import SearchIndexer, SearchIndexerStatus from typing import Any, Dict, Optional, Sequence from azure.core.credentials import AzureKeyCredential @@ -57,12 +57,12 @@ async def close(self): @distributed_trace_async async def create_indexer(self, indexer, **kwargs): - # type: (Indexer, **Any) -> Indexer - """Creates a new Indexers. 
+ # type: (SearchIndexer, **Any) -> SearchIndexer + """Creates a new SearchIndexer. :param indexer: The definition of the indexer to create. - :type indexer: ~azure.search.documents.Indexer - :return: The created Indexer + :type indexer: ~azure.search.documents.SearchIndexer + :return: The created SearchIndexer :rtype: dict .. admonition:: Example: @@ -72,7 +72,7 @@ async def create_indexer(self, indexer, **kwargs): :end-before: [END create_indexer_async] :language: python :dedent: 4 - :caption: Create an Indexer + :caption: Create a SearchIndexer """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) result = await self._client.indexers.create(indexer, **kwargs) @@ -80,41 +80,36 @@ async def create_indexer(self, indexer, **kwargs): @distributed_trace_async async def create_or_update_indexer(self, indexer, name=None, **kwargs): - # type: (Indexer, Optional[str], **Any) -> Indexer + # type: (SearchIndexer, Optional[str], **Any) -> SearchIndexer """Creates a new indexer or updates a indexer if it already exists. :param name: The name of the indexer to create or update. :type name: str :param indexer: The definition of the indexer to create or update. - :type indexer: ~azure.search.documents.Indexer - :return: The created Indexer + :type indexer: ~azure.search.documents.SearchIndexer + :return: The created SearchIndexer :rtype: dict """ + kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) error_map, access_condition = get_access_conditions( - indexer, - kwargs.pop('match_condition', MatchConditions.Unconditionally) + indexer, kwargs.pop("match_condition", MatchConditions.Unconditionally) ) - kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - + kwargs.update(access_condition) if not name: name = indexer.name result = await self._client.indexers.create_or_update( - indexer_name=name, - indexer=indexer, - access_condition=access_condition, - error_map=error_map, - **kwargs + indexer_name=name, indexer=indexer, error_map=error_map, **kwargs ) return result @distributed_trace_async async def get_indexer(self, name, **kwargs): - # type: (str, **Any) -> Indexer + # type: (str, **Any) -> SearchIndexer """Retrieves a indexer definition. :param name: The name of the indexer to retrieve. :type name: str - :return: The Indexer that is fetched. + :return: The SearchIndexer that is fetched. :rtype: dict .. admonition:: Example: @@ -124,7 +119,7 @@ async def get_indexer(self, name, **kwargs): :end-before: [END get_indexer_async] :language: python :dedent: 4 - :caption: Retrieve an Indexer + :caption: Retrieve a SearchIndexer """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) result = await self._client.indexers.get(name, **kwargs) @@ -132,10 +127,10 @@ async def get_indexer(self, name, **kwargs): @distributed_trace_async async def get_indexers(self, **kwargs): - # type: (**Any) -> Sequence[Indexer] + # type: (**Any) -> Sequence[SearchIndexer] """Lists all indexers available for a search service. - :return: List of all the Indexers. + :return: List of all the SearchIndexers. :rtype: `list[dict]` .. 
admonition:: Example: @@ -145,7 +140,7 @@ async def get_indexers(self, **kwargs): :end-before: [END list_indexer_async] :language: python :dedent: 4 - :caption: List all the Indexers + :caption: List all the SearchIndexers """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) result = await self._client.indexers.list(**kwargs) @@ -153,8 +148,8 @@ async def get_indexers(self, **kwargs): @distributed_trace_async async def delete_indexer(self, indexer, **kwargs): - # type: (Union[str, Indexer], **Any) -> None - """Deletes an indexer. To use access conditions, the Indexer model + # type: (Union[str, SearchIndexer], **Any) -> None + """Deletes an indexer. To use access conditions, the SearchIndexer model must be provided instead of the name. It is enough to provide the name of the indexer to delete unconditionally. @@ -173,18 +168,18 @@ async def delete_indexer(self, indexer, **kwargs): :end-before: [END delete_indexer_async] :language: python :dedent: 4 - :caption: Delete an Indexer + :caption: Delete a SearchIndexer """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) error_map, access_condition = get_access_conditions( - indexer, - kwargs.pop('match_condition', MatchConditions.Unconditionally) + indexer, kwargs.pop("match_condition", MatchConditions.Unconditionally) ) + kwargs.update(access_condition) try: name = indexer.name except AttributeError: name = indexer - await self._client.indexers.delete(name, access_condition=access_condition, error_map=error_map, **kwargs) + await self._client.indexers.delete(name, error_map=error_map, **kwargs) @distributed_trace_async async def run_indexer(self, name, **kwargs): @@ -204,7 +199,7 @@ async def run_indexer(self, name, **kwargs): :end-before: [END run_indexer_async] :language: python :dedent: 4 - :caption: Run an Indexer + :caption: Run a SearchIndexer """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) await self._client.indexers.run(name, **kwargs) @@ -227,21 +222,21 @@ async def reset_indexer(self, name, **kwargs): :end-before: [END reset_indexer_async] :language: python :dedent: 4 - :caption: Reset an Indexer's change tracking state + :caption: Reset a SearchIndexer's change tracking state """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) await self._client.indexers.reset(name, **kwargs) @distributed_trace_async async def get_indexer_status(self, name, **kwargs): - # type: (str, **Any) -> IndexerExecutionInfo + # type: (str, **Any) -> SearchIndexerStatus """Get the status of the indexer. :param name: The name of the indexer to fetch the status. :type name: str - :return: IndexerExecutionInfo - :rtype: IndexerExecutionInfo + :return: SearchIndexerStatus + :rtype: SearchIndexerStatus .. 
admonition:: Example:

@@ -250,7 +245,7 @@ async def get_indexer_status(self, name, **kwargs):
            :end-before: [END get_indexer_status_async]
            :language: python
            :dedent: 4
-            :caption: Get an Indexer's status
+            :caption: Get a SearchIndexer's status
        """
        kwargs["headers"] = self._merge_client_headers(kwargs.get("headers"))
        return await self._client.indexers.get_status(name, **kwargs)
diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_indexes_client.py b/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_indexes_client.py
index c7571ef63566..19cdd6bc2daf 100644
--- a/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_indexes_client.py
+++ b/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_indexes_client.py
@@ -12,14 +12,14 @@
 from .._utils import (
    delistize_flags_for_index,
    listize_flags_for_index,
-    get_access_conditions
+    get_access_conditions,
 )
 from ..._headers_mixin import HeadersMixin
 from ..._version import SDK_MONIKER

 if TYPE_CHECKING:
    # pylint:disable=unused-import,ungrouped-imports
-    from .._generated.models import AnalyzeRequest, AnalyzeResult, Index
+    from .._generated.models import AnalyzeRequest, AnalyzeResult, SearchIndex
    from typing import Any, Dict, List, Union

    from azure.core.credentials import AzureKeyCredential
@@ -61,11 +61,11 @@ async def close(self):

    @distributed_trace_async
    async def list_indexes(self, **kwargs):
-        # type: (**Any) -> AsyncItemPaged[Index]
+        # type: (**Any) -> AsyncItemPaged[SearchIndex]
        """List the indexes in an Azure Search service.

        :return: List of indexes
-        :rtype: list[~azure.search.documents.Index]
+        :rtype: list[~azure.search.documents.SearchIndex]
        :raises: ~azure.core.exceptions.HttpResponseError

        """
@@ -81,13 +81,13 @@ async def extract_data(response):

    @distributed_trace_async
    async def get_index(self, index_name, **kwargs):
-        # type: (str, **Any) -> Index
+        # type: (str, **Any) -> SearchIndex
        """
        :param index_name: The name of the index to retrieve.
        :type index_name: str
-        :return: Index object
-        :rtype: ~azure.search.documents.Index
+        :return: SearchIndex object
+        :rtype: ~azure.search.documents.SearchIndex
        :raises: ~azure.core.exceptions.HttpResponseError

        .. admonition:: Example:
@@ -122,12 +122,12 @@ async def get_index_statistics(self, index_name, **kwargs):

    @distributed_trace_async
    async def delete_index(self, index, **kwargs):
-        # type: (Union[str, Index], **Any) -> None
+        # type: (Union[str, SearchIndex], **Any) -> None
        """Deletes a search index and all the documents it contains.
        The model must be provided instead of the name to use the access conditions.

        :param index: The index to delete.
-        :type index: str or ~search.models.Index
+        :type index: str or ~search.models.SearchIndex
        :keyword match_condition: The match condition to use upon the etag
        :type match_condition: ~azure.core.MatchConditions
        :raises: ~azure.core.exceptions.HttpResponseError
@@ -143,29 +143,26 @@
        """
        kwargs["headers"] = self._merge_client_headers(kwargs.get("headers"))
        error_map, access_condition = get_access_conditions(
-            index,
-            kwargs.pop('match_condition', MatchConditions.Unconditionally)
+            index, kwargs.pop("match_condition", MatchConditions.Unconditionally)
        )
+        kwargs.update(access_condition)
        try:
            index_name = index.name
        except AttributeError:
            index_name = index
        await self._client.indexes.delete(
-            index_name=index_name,
-            access_condition=access_condition,
-            error_map=error_map,
-            **kwargs
+            index_name=index_name, error_map=error_map, **kwargs
        )

    @distributed_trace_async
    async def create_index(self, index, **kwargs):
-        # type: (Index, **Any) -> Index
+        # type: (SearchIndex, **Any) -> SearchIndex
        """Creates a new search index.

        :param index: The index object.
-        :type index: ~azure.search.documents.Index
+        :type index: ~azure.search.documents.SearchIndex
        :return: The index created
-        :rtype: ~azure.search.documents.Index
+        :rtype: ~azure.search.documents.SearchIndex
        :raises: ~azure.core.exceptions.HttpResponseError

        .. admonition:: Example:
@@ -184,19 +181,15 @@

    @distributed_trace_async
    async def create_or_update_index(
-        self,
-        index_name,
-        index,
-        allow_index_downtime=None,
-        **kwargs
+        self, index_name, index, allow_index_downtime=None, **kwargs
    ):
-        # type: (str, Index, bool, MatchConditions, **Any) -> Index
+        # type: (str, SearchIndex, bool, **Any) -> SearchIndex
        """Creates a new search index or updates an index if it already exists.

        :param index_name: The name of the index.
        :type index_name: str
        :param index: The index object.
-        :type index: ~azure.search.documents.Index
+        :type index: ~azure.search.documents.SearchIndex
        :param allow_index_downtime: Allows new analyzers, tokenizers, token filters, or char filters
            to be added to an index by taking the index offline for at least a few seconds. This
            temporarily causes indexing and query requests to fail. Performance and write availability of
@@ -206,7 +199,7 @@
        :keyword match_condition: The match condition to use upon the etag
        :type match_condition: ~azure.core.MatchConditions
        :return: The index created or updated
-        :rtype: :class:`~azure.search.documents.Index`
+        :rtype: :class:`~azure.search.documents.SearchIndex`
        :raises: :class:`~azure.core.exceptions.ResourceNotFoundError`, \
            :class:`~azure.core.exceptions.ResourceModifiedError`, \
            :class:`~azure.core.exceptions.ResourceNotModifiedError`, \
@@ -222,17 +215,16 @@
            :dedent: 4
            :caption: Update an index.
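        A hypothetical combined call (an illustrative sketch only: ``client`` and
        ``index`` are assumed to already exist, and the import comes from azure-core):

            from azure.core import MatchConditions

            result = await client.create_or_update_index(
                index_name=index.name,
                index=index,
                allow_index_downtime=True,                      # allow analyzer changes
                match_condition=MatchConditions.IfNotModified,  # update only if unchanged
            )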
""" + kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) error_map, access_condition = get_access_conditions( - index, - kwargs.pop('match_condition', MatchConditions.Unconditionally) + index, kwargs.pop("match_condition", MatchConditions.Unconditionally) ) - kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) + kwargs.update(access_condition) patched_index = delistize_flags_for_index(index) result = await self._client.indexes.create_or_update( index_name=index_name, index=patched_index, allow_index_downtime=allow_index_downtime, - access_condition=access_condition, error_map=error_map, **kwargs ) diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_skillsets_client.py b/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_skillsets_client.py index 75bfadfaef47..a53f74508589 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_skillsets_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_skillsets_client.py @@ -10,14 +10,14 @@ from azure.core.tracing.decorator_async import distributed_trace_async from .._generated.aio import SearchServiceClient as _SearchServiceClient -from .._generated.models import Skillset +from .._generated.models import SearchIndexerSkillset from .._utils import get_access_conditions from ..._headers_mixin import HeadersMixin from ..._version import SDK_MONIKER if TYPE_CHECKING: # pylint:disable=unused-import,ungrouped-imports - from .._generated.models import Skill + from .._generated.models import SearchIndexerSkill from typing import Any, List, Sequence, Union from azure.core.credentials import AzureKeyCredential @@ -59,10 +59,10 @@ async def close(self): @distributed_trace_async async def get_skillsets(self, **kwargs): - # type: (**Any) -> List[Skillset] - """List the Skillsets in an Azure Search service. + # type: (**Any) -> List[SearchIndexerSkillset] + """List the SearchIndexerSkillsets in an Azure Search service. 
- :return: List of Skillsets + :return: List of SearchIndexerSkillsets :rtype: list[dict] :raises: ~azure.core.exceptions.HttpResponseError @@ -73,7 +73,7 @@ async def get_skillsets(self, **kwargs): :end-before: [END get_skillsets] :language: python :dedent: 4 - :caption: List Skillsets + :caption: List SearchIndexerSkillsets """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) @@ -82,12 +82,12 @@ async def get_skillsets(self, **kwargs): @distributed_trace_async async def get_skillset(self, name, **kwargs): - # type: (str, **Any) -> Skillset - """Retrieve a named Skillset in an Azure Search service + # type: (str, **Any) -> SearchIndexerSkillset + """Retrieve a named SearchIndexerSkillset in an Azure Search service - :param name: The name of the Skillset to get + :param name: The name of the SearchIndexerSkillset to get :type name: str - :return: The retrieved Skillset + :return: The retrieved SearchIndexerSkillset :rtype: dict :raises: :class:`~azure.core.exceptions.ResourceNotFoundError` @@ -98,7 +98,7 @@ async def get_skillset(self, name, **kwargs): :end-before: [END get_skillset] :language: python :dedent: 4 - :caption: Get a Skillset + :caption: Get a SearchIndexerSkillset """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) @@ -106,13 +106,13 @@ async def get_skillset(self, name, **kwargs): @distributed_trace_async async def delete_skillset(self, skillset, **kwargs): - # type: (Union[str, Skillset], **Any) -> None - """Delete a named Skillset in an Azure Search service. To use access conditions, - the Skillset model must be provided instead of the name. It is enough to provide + # type: (Union[str, SearchIndexerSkillset], **Any) -> None + """Delete a named SearchIndexerSkillset in an Azure Search service. To use access conditions, + the SearchIndexerSkillset model must be provided instead of the name. 
It is enough to provide the name of the skillset to delete unconditionally.

-        :param name: The Skillset to delete
-        :type name: str or ~search.models.Skillset
+        :param skillset: The SearchIndexerSkillset to delete
+        :type skillset: str or ~search.models.SearchIndexerSkillset
        :keyword match_condition: The match condition to use upon the etag
        :type match_condition: ~azure.core.MatchConditions

@@ -123,32 +123,32 @@
            :end-before: [END delete_skillset]
            :language: python
            :dedent: 4
-            :caption: Delete a Skillset
+            :caption: Delete a SearchIndexerSkillset
        """
        kwargs["headers"] = self._merge_client_headers(kwargs.get("headers"))
        error_map, access_condition = get_access_conditions(
-            skillset,
-            kwargs.pop('match_condition', MatchConditions.Unconditionally)
+            skillset, kwargs.pop("match_condition", MatchConditions.Unconditionally)
        )
+        kwargs.update(access_condition)
        try:
            name = skillset.name
        except AttributeError:
            name = skillset
-        await self._client.skillsets.delete(name, access_condition=access_condition, error_map=error_map, **kwargs)
+        await self._client.skillsets.delete(name, error_map=error_map, **kwargs)

    @distributed_trace_async
    async def create_skillset(self, name, skills, description, **kwargs):
-        # type: (str, Sequence[Skill], str, **Any) -> Skillset
-        """Create a new Skillset in an Azure Search service
-        :param name: The name of the Skillset to create
+        # type: (str, Sequence[SearchIndexerSkill], str, **Any) -> SearchIndexerSkillset
+        """Create a new SearchIndexerSkillset in an Azure Search service
+        :param name: The name of the SearchIndexerSkillset to create
        :type name: str
-        :param skills: A list of Skill objects to include in the Skillset
-        :type skills: List[Skill]]
-        :param description: A description for the Skillset
+        :param skills: A list of SearchIndexerSkill objects to include in the SearchIndexerSkillset
+        :type skills: List[SearchIndexerSkill]
+        :param description: A description for the SearchIndexerSkillset
        :type description: Optional[str]
-        :return: The created Skillset
+        :return: The created SearchIndexerSkillset
        :rtype: dict

        .. admonition:: Example:
@@ -158,33 +158,35 @@
            :end-before: [END create_skillset]
            :language: python
            :dedent: 4
-            :caption: Create a Skillset
+            :caption: Create a SearchIndexerSkillset
        """
        kwargs["headers"] = self._merge_client_headers(kwargs.get("headers"))
-        skillset = Skillset(name=name, skills=list(skills), description=description)
+        skillset = SearchIndexerSkillset(
+            name=name, skills=list(skills), description=description
+        )
        return await self._client.skillsets.create(skillset, **kwargs)

    @distributed_trace_async
    async def create_or_update_skillset(self, name, **kwargs):
-        # type: (str, **Any) -> Skillset
-        """Create a new Skillset in an Azure Search service, or update an
+        # type: (str, **Any) -> SearchIndexerSkillset
+        """Create a new SearchIndexerSkillset in an Azure Search service, or update an
        existing one. The skillset param must be provided to perform the
        operation with access conditions.
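        For example, an illustrative sketch of that pattern (``client`` and the
        skillset name below are assumptions):

            from azure.core import MatchConditions

            # Fetch the model first so it carries an e_tag, then update only if the
            # service-side skillset has not changed since it was fetched.
            skillset = await client.get_skillset("my-skillset")
            skillset.description = "updated description"
            result = await client.create_or_update_skillset(
                "my-skillset",
                skillset=skillset,
                match_condition=MatchConditions.IfNotModified,
            )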
- :param name: The name of the Skillset to create or update + :param name: The name of the SearchIndexerSkillset to create or update :type name: str - :keyword skills: A list of Skill objects to include in the Skillset - :type skills: List[Skill] - :keyword description: A description for the Skillset + :keyword skills: A list of Skill objects to include in the SearchIndexerSkillset + :type skills: List[SearchIndexerSkill] + :keyword description: A description for the SearchIndexerSkillset :type description: Optional[str] - :keyword skillset: A Skillset to create or update. - :type skillset: :class:`~azure.search.documents.Skillset` + :keyword skillset: A SearchIndexerSkillset to create or update. + :type skillset: :class:`~azure.search.documents.SearchIndexerSkillset` :keyword match_condition: The match condition to use upon the etag :type match_condition: ~azure.core.MatchConditions - :return: The created or updated Skillset + :return: The created or updated SearchIndexerSkillset :rtype: dict If a `skillset` is passed in, any optional `skills`, or @@ -193,35 +195,28 @@ async def create_or_update_skillset(self, name, **kwargs): """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) - error_map = { - 401: ClientAuthenticationError, - 404: ResourceNotFoundError - } + error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError} access_condition = None if "skillset" in kwargs: skillset = kwargs.pop("skillset") error_map, access_condition = get_access_conditions( - skillset, - kwargs.pop('match_condition', MatchConditions.Unconditionally) + skillset, kwargs.pop("match_condition", MatchConditions.Unconditionally) ) - skillset = Skillset.deserialize(skillset.serialize()) + kwargs.update(access_condition) + skillset = SearchIndexerSkillset.deserialize(skillset.serialize()) skillset.name = name for param in ("description", "skills"): if param in kwargs: setattr(skillset, param, kwargs.pop(param)) else: - skillset = Skillset( + skillset = SearchIndexerSkillset( name=name, description=kwargs.pop("description", None), skills=kwargs.pop("skills", None), ) return await self._client.skillsets.create_or_update( - skillset_name=name, - skillset=skillset, - access_condition=access_condition, - error_map=error_map, - **kwargs + skillset_name=name, skillset=skillset, error_map=error_map, **kwargs ) diff --git a/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_synonym_maps_client.py b/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_synonym_maps_client.py index 7818d6834cd0..a3d6f88dead0 100644 --- a/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_synonym_maps_client.py +++ b/sdk/search/azure-search-documents/azure/search/documents/_service/aio/_synonym_maps_client.py @@ -16,7 +16,6 @@ if TYPE_CHECKING: # pylint:disable=unused-import,ungrouped-imports - from .._generated.models import Skill from typing import Any, Dict, List, Sequence, Union, Optional from azure.core.credentials import AzureKeyCredential @@ -131,18 +130,15 @@ async def delete_synonym_map(self, synonym_map, **kwargs): """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) error_map, access_condition = get_access_conditions( - synonym_map, - kwargs.pop('match_condition', MatchConditions.Unconditionally) + synonym_map, kwargs.pop("match_condition", MatchConditions.Unconditionally) ) + kwargs.update(access_condition) try: name = synonym_map.name except AttributeError: name = synonym_map await self._client.synonym_maps.delete( - 
synonym_map_name=name, - access_condition=access_condition, - error_map=error_map, - **kwargs + synonym_map_name=name, error_map=error_map, **kwargs ) @distributed_trace_async @@ -191,9 +187,9 @@ async def create_or_update_synonym_map(self, synonym_map, synonyms=None, **kwarg """ kwargs["headers"] = self._merge_client_headers(kwargs.get("headers")) error_map, access_condition = get_access_conditions( - synonym_map, - kwargs.pop('match_condition', MatchConditions.Unconditionally) + synonym_map, kwargs.pop("match_condition", MatchConditions.Unconditionally) ) + kwargs.update(access_condition) try: name = synonym_map.name if synonyms: @@ -205,7 +201,6 @@ async def create_or_update_synonym_map(self, synonym_map, synonyms=None, **kwarg result = await self._client.synonym_maps.create_or_update( synonym_map_name=name, synonym_map=synonym_map, - access_condition=access_condition, error_map=error_map, **kwargs ) diff --git a/sdk/search/azure-search-documents/tests/async_tests/test_service_live_async.py b/sdk/search/azure-search-documents/tests/async_tests/test_service_live_async.py index d9c8d18700f4..60994ffe248f 100644 --- a/sdk/search/azure-search-documents/tests/async_tests/test_service_live_async.py +++ b/sdk/search/azure-search-documents/tests/async_tests/test_service_live_async.py @@ -24,17 +24,16 @@ AnalyzeResult, CorsOptions, EntityRecognitionSkill, - Field, - Index, + SearchIndex, InputFieldMappingEntry, OutputFieldMappingEntry, SearchServiceClient, ScoringProfile, - Skillset, + SearchIndexerSkillset, DataSourceCredentials, - DataSource, - DataContainer, - Indexer, + SearchIndexerDataSource, + SearchIndexerDataContainer, + SearchIndexer, SynonymMap, SimpleField, edm @@ -143,7 +142,7 @@ async def test_delete_indexes_if_unchanged(self, api_key, endpoint, index_name, scoring_profiles = [] scoring_profiles.append(scoring_profile) cors_options = CorsOptions(allowed_origins=["*"], max_age_in_seconds=60) - index = Index( + index = SearchIndex( name=name, fields=fields, scoring_profiles=scoring_profiles, @@ -173,7 +172,7 @@ async def test_create_index(self, api_key, endpoint, index_name, **kwargs): scoring_profiles = [] scoring_profiles.append(scoring_profile) cors_options = CorsOptions(allowed_origins=["*"], max_age_in_seconds=60) - index = Index( + index = SearchIndex( name=name, fields=fields, scoring_profiles=scoring_profiles, @@ -196,7 +195,7 @@ async def test_create_or_update_index(self, api_key, endpoint, index_name, **kwa cors_options = CorsOptions(allowed_origins=["*"], max_age_in_seconds=60) scoring_profiles = [] - index = Index( + index = SearchIndex( name=name, fields=fields, scoring_profiles=scoring_profiles, @@ -211,7 +210,7 @@ async def test_create_or_update_index(self, api_key, endpoint, index_name, **kwa ) scoring_profiles = [] scoring_profiles.append(scoring_profile) - index = Index( + index = SearchIndex( name=name, fields=fields, scoring_profiles=scoring_profiles, @@ -245,7 +244,7 @@ async def test_create_or_update_indexes_if_unchanged(self, api_key, endpoint, in scoring_profiles = [] scoring_profiles.append(scoring_profile) cors_options = CorsOptions(allowed_origins=["*"], max_age_in_seconds=60) - index = Index( + index = SearchIndex( name=name, fields=fields, scoring_profiles=scoring_profiles, @@ -379,7 +378,7 @@ async def test_create_skillset(self, api_key, endpoint, index_name, **kwargs): outputs=[OutputFieldMappingEntry(name="organizations", target_name="organizations")]) result = await client.create_skillset(name='test-ss', skills=[s], description="desc") - assert 
isinstance(result, Skillset) + assert isinstance(result, SearchIndexerSkillset) assert result.name == "test-ss" assert result.description == "desc" assert result.e_tag @@ -430,7 +429,7 @@ async def test_get_skillset(self, api_key, endpoint, index_name, **kwargs): assert len(await client.get_skillsets()) == 1 result = await client.get_skillset("test-ss") - assert isinstance(result, Skillset) + assert isinstance(result, SearchIndexerSkillset) assert result.name == "test-ss" assert result.description == "desc" assert result.e_tag @@ -448,7 +447,7 @@ async def test_get_skillsets(self, api_key, endpoint, index_name, **kwargs): await client.create_skillset(name='test-ss-2', skills=[s], description="desc2") result = await client.get_skillsets() assert isinstance(result, list) - assert all(isinstance(x, Skillset) for x in result) + assert all(isinstance(x, SearchIndexerSkillset) for x in result) assert set(x.name for x in result) == {"test-ss-1", "test-ss-2"} @SearchResourceGroupPreparer(random_name_enabled=True) @@ -463,7 +462,7 @@ async def test_create_or_update_skillset(self, api_key, endpoint, index_name, ** assert len(await client.get_skillsets()) == 1 result = await client.get_skillset("test-ss") - assert isinstance(result, Skillset) + assert isinstance(result, SearchIndexerSkillset) assert result.name == "test-ss" assert result.description == "desc2" @@ -479,7 +478,7 @@ async def test_create_or_update_skillset_inplace(self, api_key, endpoint, index_ assert len(await client.get_skillsets()) == 1 result = await client.get_skillset("test-ss") - assert isinstance(result, Skillset) + assert isinstance(result, SearchIndexerSkillset) assert result.name == "test-ss" assert result.description == "desc2" @@ -505,8 +504,8 @@ class SearchDataSourcesClientTest(AzureMgmtTestCase): def _create_datasource(self, name="sample-datasource"): credentials = DataSourceCredentials(connection_string=CONNECTION_STRING) - container = DataContainer(name='searchcontainer') - data_source = DataSource( + container = SearchIndexerDataContainer(name='searchcontainer') + data_source = SearchIndexerDataSource( name=name, type="azureblob", credentials=credentials, @@ -611,8 +610,8 @@ async def _prepare_indexer(self, endpoint, api_key, name="sample-indexer", ds_na con_str = self.settings.AZURE_STORAGE_CONNECTION_STRING self.scrubber.register_name_pair(con_str, 'connection_string') credentials = DataSourceCredentials(connection_string=con_str) - container = DataContainer(name='searchcontainer') - data_source = DataSource( + container = SearchIndexerDataContainer(name='searchcontainer') + data_source = SearchIndexerDataSource( name=ds_name, type="azureblob", credentials=credentials, @@ -630,10 +629,10 @@ async def _prepare_indexer(self, endpoint, api_key, name="sample-indexer", ds_na "key": True, "searchable": False }] - index = Index(name=index_name, fields=fields) + index = SearchIndex(name=index_name, fields=fields) ind_client = client.get_indexes_client() ind = await ind_client.create_index(index) - return Indexer(name=name, data_source_name=ds.name, target_index_name=ind.name) + return SearchIndexer(name=name, data_source_name=ds.name, target_index_name=ind.name) @SearchResourceGroupPreparer(random_name_enabled=True) @SearchServicePreparer(schema=SCHEMA, index_batch=BATCH) diff --git a/sdk/search/azure-search-documents/tests/test_regex_flags.py b/sdk/search/azure-search-documents/tests/test_regex_flags.py index 6474bbed6c6d..26775607d3b7 100644 --- a/sdk/search/azure-search-documents/tests/test_regex_flags.py +++ 
b/sdk/search/azure-search-documents/tests/test_regex_flags.py @@ -3,7 +3,7 @@ # Licensed under the MIT License. # ------------------------------------ -from azure.search.documents import Index, RegexFlags, PatternAnalyzer, PatternTokenizer +from azure.search.documents import SearchIndex, RegexFlags, PatternAnalyzer, PatternTokenizer from azure.search.documents._service._generated.models import ( PatternAnalyzer as _PatternAnalyzer, PatternTokenizer as _PatternTokenizer, @@ -23,7 +23,7 @@ def test_listize_flags_for_index(): ) tokenizers = [] tokenizers.append(pattern_tokenizer) - index = Index( + index = SearchIndex( name="test", fields=None, analyzers=analyzers, @@ -50,7 +50,7 @@ def test_listize_multi_flags_for_index(): ) tokenizers = [] tokenizers.append(pattern_tokenizer) - index = Index( + index = SearchIndex( name="test", fields=None, analyzers=analyzers, @@ -79,7 +79,7 @@ def test_listize_flags_for_index_enum(): ) tokenizers = [] tokenizers.append(pattern_tokenizer) - index = Index( + index = SearchIndex( name="test", fields=None, analyzers=analyzers, @@ -106,7 +106,7 @@ def test_delistize_flags_for_index(): ) tokenizers = [] tokenizers.append(pattern_tokenizer) - index = Index( + index = SearchIndex( name="test", fields=None, analyzers=analyzers, @@ -133,7 +133,7 @@ def test_delistize_multi_flags_for_index(): ) tokenizers = [] tokenizers.append(pattern_tokenizer) - index = Index( + index = SearchIndex( name="test", fields=None, analyzers=analyzers, diff --git a/sdk/search/azure-search-documents/tests/test_service_live.py b/sdk/search/azure-search-documents/tests/test_service_live.py index 947ce3a85c88..08638d837af4 100644 --- a/sdk/search/azure-search-documents/tests/test_service_live.py +++ b/sdk/search/azure-search-documents/tests/test_service_live.py @@ -21,17 +21,16 @@ AnalyzeResult, CorsOptions, EntityRecognitionSkill, - Field, - Index, + SearchIndex, InputFieldMappingEntry, OutputFieldMappingEntry, SearchServiceClient, ScoringProfile, - Skillset, + SearchIndexerSkillset, DataSourceCredentials, - DataSource, - Indexer, - DataContainer, + SearchIndexerDataSource, + SearchIndexer, + SearchIndexerDataContainer, SynonymMap, SimpleField, edm @@ -126,7 +125,7 @@ def test_delete_indexes_if_unchanged(self, api_key, endpoint, index_name, **kwar scoring_profiles = [] scoring_profiles.append(scoring_profile) cors_options = CorsOptions(allowed_origins=["*"], max_age_in_seconds=60) - index = Index( + index = SearchIndex( name=name, fields=fields, scoring_profiles=scoring_profiles, @@ -155,7 +154,7 @@ def test_create_index(self, api_key, endpoint, index_name, **kwargs): scoring_profiles = [] scoring_profiles.append(scoring_profile) cors_options = CorsOptions(allowed_origins=["*"], max_age_in_seconds=60) - index = Index( + index = SearchIndex( name=name, fields=fields, scoring_profiles=scoring_profiles, @@ -177,7 +176,7 @@ def test_create_or_update_index(self, api_key, endpoint, index_name, **kwargs): ] cors_options = CorsOptions(allowed_origins=["*"], max_age_in_seconds=60) scoring_profiles = [] - index = Index( + index = SearchIndex( name=name, fields=fields, scoring_profiles=scoring_profiles, @@ -192,7 +191,7 @@ def test_create_or_update_index(self, api_key, endpoint, index_name, **kwargs): ) scoring_profiles = [] scoring_profiles.append(scoring_profile) - index = Index( + index = SearchIndex( name=name, fields=fields, scoring_profiles=scoring_profiles, @@ -226,7 +225,7 @@ def test_create_or_update_indexes_if_unchanged(self, api_key, endpoint, index_na scoring_profiles = [] 
scoring_profiles.append(scoring_profile) cors_options = CorsOptions(allowed_origins=["*"], max_age_in_seconds=60) - index = Index( + index = SearchIndex( name=name, fields=fields, scoring_profiles=scoring_profiles, @@ -378,7 +377,7 @@ def test_create_skillset(self, api_key, endpoint, index_name, **kwargs): outputs=[OutputFieldMappingEntry(name="organizations", target_name="organizations")]) result = client.create_skillset(name='test-ss', skills=[s], description="desc") - assert isinstance(result, Skillset) + assert isinstance(result, SearchIndexerSkillset) assert result.name == "test-ss" assert result.description == "desc" assert result.e_tag @@ -427,7 +426,7 @@ def test_get_skillset(self, api_key, endpoint, index_name, **kwargs): assert len(client.get_skillsets()) == 1 result = client.get_skillset("test-ss") - assert isinstance(result, Skillset) + assert isinstance(result, SearchIndexerSkillset) assert result.name == "test-ss" assert result.description == "desc" assert result.e_tag @@ -445,7 +444,7 @@ def test_get_skillsets(self, api_key, endpoint, index_name, **kwargs): client.create_skillset(name='test-ss-2', skills=[s], description="desc2") result = client.get_skillsets() assert isinstance(result, list) - assert all(isinstance(x, Skillset) for x in result) + assert all(isinstance(x, SearchIndexerSkillset) for x in result) assert set(x.name for x in result) == {"test-ss-1", "test-ss-2"} @SearchResourceGroupPreparer(random_name_enabled=True) @@ -460,7 +459,7 @@ def test_create_or_update_skillset(self, api_key, endpoint, index_name, **kwargs assert len(client.get_skillsets()) == 1 result = client.get_skillset("test-ss") - assert isinstance(result, Skillset) + assert isinstance(result, SearchIndexerSkillset) assert result.name == "test-ss" assert result.description == "desc2" @@ -476,7 +475,7 @@ def test_create_or_update_skillset_inplace(self, api_key, endpoint, index_name, assert len(client.get_skillsets()) == 1 result = client.get_skillset("test-ss") - assert isinstance(result, Skillset) + assert isinstance(result, SearchIndexerSkillset) assert result.name == "test-ss" assert result.description == "desc2" @@ -501,8 +500,8 @@ class SearchDataSourcesClientTest(AzureMgmtTestCase): def _create_datasource(self, name="sample-datasource"): credentials = DataSourceCredentials(connection_string=CONNECTION_STRING) - container = DataContainer(name='searchcontainer') - data_source = DataSource( + container = SearchIndexerDataContainer(name='searchcontainer') + data_source = SearchIndexerDataSource( name=name, type="azureblob", credentials=credentials, @@ -624,8 +623,8 @@ def _prepare_indexer(self, endpoint, api_key, name="sample-indexer", ds_name="sa con_str = self.settings.AZURE_STORAGE_CONNECTION_STRING self.scrubber.register_name_pair(con_str, 'connection_string') credentials = DataSourceCredentials(connection_string=con_str) - container = DataContainer(name='searchcontainer') - data_source = DataSource( + container = SearchIndexerDataContainer(name='searchcontainer') + data_source = SearchIndexerDataSource( name=ds_name, type="azureblob", credentials=credentials, @@ -642,9 +641,9 @@ def _prepare_indexer(self, endpoint, api_key, name="sample-indexer", ds_name="sa "key": True, "searchable": False }] - index = Index(name=index_name, fields=fields) + index = SearchIndex(name=index_name, fields=fields) ind = client.get_indexes_client().create_index(index) - return Indexer(name=name, data_source_name=ds.name, target_index_name=ind.name) + return SearchIndexer(name=name, data_source_name=ds.name, 
target_index_name=ind.name) @SearchResourceGroupPreparer(random_name_enabled=True) @SearchServicePreparer(schema=SCHEMA, index_batch=BATCH) From 928587156b73036915c00f92187260e23f996c69 Mon Sep 17 00:00:00 2001 From: chunyu3 Date: Wed, 20 May 2020 15:17:22 +0800 Subject: [PATCH 18/28] Datashare 2019 11 01 (#11540) * Update from master * release datashare 2019-11-01 * Packaging update of azure-mgmt-datashare * Update version.py * Update CHANGELOG.md Co-authored-by: SDK Automation Co-authored-by: Azure SDK Bot Co-authored-by: Zim Kalinowski --- .../azure-mgmt-datashare/CHANGELOG.md | 81 ++ sdk/datashare/azure-mgmt-datashare/README.md | 30 +- .../_data_share_management_client.py | 2 +- .../azure/mgmt/datashare/models/__init__.py | 18 +- .../_data_share_management_client_enums.py | 15 +- .../azure/mgmt/datashare/models/_models.py | 782 +++++++++++++---- .../mgmt/datashare/models/_models_py3.py | 806 ++++++++++++++---- .../mgmt/datashare/models/_paged_models.py | 24 +- .../operations/_accounts_operations.py | 4 +- .../_consumer_invitations_operations.py | 4 +- .../_consumer_source_data_sets_operations.py | 4 +- .../_data_set_mappings_operations.py | 8 +- .../operations/_data_sets_operations.py | 84 +- .../operations/_invitations_operations.py | 4 +- .../mgmt/datashare/operations/_operations.py | 4 +- ...provider_share_subscriptions_operations.py | 4 +- .../_share_subscriptions_operations.py | 164 ++-- .../operations/_shares_operations.py | 4 +- .../_synchronization_settings_operations.py | 7 +- .../operations/_triggers_operations.py | 4 +- .../azure/mgmt/datashare/version.py | 2 +- sdk/datashare/azure-mgmt-datashare/setup.py | 5 +- 22 files changed, 1553 insertions(+), 507 deletions(-) diff --git a/sdk/datashare/azure-mgmt-datashare/CHANGELOG.md b/sdk/datashare/azure-mgmt-datashare/CHANGELOG.md index 3a6749ef6277..06a1de32ff25 100644 --- a/sdk/datashare/azure-mgmt-datashare/CHANGELOG.md +++ b/sdk/datashare/azure-mgmt-datashare/CHANGELOG.md @@ -1,5 +1,86 @@ # Release History +## 0.2.0 (2020-05-14) + +**Features** + + - Model ShareSubscriptionSynchronization has a new parameter synchronization_mode + - Model ProviderShareSubscription has a new parameter consumer_name + - Model ProviderShareSubscription has a new parameter consumer_email + - Model ProviderShareSubscription has a new parameter consumer_tenant_name + - Model ProviderShareSubscription has a new parameter provider_email + - Model ProviderShareSubscription has a new parameter provider_name + - Model ADLSGen2FileSystemDataSetMapping has a new parameter provisioning_state + - Model SqlDWTableDataSetMapping has a new parameter provisioning_state + - Model Invitation has a new parameter user_name + - Model Invitation has a new parameter user_email + - Model Account has a new parameter user_name + - Model Account has a new parameter user_email + - Model ShareSubscription has a new parameter provider_tenant_name + - Model ShareSubscription has a new parameter user_name + - Model ShareSubscription has a new parameter provider_email + - Model ShareSubscription has a new parameter user_email + - Model ShareSubscription has a new parameter provider_name + - Model ADLSGen2FolderDataSetMapping has a new parameter provisioning_state + - Model ConsumerSourceDataSet has a new parameter data_set_location + - Model ConsumerSourceDataSet has a new parameter data_set_path + - Model BlobFolderDataSetMapping has a new parameter provisioning_state + - Model ScheduledTrigger has a new parameter user_name + - Model Share has a new parameter user_name + - 
Model Share has a new parameter user_email
+  - Model BlobContainerDataSetMapping has a new parameter provisioning_state
+  - Model ScheduledSynchronizationSetting has a new parameter user_name
+  - Model ShareSynchronization has a new parameter consumer_name
+  - Model ShareSynchronization has a new parameter consumer_email
+  - Model ShareSynchronization has a new parameter consumer_tenant_name
+  - Model ShareSynchronization has a new parameter synchronization_mode
+  - Model ADLSGen2FileDataSetMapping has a new parameter provisioning_state
+  - Model SqlDBTableDataSetMapping has a new parameter provisioning_state
+  - Model ConsumerInvitation has a new parameter provider_tenant_name
+  - Model ConsumerInvitation has a new parameter user_name
+  - Model ConsumerInvitation has a new parameter provider_email
+  - Model ConsumerInvitation has a new parameter user_email
+  - Model ConsumerInvitation has a new parameter provider_name
+  - Model BlobDataSetMapping has a new parameter provisioning_state
+
+**Breaking changes**
+
+  - Parameter data_set_id of model ADLSGen2FileSystemDataSetMapping is now required
+  - Parameter data_set_id of model SqlDWTableDataSetMapping is now required
+  - Parameter data_set_id of model ADLSGen2FolderDataSetMapping is now required
+  - Parameter data_warehouse_name of model SqlDWTableDataSet is now required
+  - Parameter table_name of model SqlDWTableDataSet is now required
+  - Parameter sql_server_resource_id of model SqlDWTableDataSet is now required
+  - Parameter data_set_id of model BlobFolderDataSetMapping is now required
+  - Parameter data_set_id of model BlobContainerDataSetMapping is now required
+  - Parameter data_set_id of model ADLSGen2FileDataSetMapping is now required
+  - Parameter data_set_id of model SqlDBTableDataSetMapping is now required
+  - Parameter database_name of model SqlDBTableDataSet is now required
+  - Parameter table_name of model SqlDBTableDataSet is now required
+  - Parameter sql_server_resource_id of model SqlDBTableDataSet is now required
+  - Parameter data_set_id of model BlobDataSetMapping is now required
+  - Operation ShareSubscriptionsOperations.create has a new signature
+  - Model ProviderShareSubscription no longer has parameter shared_by
+  - Model ProviderShareSubscription no longer has parameter company
+  - Model ProviderShareSubscription no longer has parameter created_by
+  - Model SqlDWTableDataSetMapping has a new required parameter schema_name
+  - Model Invitation no longer has parameter sender
+  - Model Account no longer has parameter created_by
+  - Model ShareSubscription no longer has parameter share_sender
+  - Model ShareSubscription no longer has parameter share_sender_company_name
+  - Model ShareSubscription no longer has parameter created_by
+  - Model ShareSubscription has a new required parameter source_share_location
+  - Model SqlDWTableDataSet has a new required parameter schema_name
+  - Model ScheduledTrigger no longer has parameter created_by
+  - Model Share no longer has parameter created_by
+  - Model ScheduledSynchronizationSetting no longer has parameter created_by
+  - Model ShareSynchronization no longer has parameter company
+  - Model ShareSynchronization no longer has parameter recipient
+  - Model SqlDBTableDataSetMapping has a new required parameter schema_name
+  - Model ConsumerInvitation no longer has parameter sender
+  - Model ConsumerInvitation no longer has parameter sender_company_name
+  - Model SqlDBTableDataSet has a new required parameter schema_name
+
 ## 0.1.0rc1 (2019-09-29)

 - Initial Release
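Taken together, the breaking changes above mean a consumer can no longer create a
share subscription from an invitation id alone; the Azure location of the source
share must now be supplied as well. A hedged migration sketch (resource names and
the location value are placeholders, and the exact `create` parameter shape should
be verified against the generated `ShareSubscriptionsOperations` in this release):

    # 0.1.0rc1 (old): an invitation id was sufficient to create the subscription
    # 0.2.0 (new): source_share_location is required alongside the invitation id
    client.share_subscriptions.create(
        resource_group_name="my-rg",
        account_name="my-account",
        share_subscription_name="my-subscription",
        invitation_id=invitation_id,
        source_share_location="eastus2",  # placeholder: location of the provider's share
    )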
diff --git a/sdk/datashare/azure-mgmt-datashare/README.md b/sdk/datashare/azure-mgmt-datashare/README.md
index 83a99a4cab22..509fd2571308 100644
--- a/sdk/datashare/azure-mgmt-datashare/README.md
+++ b/sdk/datashare/azure-mgmt-datashare/README.md
@@ -1,29 +1,21 @@
-## Microsoft Azure SDK for Python
+# Microsoft Azure SDK for Python
 This is the Microsoft Azure Data Share Management Client Library.
+This package has been tested with Python 2.7, 3.5, 3.6, 3.7 and 3.8.
+For a more complete view of Azure libraries, see the [Github repo](https://github.com/Azure/azure-sdk-for-python/)
-Azure Resource Manager (ARM) is the next generation of management APIs
-that replace the old Azure Service Management (ASM).
-This package has been tested with Python 2.7, 3.5, 3.6 and 3.7.
+# Usage
-For the older Azure Service Management (ASM) libraries, see
-[azure-servicemanagement-legacy](https://pypi.python.org/pypi/azure-servicemanagement-legacy)
-library.
+For code examples, see [Data Share Management](https://docs.microsoft.com/python/api/overview/azure/)
+on docs.microsoft.com.
-For a more complete set of Azure libraries, see the
-[azure](https://pypi.python.org/pypi/azure) bundle package.
+# Provide Feedback
-## Usage
+If you encounter any bugs or have suggestions, please file an issue in the
+[Issues](https://github.com/Azure/azure-sdk-for-python/issues)
-For code examples, see [MyService
-Management](https://docs.microsoft.com/python/api/overview/azure/) on
-docs.microsoft.com.
 section of the project.
-## Provide Feedback
+
-If you encounter any bugs or have suggestions, please file an issue in
-the [Issues](https://github.com/Azure/azure-sdk-for-python/issues)
+![Impressions](https://azure-sdk-impressions.azurewebsites.net/api/impressions/azure-sdk-for-python%2Fazure-mgmt-datashare%2FREADME.png)
diff --git a/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/_data_share_management_client.py b/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/_data_share_management_client.py
index 19a375c22f11..70b9adaed9bb 100644
--- a/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/_data_share_management_client.py
+++ b/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/_data_share_management_client.py
@@ -74,7 +74,7 @@ def __init__(
        super(DataShareManagementClient, self).__init__(self.config.credentials, self.config)

        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
-        self.api_version = '2018-11-01-preview'
+        self.api_version = '2019-11-01'
        self._serialize = Serializer(client_models)
        self._deserialize = Deserializer(client_models)

diff --git a/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/models/__init__.py b/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/models/__init__.py
index b48f8c52adac..3886fa1dba1b 100644
--- a/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/models/__init__.py
+++ b/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/models/__init__.py
@@ -36,6 +36,10 @@
    from ._models_py3 import DimensionProperties
    from ._models_py3 import Identity
    from ._models_py3 import Invitation
+    from ._models_py3 import KustoClusterDataSet
+    from ._models_py3 import KustoClusterDataSetMapping
+    from ._models_py3 import KustoDatabaseDataSet
+    from ._models_py3 import KustoDatabaseDataSetMapping
    from ._models_py3 import OperationMetaLogSpecification
    from ._models_py3 import OperationMetaMetricSpecification
from ._models_py3 import OperationMetaServiceSpecification @@ -87,6 +91,10 @@ from ._models import DimensionProperties from ._models import Identity from ._models import Invitation + from ._models import KustoClusterDataSet + from ._models import KustoClusterDataSetMapping + from ._models import KustoDatabaseDataSet + from ._models import KustoDatabaseDataSetMapping from ._models import OperationMetaLogSpecification from ._models import OperationMetaMetricSpecification from ._models import OperationMetaServiceSpecification @@ -133,9 +141,9 @@ Status, InvitationStatus, ShareKind, + SynchronizationMode, DataSetType, ShareSubscriptionStatus, - SynchronizationMode, RecurrenceInterval, TriggerStatus, DataSetMappingStatus, @@ -169,6 +177,10 @@ 'DimensionProperties', 'Identity', 'Invitation', + 'KustoClusterDataSet', + 'KustoClusterDataSetMapping', + 'KustoDatabaseDataSet', + 'KustoDatabaseDataSetMapping', 'OperationMetaLogSpecification', 'OperationMetaMetricSpecification', 'OperationMetaServiceSpecification', @@ -203,9 +215,9 @@ 'ShareSynchronizationPaged', 'SynchronizationDetailsPaged', 'ProviderShareSubscriptionPaged', + 'ShareSubscriptionPaged', 'SourceShareSynchronizationSettingPaged', 'ShareSubscriptionSynchronizationPaged', - 'ShareSubscriptionPaged', 'ConsumerSourceDataSetPaged', 'SynchronizationSettingPaged', 'TriggerPaged', @@ -214,9 +226,9 @@ 'Status', 'InvitationStatus', 'ShareKind', + 'SynchronizationMode', 'DataSetType', 'ShareSubscriptionStatus', - 'SynchronizationMode', 'RecurrenceInterval', 'TriggerStatus', 'DataSetMappingStatus', diff --git a/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/models/_data_share_management_client_enums.py b/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/models/_data_share_management_client_enums.py index e826669fcf9f..84d018a2e46b 100644 --- a/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/models/_data_share_management_client_enums.py +++ b/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/models/_data_share_management_client_enums.py @@ -47,6 +47,13 @@ class InvitationStatus(str, Enum): class ShareKind(str, Enum): copy_based = "CopyBased" + in_place = "InPlace" + + +class SynchronizationMode(str, Enum): + + incremental = "Incremental" + full_sync = "FullSync" class DataSetType(str, Enum): @@ -59,6 +66,8 @@ class DataSetType(str, Enum): adls_gen2_file = "AdlsGen2File" adls_gen1_folder = "AdlsGen1Folder" adls_gen1_file = "AdlsGen1File" + kusto_cluster = "KustoCluster" + kusto_database = "KustoDatabase" sql_db_table = "SqlDBTable" sql_dw_table = "SqlDWTable" @@ -71,12 +80,6 @@ class ShareSubscriptionStatus(str, Enum): revoking = "Revoking" -class SynchronizationMode(str, Enum): - - incremental = "Incremental" - full_sync = "FullSync" - - class RecurrenceInterval(str, Enum): hour = "Hour" diff --git a/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/models/_models.py b/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/models/_models.py index 0a6a6c191321..b67c05eb25df 100644 --- a/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/models/_models.py +++ b/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/models/_models.py @@ -76,12 +76,14 @@ class Account(DefaultDto): :type identity: ~azure.mgmt.datashare.models.Identity :ivar created_at: Time at which the account was created. :vartype created_at: datetime - :ivar created_by: Name of the user who created the account. - :vartype created_by: str :ivar provisioning_state: Provisioning state of the Account. 
Possible values include: 'Succeeded', 'Creating', 'Deleting', 'Moving', 'Failed' :vartype provisioning_state: str or ~azure.mgmt.datashare.models.ProvisioningState + :ivar user_email: Email of the user who created the resource + :vartype user_email: str + :ivar user_name: Name of the user who created the resource + :vartype user_name: str """ _validation = { @@ -90,8 +92,9 @@ class Account(DefaultDto): 'type': {'readonly': True}, 'identity': {'required': True}, 'created_at': {'readonly': True}, - 'created_by': {'readonly': True}, 'provisioning_state': {'readonly': True}, + 'user_email': {'readonly': True}, + 'user_name': {'readonly': True}, } _attribute_map = { @@ -102,16 +105,18 @@ class Account(DefaultDto): 'type': {'key': 'type', 'type': 'str'}, 'identity': {'key': 'identity', 'type': 'Identity'}, 'created_at': {'key': 'properties.createdAt', 'type': 'iso-8601'}, - 'created_by': {'key': 'properties.createdBy', 'type': 'str'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, + 'user_email': {'key': 'properties.userEmail', 'type': 'str'}, + 'user_name': {'key': 'properties.userName', 'type': 'str'}, } def __init__(self, **kwargs): super(Account, self).__init__(**kwargs) self.identity = kwargs.get('identity', None) self.created_at = None - self.created_by = None self.provisioning_state = None + self.user_email = None + self.user_name = None class AccountUpdateParameters(Model): @@ -169,8 +174,8 @@ class DataSet(ProxyDto): You probably want to use the sub-classes and not this class directly. Known sub-classes are: BlobDataSet, BlobFolderDataSet, BlobContainerDataSet, ADLSGen2FileDataSet, ADLSGen2FolderDataSet, ADLSGen2FileSystemDataSet, - ADLSGen1FolderDataSet, ADLSGen1FileDataSet, SqlDWTableDataSet, - SqlDBTableDataSet + ADLSGen1FolderDataSet, ADLSGen1FileDataSet, KustoClusterDataSet, + KustoDatabaseDataSet, SqlDWTableDataSet, SqlDBTableDataSet Variables are only populated by the server, and will be ignored when sending a request. @@ -202,7 +207,7 @@ class DataSet(ProxyDto): } _subtype_map = { - 'kind': {'Blob': 'BlobDataSet', 'BlobFolder': 'BlobFolderDataSet', 'Container': 'BlobContainerDataSet', 'AdlsGen2File': 'ADLSGen2FileDataSet', 'AdlsGen2Folder': 'ADLSGen2FolderDataSet', 'AdlsGen2FileSystem': 'ADLSGen2FileSystemDataSet', 'AdlsGen1Folder': 'ADLSGen1FolderDataSet', 'AdlsGen1File': 'ADLSGen1FileDataSet', 'SqlDWTable': 'SqlDWTableDataSet', 'SqlDBTable': 'SqlDBTableDataSet'} + 'kind': {'Blob': 'BlobDataSet', 'BlobFolder': 'BlobFolderDataSet', 'Container': 'BlobContainerDataSet', 'AdlsGen2File': 'ADLSGen2FileDataSet', 'AdlsGen2Folder': 'ADLSGen2FolderDataSet', 'AdlsGen2FileSystem': 'ADLSGen2FileSystemDataSet', 'AdlsGen1Folder': 'ADLSGen1FolderDataSet', 'AdlsGen1File': 'ADLSGen1FileDataSet', 'KustoCluster': 'KustoClusterDataSet', 'KustoDatabase': 'KustoDatabaseDataSet', 'SqlDWTable': 'SqlDWTableDataSet', 'SqlDBTable': 'SqlDBTableDataSet'} } def __init__(self, **kwargs): @@ -212,7 +217,7 @@ def __init__(self, **kwargs): class ADLSGen1FileDataSet(DataSet): - """An ADLS Gen 1 file dataset. + """An ADLS Gen 1 file data set. Variables are only populated by the server, and will be ignored when sending a request. @@ -279,7 +284,7 @@ def __init__(self, **kwargs): class ADLSGen1FolderDataSet(DataSet): - """An ADLS Gen 1 folder dataset. + """An ADLS Gen 1 folder data set. Variables are only populated by the server, and will be ignored when sending a request. 
@@ -341,7 +346,7 @@ def __init__(self, **kwargs): class ADLSGen2FileDataSet(DataSet): - """An ADLS Gen 2 file dataset. + """An ADLS Gen 2 file data set. Variables are only populated by the server, and will be ignored when sending a request. @@ -409,12 +414,13 @@ def __init__(self, **kwargs): class DataSetMapping(ProxyDto): - """A dataset mapping data transfer object. + """A data set mapping data transfer object. You probably want to use the sub-classes and not this class directly. Known sub-classes are: BlobDataSetMapping, BlobFolderDataSetMapping, BlobContainerDataSetMapping, ADLSGen2FileDataSetMapping, ADLSGen2FolderDataSetMapping, ADLSGen2FileSystemDataSetMapping, + KustoClusterDataSetMapping, KustoDatabaseDataSetMapping, SqlDWTableDataSetMapping, SqlDBTableDataSetMapping Variables are only populated by the server, and will be ignored when @@ -447,7 +453,7 @@ class DataSetMapping(ProxyDto): } _subtype_map = { - 'kind': {'Blob': 'BlobDataSetMapping', 'BlobFolder': 'BlobFolderDataSetMapping', 'Container': 'BlobContainerDataSetMapping', 'AdlsGen2File': 'ADLSGen2FileDataSetMapping', 'AdlsGen2Folder': 'ADLSGen2FolderDataSetMapping', 'AdlsGen2FileSystem': 'ADLSGen2FileSystemDataSetMapping', 'SqlDWTable': 'SqlDWTableDataSetMapping', 'SqlDBTable': 'SqlDBTableDataSetMapping'} + 'kind': {'Blob': 'BlobDataSetMapping', 'BlobFolder': 'BlobFolderDataSetMapping', 'Container': 'BlobContainerDataSetMapping', 'AdlsGen2File': 'ADLSGen2FileDataSetMapping', 'AdlsGen2Folder': 'ADLSGen2FolderDataSetMapping', 'AdlsGen2FileSystem': 'ADLSGen2FileSystemDataSetMapping', 'KustoCluster': 'KustoClusterDataSetMapping', 'KustoDatabase': 'KustoDatabaseDataSetMapping', 'SqlDWTable': 'SqlDWTableDataSetMapping', 'SqlDBTable': 'SqlDBTableDataSetMapping'} } def __init__(self, **kwargs): @@ -457,7 +463,7 @@ def __init__(self, **kwargs): class ADLSGen2FileDataSetMapping(DataSetMapping): - """An ADLS Gen2 file dataset mapping. + """An ADLS Gen2 file data set mapping. Variables are only populated by the server, and will be ignored when sending a request. @@ -472,11 +478,11 @@ class ADLSGen2FileDataSetMapping(DataSetMapping): :vartype type: str :param kind: Required. Constant filled by server. :type kind: str - :param data_set_id: Gets the id of source dataset. + :param data_set_id: Required. The id of the source data set. :type data_set_id: str - :param data_set_mapping_status: Gets the status of the dataset mapping. + :ivar data_set_mapping_status: Gets the status of the data set mapping. Possible values include: 'Ok', 'Broken' - :type data_set_mapping_status: str or + :vartype data_set_mapping_status: str or ~azure.mgmt.datashare.models.DataSetMappingStatus :param file_path: Required. File path within the file system. :type file_path: str @@ -485,6 +491,11 @@ class ADLSGen2FileDataSetMapping(DataSetMapping): :param output_type: Type of output file. Possible values include: 'Csv', 'Parquet' :type output_type: str or ~azure.mgmt.datashare.models.OutputType + :ivar provisioning_state: Provisioning state of the data set mapping. + Possible values include: 'Succeeded', 'Creating', 'Deleting', 'Moving', + 'Failed' + :vartype provisioning_state: str or + ~azure.mgmt.datashare.models.ProvisioningState :param resource_group: Required. Resource group of storage account. :type resource_group: str :param storage_account_name: Required. 
Storage account name of the source @@ -499,8 +510,11 @@ class ADLSGen2FileDataSetMapping(DataSetMapping): 'name': {'readonly': True}, 'type': {'readonly': True}, 'kind': {'required': True}, + 'data_set_id': {'required': True}, + 'data_set_mapping_status': {'readonly': True}, 'file_path': {'required': True}, 'file_system': {'required': True}, + 'provisioning_state': {'readonly': True}, 'resource_group': {'required': True}, 'storage_account_name': {'required': True}, 'subscription_id': {'required': True}, @@ -516,6 +530,7 @@ class ADLSGen2FileDataSetMapping(DataSetMapping): 'file_path': {'key': 'properties.filePath', 'type': 'str'}, 'file_system': {'key': 'properties.fileSystem', 'type': 'str'}, 'output_type': {'key': 'properties.outputType', 'type': 'str'}, + 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, 'resource_group': {'key': 'properties.resourceGroup', 'type': 'str'}, 'storage_account_name': {'key': 'properties.storageAccountName', 'type': 'str'}, 'subscription_id': {'key': 'properties.subscriptionId', 'type': 'str'}, @@ -524,10 +539,11 @@ class ADLSGen2FileDataSetMapping(DataSetMapping): def __init__(self, **kwargs): super(ADLSGen2FileDataSetMapping, self).__init__(**kwargs) self.data_set_id = kwargs.get('data_set_id', None) - self.data_set_mapping_status = kwargs.get('data_set_mapping_status', None) + self.data_set_mapping_status = None self.file_path = kwargs.get('file_path', None) self.file_system = kwargs.get('file_system', None) self.output_type = kwargs.get('output_type', None) + self.provisioning_state = None self.resource_group = kwargs.get('resource_group', None) self.storage_account_name = kwargs.get('storage_account_name', None) self.subscription_id = kwargs.get('subscription_id', None) @@ -535,7 +551,7 @@ def __init__(self, **kwargs): class ADLSGen2FileSystemDataSet(DataSet): - """An ADLS Gen 2 file system dataset. + """An ADLS Gen 2 file system data set. Variables are only populated by the server, and will be ignored when sending a request. @@ -598,7 +614,7 @@ def __init__(self, **kwargs): class ADLSGen2FileSystemDataSetMapping(DataSetMapping): - """An ADLS Gen2 file system dataset mapping. + """An ADLS Gen2 file system data set mapping. Variables are only populated by the server, and will be ignored when sending a request. @@ -613,14 +629,19 @@ class ADLSGen2FileSystemDataSetMapping(DataSetMapping): :vartype type: str :param kind: Required. Constant filled by server. :type kind: str - :param data_set_id: Gets the id of source dataset. + :param data_set_id: Required. The id of the source data set. :type data_set_id: str - :param data_set_mapping_status: Gets the status of the dataset mapping. + :ivar data_set_mapping_status: Gets the status of the data set mapping. Possible values include: 'Ok', 'Broken' - :type data_set_mapping_status: str or + :vartype data_set_mapping_status: str or ~azure.mgmt.datashare.models.DataSetMappingStatus :param file_system: Required. The file system name. :type file_system: str + :ivar provisioning_state: Provisioning state of the data set mapping. + Possible values include: 'Succeeded', 'Creating', 'Deleting', 'Moving', + 'Failed' + :vartype provisioning_state: str or + ~azure.mgmt.datashare.models.ProvisioningState :param resource_group: Required. Resource group of storage account. :type resource_group: str :param storage_account_name: Required. 
Storage account name of the source @@ -635,7 +656,10 @@ class ADLSGen2FileSystemDataSetMapping(DataSetMapping): 'name': {'readonly': True}, 'type': {'readonly': True}, 'kind': {'required': True}, + 'data_set_id': {'required': True}, + 'data_set_mapping_status': {'readonly': True}, 'file_system': {'required': True}, + 'provisioning_state': {'readonly': True}, 'resource_group': {'required': True}, 'storage_account_name': {'required': True}, 'subscription_id': {'required': True}, @@ -649,6 +673,7 @@ class ADLSGen2FileSystemDataSetMapping(DataSetMapping): 'data_set_id': {'key': 'properties.dataSetId', 'type': 'str'}, 'data_set_mapping_status': {'key': 'properties.dataSetMappingStatus', 'type': 'str'}, 'file_system': {'key': 'properties.fileSystem', 'type': 'str'}, + 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, 'resource_group': {'key': 'properties.resourceGroup', 'type': 'str'}, 'storage_account_name': {'key': 'properties.storageAccountName', 'type': 'str'}, 'subscription_id': {'key': 'properties.subscriptionId', 'type': 'str'}, @@ -657,8 +682,9 @@ class ADLSGen2FileSystemDataSetMapping(DataSetMapping): def __init__(self, **kwargs): super(ADLSGen2FileSystemDataSetMapping, self).__init__(**kwargs) self.data_set_id = kwargs.get('data_set_id', None) - self.data_set_mapping_status = kwargs.get('data_set_mapping_status', None) + self.data_set_mapping_status = None self.file_system = kwargs.get('file_system', None) + self.provisioning_state = None self.resource_group = kwargs.get('resource_group', None) self.storage_account_name = kwargs.get('storage_account_name', None) self.subscription_id = kwargs.get('subscription_id', None) @@ -666,7 +692,7 @@ def __init__(self, **kwargs): class ADLSGen2FolderDataSet(DataSet): - """An ADLS Gen 2 folder dataset. + """An ADLS Gen 2 folder data set. Variables are only populated by the server, and will be ignored when sending a request. @@ -734,7 +760,7 @@ def __init__(self, **kwargs): class ADLSGen2FolderDataSetMapping(DataSetMapping): - """An ADLS Gen2 folder dataset mapping. + """An ADLS Gen2 folder data set mapping. Variables are only populated by the server, and will be ignored when sending a request. @@ -749,16 +775,21 @@ class ADLSGen2FolderDataSetMapping(DataSetMapping): :vartype type: str :param kind: Required. Constant filled by server. :type kind: str - :param data_set_id: Gets the id of source dataset. + :param data_set_id: Required. The id of the source data set. :type data_set_id: str - :param data_set_mapping_status: Gets the status of the dataset mapping. + :ivar data_set_mapping_status: Gets the status of the data set mapping. Possible values include: 'Ok', 'Broken' - :type data_set_mapping_status: str or + :vartype data_set_mapping_status: str or ~azure.mgmt.datashare.models.DataSetMappingStatus :param file_system: Required. File system to which the folder belongs. :type file_system: str :param folder_path: Required. Folder path within the file system. :type folder_path: str + :ivar provisioning_state: Provisioning state of the data set mapping. + Possible values include: 'Succeeded', 'Creating', 'Deleting', 'Moving', + 'Failed' + :vartype provisioning_state: str or + ~azure.mgmt.datashare.models.ProvisioningState :param resource_group: Required. Resource group of storage account. :type resource_group: str :param storage_account_name: Required. 
Storage account name of the source @@ -773,8 +804,11 @@ class ADLSGen2FolderDataSetMapping(DataSetMapping): 'name': {'readonly': True}, 'type': {'readonly': True}, 'kind': {'required': True}, + 'data_set_id': {'required': True}, + 'data_set_mapping_status': {'readonly': True}, 'file_system': {'required': True}, 'folder_path': {'required': True}, + 'provisioning_state': {'readonly': True}, 'resource_group': {'required': True}, 'storage_account_name': {'required': True}, 'subscription_id': {'required': True}, @@ -789,6 +823,7 @@ class ADLSGen2FolderDataSetMapping(DataSetMapping): 'data_set_mapping_status': {'key': 'properties.dataSetMappingStatus', 'type': 'str'}, 'file_system': {'key': 'properties.fileSystem', 'type': 'str'}, 'folder_path': {'key': 'properties.folderPath', 'type': 'str'}, + 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, 'resource_group': {'key': 'properties.resourceGroup', 'type': 'str'}, 'storage_account_name': {'key': 'properties.storageAccountName', 'type': 'str'}, 'subscription_id': {'key': 'properties.subscriptionId', 'type': 'str'}, @@ -797,9 +832,10 @@ class ADLSGen2FolderDataSetMapping(DataSetMapping): def __init__(self, **kwargs): super(ADLSGen2FolderDataSetMapping, self).__init__(**kwargs) self.data_set_id = kwargs.get('data_set_id', None) - self.data_set_mapping_status = kwargs.get('data_set_mapping_status', None) + self.data_set_mapping_status = None self.file_system = kwargs.get('file_system', None) self.folder_path = kwargs.get('folder_path', None) + self.provisioning_state = None self.resource_group = kwargs.get('resource_group', None) self.storage_account_name = kwargs.get('storage_account_name', None) self.subscription_id = kwargs.get('subscription_id', None) @@ -807,7 +843,7 @@ def __init__(self, **kwargs): class BlobContainerDataSet(DataSet): - """An Azure storage blob container dataset. + """An Azure storage blob container data set. Variables are only populated by the server, and will be ignored when sending a request. @@ -870,7 +906,7 @@ def __init__(self, **kwargs): class BlobContainerDataSetMapping(DataSetMapping): - """A Blob container dataset mapping. + """A Blob container data set mapping. Variables are only populated by the server, and will be ignored when sending a request. @@ -887,12 +923,17 @@ class BlobContainerDataSetMapping(DataSetMapping): :type kind: str :param container_name: Required. BLOB Container name. :type container_name: str - :param data_set_id: Gets the id of source dataset. + :param data_set_id: Required. The id of the source data set. :type data_set_id: str - :param data_set_mapping_status: Gets the status of the dataset mapping. + :ivar data_set_mapping_status: Gets the status of the data set mapping. Possible values include: 'Ok', 'Broken' - :type data_set_mapping_status: str or + :vartype data_set_mapping_status: str or ~azure.mgmt.datashare.models.DataSetMappingStatus + :ivar provisioning_state: Provisioning state of the data set mapping. + Possible values include: 'Succeeded', 'Creating', 'Deleting', 'Moving', + 'Failed' + :vartype provisioning_state: str or + ~azure.mgmt.datashare.models.ProvisioningState :param resource_group: Required. Resource group of storage account. :type resource_group: str :param storage_account_name: Required. 
Storage account name of the source @@ -908,6 +949,9 @@ class BlobContainerDataSetMapping(DataSetMapping): 'type': {'readonly': True}, 'kind': {'required': True}, 'container_name': {'required': True}, + 'data_set_id': {'required': True}, + 'data_set_mapping_status': {'readonly': True}, + 'provisioning_state': {'readonly': True}, 'resource_group': {'required': True}, 'storage_account_name': {'required': True}, 'subscription_id': {'required': True}, @@ -921,6 +965,7 @@ class BlobContainerDataSetMapping(DataSetMapping): 'container_name': {'key': 'properties.containerName', 'type': 'str'}, 'data_set_id': {'key': 'properties.dataSetId', 'type': 'str'}, 'data_set_mapping_status': {'key': 'properties.dataSetMappingStatus', 'type': 'str'}, + 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, 'resource_group': {'key': 'properties.resourceGroup', 'type': 'str'}, 'storage_account_name': {'key': 'properties.storageAccountName', 'type': 'str'}, 'subscription_id': {'key': 'properties.subscriptionId', 'type': 'str'}, @@ -930,7 +975,8 @@ def __init__(self, **kwargs): super(BlobContainerDataSetMapping, self).__init__(**kwargs) self.container_name = kwargs.get('container_name', None) self.data_set_id = kwargs.get('data_set_id', None) - self.data_set_mapping_status = kwargs.get('data_set_mapping_status', None) + self.data_set_mapping_status = None + self.provisioning_state = None self.resource_group = kwargs.get('resource_group', None) self.storage_account_name = kwargs.get('storage_account_name', None) self.subscription_id = kwargs.get('subscription_id', None) @@ -938,7 +984,7 @@ def __init__(self, **kwargs): class BlobDataSet(DataSet): - """An Azure storage blob dataset. + """An Azure storage blob data set. Variables are only populated by the server, and will be ignored when sending a request. @@ -1006,7 +1052,7 @@ def __init__(self, **kwargs): class BlobDataSetMapping(DataSetMapping): - """A Blob dataset mapping. + """A Blob data set mapping. Variables are only populated by the server, and will be ignored when sending a request. @@ -1023,17 +1069,22 @@ class BlobDataSetMapping(DataSetMapping): :type kind: str :param container_name: Required. Container that has the file path. :type container_name: str - :param data_set_id: Gets the id of source dataset. + :param data_set_id: Required. The id of the source data set. :type data_set_id: str - :param data_set_mapping_status: Gets the status of the dataset mapping. + :ivar data_set_mapping_status: Gets the status of the data set mapping. Possible values include: 'Ok', 'Broken' - :type data_set_mapping_status: str or + :vartype data_set_mapping_status: str or ~azure.mgmt.datashare.models.DataSetMappingStatus :param file_path: Required. File path within the source data set :type file_path: str :param output_type: File output type. Possible values include: 'Csv', 'Parquet' :type output_type: str or ~azure.mgmt.datashare.models.OutputType + :ivar provisioning_state: Provisioning state of the data set mapping. + Possible values include: 'Succeeded', 'Creating', 'Deleting', 'Moving', + 'Failed' + :vartype provisioning_state: str or + ~azure.mgmt.datashare.models.ProvisioningState :param resource_group: Required. Resource group of storage account. :type resource_group: str :param storage_account_name: Required. 
Storage account name of the source @@ -1049,7 +1100,10 @@ class BlobDataSetMapping(DataSetMapping): 'type': {'readonly': True}, 'kind': {'required': True}, 'container_name': {'required': True}, + 'data_set_id': {'required': True}, + 'data_set_mapping_status': {'readonly': True}, 'file_path': {'required': True}, + 'provisioning_state': {'readonly': True}, 'resource_group': {'required': True}, 'storage_account_name': {'required': True}, 'subscription_id': {'required': True}, @@ -1065,6 +1119,7 @@ class BlobDataSetMapping(DataSetMapping): 'data_set_mapping_status': {'key': 'properties.dataSetMappingStatus', 'type': 'str'}, 'file_path': {'key': 'properties.filePath', 'type': 'str'}, 'output_type': {'key': 'properties.outputType', 'type': 'str'}, + 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, 'resource_group': {'key': 'properties.resourceGroup', 'type': 'str'}, 'storage_account_name': {'key': 'properties.storageAccountName', 'type': 'str'}, 'subscription_id': {'key': 'properties.subscriptionId', 'type': 'str'}, @@ -1074,9 +1129,10 @@ def __init__(self, **kwargs): super(BlobDataSetMapping, self).__init__(**kwargs) self.container_name = kwargs.get('container_name', None) self.data_set_id = kwargs.get('data_set_id', None) - self.data_set_mapping_status = kwargs.get('data_set_mapping_status', None) + self.data_set_mapping_status = None self.file_path = kwargs.get('file_path', None) self.output_type = kwargs.get('output_type', None) + self.provisioning_state = None self.resource_group = kwargs.get('resource_group', None) self.storage_account_name = kwargs.get('storage_account_name', None) self.subscription_id = kwargs.get('subscription_id', None) @@ -1084,7 +1140,7 @@ def __init__(self, **kwargs): class BlobFolderDataSet(DataSet): - """An Azure storage blob folder dataset. + """An Azure storage blob folder data set. Variables are only populated by the server, and will be ignored when sending a request. @@ -1152,7 +1208,7 @@ def __init__(self, **kwargs): class BlobFolderDataSetMapping(DataSetMapping): - """A Blob folder dataset mapping. + """A Blob folder data set mapping. Variables are only populated by the server, and will be ignored when sending a request. @@ -1169,14 +1225,19 @@ class BlobFolderDataSetMapping(DataSetMapping): :type kind: str :param container_name: Required. Container that has the file path. :type container_name: str - :param data_set_id: Gets the id of source dataset. + :param data_set_id: Required. The id of the source data set. :type data_set_id: str - :param data_set_mapping_status: Gets the status of the dataset mapping. + :ivar data_set_mapping_status: Gets the status of the data set mapping. Possible values include: 'Ok', 'Broken' - :type data_set_mapping_status: str or + :vartype data_set_mapping_status: str or ~azure.mgmt.datashare.models.DataSetMappingStatus :param prefix: Required. Prefix for blob folder :type prefix: str + :ivar provisioning_state: Provisioning state of the data set mapping. + Possible values include: 'Succeeded', 'Creating', 'Deleting', 'Moving', + 'Failed' + :vartype provisioning_state: str or + ~azure.mgmt.datashare.models.ProvisioningState :param resource_group: Required. Resource group of storage account. :type resource_group: str :param storage_account_name: Required. 
Storage account name of the source @@ -1192,7 +1253,10 @@ class BlobFolderDataSetMapping(DataSetMapping): 'type': {'readonly': True}, 'kind': {'required': True}, 'container_name': {'required': True}, + 'data_set_id': {'required': True}, + 'data_set_mapping_status': {'readonly': True}, 'prefix': {'required': True}, + 'provisioning_state': {'readonly': True}, 'resource_group': {'required': True}, 'storage_account_name': {'required': True}, 'subscription_id': {'required': True}, @@ -1207,6 +1271,7 @@ class BlobFolderDataSetMapping(DataSetMapping): 'data_set_id': {'key': 'properties.dataSetId', 'type': 'str'}, 'data_set_mapping_status': {'key': 'properties.dataSetMappingStatus', 'type': 'str'}, 'prefix': {'key': 'properties.prefix', 'type': 'str'}, + 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, 'resource_group': {'key': 'properties.resourceGroup', 'type': 'str'}, 'storage_account_name': {'key': 'properties.storageAccountName', 'type': 'str'}, 'subscription_id': {'key': 'properties.subscriptionId', 'type': 'str'}, @@ -1216,8 +1281,9 @@ def __init__(self, **kwargs): super(BlobFolderDataSetMapping, self).__init__(**kwargs) self.container_name = kwargs.get('container_name', None) self.data_set_id = kwargs.get('data_set_id', None) - self.data_set_mapping_status = kwargs.get('data_set_mapping_status', None) + self.data_set_mapping_status = None self.prefix = kwargs.get('prefix', None) + self.provisioning_state = None self.resource_group = kwargs.get('resource_group', None) self.storage_account_name = kwargs.get('storage_account_name', None) self.subscription_id = kwargs.get('subscription_id', None) @@ -1258,18 +1324,25 @@ class ConsumerInvitation(ProxyDto): ~azure.mgmt.datashare.models.InvitationStatus :ivar location: invitation location :vartype location: str + :ivar provider_email: Email of the provider who created the resource + :vartype provider_email: str + :ivar provider_name: Name of the provider who created the resource + :vartype provider_name: str + :ivar provider_tenant_name: Tenant name of the provider who created the + resource + :vartype provider_tenant_name: str :ivar responded_at: The time the recipient responded to the invitation. :vartype responded_at: datetime - :ivar sender: Gets the name of the sender. - :vartype sender: str - :ivar sender_company_name: Gets the company name of the sender. - :vartype sender_company_name: str :ivar sent_at: Gets the time at which the invitation was sent. :vartype sent_at: datetime :ivar share_name: Gets the source share Name. 
:vartype share_name: str :ivar terms_of_use: Terms of use shared when the invitation was created :vartype terms_of_use: str + :ivar user_email: Email of the user who created the resource + :vartype user_email: str + :ivar user_name: Name of the user who created the resource + :vartype user_name: str """ _validation = { @@ -1281,12 +1354,15 @@ class ConsumerInvitation(ProxyDto): 'invitation_id': {'required': True}, 'invitation_status': {'readonly': True}, 'location': {'readonly': True}, + 'provider_email': {'readonly': True}, + 'provider_name': {'readonly': True}, + 'provider_tenant_name': {'readonly': True}, 'responded_at': {'readonly': True}, - 'sender': {'readonly': True}, - 'sender_company_name': {'readonly': True}, 'sent_at': {'readonly': True}, 'share_name': {'readonly': True}, 'terms_of_use': {'readonly': True}, + 'user_email': {'readonly': True}, + 'user_name': {'readonly': True}, } _attribute_map = { @@ -1298,12 +1374,15 @@ class ConsumerInvitation(ProxyDto): 'invitation_id': {'key': 'properties.invitationId', 'type': 'str'}, 'invitation_status': {'key': 'properties.invitationStatus', 'type': 'str'}, 'location': {'key': 'properties.location', 'type': 'str'}, + 'provider_email': {'key': 'properties.providerEmail', 'type': 'str'}, + 'provider_name': {'key': 'properties.providerName', 'type': 'str'}, + 'provider_tenant_name': {'key': 'properties.providerTenantName', 'type': 'str'}, 'responded_at': {'key': 'properties.respondedAt', 'type': 'iso-8601'}, - 'sender': {'key': 'properties.sender', 'type': 'str'}, - 'sender_company_name': {'key': 'properties.senderCompanyName', 'type': 'str'}, 'sent_at': {'key': 'properties.sentAt', 'type': 'iso-8601'}, 'share_name': {'key': 'properties.shareName', 'type': 'str'}, 'terms_of_use': {'key': 'properties.termsOfUse', 'type': 'str'}, + 'user_email': {'key': 'properties.userEmail', 'type': 'str'}, + 'user_name': {'key': 'properties.userName', 'type': 'str'}, } def __init__(self, **kwargs): @@ -1313,12 +1392,15 @@ def __init__(self, **kwargs): self.invitation_id = kwargs.get('invitation_id', None) self.invitation_status = None self.location = None + self.provider_email = None + self.provider_name = None + self.provider_tenant_name = None self.responded_at = None - self.sender = None - self.sender_company_name = None self.sent_at = None self.share_name = None self.terms_of_use = None + self.user_email = None + self.user_name = None class ConsumerSourceDataSet(ProxyDto): @@ -1335,12 +1417,16 @@ class ConsumerSourceDataSet(ProxyDto): :vartype type: str :ivar data_set_id: DataSet Id :vartype data_set_id: str + :ivar data_set_location: Location of the data set. + :vartype data_set_location: str :ivar data_set_name: DataSet name :vartype data_set_name: str - :ivar data_set_type: Type of dataSet. Possible values include: 'Blob', + :ivar data_set_path: DataSet path + :vartype data_set_path: str + :ivar data_set_type: Type of data set. 
Possible values include: 'Blob', 'Container', 'BlobFolder', 'AdlsGen2FileSystem', 'AdlsGen2Folder', - 'AdlsGen2File', 'AdlsGen1Folder', 'AdlsGen1File', 'SqlDBTable', - 'SqlDWTable' + 'AdlsGen2File', 'AdlsGen1Folder', 'AdlsGen1File', 'KustoCluster', + 'KustoDatabase', 'SqlDBTable', 'SqlDWTable' :vartype data_set_type: str or ~azure.mgmt.datashare.models.DataSetType """ @@ -1349,7 +1435,9 @@ class ConsumerSourceDataSet(ProxyDto): 'name': {'readonly': True}, 'type': {'readonly': True}, 'data_set_id': {'readonly': True}, + 'data_set_location': {'readonly': True}, 'data_set_name': {'readonly': True}, + 'data_set_path': {'readonly': True}, 'data_set_type': {'readonly': True}, } @@ -1358,14 +1446,18 @@ class ConsumerSourceDataSet(ProxyDto): 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'data_set_id': {'key': 'properties.dataSetId', 'type': 'str'}, + 'data_set_location': {'key': 'properties.dataSetLocation', 'type': 'str'}, 'data_set_name': {'key': 'properties.dataSetName', 'type': 'str'}, + 'data_set_path': {'key': 'properties.dataSetPath', 'type': 'str'}, 'data_set_type': {'key': 'properties.dataSetType', 'type': 'str'}, } def __init__(self, **kwargs): super(ConsumerSourceDataSet, self).__init__(**kwargs) self.data_set_id = None + self.data_set_location = None self.data_set_name = None + self.data_set_path = None self.data_set_type = None @@ -1510,8 +1602,6 @@ class Invitation(ProxyDto): ~azure.mgmt.datashare.models.InvitationStatus :ivar responded_at: The time the recipient responded to the invitation. :vartype responded_at: datetime - :ivar sender: Gets the name of the sender. - :vartype sender: str :ivar sent_at: Gets the time at which the invitation was sent. :vartype sent_at: datetime :param target_active_directory_id: The target Azure AD Id. Can't be @@ -1524,6 +1614,10 @@ class Invitation(ProxyDto): Must be specified along TargetActiveDirectoryId. This enables sending invitations to specific users or applications in an AD tenant. 
:type target_object_id: str + :ivar user_email: Email of the user who created the resource + :vartype user_email: str + :ivar user_name: Name of the user who created the resource + :vartype user_name: str """ _validation = { @@ -1533,8 +1627,9 @@ class Invitation(ProxyDto): 'invitation_id': {'readonly': True}, 'invitation_status': {'readonly': True}, 'responded_at': {'readonly': True}, - 'sender': {'readonly': True}, 'sent_at': {'readonly': True}, + 'user_email': {'readonly': True}, + 'user_name': {'readonly': True}, } _attribute_map = { @@ -1544,11 +1639,12 @@ class Invitation(ProxyDto): 'invitation_id': {'key': 'properties.invitationId', 'type': 'str'}, 'invitation_status': {'key': 'properties.invitationStatus', 'type': 'str'}, 'responded_at': {'key': 'properties.respondedAt', 'type': 'iso-8601'}, - 'sender': {'key': 'properties.sender', 'type': 'str'}, 'sent_at': {'key': 'properties.sentAt', 'type': 'iso-8601'}, 'target_active_directory_id': {'key': 'properties.targetActiveDirectoryId', 'type': 'str'}, 'target_email': {'key': 'properties.targetEmail', 'type': 'str'}, 'target_object_id': {'key': 'properties.targetObjectId', 'type': 'str'}, + 'user_email': {'key': 'properties.userEmail', 'type': 'str'}, + 'user_name': {'key': 'properties.userName', 'type': 'str'}, } def __init__(self, **kwargs): @@ -1556,11 +1652,270 @@ def __init__(self, **kwargs): self.invitation_id = None self.invitation_status = None self.responded_at = None - self.sender = None self.sent_at = None self.target_active_directory_id = kwargs.get('target_active_directory_id', None) self.target_email = kwargs.get('target_email', None) self.target_object_id = kwargs.get('target_object_id', None) + self.user_email = None + self.user_name = None + + +class KustoClusterDataSet(DataSet): + """A kusto cluster data set. + + Variables are only populated by the server, and will be ignored when + sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar id: The resource id of the azure resource + :vartype id: str + :ivar name: Name of the azure resource + :vartype name: str + :ivar type: Type of the azure resource + :vartype type: str + :param kind: Required. Constant filled by server. + :type kind: str + :ivar data_set_id: Unique id for identifying a data set resource + :vartype data_set_id: str + :param kusto_cluster_resource_id: Required. Resource id of the kusto + cluster. + :type kusto_cluster_resource_id: str + :ivar location: Location of the kusto cluster. + :vartype location: str + :ivar provisioning_state: Provisioning state of the kusto cluster data + set. 
Possible values include: 'Succeeded', 'Creating', 'Deleting', + 'Moving', 'Failed' + :vartype provisioning_state: str or + ~azure.mgmt.datashare.models.ProvisioningState + """ + + _validation = { + 'id': {'readonly': True}, + 'name': {'readonly': True}, + 'type': {'readonly': True}, + 'kind': {'required': True}, + 'data_set_id': {'readonly': True}, + 'kusto_cluster_resource_id': {'required': True}, + 'location': {'readonly': True}, + 'provisioning_state': {'readonly': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'kind': {'key': 'kind', 'type': 'str'}, + 'data_set_id': {'key': 'properties.dataSetId', 'type': 'str'}, + 'kusto_cluster_resource_id': {'key': 'properties.kustoClusterResourceId', 'type': 'str'}, + 'location': {'key': 'properties.location', 'type': 'str'}, + 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(KustoClusterDataSet, self).__init__(**kwargs) + self.data_set_id = None + self.kusto_cluster_resource_id = kwargs.get('kusto_cluster_resource_id', None) + self.location = None + self.provisioning_state = None + self.kind = 'KustoCluster' + + +class KustoClusterDataSetMapping(DataSetMapping): + """A Kusto cluster data set mapping. + + Variables are only populated by the server, and will be ignored when + sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar id: The resource id of the azure resource + :vartype id: str + :ivar name: Name of the azure resource + :vartype name: str + :ivar type: Type of the azure resource + :vartype type: str + :param kind: Required. Constant filled by server. + :type kind: str + :param data_set_id: Required. The id of the source data set. + :type data_set_id: str + :ivar data_set_mapping_status: Gets the status of the data set mapping. + Possible values include: 'Ok', 'Broken' + :vartype data_set_mapping_status: str or + ~azure.mgmt.datashare.models.DataSetMappingStatus + :param kusto_cluster_resource_id: Required. Resource id of the sink kusto + cluster. + :type kusto_cluster_resource_id: str + :ivar location: Location of the sink kusto cluster. + :vartype location: str + :ivar provisioning_state: Provisioning state of the data set mapping. 
+ Possible values include: 'Succeeded', 'Creating', 'Deleting', 'Moving', + 'Failed' + :vartype provisioning_state: str or + ~azure.mgmt.datashare.models.ProvisioningState + """ + + _validation = { + 'id': {'readonly': True}, + 'name': {'readonly': True}, + 'type': {'readonly': True}, + 'kind': {'required': True}, + 'data_set_id': {'required': True}, + 'data_set_mapping_status': {'readonly': True}, + 'kusto_cluster_resource_id': {'required': True}, + 'location': {'readonly': True}, + 'provisioning_state': {'readonly': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'kind': {'key': 'kind', 'type': 'str'}, + 'data_set_id': {'key': 'properties.dataSetId', 'type': 'str'}, + 'data_set_mapping_status': {'key': 'properties.dataSetMappingStatus', 'type': 'str'}, + 'kusto_cluster_resource_id': {'key': 'properties.kustoClusterResourceId', 'type': 'str'}, + 'location': {'key': 'properties.location', 'type': 'str'}, + 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(KustoClusterDataSetMapping, self).__init__(**kwargs) + self.data_set_id = kwargs.get('data_set_id', None) + self.data_set_mapping_status = None + self.kusto_cluster_resource_id = kwargs.get('kusto_cluster_resource_id', None) + self.location = None + self.provisioning_state = None + self.kind = 'KustoCluster' + + +class KustoDatabaseDataSet(DataSet): + """A kusto database data set. + + Variables are only populated by the server, and will be ignored when + sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar id: The resource id of the azure resource + :vartype id: str + :ivar name: Name of the azure resource + :vartype name: str + :ivar type: Type of the azure resource + :vartype type: str + :param kind: Required. Constant filled by server. + :type kind: str + :ivar data_set_id: Unique id for identifying a data set resource + :vartype data_set_id: str + :param kusto_database_resource_id: Required. Resource id of the kusto + database. + :type kusto_database_resource_id: str + :ivar location: Location of the kusto cluster. + :vartype location: str + :ivar provisioning_state: Provisioning state of the kusto database data + set. 
Possible values include: 'Succeeded', 'Creating', 'Deleting', + 'Moving', 'Failed' + :vartype provisioning_state: str or + ~azure.mgmt.datashare.models.ProvisioningState + """ + + _validation = { + 'id': {'readonly': True}, + 'name': {'readonly': True}, + 'type': {'readonly': True}, + 'kind': {'required': True}, + 'data_set_id': {'readonly': True}, + 'kusto_database_resource_id': {'required': True}, + 'location': {'readonly': True}, + 'provisioning_state': {'readonly': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'kind': {'key': 'kind', 'type': 'str'}, + 'data_set_id': {'key': 'properties.dataSetId', 'type': 'str'}, + 'kusto_database_resource_id': {'key': 'properties.kustoDatabaseResourceId', 'type': 'str'}, + 'location': {'key': 'properties.location', 'type': 'str'}, + 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(KustoDatabaseDataSet, self).__init__(**kwargs) + self.data_set_id = None + self.kusto_database_resource_id = kwargs.get('kusto_database_resource_id', None) + self.location = None + self.provisioning_state = None + self.kind = 'KustoDatabase' + + +class KustoDatabaseDataSetMapping(DataSetMapping): + """A Kusto database data set mapping. + + Variables are only populated by the server, and will be ignored when + sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar id: The resource id of the azure resource + :vartype id: str + :ivar name: Name of the azure resource + :vartype name: str + :ivar type: Type of the azure resource + :vartype type: str + :param kind: Required. Constant filled by server. + :type kind: str + :param data_set_id: Required. The id of the source data set. + :type data_set_id: str + :ivar data_set_mapping_status: Gets the status of the data set mapping. + Possible values include: 'Ok', 'Broken' + :vartype data_set_mapping_status: str or + ~azure.mgmt.datashare.models.DataSetMappingStatus + :param kusto_cluster_resource_id: Required. Resource id of the sink kusto + cluster. + :type kusto_cluster_resource_id: str + :ivar location: Location of the sink kusto cluster. + :vartype location: str + :ivar provisioning_state: Provisioning state of the data set mapping. 
+ Possible values include: 'Succeeded', 'Creating', 'Deleting', 'Moving', + 'Failed' + :vartype provisioning_state: str or + ~azure.mgmt.datashare.models.ProvisioningState + """ + + _validation = { + 'id': {'readonly': True}, + 'name': {'readonly': True}, + 'type': {'readonly': True}, + 'kind': {'required': True}, + 'data_set_id': {'required': True}, + 'data_set_mapping_status': {'readonly': True}, + 'kusto_cluster_resource_id': {'required': True}, + 'location': {'readonly': True}, + 'provisioning_state': {'readonly': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'kind': {'key': 'kind', 'type': 'str'}, + 'data_set_id': {'key': 'properties.dataSetId', 'type': 'str'}, + 'data_set_mapping_status': {'key': 'properties.dataSetMappingStatus', 'type': 'str'}, + 'kusto_cluster_resource_id': {'key': 'properties.kustoClusterResourceId', 'type': 'str'}, + 'location': {'key': 'properties.location', 'type': 'str'}, + 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(KustoDatabaseDataSetMapping, self).__init__(**kwargs) + self.data_set_id = kwargs.get('data_set_id', None) + self.data_set_mapping_status = None + self.kusto_cluster_resource_id = kwargs.get('kusto_cluster_resource_id', None) + self.location = None + self.provisioning_state = None + self.kind = 'KustoDatabase' class OperationMetaLogSpecification(Model): @@ -1771,16 +2126,23 @@ class ProviderShareSubscription(ProxyDto): :vartype name: str :ivar type: Type of the azure resource :vartype type: str - :ivar company: Company name - :vartype company: str + :ivar consumer_email: Email of the consumer who created the share + subscription + :vartype consumer_email: str + :ivar consumer_name: Name of the consumer who created the share + subscription + :vartype consumer_name: str + :ivar consumer_tenant_name: Tenant name of the consumer who created the + share subscription + :vartype consumer_tenant_name: str :ivar created_at: created at :vartype created_at: datetime - :ivar created_by: Created by - :vartype created_by: str + :ivar provider_email: Email of the provider who created the share + :vartype provider_email: str + :ivar provider_name: Name of the provider who created the share + :vartype provider_name: str :ivar shared_at: Shared at :vartype shared_at: datetime - :ivar shared_by: Shared by - :vartype shared_by: str :ivar share_subscription_object_id: share Subscription Object Id :vartype share_subscription_object_id: str :ivar share_subscription_status: Gets the status of share subscription. 
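The `KustoCluster` and `KustoDatabase` entries added to `_subtype_map` above are what let msrest resolve the concrete model class from the `kind` discriminator when a response is deserialized. A minimal sketch of that round trip, assuming the models behave as declared in this patch and msrest's standard polymorphic deserialization; the GUID and resource id below are placeholders, not values from the patch:

from azure.mgmt.datashare.models import DataSetMapping, KustoClusterDataSetMapping

# A service response body for a data set mapping of kind 'KustoCluster'.
payload = {
    "kind": "KustoCluster",
    "properties": {
        "dataSetId": "00000000-0000-0000-0000-000000000000",
        "kustoClusterResourceId": "/subscriptions/<sub>/resourceGroups/<rg>"
                                  "/providers/Microsoft.Kusto/clusters/<cluster>",
    },
}

# Model.deserialize consults _subtype_map, so calling it on the base class
# returns the new subclass rather than a bare DataSetMapping.
mapping = DataSetMapping.deserialize(payload)
assert isinstance(mapping, KustoClusterDataSetMapping)

# Read-only properties (data_set_mapping_status, location, provisioning_state)
# stay None until the server populates them; they are ignored on input.
print(mapping.kusto_cluster_resource_id, mapping.provisioning_state)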
@@ -1793,11 +2155,13 @@ class ProviderShareSubscription(ProxyDto): 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, - 'company': {'readonly': True}, + 'consumer_email': {'readonly': True}, + 'consumer_name': {'readonly': True}, + 'consumer_tenant_name': {'readonly': True}, 'created_at': {'readonly': True}, - 'created_by': {'readonly': True}, + 'provider_email': {'readonly': True}, + 'provider_name': {'readonly': True}, 'shared_at': {'readonly': True}, - 'shared_by': {'readonly': True}, 'share_subscription_object_id': {'readonly': True}, 'share_subscription_status': {'readonly': True}, } @@ -1806,22 +2170,26 @@ class ProviderShareSubscription(ProxyDto): 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, - 'company': {'key': 'properties.company', 'type': 'str'}, + 'consumer_email': {'key': 'properties.consumerEmail', 'type': 'str'}, + 'consumer_name': {'key': 'properties.consumerName', 'type': 'str'}, + 'consumer_tenant_name': {'key': 'properties.consumerTenantName', 'type': 'str'}, 'created_at': {'key': 'properties.createdAt', 'type': 'iso-8601'}, - 'created_by': {'key': 'properties.createdBy', 'type': 'str'}, + 'provider_email': {'key': 'properties.providerEmail', 'type': 'str'}, + 'provider_name': {'key': 'properties.providerName', 'type': 'str'}, 'shared_at': {'key': 'properties.sharedAt', 'type': 'iso-8601'}, - 'shared_by': {'key': 'properties.sharedBy', 'type': 'str'}, 'share_subscription_object_id': {'key': 'properties.shareSubscriptionObjectId', 'type': 'str'}, 'share_subscription_status': {'key': 'properties.shareSubscriptionStatus', 'type': 'str'}, } def __init__(self, **kwargs): super(ProviderShareSubscription, self).__init__(**kwargs) - self.company = None + self.consumer_email = None + self.consumer_name = None + self.consumer_tenant_name = None self.created_at = None - self.created_by = None + self.provider_email = None + self.provider_name = None self.shared_at = None - self.shared_by = None self.share_subscription_object_id = None self.share_subscription_status = None @@ -1950,9 +2318,6 @@ class ScheduledSynchronizationSetting(SynchronizationSetting): :type kind: str :ivar created_at: Time at which the synchronization setting was created. :vartype created_at: datetime - :ivar created_by: Name of the user who created the synchronization - setting. - :vartype created_by: str :ivar provisioning_state: Gets or sets the provisioning state. Possible values include: 'Succeeded', 'Creating', 'Deleting', 'Moving', 'Failed' :vartype provisioning_state: str or @@ -1963,6 +2328,8 @@ class ScheduledSynchronizationSetting(SynchronizationSetting): ~azure.mgmt.datashare.models.RecurrenceInterval :param synchronization_time: Required. Synchronization time :type synchronization_time: datetime + :ivar user_name: Name of the user who created the synchronization setting. 
+ :vartype user_name: str """ _validation = { @@ -1971,10 +2338,10 @@ class ScheduledSynchronizationSetting(SynchronizationSetting): 'type': {'readonly': True}, 'kind': {'required': True}, 'created_at': {'readonly': True}, - 'created_by': {'readonly': True}, 'provisioning_state': {'readonly': True}, 'recurrence_interval': {'required': True}, 'synchronization_time': {'required': True}, + 'user_name': {'readonly': True}, } _attribute_map = { @@ -1983,19 +2350,19 @@ class ScheduledSynchronizationSetting(SynchronizationSetting): 'type': {'key': 'type', 'type': 'str'}, 'kind': {'key': 'kind', 'type': 'str'}, 'created_at': {'key': 'properties.createdAt', 'type': 'iso-8601'}, - 'created_by': {'key': 'properties.createdBy', 'type': 'str'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, 'recurrence_interval': {'key': 'properties.recurrenceInterval', 'type': 'str'}, 'synchronization_time': {'key': 'properties.synchronizationTime', 'type': 'iso-8601'}, + 'user_name': {'key': 'properties.userName', 'type': 'str'}, } def __init__(self, **kwargs): super(ScheduledSynchronizationSetting, self).__init__(**kwargs) self.created_at = None - self.created_by = None self.provisioning_state = None self.recurrence_interval = kwargs.get('recurrence_interval', None) self.synchronization_time = kwargs.get('synchronization_time', None) + self.user_name = None self.kind = 'ScheduleBased' @@ -2062,8 +2429,6 @@ class ScheduledTrigger(Trigger): :type kind: str :ivar created_at: Time at which the trigger was created. :vartype created_at: datetime - :ivar created_by: Name of the user who created the trigger. - :vartype created_by: str :ivar provisioning_state: Gets the provisioning state. Possible values include: 'Succeeded', 'Creating', 'Deleting', 'Moving', 'Failed' :vartype provisioning_state: str or @@ -2081,6 +2446,8 @@ class ScheduledTrigger(Trigger): :ivar trigger_status: Gets the trigger state. Possible values include: 'Active', 'Inactive', 'SourceSynchronizationSettingDeleted' :vartype trigger_status: str or ~azure.mgmt.datashare.models.TriggerStatus + :ivar user_name: Name of the user who created the trigger. 
+ :vartype user_name: str """ _validation = { @@ -2089,11 +2456,11 @@ class ScheduledTrigger(Trigger): 'type': {'readonly': True}, 'kind': {'required': True}, 'created_at': {'readonly': True}, - 'created_by': {'readonly': True}, 'provisioning_state': {'readonly': True}, 'recurrence_interval': {'required': True}, 'synchronization_time': {'required': True}, 'trigger_status': {'readonly': True}, + 'user_name': {'readonly': True}, } _attribute_map = { @@ -2102,23 +2469,23 @@ class ScheduledTrigger(Trigger): 'type': {'key': 'type', 'type': 'str'}, 'kind': {'key': 'kind', 'type': 'str'}, 'created_at': {'key': 'properties.createdAt', 'type': 'iso-8601'}, - 'created_by': {'key': 'properties.createdBy', 'type': 'str'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, 'recurrence_interval': {'key': 'properties.recurrenceInterval', 'type': 'str'}, 'synchronization_mode': {'key': 'properties.synchronizationMode', 'type': 'str'}, 'synchronization_time': {'key': 'properties.synchronizationTime', 'type': 'iso-8601'}, 'trigger_status': {'key': 'properties.triggerStatus', 'type': 'str'}, + 'user_name': {'key': 'properties.userName', 'type': 'str'}, } def __init__(self, **kwargs): super(ScheduledTrigger, self).__init__(**kwargs) self.created_at = None - self.created_by = None self.provisioning_state = None self.recurrence_interval = kwargs.get('recurrence_interval', None) self.synchronization_mode = kwargs.get('synchronization_mode', None) self.synchronization_time = kwargs.get('synchronization_time', None) self.trigger_status = None + self.user_name = None self.kind = 'ScheduleBased' @@ -2136,18 +2503,21 @@ class Share(ProxyDto): :vartype type: str :ivar created_at: Time at which the share was created. :vartype created_at: datetime - :ivar created_by: Name of the user who created the share. - :vartype created_by: str :param description: Share description. :type description: str :ivar provisioning_state: Gets or sets the provisioning state. Possible values include: 'Succeeded', 'Creating', 'Deleting', 'Moving', 'Failed' :vartype provisioning_state: str or ~azure.mgmt.datashare.models.ProvisioningState - :param share_kind: Share kind. Possible values include: 'CopyBased' + :param share_kind: Share kind. Possible values include: 'CopyBased', + 'InPlace' :type share_kind: str or ~azure.mgmt.datashare.models.ShareKind :param terms: Share terms. 
:type terms: str + :ivar user_email: Email of the user who created the resource + :vartype user_email: str + :ivar user_name: Name of the user who created the resource + :vartype user_name: str """ _validation = { @@ -2155,8 +2525,9 @@ class Share(ProxyDto): 'name': {'readonly': True}, 'type': {'readonly': True}, 'created_at': {'readonly': True}, - 'created_by': {'readonly': True}, 'provisioning_state': {'readonly': True}, + 'user_email': {'readonly': True}, + 'user_name': {'readonly': True}, } _attribute_map = { @@ -2164,21 +2535,23 @@ class Share(ProxyDto): 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'created_at': {'key': 'properties.createdAt', 'type': 'iso-8601'}, - 'created_by': {'key': 'properties.createdBy', 'type': 'str'}, 'description': {'key': 'properties.description', 'type': 'str'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, 'share_kind': {'key': 'properties.shareKind', 'type': 'str'}, 'terms': {'key': 'properties.terms', 'type': 'str'}, + 'user_email': {'key': 'properties.userEmail', 'type': 'str'}, + 'user_name': {'key': 'properties.userName', 'type': 'str'}, } def __init__(self, **kwargs): super(Share, self).__init__(**kwargs) self.created_at = None - self.created_by = None self.description = kwargs.get('description', None) self.provisioning_state = None self.share_kind = kwargs.get('share_kind', None) self.terms = kwargs.get('terms', None) + self.user_email = None + self.user_name = None class ShareSubscription(ProxyDto): @@ -2197,10 +2570,15 @@ class ShareSubscription(ProxyDto): :vartype type: str :ivar created_at: Time at which the share subscription was created. :vartype created_at: datetime - :ivar created_by: The user who created the share subscription. - :vartype created_by: str :param invitation_id: Required. The invitation id. :type invitation_id: str + :ivar provider_email: Email of the provider who created the resource + :vartype provider_email: str + :ivar provider_name: Name of the provider who created the resource + :vartype provider_name: str + :ivar provider_tenant_name: Tenant name of the provider who created the + resource + :vartype provider_tenant_name: str :ivar provisioning_state: Provisioning state of the share subscription. Possible values include: 'Succeeded', 'Creating', 'Deleting', 'Moving', 'Failed' @@ -2208,14 +2586,11 @@ class ShareSubscription(ProxyDto): ~azure.mgmt.datashare.models.ProvisioningState :ivar share_description: Description of share :vartype share_description: str - :ivar share_kind: Kind of share. Possible values include: 'CopyBased' + :ivar share_kind: Kind of share. Possible values include: 'CopyBased', + 'InPlace' :vartype share_kind: str or ~azure.mgmt.datashare.models.ShareKind :ivar share_name: Name of the share :vartype share_name: str - :ivar share_sender: Sender of the share - :vartype share_sender: str - :ivar share_sender_company_name: Company name of the share sender - :vartype share_sender_company_name: str :ivar share_subscription_status: Gets the current status of share subscription. Possible values include: 'Active', 'Revoked', 'SourceDeleted', 'Revoking' @@ -2223,6 +2598,12 @@ class ShareSubscription(ProxyDto): ~azure.mgmt.datashare.models.ShareSubscriptionStatus :ivar share_terms: Terms of a share :vartype share_terms: str + :param source_share_location: Required. Source share location. 
+ :type source_share_location: str + :ivar user_email: Email of the user who created the resource + :vartype user_email: str + :ivar user_name: Name of the user who created the resource + :vartype user_name: str """ _validation = { @@ -2230,16 +2611,19 @@ class ShareSubscription(ProxyDto): 'name': {'readonly': True}, 'type': {'readonly': True}, 'created_at': {'readonly': True}, - 'created_by': {'readonly': True}, 'invitation_id': {'required': True}, + 'provider_email': {'readonly': True}, + 'provider_name': {'readonly': True}, + 'provider_tenant_name': {'readonly': True}, 'provisioning_state': {'readonly': True}, 'share_description': {'readonly': True}, 'share_kind': {'readonly': True}, 'share_name': {'readonly': True}, - 'share_sender': {'readonly': True}, - 'share_sender_company_name': {'readonly': True}, 'share_subscription_status': {'readonly': True}, 'share_terms': {'readonly': True}, + 'source_share_location': {'required': True}, + 'user_email': {'readonly': True}, + 'user_name': {'readonly': True}, } _attribute_map = { @@ -2247,31 +2631,37 @@ class ShareSubscription(ProxyDto): 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'created_at': {'key': 'properties.createdAt', 'type': 'iso-8601'}, - 'created_by': {'key': 'properties.createdBy', 'type': 'str'}, 'invitation_id': {'key': 'properties.invitationId', 'type': 'str'}, + 'provider_email': {'key': 'properties.providerEmail', 'type': 'str'}, + 'provider_name': {'key': 'properties.providerName', 'type': 'str'}, + 'provider_tenant_name': {'key': 'properties.providerTenantName', 'type': 'str'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, 'share_description': {'key': 'properties.shareDescription', 'type': 'str'}, 'share_kind': {'key': 'properties.shareKind', 'type': 'str'}, 'share_name': {'key': 'properties.shareName', 'type': 'str'}, - 'share_sender': {'key': 'properties.shareSender', 'type': 'str'}, - 'share_sender_company_name': {'key': 'properties.shareSenderCompanyName', 'type': 'str'}, 'share_subscription_status': {'key': 'properties.shareSubscriptionStatus', 'type': 'str'}, 'share_terms': {'key': 'properties.shareTerms', 'type': 'str'}, + 'source_share_location': {'key': 'properties.sourceShareLocation', 'type': 'str'}, + 'user_email': {'key': 'properties.userEmail', 'type': 'str'}, + 'user_name': {'key': 'properties.userName', 'type': 'str'}, } def __init__(self, **kwargs): super(ShareSubscription, self).__init__(**kwargs) self.created_at = None - self.created_by = None self.invitation_id = kwargs.get('invitation_id', None) + self.provider_email = None + self.provider_name = None + self.provider_tenant_name = None self.provisioning_state = None self.share_description = None self.share_kind = None self.share_name = None - self.share_sender = None - self.share_sender_company_name = None self.share_subscription_status = None self.share_terms = None + self.source_share_location = kwargs.get('source_share_location', None) + self.user_email = None + self.user_name = None class ShareSubscriptionSynchronization(Model): @@ -2294,6 +2684,10 @@ class ShareSubscriptionSynchronization(Model): :vartype status: str :param synchronization_id: Required. Synchronization id :type synchronization_id: str + :ivar synchronization_mode: Synchronization Mode. 
Possible values include: + 'Incremental', 'FullSync' + :vartype synchronization_mode: str or + ~azure.mgmt.datashare.models.SynchronizationMode """ _validation = { @@ -2303,6 +2697,7 @@ class ShareSubscriptionSynchronization(Model): 'start_time': {'readonly': True}, 'status': {'readonly': True}, 'synchronization_id': {'required': True}, + 'synchronization_mode': {'readonly': True}, } _attribute_map = { @@ -2312,6 +2707,7 @@ class ShareSubscriptionSynchronization(Model): 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, 'status': {'key': 'status', 'type': 'str'}, 'synchronization_id': {'key': 'synchronizationId', 'type': 'str'}, + 'synchronization_mode': {'key': 'synchronizationMode', 'type': 'str'}, } def __init__(self, **kwargs): @@ -2322,54 +2718,73 @@ def __init__(self, **kwargs): self.start_time = None self.status = None self.synchronization_id = kwargs.get('synchronization_id', None) + self.synchronization_mode = None class ShareSynchronization(Model): """A ShareSynchronization data transfer object. - :param company: Company name - :type company: str + Variables are only populated by the server, and will be ignored when + sending a request. + + :param consumer_email: Email of the user who created the synchronization + :type consumer_email: str + :param consumer_name: Name of the user who created the synchronization + :type consumer_name: str + :param consumer_tenant_name: Tenant name of the consumer who created the + synchronization + :type consumer_tenant_name: str :param duration_ms: synchronization duration :type duration_ms: int :param end_time: End time of synchronization :type end_time: datetime :param message: message of synchronization :type message: str - :param recipient: Recipient id - :type recipient: str :param start_time: start time of synchronization :type start_time: datetime :param status: Raw Status :type status: str :param synchronization_id: Synchronization id :type synchronization_id: str + :ivar synchronization_mode: Synchronization mode. 
Possible values include: + 'Incremental', 'FullSync' + :vartype synchronization_mode: str or + ~azure.mgmt.datashare.models.SynchronizationMode """ + _validation = { + 'synchronization_mode': {'readonly': True}, + } + _attribute_map = { - 'company': {'key': 'company', 'type': 'str'}, + 'consumer_email': {'key': 'consumerEmail', 'type': 'str'}, + 'consumer_name': {'key': 'consumerName', 'type': 'str'}, + 'consumer_tenant_name': {'key': 'consumerTenantName', 'type': 'str'}, 'duration_ms': {'key': 'durationMs', 'type': 'int'}, 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, 'message': {'key': 'message', 'type': 'str'}, - 'recipient': {'key': 'recipient', 'type': 'str'}, 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, 'status': {'key': 'status', 'type': 'str'}, 'synchronization_id': {'key': 'synchronizationId', 'type': 'str'}, + 'synchronization_mode': {'key': 'synchronizationMode', 'type': 'str'}, } def __init__(self, **kwargs): super(ShareSynchronization, self).__init__(**kwargs) - self.company = kwargs.get('company', None) + self.consumer_email = kwargs.get('consumer_email', None) + self.consumer_name = kwargs.get('consumer_name', None) + self.consumer_tenant_name = kwargs.get('consumer_tenant_name', None) self.duration_ms = kwargs.get('duration_ms', None) self.end_time = kwargs.get('end_time', None) self.message = kwargs.get('message', None) - self.recipient = kwargs.get('recipient', None) self.start_time = kwargs.get('start_time', None) self.status = kwargs.get('status', None) self.synchronization_id = kwargs.get('synchronization_id', None) + self.synchronization_mode = None class SqlDBTableDataSet(DataSet): - """A SQL DB table dataset. + """A SQL DB table data set. Variables are only populated by the server, and will be ignored when sending a request. @@ -2384,13 +2799,15 @@ class SqlDBTableDataSet(DataSet): :vartype type: str :param kind: Required. Constant filled by server. :type kind: str - :param database_name: Database name of the source data set + :param database_name: Required. Database name of the source data set :type database_name: str :ivar data_set_id: Unique id for identifying a data set resource :vartype data_set_id: str - :param sql_server_resource_id: Resource id of SQL server + :param schema_name: Required. Schema of the table. Default value is dbo. + :type schema_name: str + :param sql_server_resource_id: Required. Resource id of SQL server :type sql_server_resource_id: str - :param table_name: SQL DB table name. + :param table_name: Required. SQL DB table name. 
:type table_name: str """ @@ -2399,7 +2816,11 @@ class SqlDBTableDataSet(DataSet): 'name': {'readonly': True}, 'type': {'readonly': True}, 'kind': {'required': True}, + 'database_name': {'required': True}, 'data_set_id': {'readonly': True}, + 'schema_name': {'required': True}, + 'sql_server_resource_id': {'required': True}, + 'table_name': {'required': True}, } _attribute_map = { @@ -2409,6 +2830,7 @@ class SqlDBTableDataSet(DataSet): 'kind': {'key': 'kind', 'type': 'str'}, 'database_name': {'key': 'properties.databaseName', 'type': 'str'}, 'data_set_id': {'key': 'properties.dataSetId', 'type': 'str'}, + 'schema_name': {'key': 'properties.schemaName', 'type': 'str'}, 'sql_server_resource_id': {'key': 'properties.sqlServerResourceId', 'type': 'str'}, 'table_name': {'key': 'properties.tableName', 'type': 'str'}, } @@ -2417,13 +2839,14 @@ def __init__(self, **kwargs): super(SqlDBTableDataSet, self).__init__(**kwargs) self.database_name = kwargs.get('database_name', None) self.data_set_id = None + self.schema_name = kwargs.get('schema_name', None) self.sql_server_resource_id = kwargs.get('sql_server_resource_id', None) self.table_name = kwargs.get('table_name', None) self.kind = 'SqlDBTable' class SqlDBTableDataSetMapping(DataSetMapping): - """A SQL DB Table dataset mapping. + """A SQL DB Table data set mapping. Variables are only populated by the server, and will be ignored when sending a request. @@ -2440,12 +2863,19 @@ class SqlDBTableDataSetMapping(DataSetMapping): :type kind: str :param database_name: Required. DatabaseName name of the sink data set :type database_name: str - :param data_set_id: Gets the id of source dataset. + :param data_set_id: Required. The id of the source data set. :type data_set_id: str - :param data_set_mapping_status: Gets the status of the dataset mapping. + :ivar data_set_mapping_status: Gets the status of the data set mapping. Possible values include: 'Ok', 'Broken' - :type data_set_mapping_status: str or + :vartype data_set_mapping_status: str or ~azure.mgmt.datashare.models.DataSetMappingStatus + :ivar provisioning_state: Provisioning state of the data set mapping. + Possible values include: 'Succeeded', 'Creating', 'Deleting', 'Moving', + 'Failed' + :vartype provisioning_state: str or + ~azure.mgmt.datashare.models.ProvisioningState + :param schema_name: Required. Schema of the table. Default value is dbo. + :type schema_name: str :param sql_server_resource_id: Required. Resource id of SQL server :type sql_server_resource_id: str :param table_name: Required. SQL DB table name. 
@@ -2458,6 +2888,10 @@ class SqlDBTableDataSetMapping(DataSetMapping): 'type': {'readonly': True}, 'kind': {'required': True}, 'database_name': {'required': True}, + 'data_set_id': {'required': True}, + 'data_set_mapping_status': {'readonly': True}, + 'provisioning_state': {'readonly': True}, + 'schema_name': {'required': True}, 'sql_server_resource_id': {'required': True}, 'table_name': {'required': True}, } @@ -2470,6 +2904,8 @@ class SqlDBTableDataSetMapping(DataSetMapping): 'database_name': {'key': 'properties.databaseName', 'type': 'str'}, 'data_set_id': {'key': 'properties.dataSetId', 'type': 'str'}, 'data_set_mapping_status': {'key': 'properties.dataSetMappingStatus', 'type': 'str'}, + 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, + 'schema_name': {'key': 'properties.schemaName', 'type': 'str'}, 'sql_server_resource_id': {'key': 'properties.sqlServerResourceId', 'type': 'str'}, 'table_name': {'key': 'properties.tableName', 'type': 'str'}, } @@ -2478,14 +2914,16 @@ def __init__(self, **kwargs): super(SqlDBTableDataSetMapping, self).__init__(**kwargs) self.database_name = kwargs.get('database_name', None) self.data_set_id = kwargs.get('data_set_id', None) - self.data_set_mapping_status = kwargs.get('data_set_mapping_status', None) + self.data_set_mapping_status = None + self.provisioning_state = None + self.schema_name = kwargs.get('schema_name', None) self.sql_server_resource_id = kwargs.get('sql_server_resource_id', None) self.table_name = kwargs.get('table_name', None) self.kind = 'SqlDBTable' class SqlDWTableDataSet(DataSet): - """A SQL DW table dataset. + """A SQL DW table data set. Variables are only populated by the server, and will be ignored when sending a request. @@ -2502,11 +2940,14 @@ class SqlDWTableDataSet(DataSet): :type kind: str :ivar data_set_id: Unique id for identifying a data set resource :vartype data_set_id: str - :param data_warehouse_name: DataWarehouse name of the source data set + :param data_warehouse_name: Required. DataWarehouse name of the source + data set :type data_warehouse_name: str - :param sql_server_resource_id: Resource id of SQL server + :param schema_name: Required. Schema of the table. Default value is dbo. + :type schema_name: str + :param sql_server_resource_id: Required. Resource id of SQL server :type sql_server_resource_id: str - :param table_name: SQL DW table name. + :param table_name: Required. SQL DW table name. 
:type table_name: str """ @@ -2516,6 +2957,10 @@ class SqlDWTableDataSet(DataSet): 'type': {'readonly': True}, 'kind': {'required': True}, 'data_set_id': {'readonly': True}, + 'data_warehouse_name': {'required': True}, + 'schema_name': {'required': True}, + 'sql_server_resource_id': {'required': True}, + 'table_name': {'required': True}, } _attribute_map = { @@ -2525,6 +2970,7 @@ class SqlDWTableDataSet(DataSet): 'kind': {'key': 'kind', 'type': 'str'}, 'data_set_id': {'key': 'properties.dataSetId', 'type': 'str'}, 'data_warehouse_name': {'key': 'properties.dataWarehouseName', 'type': 'str'}, + 'schema_name': {'key': 'properties.schemaName', 'type': 'str'}, 'sql_server_resource_id': {'key': 'properties.sqlServerResourceId', 'type': 'str'}, 'table_name': {'key': 'properties.tableName', 'type': 'str'}, } @@ -2533,13 +2979,14 @@ def __init__(self, **kwargs): super(SqlDWTableDataSet, self).__init__(**kwargs) self.data_set_id = None self.data_warehouse_name = kwargs.get('data_warehouse_name', None) + self.schema_name = kwargs.get('schema_name', None) self.sql_server_resource_id = kwargs.get('sql_server_resource_id', None) self.table_name = kwargs.get('table_name', None) self.kind = 'SqlDWTable' class SqlDWTableDataSetMapping(DataSetMapping): - """A SQL DW Table dataset mapping. + """A SQL DW Table data set mapping. Variables are only populated by the server, and will be ignored when sending a request. @@ -2554,15 +3001,22 @@ class SqlDWTableDataSetMapping(DataSetMapping): :vartype type: str :param kind: Required. Constant filled by server. :type kind: str - :param data_set_id: Gets the id of source dataset. + :param data_set_id: Required. The id of the source data set. :type data_set_id: str - :param data_set_mapping_status: Gets the status of the dataset mapping. + :ivar data_set_mapping_status: Gets the status of the data set mapping. Possible values include: 'Ok', 'Broken' - :type data_set_mapping_status: str or + :vartype data_set_mapping_status: str or ~azure.mgmt.datashare.models.DataSetMappingStatus :param data_warehouse_name: Required. DataWarehouse name of the source data set :type data_warehouse_name: str + :ivar provisioning_state: Provisioning state of the data set mapping. + Possible values include: 'Succeeded', 'Creating', 'Deleting', 'Moving', + 'Failed' + :vartype provisioning_state: str or + ~azure.mgmt.datashare.models.ProvisioningState + :param schema_name: Required. Schema of the table. Default value is dbo. + :type schema_name: str :param sql_server_resource_id: Required. Resource id of SQL server :type sql_server_resource_id: str :param table_name: Required. SQL DW table name. 
@@ -2574,7 +3028,11 @@ class SqlDWTableDataSetMapping(DataSetMapping): 'name': {'readonly': True}, 'type': {'readonly': True}, 'kind': {'required': True}, + 'data_set_id': {'required': True}, + 'data_set_mapping_status': {'readonly': True}, 'data_warehouse_name': {'required': True}, + 'provisioning_state': {'readonly': True}, + 'schema_name': {'required': True}, 'sql_server_resource_id': {'required': True}, 'table_name': {'required': True}, } @@ -2587,6 +3045,8 @@ class SqlDWTableDataSetMapping(DataSetMapping): 'data_set_id': {'key': 'properties.dataSetId', 'type': 'str'}, 'data_set_mapping_status': {'key': 'properties.dataSetMappingStatus', 'type': 'str'}, 'data_warehouse_name': {'key': 'properties.dataWarehouseName', 'type': 'str'}, + 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, + 'schema_name': {'key': 'properties.schemaName', 'type': 'str'}, 'sql_server_resource_id': {'key': 'properties.sqlServerResourceId', 'type': 'str'}, 'table_name': {'key': 'properties.tableName', 'type': 'str'}, } @@ -2594,53 +3054,55 @@ class SqlDWTableDataSetMapping(DataSetMapping): def __init__(self, **kwargs): super(SqlDWTableDataSetMapping, self).__init__(**kwargs) self.data_set_id = kwargs.get('data_set_id', None) - self.data_set_mapping_status = kwargs.get('data_set_mapping_status', None) + self.data_set_mapping_status = None self.data_warehouse_name = kwargs.get('data_warehouse_name', None) + self.provisioning_state = None + self.schema_name = kwargs.get('schema_name', None) self.sql_server_resource_id = kwargs.get('sql_server_resource_id', None) self.table_name = kwargs.get('table_name', None) self.kind = 'SqlDWTable' class SynchronizationDetails(Model): - """Synchronization details at dataset level. + """Synchronization details at data set level. Variables are only populated by the server, and will be ignored when sending a request. - :ivar data_set_id: id of dataSet + :ivar data_set_id: Id of data set :vartype data_set_id: str - :ivar data_set_type: type of DataSet. Possible values include: 'Blob', - 'Container', 'BlobFolder', 'AdlsGen2FileSystem', 'AdlsGen2Folder', - 'AdlsGen2File', 'AdlsGen1Folder', 'AdlsGen1File', 'SqlDBTable', - 'SqlDWTable' + :ivar data_set_type: Type of the data set. Possible values include: + 'Blob', 'Container', 'BlobFolder', 'AdlsGen2FileSystem', 'AdlsGen2Folder', + 'AdlsGen2File', 'AdlsGen1Folder', 'AdlsGen1File', 'KustoCluster', + 'KustoDatabase', 'SqlDBTable', 'SqlDWTable' :vartype data_set_type: str or ~azure.mgmt.datashare.models.DataSetType - :ivar duration_ms: duration of dataset level copy + :ivar duration_ms: Duration of data set level copy :vartype duration_ms: int - :ivar end_time: End time of dataset level copy + :ivar end_time: End time of data set level copy :vartype end_time: datetime - :ivar files_read: The number of files read from the source dataset. + :ivar files_read: The number of files read from the source data set :vartype files_read: long - :ivar files_written: The number of files written into the sink dataset. + :ivar files_written: The number of files written into the sink data set :vartype files_written: long - :ivar message: Error Message if any + :ivar message: Error message if any :vartype message: str - :ivar name: name of dataSet + :ivar name: Name of the data set :vartype name: str - :ivar rows_copied: The number of files copied into the sink dataset. 
+ :ivar rows_copied: The number of rows copied into the sink data set :vartype rows_copied: long - :ivar rows_read: The number of rows read from the source dataset. + :ivar rows_read: The number of rows read from the source data set. :vartype rows_read: long - :ivar size_read: The size of the data read from the source dataset in - bytes. + :ivar size_read: The size of the data read from the source data set in + bytes :vartype size_read: long - :ivar size_written: The size of the data written into the sink dataset in - bytes. + :ivar size_written: The size of the data written into the sink data set in + bytes :vartype size_written: long - :ivar start_time: start time of dataset level copy + :ivar start_time: Start time of data set level copy :vartype start_time: datetime :ivar status: Raw Status :vartype status: str - :ivar v_core: The vCore units consumed for the dataset synchronization. + :ivar v_core: The vCore units consumed for the data set synchronization :vartype v_core: long """ diff --git a/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/models/_models_py3.py b/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/models/_models_py3.py index 41d0603e9c0c..91cd2c4d872c 100644 --- a/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/models/_models_py3.py +++ b/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/models/_models_py3.py @@ -76,12 +76,14 @@ class Account(DefaultDto): :type identity: ~azure.mgmt.datashare.models.Identity :ivar created_at: Time at which the account was created. :vartype created_at: datetime - :ivar created_by: Name of the user who created the account. - :vartype created_by: str :ivar provisioning_state: Provisioning state of the Account. Possible values include: 'Succeeded', 'Creating', 'Deleting', 'Moving', 'Failed' :vartype provisioning_state: str or ~azure.mgmt.datashare.models.ProvisioningState + :ivar user_email: Email of the user who created the resource + :vartype user_email: str + :ivar user_name: Name of the user who created the resource + :vartype user_name: str """ _validation = { @@ -90,8 +92,9 @@ class Account(DefaultDto): 'type': {'readonly': True}, 'identity': {'required': True}, 'created_at': {'readonly': True}, - 'created_by': {'readonly': True}, 'provisioning_state': {'readonly': True}, + 'user_email': {'readonly': True}, + 'user_name': {'readonly': True}, } _attribute_map = { @@ -102,16 +105,18 @@ class Account(DefaultDto): 'type': {'key': 'type', 'type': 'str'}, 'identity': {'key': 'identity', 'type': 'Identity'}, 'created_at': {'key': 'properties.createdAt', 'type': 'iso-8601'}, - 'created_by': {'key': 'properties.createdBy', 'type': 'str'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, + 'user_email': {'key': 'properties.userEmail', 'type': 'str'}, + 'user_name': {'key': 'properties.userName', 'type': 'str'}, } def __init__(self, *, identity, location: str=None, tags=None, **kwargs) -> None: super(Account, self).__init__(location=location, tags=tags, **kwargs) self.identity = identity self.created_at = None - self.created_by = None self.provisioning_state = None + self.user_email = None + self.user_name = None class AccountUpdateParameters(Model): @@ -169,8 +174,8 @@ class DataSet(ProxyDto): You probably want to use the sub-classes and not this class directly.
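For illustration, a minimal sketch (not part of the patch): the py3 Account model keeps identity as its only required argument, with the old created_by replaced by read-only user_email/user_name. The Identity construction assumes the published Identity model's optional type keyword; values are placeholders.

from azure.mgmt.datashare.models import Account, Identity

account = Account(
    identity=Identity(type="SystemAssigned"),  # assumed Identity signature; placeholder
    location="eastus2",  # placeholder region
)
assert account.user_email is None and account.user_name is None  # filled by the service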
Known sub-classes are: BlobDataSet, BlobFolderDataSet, BlobContainerDataSet, ADLSGen2FileDataSet, ADLSGen2FolderDataSet, ADLSGen2FileSystemDataSet, - ADLSGen1FolderDataSet, ADLSGen1FileDataSet, SqlDWTableDataSet, - SqlDBTableDataSet + ADLSGen1FolderDataSet, ADLSGen1FileDataSet, KustoClusterDataSet, + KustoDatabaseDataSet, SqlDWTableDataSet, SqlDBTableDataSet Variables are only populated by the server, and will be ignored when sending a request. @@ -202,7 +207,7 @@ class DataSet(ProxyDto): } _subtype_map = { - 'kind': {'Blob': 'BlobDataSet', 'BlobFolder': 'BlobFolderDataSet', 'Container': 'BlobContainerDataSet', 'AdlsGen2File': 'ADLSGen2FileDataSet', 'AdlsGen2Folder': 'ADLSGen2FolderDataSet', 'AdlsGen2FileSystem': 'ADLSGen2FileSystemDataSet', 'AdlsGen1Folder': 'ADLSGen1FolderDataSet', 'AdlsGen1File': 'ADLSGen1FileDataSet', 'SqlDWTable': 'SqlDWTableDataSet', 'SqlDBTable': 'SqlDBTableDataSet'} + 'kind': {'Blob': 'BlobDataSet', 'BlobFolder': 'BlobFolderDataSet', 'Container': 'BlobContainerDataSet', 'AdlsGen2File': 'ADLSGen2FileDataSet', 'AdlsGen2Folder': 'ADLSGen2FolderDataSet', 'AdlsGen2FileSystem': 'ADLSGen2FileSystemDataSet', 'AdlsGen1Folder': 'ADLSGen1FolderDataSet', 'AdlsGen1File': 'ADLSGen1FileDataSet', 'KustoCluster': 'KustoClusterDataSet', 'KustoDatabase': 'KustoDatabaseDataSet', 'SqlDWTable': 'SqlDWTableDataSet', 'SqlDBTable': 'SqlDBTableDataSet'} } def __init__(self, **kwargs) -> None: @@ -212,7 +217,7 @@ def __init__(self, **kwargs) -> None: class ADLSGen1FileDataSet(DataSet): - """An ADLS Gen 1 file dataset. + """An ADLS Gen 1 file data set. Variables are only populated by the server, and will be ignored when sending a request. @@ -279,7 +284,7 @@ def __init__(self, *, account_name: str, file_name: str, folder_path: str, resou class ADLSGen1FolderDataSet(DataSet): - """An ADLS Gen 1 folder dataset. + """An ADLS Gen 1 folder data set. Variables are only populated by the server, and will be ignored when sending a request. @@ -341,7 +346,7 @@ def __init__(self, *, account_name: str, folder_path: str, resource_group: str, class ADLSGen2FileDataSet(DataSet): - """An ADLS Gen 2 file dataset. + """An ADLS Gen 2 file data set. Variables are only populated by the server, and will be ignored when sending a request. @@ -409,12 +414,13 @@ def __init__(self, *, file_path: str, file_system: str, resource_group: str, sto class DataSetMapping(ProxyDto): - """A dataset mapping data transfer object. + """A data set mapping data transfer object. You probably want to use the sub-classes and not this class directly. 
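For illustration, a minimal sketch (not part of the patch): with 'KustoCluster' and 'KustoDatabase' added to the DataSet subtype map, a generic payload carrying that kind should deserialize into the new classes, assuming msrest's polymorphic dispatch through Model.from_dict. The payload below is a hypothetical wire response trimmed to the essentials.

from azure.mgmt.datashare.models import DataSet, KustoClusterDataSet

payload = {
    "kind": "KustoCluster",
    "properties": {
        "kustoClusterResourceId": "/subscriptions/{sub-id}/resourceGroups/{rg}/providers/Microsoft.Kusto/clusters/sample-kusto",
    },
}
data_set = DataSet.from_dict(payload)  # dispatches on 'kind' via _subtype_map
assert isinstance(data_set, KustoClusterDataSet)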
Known sub-classes are: BlobDataSetMapping, BlobFolderDataSetMapping, BlobContainerDataSetMapping, ADLSGen2FileDataSetMapping, ADLSGen2FolderDataSetMapping, ADLSGen2FileSystemDataSetMapping, + KustoClusterDataSetMapping, KustoDatabaseDataSetMapping, SqlDWTableDataSetMapping, SqlDBTableDataSetMapping Variables are only populated by the server, and will be ignored when @@ -447,7 +453,7 @@ class DataSetMapping(ProxyDto): } _subtype_map = { - 'kind': {'Blob': 'BlobDataSetMapping', 'BlobFolder': 'BlobFolderDataSetMapping', 'Container': 'BlobContainerDataSetMapping', 'AdlsGen2File': 'ADLSGen2FileDataSetMapping', 'AdlsGen2Folder': 'ADLSGen2FolderDataSetMapping', 'AdlsGen2FileSystem': 'ADLSGen2FileSystemDataSetMapping', 'SqlDWTable': 'SqlDWTableDataSetMapping', 'SqlDBTable': 'SqlDBTableDataSetMapping'} + 'kind': {'Blob': 'BlobDataSetMapping', 'BlobFolder': 'BlobFolderDataSetMapping', 'Container': 'BlobContainerDataSetMapping', 'AdlsGen2File': 'ADLSGen2FileDataSetMapping', 'AdlsGen2Folder': 'ADLSGen2FolderDataSetMapping', 'AdlsGen2FileSystem': 'ADLSGen2FileSystemDataSetMapping', 'KustoCluster': 'KustoClusterDataSetMapping', 'KustoDatabase': 'KustoDatabaseDataSetMapping', 'SqlDWTable': 'SqlDWTableDataSetMapping', 'SqlDBTable': 'SqlDBTableDataSetMapping'} } def __init__(self, **kwargs) -> None: @@ -457,7 +463,7 @@ def __init__(self, **kwargs) -> None: class ADLSGen2FileDataSetMapping(DataSetMapping): - """An ADLS Gen2 file dataset mapping. + """An ADLS Gen2 file data set mapping. Variables are only populated by the server, and will be ignored when sending a request. @@ -472,11 +478,11 @@ class ADLSGen2FileDataSetMapping(DataSetMapping): :vartype type: str :param kind: Required. Constant filled by server. :type kind: str - :param data_set_id: Gets the id of source dataset. + :param data_set_id: Required. The id of the source data set. :type data_set_id: str - :param data_set_mapping_status: Gets the status of the dataset mapping. + :ivar data_set_mapping_status: Gets the status of the data set mapping. Possible values include: 'Ok', 'Broken' - :type data_set_mapping_status: str or + :vartype data_set_mapping_status: str or ~azure.mgmt.datashare.models.DataSetMappingStatus :param file_path: Required. File path within the file system. :type file_path: str @@ -485,6 +491,11 @@ class ADLSGen2FileDataSetMapping(DataSetMapping): :param output_type: Type of output file. Possible values include: 'Csv', 'Parquet' :type output_type: str or ~azure.mgmt.datashare.models.OutputType + :ivar provisioning_state: Provisioning state of the data set mapping. + Possible values include: 'Succeeded', 'Creating', 'Deleting', 'Moving', + 'Failed' + :vartype provisioning_state: str or + ~azure.mgmt.datashare.models.ProvisioningState :param resource_group: Required. Resource group of storage account. :type resource_group: str :param storage_account_name: Required. 
Storage account name of the source @@ -499,8 +510,11 @@ class ADLSGen2FileDataSetMapping(DataSetMapping): 'name': {'readonly': True}, 'type': {'readonly': True}, 'kind': {'required': True}, + 'data_set_id': {'required': True}, + 'data_set_mapping_status': {'readonly': True}, 'file_path': {'required': True}, 'file_system': {'required': True}, + 'provisioning_state': {'readonly': True}, 'resource_group': {'required': True}, 'storage_account_name': {'required': True}, 'subscription_id': {'required': True}, @@ -516,18 +530,20 @@ class ADLSGen2FileDataSetMapping(DataSetMapping): 'file_path': {'key': 'properties.filePath', 'type': 'str'}, 'file_system': {'key': 'properties.fileSystem', 'type': 'str'}, 'output_type': {'key': 'properties.outputType', 'type': 'str'}, + 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, 'resource_group': {'key': 'properties.resourceGroup', 'type': 'str'}, 'storage_account_name': {'key': 'properties.storageAccountName', 'type': 'str'}, 'subscription_id': {'key': 'properties.subscriptionId', 'type': 'str'}, } - def __init__(self, *, file_path: str, file_system: str, resource_group: str, storage_account_name: str, subscription_id: str, data_set_id: str=None, data_set_mapping_status=None, output_type=None, **kwargs) -> None: + def __init__(self, *, data_set_id: str, file_path: str, file_system: str, resource_group: str, storage_account_name: str, subscription_id: str, output_type=None, **kwargs) -> None: super(ADLSGen2FileDataSetMapping, self).__init__(**kwargs) self.data_set_id = data_set_id - self.data_set_mapping_status = data_set_mapping_status + self.data_set_mapping_status = None self.file_path = file_path self.file_system = file_system self.output_type = output_type + self.provisioning_state = None self.resource_group = resource_group self.storage_account_name = storage_account_name self.subscription_id = subscription_id @@ -535,7 +551,7 @@ def __init__(self, *, file_path: str, file_system: str, resource_group: str, sto class ADLSGen2FileSystemDataSet(DataSet): - """An ADLS Gen 2 file system dataset. + """An ADLS Gen 2 file system data set. Variables are only populated by the server, and will be ignored when sending a request. @@ -598,7 +614,7 @@ def __init__(self, *, file_system: str, resource_group: str, storage_account_nam class ADLSGen2FileSystemDataSetMapping(DataSetMapping): - """An ADLS Gen2 file system dataset mapping. + """An ADLS Gen2 file system data set mapping. Variables are only populated by the server, and will be ignored when sending a request. @@ -613,14 +629,19 @@ class ADLSGen2FileSystemDataSetMapping(DataSetMapping): :vartype type: str :param kind: Required. Constant filled by server. :type kind: str - :param data_set_id: Gets the id of source dataset. + :param data_set_id: Required. The id of the source data set. :type data_set_id: str - :param data_set_mapping_status: Gets the status of the dataset mapping. + :ivar data_set_mapping_status: Gets the status of the data set mapping. Possible values include: 'Ok', 'Broken' - :type data_set_mapping_status: str or + :vartype data_set_mapping_status: str or ~azure.mgmt.datashare.models.DataSetMappingStatus :param file_system: Required. The file system name. :type file_system: str + :ivar provisioning_state: Provisioning state of the data set mapping. + Possible values include: 'Succeeded', 'Creating', 'Deleting', 'Moving', + 'Failed' + :vartype provisioning_state: str or + ~azure.mgmt.datashare.models.ProvisioningState :param resource_group: Required. 
Resource group of storage account. :type resource_group: str :param storage_account_name: Required. Storage account name of the source @@ -635,7 +656,10 @@ class ADLSGen2FileSystemDataSetMapping(DataSetMapping): 'name': {'readonly': True}, 'type': {'readonly': True}, 'kind': {'required': True}, + 'data_set_id': {'required': True}, + 'data_set_mapping_status': {'readonly': True}, 'file_system': {'required': True}, + 'provisioning_state': {'readonly': True}, 'resource_group': {'required': True}, 'storage_account_name': {'required': True}, 'subscription_id': {'required': True}, @@ -649,16 +673,18 @@ class ADLSGen2FileSystemDataSetMapping(DataSetMapping): 'data_set_id': {'key': 'properties.dataSetId', 'type': 'str'}, 'data_set_mapping_status': {'key': 'properties.dataSetMappingStatus', 'type': 'str'}, 'file_system': {'key': 'properties.fileSystem', 'type': 'str'}, + 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, 'resource_group': {'key': 'properties.resourceGroup', 'type': 'str'}, 'storage_account_name': {'key': 'properties.storageAccountName', 'type': 'str'}, 'subscription_id': {'key': 'properties.subscriptionId', 'type': 'str'}, } - def __init__(self, *, file_system: str, resource_group: str, storage_account_name: str, subscription_id: str, data_set_id: str=None, data_set_mapping_status=None, **kwargs) -> None: + def __init__(self, *, data_set_id: str, file_system: str, resource_group: str, storage_account_name: str, subscription_id: str, **kwargs) -> None: super(ADLSGen2FileSystemDataSetMapping, self).__init__(**kwargs) self.data_set_id = data_set_id - self.data_set_mapping_status = data_set_mapping_status + self.data_set_mapping_status = None self.file_system = file_system + self.provisioning_state = None self.resource_group = resource_group self.storage_account_name = storage_account_name self.subscription_id = subscription_id @@ -666,7 +692,7 @@ def __init__(self, *, file_system: str, resource_group: str, storage_account_nam class ADLSGen2FolderDataSet(DataSet): - """An ADLS Gen 2 folder dataset. + """An ADLS Gen 2 folder data set. Variables are only populated by the server, and will be ignored when sending a request. @@ -734,7 +760,7 @@ def __init__(self, *, file_system: str, folder_path: str, resource_group: str, s class ADLSGen2FolderDataSetMapping(DataSetMapping): - """An ADLS Gen2 folder dataset mapping. + """An ADLS Gen2 folder data set mapping. Variables are only populated by the server, and will be ignored when sending a request. @@ -749,16 +775,21 @@ class ADLSGen2FolderDataSetMapping(DataSetMapping): :vartype type: str :param kind: Required. Constant filled by server. :type kind: str - :param data_set_id: Gets the id of source dataset. + :param data_set_id: Required. The id of the source data set. :type data_set_id: str - :param data_set_mapping_status: Gets the status of the dataset mapping. + :ivar data_set_mapping_status: Gets the status of the data set mapping. Possible values include: 'Ok', 'Broken' - :type data_set_mapping_status: str or + :vartype data_set_mapping_status: str or ~azure.mgmt.datashare.models.DataSetMappingStatus :param file_system: Required. File system to which the folder belongs. :type file_system: str :param folder_path: Required. Folder path within the file system. :type folder_path: str + :ivar provisioning_state: Provisioning state of the data set mapping. 
+ Possible values include: 'Succeeded', 'Creating', 'Deleting', 'Moving', + 'Failed' + :vartype provisioning_state: str or + ~azure.mgmt.datashare.models.ProvisioningState :param resource_group: Required. Resource group of storage account. :type resource_group: str :param storage_account_name: Required. Storage account name of the source @@ -773,8 +804,11 @@ class ADLSGen2FolderDataSetMapping(DataSetMapping): 'name': {'readonly': True}, 'type': {'readonly': True}, 'kind': {'required': True}, + 'data_set_id': {'required': True}, + 'data_set_mapping_status': {'readonly': True}, 'file_system': {'required': True}, 'folder_path': {'required': True}, + 'provisioning_state': {'readonly': True}, 'resource_group': {'required': True}, 'storage_account_name': {'required': True}, 'subscription_id': {'required': True}, @@ -789,17 +823,19 @@ class ADLSGen2FolderDataSetMapping(DataSetMapping): 'data_set_mapping_status': {'key': 'properties.dataSetMappingStatus', 'type': 'str'}, 'file_system': {'key': 'properties.fileSystem', 'type': 'str'}, 'folder_path': {'key': 'properties.folderPath', 'type': 'str'}, + 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, 'resource_group': {'key': 'properties.resourceGroup', 'type': 'str'}, 'storage_account_name': {'key': 'properties.storageAccountName', 'type': 'str'}, 'subscription_id': {'key': 'properties.subscriptionId', 'type': 'str'}, } - def __init__(self, *, file_system: str, folder_path: str, resource_group: str, storage_account_name: str, subscription_id: str, data_set_id: str=None, data_set_mapping_status=None, **kwargs) -> None: + def __init__(self, *, data_set_id: str, file_system: str, folder_path: str, resource_group: str, storage_account_name: str, subscription_id: str, **kwargs) -> None: super(ADLSGen2FolderDataSetMapping, self).__init__(**kwargs) self.data_set_id = data_set_id - self.data_set_mapping_status = data_set_mapping_status + self.data_set_mapping_status = None self.file_system = file_system self.folder_path = folder_path + self.provisioning_state = None self.resource_group = resource_group self.storage_account_name = storage_account_name self.subscription_id = subscription_id @@ -807,7 +843,7 @@ def __init__(self, *, file_system: str, folder_path: str, resource_group: str, s class BlobContainerDataSet(DataSet): - """An Azure storage blob container dataset. + """An Azure storage blob container data set. Variables are only populated by the server, and will be ignored when sending a request. @@ -870,7 +906,7 @@ def __init__(self, *, container_name: str, resource_group: str, storage_account_ class BlobContainerDataSetMapping(DataSetMapping): - """A Blob container dataset mapping. + """A Blob container data set mapping. Variables are only populated by the server, and will be ignored when sending a request. @@ -887,12 +923,17 @@ class BlobContainerDataSetMapping(DataSetMapping): :type kind: str :param container_name: Required. BLOB Container name. :type container_name: str - :param data_set_id: Gets the id of source dataset. + :param data_set_id: Required. The id of the source data set. :type data_set_id: str - :param data_set_mapping_status: Gets the status of the dataset mapping. + :ivar data_set_mapping_status: Gets the status of the data set mapping. Possible values include: 'Ok', 'Broken' - :type data_set_mapping_status: str or + :vartype data_set_mapping_status: str or ~azure.mgmt.datashare.models.DataSetMappingStatus + :ivar provisioning_state: Provisioning state of the data set mapping. 
+ Possible values include: 'Succeeded', 'Creating', 'Deleting', 'Moving', + 'Failed' + :vartype provisioning_state: str or + ~azure.mgmt.datashare.models.ProvisioningState :param resource_group: Required. Resource group of storage account. :type resource_group: str :param storage_account_name: Required. Storage account name of the source @@ -908,6 +949,9 @@ class BlobContainerDataSetMapping(DataSetMapping): 'type': {'readonly': True}, 'kind': {'required': True}, 'container_name': {'required': True}, + 'data_set_id': {'required': True}, + 'data_set_mapping_status': {'readonly': True}, + 'provisioning_state': {'readonly': True}, 'resource_group': {'required': True}, 'storage_account_name': {'required': True}, 'subscription_id': {'required': True}, @@ -921,16 +965,18 @@ class BlobContainerDataSetMapping(DataSetMapping): 'container_name': {'key': 'properties.containerName', 'type': 'str'}, 'data_set_id': {'key': 'properties.dataSetId', 'type': 'str'}, 'data_set_mapping_status': {'key': 'properties.dataSetMappingStatus', 'type': 'str'}, + 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, 'resource_group': {'key': 'properties.resourceGroup', 'type': 'str'}, 'storage_account_name': {'key': 'properties.storageAccountName', 'type': 'str'}, 'subscription_id': {'key': 'properties.subscriptionId', 'type': 'str'}, } - def __init__(self, *, container_name: str, resource_group: str, storage_account_name: str, subscription_id: str, data_set_id: str=None, data_set_mapping_status=None, **kwargs) -> None: + def __init__(self, *, container_name: str, data_set_id: str, resource_group: str, storage_account_name: str, subscription_id: str, **kwargs) -> None: super(BlobContainerDataSetMapping, self).__init__(**kwargs) self.container_name = container_name self.data_set_id = data_set_id - self.data_set_mapping_status = data_set_mapping_status + self.data_set_mapping_status = None + self.provisioning_state = None self.resource_group = resource_group self.storage_account_name = storage_account_name self.subscription_id = subscription_id @@ -938,7 +984,7 @@ def __init__(self, *, container_name: str, resource_group: str, storage_account_ class BlobDataSet(DataSet): - """An Azure storage blob dataset. + """An Azure storage blob data set. Variables are only populated by the server, and will be ignored when sending a request. @@ -1006,7 +1052,7 @@ def __init__(self, *, container_name: str, file_path: str, resource_group: str, class BlobDataSetMapping(DataSetMapping): - """A Blob dataset mapping. + """A Blob data set mapping. Variables are only populated by the server, and will be ignored when sending a request. @@ -1023,17 +1069,22 @@ class BlobDataSetMapping(DataSetMapping): :type kind: str :param container_name: Required. Container that has the file path. :type container_name: str - :param data_set_id: Gets the id of source dataset. + :param data_set_id: Required. The id of the source data set. :type data_set_id: str - :param data_set_mapping_status: Gets the status of the dataset mapping. + :ivar data_set_mapping_status: Gets the status of the data set mapping. Possible values include: 'Ok', 'Broken' - :type data_set_mapping_status: str or + :vartype data_set_mapping_status: str or ~azure.mgmt.datashare.models.DataSetMappingStatus :param file_path: Required. File path within the source data set :type file_path: str :param output_type: File output type. 
Possible values include: 'Csv', 'Parquet' :type output_type: str or ~azure.mgmt.datashare.models.OutputType + :ivar provisioning_state: Provisioning state of the data set mapping. + Possible values include: 'Succeeded', 'Creating', 'Deleting', 'Moving', + 'Failed' + :vartype provisioning_state: str or + ~azure.mgmt.datashare.models.ProvisioningState :param resource_group: Required. Resource group of storage account. :type resource_group: str :param storage_account_name: Required. Storage account name of the source @@ -1049,7 +1100,10 @@ class BlobDataSetMapping(DataSetMapping): 'type': {'readonly': True}, 'kind': {'required': True}, 'container_name': {'required': True}, + 'data_set_id': {'required': True}, + 'data_set_mapping_status': {'readonly': True}, 'file_path': {'required': True}, + 'provisioning_state': {'readonly': True}, 'resource_group': {'required': True}, 'storage_account_name': {'required': True}, 'subscription_id': {'required': True}, @@ -1065,18 +1119,20 @@ class BlobDataSetMapping(DataSetMapping): 'data_set_mapping_status': {'key': 'properties.dataSetMappingStatus', 'type': 'str'}, 'file_path': {'key': 'properties.filePath', 'type': 'str'}, 'output_type': {'key': 'properties.outputType', 'type': 'str'}, + 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, 'resource_group': {'key': 'properties.resourceGroup', 'type': 'str'}, 'storage_account_name': {'key': 'properties.storageAccountName', 'type': 'str'}, 'subscription_id': {'key': 'properties.subscriptionId', 'type': 'str'}, } - def __init__(self, *, container_name: str, file_path: str, resource_group: str, storage_account_name: str, subscription_id: str, data_set_id: str=None, data_set_mapping_status=None, output_type=None, **kwargs) -> None: + def __init__(self, *, container_name: str, data_set_id: str, file_path: str, resource_group: str, storage_account_name: str, subscription_id: str, output_type=None, **kwargs) -> None: super(BlobDataSetMapping, self).__init__(**kwargs) self.container_name = container_name self.data_set_id = data_set_id - self.data_set_mapping_status = data_set_mapping_status + self.data_set_mapping_status = None self.file_path = file_path self.output_type = output_type + self.provisioning_state = None self.resource_group = resource_group self.storage_account_name = storage_account_name self.subscription_id = subscription_id @@ -1084,7 +1140,7 @@ def __init__(self, *, container_name: str, file_path: str, resource_group: str, class BlobFolderDataSet(DataSet): - """An Azure storage blob folder dataset. + """An Azure storage blob folder data set. Variables are only populated by the server, and will be ignored when sending a request. @@ -1152,7 +1208,7 @@ def __init__(self, *, container_name: str, prefix: str, resource_group: str, sto class BlobFolderDataSetMapping(DataSetMapping): - """A Blob folder dataset mapping. + """A Blob folder data set mapping. Variables are only populated by the server, and will be ignored when sending a request. @@ -1169,14 +1225,19 @@ class BlobFolderDataSetMapping(DataSetMapping): :type kind: str :param container_name: Required. Container that has the file path. :type container_name: str - :param data_set_id: Gets the id of source dataset. + :param data_set_id: Required. The id of the source data set. :type data_set_id: str - :param data_set_mapping_status: Gets the status of the dataset mapping. + :ivar data_set_mapping_status: Gets the status of the data set mapping. 
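For illustration, a minimal sketch (not part of the patch) of the updated typed constructor for BlobDataSetMapping, where data_set_id moved from optional to required keyword-only and data_set_mapping_status can no longer be supplied by the caller. Values are placeholders.

from azure.mgmt.datashare.models import BlobDataSetMapping

mapping = BlobDataSetMapping(
    container_name="sink-container",
    data_set_id="00000000-0000-0000-0000-000000000000",  # id of the source data set
    file_path="folder/sample.csv",
    resource_group="sample-rg",
    storage_account_name="samplestorage",
    subscription_id="00000000-0000-0000-0000-000000000000",
    output_type="Csv",  # optional; 'Csv' or 'Parquet'
)
assert mapping.provisioning_state is None  # read-only; populated by the service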
Possible values include: 'Ok', 'Broken' - :type data_set_mapping_status: str or + :vartype data_set_mapping_status: str or ~azure.mgmt.datashare.models.DataSetMappingStatus :param prefix: Required. Prefix for blob folder :type prefix: str + :ivar provisioning_state: Provisioning state of the data set mapping. + Possible values include: 'Succeeded', 'Creating', 'Deleting', 'Moving', + 'Failed' + :vartype provisioning_state: str or + ~azure.mgmt.datashare.models.ProvisioningState :param resource_group: Required. Resource group of storage account. :type resource_group: str :param storage_account_name: Required. Storage account name of the source @@ -1192,7 +1253,10 @@ class BlobFolderDataSetMapping(DataSetMapping): 'type': {'readonly': True}, 'kind': {'required': True}, 'container_name': {'required': True}, + 'data_set_id': {'required': True}, + 'data_set_mapping_status': {'readonly': True}, 'prefix': {'required': True}, + 'provisioning_state': {'readonly': True}, 'resource_group': {'required': True}, 'storage_account_name': {'required': True}, 'subscription_id': {'required': True}, @@ -1207,17 +1271,19 @@ class BlobFolderDataSetMapping(DataSetMapping): 'data_set_id': {'key': 'properties.dataSetId', 'type': 'str'}, 'data_set_mapping_status': {'key': 'properties.dataSetMappingStatus', 'type': 'str'}, 'prefix': {'key': 'properties.prefix', 'type': 'str'}, + 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, 'resource_group': {'key': 'properties.resourceGroup', 'type': 'str'}, 'storage_account_name': {'key': 'properties.storageAccountName', 'type': 'str'}, 'subscription_id': {'key': 'properties.subscriptionId', 'type': 'str'}, } - def __init__(self, *, container_name: str, prefix: str, resource_group: str, storage_account_name: str, subscription_id: str, data_set_id: str=None, data_set_mapping_status=None, **kwargs) -> None: + def __init__(self, *, container_name: str, data_set_id: str, prefix: str, resource_group: str, storage_account_name: str, subscription_id: str, **kwargs) -> None: super(BlobFolderDataSetMapping, self).__init__(**kwargs) self.container_name = container_name self.data_set_id = data_set_id - self.data_set_mapping_status = data_set_mapping_status + self.data_set_mapping_status = None self.prefix = prefix + self.provisioning_state = None self.resource_group = resource_group self.storage_account_name = storage_account_name self.subscription_id = subscription_id @@ -1258,18 +1324,25 @@ class ConsumerInvitation(ProxyDto): ~azure.mgmt.datashare.models.InvitationStatus :ivar location: invitation location :vartype location: str + :ivar provider_email: Email of the provider who created the resource + :vartype provider_email: str + :ivar provider_name: Name of the provider who created the resource + :vartype provider_name: str + :ivar provider_tenant_name: Tenant name of the provider who created the + resource + :vartype provider_tenant_name: str :ivar responded_at: The time the recipient responded to the invitation. :vartype responded_at: datetime - :ivar sender: Gets the name of the sender. - :vartype sender: str - :ivar sender_company_name: Gets the company name of the sender. - :vartype sender_company_name: str :ivar sent_at: Gets the time at which the invitation was sent. :vartype sent_at: datetime :ivar share_name: Gets the source share Name. 
:vartype share_name: str :ivar terms_of_use: Terms of use shared when the invitation was created :vartype terms_of_use: str + :ivar user_email: Email of the user who created the resource + :vartype user_email: str + :ivar user_name: Name of the user who created the resource + :vartype user_name: str """ _validation = { @@ -1281,12 +1354,15 @@ class ConsumerInvitation(ProxyDto): 'invitation_id': {'required': True}, 'invitation_status': {'readonly': True}, 'location': {'readonly': True}, + 'provider_email': {'readonly': True}, + 'provider_name': {'readonly': True}, + 'provider_tenant_name': {'readonly': True}, 'responded_at': {'readonly': True}, - 'sender': {'readonly': True}, - 'sender_company_name': {'readonly': True}, 'sent_at': {'readonly': True}, 'share_name': {'readonly': True}, 'terms_of_use': {'readonly': True}, + 'user_email': {'readonly': True}, + 'user_name': {'readonly': True}, } _attribute_map = { @@ -1298,12 +1374,15 @@ class ConsumerInvitation(ProxyDto): 'invitation_id': {'key': 'properties.invitationId', 'type': 'str'}, 'invitation_status': {'key': 'properties.invitationStatus', 'type': 'str'}, 'location': {'key': 'properties.location', 'type': 'str'}, + 'provider_email': {'key': 'properties.providerEmail', 'type': 'str'}, + 'provider_name': {'key': 'properties.providerName', 'type': 'str'}, + 'provider_tenant_name': {'key': 'properties.providerTenantName', 'type': 'str'}, 'responded_at': {'key': 'properties.respondedAt', 'type': 'iso-8601'}, - 'sender': {'key': 'properties.sender', 'type': 'str'}, - 'sender_company_name': {'key': 'properties.senderCompanyName', 'type': 'str'}, 'sent_at': {'key': 'properties.sentAt', 'type': 'iso-8601'}, 'share_name': {'key': 'properties.shareName', 'type': 'str'}, 'terms_of_use': {'key': 'properties.termsOfUse', 'type': 'str'}, + 'user_email': {'key': 'properties.userEmail', 'type': 'str'}, + 'user_name': {'key': 'properties.userName', 'type': 'str'}, } def __init__(self, *, invitation_id: str, **kwargs) -> None: @@ -1313,12 +1392,15 @@ def __init__(self, *, invitation_id: str, **kwargs) -> None: self.invitation_id = invitation_id self.invitation_status = None self.location = None + self.provider_email = None + self.provider_name = None + self.provider_tenant_name = None self.responded_at = None - self.sender = None - self.sender_company_name = None self.sent_at = None self.share_name = None self.terms_of_use = None + self.user_email = None + self.user_name = None class ConsumerSourceDataSet(ProxyDto): @@ -1335,12 +1417,16 @@ class ConsumerSourceDataSet(ProxyDto): :vartype type: str :ivar data_set_id: DataSet Id :vartype data_set_id: str + :ivar data_set_location: Location of the data set. + :vartype data_set_location: str :ivar data_set_name: DataSet name :vartype data_set_name: str - :ivar data_set_type: Type of dataSet. Possible values include: 'Blob', + :ivar data_set_path: DataSet path + :vartype data_set_path: str + :ivar data_set_type: Type of data set. 
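For illustration, a minimal sketch (not part of the patch): ConsumerInvitation still takes only invitation_id; the old sender/sender_company_name read-only fields give way to provider_* and user_* properties that stay None until returned by the service. The id is a placeholder.

from azure.mgmt.datashare.models import ConsumerInvitation

invitation = ConsumerInvitation(invitation_id="00000000-0000-0000-0000-000000000000")
assert invitation.provider_name is None and invitation.user_email is None  # server-populated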
Possible values include: 'Blob', 'Container', 'BlobFolder', 'AdlsGen2FileSystem', 'AdlsGen2Folder', - 'AdlsGen2File', 'AdlsGen1Folder', 'AdlsGen1File', 'SqlDBTable', - 'SqlDWTable' + 'AdlsGen2File', 'AdlsGen1Folder', 'AdlsGen1File', 'KustoCluster', + 'KustoDatabase', 'SqlDBTable', 'SqlDWTable' :vartype data_set_type: str or ~azure.mgmt.datashare.models.DataSetType """ @@ -1349,7 +1435,9 @@ class ConsumerSourceDataSet(ProxyDto): 'name': {'readonly': True}, 'type': {'readonly': True}, 'data_set_id': {'readonly': True}, + 'data_set_location': {'readonly': True}, 'data_set_name': {'readonly': True}, + 'data_set_path': {'readonly': True}, 'data_set_type': {'readonly': True}, } @@ -1358,14 +1446,18 @@ class ConsumerSourceDataSet(ProxyDto): 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'data_set_id': {'key': 'properties.dataSetId', 'type': 'str'}, + 'data_set_location': {'key': 'properties.dataSetLocation', 'type': 'str'}, 'data_set_name': {'key': 'properties.dataSetName', 'type': 'str'}, + 'data_set_path': {'key': 'properties.dataSetPath', 'type': 'str'}, 'data_set_type': {'key': 'properties.dataSetType', 'type': 'str'}, } def __init__(self, **kwargs) -> None: super(ConsumerSourceDataSet, self).__init__(**kwargs) self.data_set_id = None + self.data_set_location = None self.data_set_name = None + self.data_set_path = None self.data_set_type = None @@ -1510,8 +1602,6 @@ class Invitation(ProxyDto): ~azure.mgmt.datashare.models.InvitationStatus :ivar responded_at: The time the recipient responded to the invitation. :vartype responded_at: datetime - :ivar sender: Gets the name of the sender. - :vartype sender: str :ivar sent_at: Gets the time at which the invitation was sent. :vartype sent_at: datetime :param target_active_directory_id: The target Azure AD Id. Can't be @@ -1524,6 +1614,10 @@ class Invitation(ProxyDto): Must be specified along TargetActiveDirectoryId. This enables sending invitations to specific users or applications in an AD tenant. 
:type target_object_id: str + :ivar user_email: Email of the user who created the resource + :vartype user_email: str + :ivar user_name: Name of the user who created the resource + :vartype user_name: str """ _validation = { @@ -1533,8 +1627,9 @@ class Invitation(ProxyDto): 'invitation_id': {'readonly': True}, 'invitation_status': {'readonly': True}, 'responded_at': {'readonly': True}, - 'sender': {'readonly': True}, 'sent_at': {'readonly': True}, + 'user_email': {'readonly': True}, + 'user_name': {'readonly': True}, } _attribute_map = { @@ -1544,11 +1639,12 @@ class Invitation(ProxyDto): 'invitation_id': {'key': 'properties.invitationId', 'type': 'str'}, 'invitation_status': {'key': 'properties.invitationStatus', 'type': 'str'}, 'responded_at': {'key': 'properties.respondedAt', 'type': 'iso-8601'}, - 'sender': {'key': 'properties.sender', 'type': 'str'}, 'sent_at': {'key': 'properties.sentAt', 'type': 'iso-8601'}, 'target_active_directory_id': {'key': 'properties.targetActiveDirectoryId', 'type': 'str'}, 'target_email': {'key': 'properties.targetEmail', 'type': 'str'}, 'target_object_id': {'key': 'properties.targetObjectId', 'type': 'str'}, + 'user_email': {'key': 'properties.userEmail', 'type': 'str'}, + 'user_name': {'key': 'properties.userName', 'type': 'str'}, } def __init__(self, *, target_active_directory_id: str=None, target_email: str=None, target_object_id: str=None, **kwargs) -> None: @@ -1556,11 +1652,270 @@ def __init__(self, *, target_active_directory_id: str=None, target_email: str=No self.invitation_id = None self.invitation_status = None self.responded_at = None - self.sender = None self.sent_at = None self.target_active_directory_id = target_active_directory_id self.target_email = target_email self.target_object_id = target_object_id + self.user_email = None + self.user_name = None + + +class KustoClusterDataSet(DataSet): + """A kusto cluster data set. + + Variables are only populated by the server, and will be ignored when + sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar id: The resource id of the azure resource + :vartype id: str + :ivar name: Name of the azure resource + :vartype name: str + :ivar type: Type of the azure resource + :vartype type: str + :param kind: Required. Constant filled by server. + :type kind: str + :ivar data_set_id: Unique id for identifying a data set resource + :vartype data_set_id: str + :param kusto_cluster_resource_id: Required. Resource id of the kusto + cluster. + :type kusto_cluster_resource_id: str + :ivar location: Location of the kusto cluster. + :vartype location: str + :ivar provisioning_state: Provisioning state of the kusto cluster data + set. 
Possible values include: 'Succeeded', 'Creating', 'Deleting', + 'Moving', 'Failed' + :vartype provisioning_state: str or + ~azure.mgmt.datashare.models.ProvisioningState + """ + + _validation = { + 'id': {'readonly': True}, + 'name': {'readonly': True}, + 'type': {'readonly': True}, + 'kind': {'required': True}, + 'data_set_id': {'readonly': True}, + 'kusto_cluster_resource_id': {'required': True}, + 'location': {'readonly': True}, + 'provisioning_state': {'readonly': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'kind': {'key': 'kind', 'type': 'str'}, + 'data_set_id': {'key': 'properties.dataSetId', 'type': 'str'}, + 'kusto_cluster_resource_id': {'key': 'properties.kustoClusterResourceId', 'type': 'str'}, + 'location': {'key': 'properties.location', 'type': 'str'}, + 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, + } + + def __init__(self, *, kusto_cluster_resource_id: str, **kwargs) -> None: + super(KustoClusterDataSet, self).__init__(**kwargs) + self.data_set_id = None + self.kusto_cluster_resource_id = kusto_cluster_resource_id + self.location = None + self.provisioning_state = None + self.kind = 'KustoCluster' + + +class KustoClusterDataSetMapping(DataSetMapping): + """A Kusto cluster data set mapping. + + Variables are only populated by the server, and will be ignored when + sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar id: The resource id of the azure resource + :vartype id: str + :ivar name: Name of the azure resource + :vartype name: str + :ivar type: Type of the azure resource + :vartype type: str + :param kind: Required. Constant filled by server. + :type kind: str + :param data_set_id: Required. The id of the source data set. + :type data_set_id: str + :ivar data_set_mapping_status: Gets the status of the data set mapping. + Possible values include: 'Ok', 'Broken' + :vartype data_set_mapping_status: str or + ~azure.mgmt.datashare.models.DataSetMappingStatus + :param kusto_cluster_resource_id: Required. Resource id of the sink kusto + cluster. + :type kusto_cluster_resource_id: str + :ivar location: Location of the sink kusto cluster. + :vartype location: str + :ivar provisioning_state: Provisioning state of the data set mapping. 
+ Possible values include: 'Succeeded', 'Creating', 'Deleting', 'Moving', + 'Failed' + :vartype provisioning_state: str or + ~azure.mgmt.datashare.models.ProvisioningState + """ + + _validation = { + 'id': {'readonly': True}, + 'name': {'readonly': True}, + 'type': {'readonly': True}, + 'kind': {'required': True}, + 'data_set_id': {'required': True}, + 'data_set_mapping_status': {'readonly': True}, + 'kusto_cluster_resource_id': {'required': True}, + 'location': {'readonly': True}, + 'provisioning_state': {'readonly': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'kind': {'key': 'kind', 'type': 'str'}, + 'data_set_id': {'key': 'properties.dataSetId', 'type': 'str'}, + 'data_set_mapping_status': {'key': 'properties.dataSetMappingStatus', 'type': 'str'}, + 'kusto_cluster_resource_id': {'key': 'properties.kustoClusterResourceId', 'type': 'str'}, + 'location': {'key': 'properties.location', 'type': 'str'}, + 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, + } + + def __init__(self, *, data_set_id: str, kusto_cluster_resource_id: str, **kwargs) -> None: + super(KustoClusterDataSetMapping, self).__init__(**kwargs) + self.data_set_id = data_set_id + self.data_set_mapping_status = None + self.kusto_cluster_resource_id = kusto_cluster_resource_id + self.location = None + self.provisioning_state = None + self.kind = 'KustoCluster' + + +class KustoDatabaseDataSet(DataSet): + """A kusto database data set. + + Variables are only populated by the server, and will be ignored when + sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar id: The resource id of the azure resource + :vartype id: str + :ivar name: Name of the azure resource + :vartype name: str + :ivar type: Type of the azure resource + :vartype type: str + :param kind: Required. Constant filled by server. + :type kind: str + :ivar data_set_id: Unique id for identifying a data set resource + :vartype data_set_id: str + :param kusto_database_resource_id: Required. Resource id of the kusto + database. + :type kusto_database_resource_id: str + :ivar location: Location of the kusto cluster. + :vartype location: str + :ivar provisioning_state: Provisioning state of the kusto database data + set. 
Possible values include: 'Succeeded', 'Creating', 'Deleting', + 'Moving', 'Failed' + :vartype provisioning_state: str or + ~azure.mgmt.datashare.models.ProvisioningState + """ + + _validation = { + 'id': {'readonly': True}, + 'name': {'readonly': True}, + 'type': {'readonly': True}, + 'kind': {'required': True}, + 'data_set_id': {'readonly': True}, + 'kusto_database_resource_id': {'required': True}, + 'location': {'readonly': True}, + 'provisioning_state': {'readonly': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'kind': {'key': 'kind', 'type': 'str'}, + 'data_set_id': {'key': 'properties.dataSetId', 'type': 'str'}, + 'kusto_database_resource_id': {'key': 'properties.kustoDatabaseResourceId', 'type': 'str'}, + 'location': {'key': 'properties.location', 'type': 'str'}, + 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, + } + + def __init__(self, *, kusto_database_resource_id: str, **kwargs) -> None: + super(KustoDatabaseDataSet, self).__init__(**kwargs) + self.data_set_id = None + self.kusto_database_resource_id = kusto_database_resource_id + self.location = None + self.provisioning_state = None + self.kind = 'KustoDatabase' + + +class KustoDatabaseDataSetMapping(DataSetMapping): + """A Kusto database data set mapping. + + Variables are only populated by the server, and will be ignored when + sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar id: The resource id of the azure resource + :vartype id: str + :ivar name: Name of the azure resource + :vartype name: str + :ivar type: Type of the azure resource + :vartype type: str + :param kind: Required. Constant filled by server. + :type kind: str + :param data_set_id: Required. The id of the source data set. + :type data_set_id: str + :ivar data_set_mapping_status: Gets the status of the data set mapping. + Possible values include: 'Ok', 'Broken' + :vartype data_set_mapping_status: str or + ~azure.mgmt.datashare.models.DataSetMappingStatus + :param kusto_cluster_resource_id: Required. Resource id of the sink kusto + cluster. + :type kusto_cluster_resource_id: str + :ivar location: Location of the sink kusto cluster. + :vartype location: str + :ivar provisioning_state: Provisioning state of the data set mapping. 
+ Possible values include: 'Succeeded', 'Creating', 'Deleting', 'Moving', + 'Failed' + :vartype provisioning_state: str or + ~azure.mgmt.datashare.models.ProvisioningState + """ + + _validation = { + 'id': {'readonly': True}, + 'name': {'readonly': True}, + 'type': {'readonly': True}, + 'kind': {'required': True}, + 'data_set_id': {'required': True}, + 'data_set_mapping_status': {'readonly': True}, + 'kusto_cluster_resource_id': {'required': True}, + 'location': {'readonly': True}, + 'provisioning_state': {'readonly': True}, + } + + _attribute_map = { + 'id': {'key': 'id', 'type': 'str'}, + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'kind': {'key': 'kind', 'type': 'str'}, + 'data_set_id': {'key': 'properties.dataSetId', 'type': 'str'}, + 'data_set_mapping_status': {'key': 'properties.dataSetMappingStatus', 'type': 'str'}, + 'kusto_cluster_resource_id': {'key': 'properties.kustoClusterResourceId', 'type': 'str'}, + 'location': {'key': 'properties.location', 'type': 'str'}, + 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, + } + + def __init__(self, *, data_set_id: str, kusto_cluster_resource_id: str, **kwargs) -> None: + super(KustoDatabaseDataSetMapping, self).__init__(**kwargs) + self.data_set_id = data_set_id + self.data_set_mapping_status = None + self.kusto_cluster_resource_id = kusto_cluster_resource_id + self.location = None + self.provisioning_state = None + self.kind = 'KustoDatabase' class OperationMetaLogSpecification(Model): @@ -1771,16 +2126,23 @@ class ProviderShareSubscription(ProxyDto): :vartype name: str :ivar type: Type of the azure resource :vartype type: str - :ivar company: Company name - :vartype company: str + :ivar consumer_email: Email of the consumer who created the share + subscription + :vartype consumer_email: str + :ivar consumer_name: Name of the consumer who created the share + subscription + :vartype consumer_name: str + :ivar consumer_tenant_name: Tenant name of the consumer who created the + share subscription + :vartype consumer_tenant_name: str :ivar created_at: created at :vartype created_at: datetime - :ivar created_by: Created by - :vartype created_by: str + :ivar provider_email: Email of the provider who created the share + :vartype provider_email: str + :ivar provider_name: Name of the provider who created the share + :vartype provider_name: str :ivar shared_at: Shared at :vartype shared_at: datetime - :ivar shared_by: Shared by - :vartype shared_by: str :ivar share_subscription_object_id: share Subscription Object Id :vartype share_subscription_object_id: str :ivar share_subscription_status: Gets the status of share subscription. 
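For illustration, a minimal sketch (not part of the patch): the new Kusto mapping models take just the two required keyword-only arguments shown above. Note that KustoDatabaseDataSetMapping takes the sink cluster's resource id, not a database id, per its docstring; values are placeholders.

from azure.mgmt.datashare.models import KustoDatabaseDataSetMapping

mapping = KustoDatabaseDataSetMapping(
    data_set_id="00000000-0000-0000-0000-000000000000",  # id of the source data set
    kusto_cluster_resource_id="/subscriptions/{sub-id}/resourceGroups/{rg}/providers/Microsoft.Kusto/clusters/sample-kusto",  # placeholder
)
assert mapping.kind == "KustoDatabase"  # constant filled by the model
assert mapping.location is None  # read-only; sink cluster location, set by the service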
@@ -1793,11 +2155,13 @@ class ProviderShareSubscription(ProxyDto): 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, - 'company': {'readonly': True}, + 'consumer_email': {'readonly': True}, + 'consumer_name': {'readonly': True}, + 'consumer_tenant_name': {'readonly': True}, 'created_at': {'readonly': True}, - 'created_by': {'readonly': True}, + 'provider_email': {'readonly': True}, + 'provider_name': {'readonly': True}, 'shared_at': {'readonly': True}, - 'shared_by': {'readonly': True}, 'share_subscription_object_id': {'readonly': True}, 'share_subscription_status': {'readonly': True}, } @@ -1806,22 +2170,26 @@ class ProviderShareSubscription(ProxyDto): 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, - 'company': {'key': 'properties.company', 'type': 'str'}, + 'consumer_email': {'key': 'properties.consumerEmail', 'type': 'str'}, + 'consumer_name': {'key': 'properties.consumerName', 'type': 'str'}, + 'consumer_tenant_name': {'key': 'properties.consumerTenantName', 'type': 'str'}, 'created_at': {'key': 'properties.createdAt', 'type': 'iso-8601'}, - 'created_by': {'key': 'properties.createdBy', 'type': 'str'}, + 'provider_email': {'key': 'properties.providerEmail', 'type': 'str'}, + 'provider_name': {'key': 'properties.providerName', 'type': 'str'}, 'shared_at': {'key': 'properties.sharedAt', 'type': 'iso-8601'}, - 'shared_by': {'key': 'properties.sharedBy', 'type': 'str'}, 'share_subscription_object_id': {'key': 'properties.shareSubscriptionObjectId', 'type': 'str'}, 'share_subscription_status': {'key': 'properties.shareSubscriptionStatus', 'type': 'str'}, } def __init__(self, **kwargs) -> None: super(ProviderShareSubscription, self).__init__(**kwargs) - self.company = None + self.consumer_email = None + self.consumer_name = None + self.consumer_tenant_name = None self.created_at = None - self.created_by = None + self.provider_email = None + self.provider_name = None self.shared_at = None - self.shared_by = None self.share_subscription_object_id = None self.share_subscription_status = None @@ -1950,9 +2318,6 @@ class ScheduledSynchronizationSetting(SynchronizationSetting): :type kind: str :ivar created_at: Time at which the synchronization setting was created. :vartype created_at: datetime - :ivar created_by: Name of the user who created the synchronization - setting. - :vartype created_by: str :ivar provisioning_state: Gets or sets the provisioning state. Possible values include: 'Succeeded', 'Creating', 'Deleting', 'Moving', 'Failed' :vartype provisioning_state: str or @@ -1963,6 +2328,8 @@ class ScheduledSynchronizationSetting(SynchronizationSetting): ~azure.mgmt.datashare.models.RecurrenceInterval :param synchronization_time: Required. Synchronization time :type synchronization_time: datetime + :ivar user_name: Name of the user who created the synchronization setting. 
+ :vartype user_name: str """ _validation = { @@ -1971,10 +2338,10 @@ class ScheduledSynchronizationSetting(SynchronizationSetting): 'type': {'readonly': True}, 'kind': {'required': True}, 'created_at': {'readonly': True}, - 'created_by': {'readonly': True}, 'provisioning_state': {'readonly': True}, 'recurrence_interval': {'required': True}, 'synchronization_time': {'required': True}, + 'user_name': {'readonly': True}, } _attribute_map = { @@ -1983,19 +2350,19 @@ class ScheduledSynchronizationSetting(SynchronizationSetting): 'type': {'key': 'type', 'type': 'str'}, 'kind': {'key': 'kind', 'type': 'str'}, 'created_at': {'key': 'properties.createdAt', 'type': 'iso-8601'}, - 'created_by': {'key': 'properties.createdBy', 'type': 'str'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, 'recurrence_interval': {'key': 'properties.recurrenceInterval', 'type': 'str'}, 'synchronization_time': {'key': 'properties.synchronizationTime', 'type': 'iso-8601'}, + 'user_name': {'key': 'properties.userName', 'type': 'str'}, } def __init__(self, *, recurrence_interval, synchronization_time, **kwargs) -> None: super(ScheduledSynchronizationSetting, self).__init__(**kwargs) self.created_at = None - self.created_by = None self.provisioning_state = None self.recurrence_interval = recurrence_interval self.synchronization_time = synchronization_time + self.user_name = None self.kind = 'ScheduleBased' @@ -2062,8 +2429,6 @@ class ScheduledTrigger(Trigger): :type kind: str :ivar created_at: Time at which the trigger was created. :vartype created_at: datetime - :ivar created_by: Name of the user who created the trigger. - :vartype created_by: str :ivar provisioning_state: Gets the provisioning state. Possible values include: 'Succeeded', 'Creating', 'Deleting', 'Moving', 'Failed' :vartype provisioning_state: str or @@ -2081,6 +2446,8 @@ class ScheduledTrigger(Trigger): :ivar trigger_status: Gets the trigger state. Possible values include: 'Active', 'Inactive', 'SourceSynchronizationSettingDeleted' :vartype trigger_status: str or ~azure.mgmt.datashare.models.TriggerStatus + :ivar user_name: Name of the user who created the trigger. 
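For illustration, a minimal sketch (not part of the patch): ScheduledSynchronizationSetting keeps its two required arguments; created_by is gone and the service now reports user_name instead. The 'Day' value assumes the RecurrenceInterval enum from the published models; the timestamp is a placeholder.

from datetime import datetime, timezone
from azure.mgmt.datashare.models import ScheduledSynchronizationSetting

setting = ScheduledSynchronizationSetting(
    recurrence_interval="Day",  # assumed RecurrenceInterval value ('Hour' or 'Day')
    synchronization_time=datetime(2020, 5, 18, tzinfo=timezone.utc),  # placeholder
)
assert setting.kind == "ScheduleBased"  # constant filled by the model
assert setting.user_name is None  # read-only; populated by the service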
+ :vartype user_name: str """ _validation = { @@ -2089,11 +2456,11 @@ class ScheduledTrigger(Trigger): 'type': {'readonly': True}, 'kind': {'required': True}, 'created_at': {'readonly': True}, - 'created_by': {'readonly': True}, 'provisioning_state': {'readonly': True}, 'recurrence_interval': {'required': True}, 'synchronization_time': {'required': True}, 'trigger_status': {'readonly': True}, + 'user_name': {'readonly': True}, } _attribute_map = { @@ -2102,23 +2469,23 @@ class ScheduledTrigger(Trigger): 'type': {'key': 'type', 'type': 'str'}, 'kind': {'key': 'kind', 'type': 'str'}, 'created_at': {'key': 'properties.createdAt', 'type': 'iso-8601'}, - 'created_by': {'key': 'properties.createdBy', 'type': 'str'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, 'recurrence_interval': {'key': 'properties.recurrenceInterval', 'type': 'str'}, 'synchronization_mode': {'key': 'properties.synchronizationMode', 'type': 'str'}, 'synchronization_time': {'key': 'properties.synchronizationTime', 'type': 'iso-8601'}, 'trigger_status': {'key': 'properties.triggerStatus', 'type': 'str'}, + 'user_name': {'key': 'properties.userName', 'type': 'str'}, } def __init__(self, *, recurrence_interval, synchronization_time, synchronization_mode=None, **kwargs) -> None: super(ScheduledTrigger, self).__init__(**kwargs) self.created_at = None - self.created_by = None self.provisioning_state = None self.recurrence_interval = recurrence_interval self.synchronization_mode = synchronization_mode self.synchronization_time = synchronization_time self.trigger_status = None + self.user_name = None self.kind = 'ScheduleBased' @@ -2136,18 +2503,21 @@ class Share(ProxyDto): :vartype type: str :ivar created_at: Time at which the share was created. :vartype created_at: datetime - :ivar created_by: Name of the user who created the share. - :vartype created_by: str :param description: Share description. :type description: str :ivar provisioning_state: Gets or sets the provisioning state. Possible values include: 'Succeeded', 'Creating', 'Deleting', 'Moving', 'Failed' :vartype provisioning_state: str or ~azure.mgmt.datashare.models.ProvisioningState - :param share_kind: Share kind. Possible values include: 'CopyBased' + :param share_kind: Share kind. Possible values include: 'CopyBased', + 'InPlace' :type share_kind: str or ~azure.mgmt.datashare.models.ShareKind :param terms: Share terms. 
:type terms: str + :ivar user_email: Email of the user who created the resource + :vartype user_email: str + :ivar user_name: Name of the user who created the resource + :vartype user_name: str """ _validation = { @@ -2155,8 +2525,9 @@ class Share(ProxyDto): 'name': {'readonly': True}, 'type': {'readonly': True}, 'created_at': {'readonly': True}, - 'created_by': {'readonly': True}, 'provisioning_state': {'readonly': True}, + 'user_email': {'readonly': True}, + 'user_name': {'readonly': True}, } _attribute_map = { @@ -2164,21 +2535,23 @@ class Share(ProxyDto): 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'created_at': {'key': 'properties.createdAt', 'type': 'iso-8601'}, - 'created_by': {'key': 'properties.createdBy', 'type': 'str'}, 'description': {'key': 'properties.description', 'type': 'str'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, 'share_kind': {'key': 'properties.shareKind', 'type': 'str'}, 'terms': {'key': 'properties.terms', 'type': 'str'}, + 'user_email': {'key': 'properties.userEmail', 'type': 'str'}, + 'user_name': {'key': 'properties.userName', 'type': 'str'}, } def __init__(self, *, description: str=None, share_kind=None, terms: str=None, **kwargs) -> None: super(Share, self).__init__(**kwargs) self.created_at = None - self.created_by = None self.description = description self.provisioning_state = None self.share_kind = share_kind self.terms = terms + self.user_email = None + self.user_name = None class ShareSubscription(ProxyDto): @@ -2197,10 +2570,15 @@ class ShareSubscription(ProxyDto): :vartype type: str :ivar created_at: Time at which the share subscription was created. :vartype created_at: datetime - :ivar created_by: The user who created the share subscription. - :vartype created_by: str :param invitation_id: Required. The invitation id. :type invitation_id: str + :ivar provider_email: Email of the provider who created the resource + :vartype provider_email: str + :ivar provider_name: Name of the provider who created the resource + :vartype provider_name: str + :ivar provider_tenant_name: Tenant name of the provider who created the + resource + :vartype provider_tenant_name: str :ivar provisioning_state: Provisioning state of the share subscription. Possible values include: 'Succeeded', 'Creating', 'Deleting', 'Moving', 'Failed' @@ -2208,14 +2586,11 @@ class ShareSubscription(ProxyDto): ~azure.mgmt.datashare.models.ProvisioningState :ivar share_description: Description of share :vartype share_description: str - :ivar share_kind: Kind of share. Possible values include: 'CopyBased' + :ivar share_kind: Kind of share. Possible values include: 'CopyBased', + 'InPlace' :vartype share_kind: str or ~azure.mgmt.datashare.models.ShareKind :ivar share_name: Name of the share :vartype share_name: str - :ivar share_sender: Sender of the share - :vartype share_sender: str - :ivar share_sender_company_name: Company name of the share sender - :vartype share_sender_company_name: str :ivar share_subscription_status: Gets the current status of share subscription. Possible values include: 'Active', 'Revoked', 'SourceDeleted', 'Revoking' @@ -2223,6 +2598,12 @@ class ShareSubscription(ProxyDto): ~azure.mgmt.datashare.models.ShareSubscriptionStatus :ivar share_terms: Terms of a share :vartype share_terms: str + :param source_share_location: Required. Source share location. 
+ :type source_share_location: str + :ivar user_email: Email of the user who created the resource + :vartype user_email: str + :ivar user_name: Name of the user who created the resource + :vartype user_name: str """ _validation = { @@ -2230,16 +2611,19 @@ class ShareSubscription(ProxyDto): 'name': {'readonly': True}, 'type': {'readonly': True}, 'created_at': {'readonly': True}, - 'created_by': {'readonly': True}, 'invitation_id': {'required': True}, + 'provider_email': {'readonly': True}, + 'provider_name': {'readonly': True}, + 'provider_tenant_name': {'readonly': True}, 'provisioning_state': {'readonly': True}, 'share_description': {'readonly': True}, 'share_kind': {'readonly': True}, 'share_name': {'readonly': True}, - 'share_sender': {'readonly': True}, - 'share_sender_company_name': {'readonly': True}, 'share_subscription_status': {'readonly': True}, 'share_terms': {'readonly': True}, + 'source_share_location': {'required': True}, + 'user_email': {'readonly': True}, + 'user_name': {'readonly': True}, } _attribute_map = { @@ -2247,31 +2631,37 @@ class ShareSubscription(ProxyDto): 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'created_at': {'key': 'properties.createdAt', 'type': 'iso-8601'}, - 'created_by': {'key': 'properties.createdBy', 'type': 'str'}, 'invitation_id': {'key': 'properties.invitationId', 'type': 'str'}, + 'provider_email': {'key': 'properties.providerEmail', 'type': 'str'}, + 'provider_name': {'key': 'properties.providerName', 'type': 'str'}, + 'provider_tenant_name': {'key': 'properties.providerTenantName', 'type': 'str'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, 'share_description': {'key': 'properties.shareDescription', 'type': 'str'}, 'share_kind': {'key': 'properties.shareKind', 'type': 'str'}, 'share_name': {'key': 'properties.shareName', 'type': 'str'}, - 'share_sender': {'key': 'properties.shareSender', 'type': 'str'}, - 'share_sender_company_name': {'key': 'properties.shareSenderCompanyName', 'type': 'str'}, 'share_subscription_status': {'key': 'properties.shareSubscriptionStatus', 'type': 'str'}, 'share_terms': {'key': 'properties.shareTerms', 'type': 'str'}, + 'source_share_location': {'key': 'properties.sourceShareLocation', 'type': 'str'}, + 'user_email': {'key': 'properties.userEmail', 'type': 'str'}, + 'user_name': {'key': 'properties.userName', 'type': 'str'}, } - def __init__(self, *, invitation_id: str, **kwargs) -> None: + def __init__(self, *, invitation_id: str, source_share_location: str, **kwargs) -> None: super(ShareSubscription, self).__init__(**kwargs) self.created_at = None - self.created_by = None self.invitation_id = invitation_id + self.provider_email = None + self.provider_name = None + self.provider_tenant_name = None self.provisioning_state = None self.share_description = None self.share_kind = None self.share_name = None - self.share_sender = None - self.share_sender_company_name = None self.share_subscription_status = None self.share_terms = None + self.source_share_location = source_share_location + self.user_email = None + self.user_name = None class ShareSubscriptionSynchronization(Model): @@ -2294,6 +2684,10 @@ class ShareSubscriptionSynchronization(Model): :vartype status: str :param synchronization_id: Required. Synchronization id :type synchronization_id: str + :ivar synchronization_mode: Synchronization Mode. 
Possible values include: + 'Incremental', 'FullSync' + :vartype synchronization_mode: str or + ~azure.mgmt.datashare.models.SynchronizationMode """ _validation = { @@ -2303,6 +2697,7 @@ class ShareSubscriptionSynchronization(Model): 'start_time': {'readonly': True}, 'status': {'readonly': True}, 'synchronization_id': {'required': True}, + 'synchronization_mode': {'readonly': True}, } _attribute_map = { @@ -2312,6 +2707,7 @@ class ShareSubscriptionSynchronization(Model): 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, 'status': {'key': 'status', 'type': 'str'}, 'synchronization_id': {'key': 'synchronizationId', 'type': 'str'}, + 'synchronization_mode': {'key': 'synchronizationMode', 'type': 'str'}, } def __init__(self, *, synchronization_id: str, **kwargs) -> None: @@ -2322,54 +2718,73 @@ def __init__(self, *, synchronization_id: str, **kwargs) -> None: self.start_time = None self.status = None self.synchronization_id = synchronization_id + self.synchronization_mode = None class ShareSynchronization(Model): """A ShareSynchronization data transfer object. - :param company: Company name - :type company: str + Variables are only populated by the server, and will be ignored when + sending a request. + + :param consumer_email: Email of the user who created the synchronization + :type consumer_email: str + :param consumer_name: Name of the user who created the synchronization + :type consumer_name: str + :param consumer_tenant_name: Tenant name of the consumer who created the + synchronization + :type consumer_tenant_name: str :param duration_ms: synchronization duration :type duration_ms: int :param end_time: End time of synchronization :type end_time: datetime :param message: message of synchronization :type message: str - :param recipient: Recipient id - :type recipient: str :param start_time: start time of synchronization :type start_time: datetime :param status: Raw Status :type status: str :param synchronization_id: Synchronization id :type synchronization_id: str + :ivar synchronization_mode: Synchronization mode. 
Possible values include: + 'Incremental', 'FullSync' + :vartype synchronization_mode: str or + ~azure.mgmt.datashare.models.SynchronizationMode """ + _validation = { + 'synchronization_mode': {'readonly': True}, + } + _attribute_map = { - 'company': {'key': 'company', 'type': 'str'}, + 'consumer_email': {'key': 'consumerEmail', 'type': 'str'}, + 'consumer_name': {'key': 'consumerName', 'type': 'str'}, + 'consumer_tenant_name': {'key': 'consumerTenantName', 'type': 'str'}, 'duration_ms': {'key': 'durationMs', 'type': 'int'}, 'end_time': {'key': 'endTime', 'type': 'iso-8601'}, 'message': {'key': 'message', 'type': 'str'}, - 'recipient': {'key': 'recipient', 'type': 'str'}, 'start_time': {'key': 'startTime', 'type': 'iso-8601'}, 'status': {'key': 'status', 'type': 'str'}, 'synchronization_id': {'key': 'synchronizationId', 'type': 'str'}, + 'synchronization_mode': {'key': 'synchronizationMode', 'type': 'str'}, } - def __init__(self, *, company: str=None, duration_ms: int=None, end_time=None, message: str=None, recipient: str=None, start_time=None, status: str=None, synchronization_id: str=None, **kwargs) -> None: + def __init__(self, *, consumer_email: str=None, consumer_name: str=None, consumer_tenant_name: str=None, duration_ms: int=None, end_time=None, message: str=None, start_time=None, status: str=None, synchronization_id: str=None, **kwargs) -> None: super(ShareSynchronization, self).__init__(**kwargs) - self.company = company + self.consumer_email = consumer_email + self.consumer_name = consumer_name + self.consumer_tenant_name = consumer_tenant_name self.duration_ms = duration_ms self.end_time = end_time self.message = message - self.recipient = recipient self.start_time = start_time self.status = status self.synchronization_id = synchronization_id + self.synchronization_mode = None class SqlDBTableDataSet(DataSet): - """A SQL DB table dataset. + """A SQL DB table data set. Variables are only populated by the server, and will be ignored when sending a request. @@ -2384,13 +2799,15 @@ class SqlDBTableDataSet(DataSet): :vartype type: str :param kind: Required. Constant filled by server. :type kind: str - :param database_name: Database name of the source data set + :param database_name: Required. Database name of the source data set :type database_name: str :ivar data_set_id: Unique id for identifying a data set resource :vartype data_set_id: str - :param sql_server_resource_id: Resource id of SQL server + :param schema_name: Required. Schema of the table. Default value is dbo. + :type schema_name: str + :param sql_server_resource_id: Required. Resource id of SQL server :type sql_server_resource_id: str - :param table_name: SQL DB table name. + :param table_name: Required. SQL DB table name. 
:type table_name: str """ @@ -2399,7 +2816,11 @@ class SqlDBTableDataSet(DataSet): 'name': {'readonly': True}, 'type': {'readonly': True}, 'kind': {'required': True}, + 'database_name': {'required': True}, 'data_set_id': {'readonly': True}, + 'schema_name': {'required': True}, + 'sql_server_resource_id': {'required': True}, + 'table_name': {'required': True}, } _attribute_map = { @@ -2409,21 +2830,23 @@ class SqlDBTableDataSet(DataSet): 'kind': {'key': 'kind', 'type': 'str'}, 'database_name': {'key': 'properties.databaseName', 'type': 'str'}, 'data_set_id': {'key': 'properties.dataSetId', 'type': 'str'}, + 'schema_name': {'key': 'properties.schemaName', 'type': 'str'}, 'sql_server_resource_id': {'key': 'properties.sqlServerResourceId', 'type': 'str'}, 'table_name': {'key': 'properties.tableName', 'type': 'str'}, } - def __init__(self, *, database_name: str=None, sql_server_resource_id: str=None, table_name: str=None, **kwargs) -> None: + def __init__(self, *, database_name: str, schema_name: str, sql_server_resource_id: str, table_name: str, **kwargs) -> None: super(SqlDBTableDataSet, self).__init__(**kwargs) self.database_name = database_name self.data_set_id = None + self.schema_name = schema_name self.sql_server_resource_id = sql_server_resource_id self.table_name = table_name self.kind = 'SqlDBTable' class SqlDBTableDataSetMapping(DataSetMapping): - """A SQL DB Table dataset mapping. + """A SQL DB Table data set mapping. Variables are only populated by the server, and will be ignored when sending a request. @@ -2440,12 +2863,19 @@ class SqlDBTableDataSetMapping(DataSetMapping): :type kind: str :param database_name: Required. DatabaseName name of the sink data set :type database_name: str - :param data_set_id: Gets the id of source dataset. + :param data_set_id: Required. The id of the source data set. :type data_set_id: str - :param data_set_mapping_status: Gets the status of the dataset mapping. + :ivar data_set_mapping_status: Gets the status of the data set mapping. Possible values include: 'Ok', 'Broken' - :type data_set_mapping_status: str or + :vartype data_set_mapping_status: str or ~azure.mgmt.datashare.models.DataSetMappingStatus + :ivar provisioning_state: Provisioning state of the data set mapping. + Possible values include: 'Succeeded', 'Creating', 'Deleting', 'Moving', + 'Failed' + :vartype provisioning_state: str or + ~azure.mgmt.datashare.models.ProvisioningState + :param schema_name: Required. Schema of the table. Default value is dbo. + :type schema_name: str :param sql_server_resource_id: Required. Resource id of SQL server :type sql_server_resource_id: str :param table_name: Required. SQL DB table name. 
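To make the new model contract concrete: with the SqlDBTableDataSet changes above, all four identifying fields become keyword-only arguments with no defaults, and the added schema_name must be passed explicitly even when it is the documented default of dbo. A minimal construction sketch; the database, server, and table names below are placeholders rather than values from this patch:

    from azure.mgmt.datashare.models import SqlDBTableDataSet

    # As of this version the four arguments are keyword-only and required;
    # omitting any of them raises a TypeError at construction time.
    data_set = SqlDBTableDataSet(
        database_name="SampleDatabase",    # placeholder
        schema_name="dbo",                 # documented default, but must be supplied
        sql_server_resource_id="<ARM id of the SQL server>",
        table_name="SampleTable",
    )

data_set.data_set_id remains None locally, since the id is read-only and is only populated from a service response.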
@@ -2458,6 +2888,10 @@ class SqlDBTableDataSetMapping(DataSetMapping): 'type': {'readonly': True}, 'kind': {'required': True}, 'database_name': {'required': True}, + 'data_set_id': {'required': True}, + 'data_set_mapping_status': {'readonly': True}, + 'provisioning_state': {'readonly': True}, + 'schema_name': {'required': True}, 'sql_server_resource_id': {'required': True}, 'table_name': {'required': True}, } @@ -2470,22 +2904,26 @@ class SqlDBTableDataSetMapping(DataSetMapping): 'database_name': {'key': 'properties.databaseName', 'type': 'str'}, 'data_set_id': {'key': 'properties.dataSetId', 'type': 'str'}, 'data_set_mapping_status': {'key': 'properties.dataSetMappingStatus', 'type': 'str'}, + 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, + 'schema_name': {'key': 'properties.schemaName', 'type': 'str'}, 'sql_server_resource_id': {'key': 'properties.sqlServerResourceId', 'type': 'str'}, 'table_name': {'key': 'properties.tableName', 'type': 'str'}, } - def __init__(self, *, database_name: str, sql_server_resource_id: str, table_name: str, data_set_id: str=None, data_set_mapping_status=None, **kwargs) -> None: + def __init__(self, *, database_name: str, data_set_id: str, schema_name: str, sql_server_resource_id: str, table_name: str, **kwargs) -> None: super(SqlDBTableDataSetMapping, self).__init__(**kwargs) self.database_name = database_name self.data_set_id = data_set_id - self.data_set_mapping_status = data_set_mapping_status + self.data_set_mapping_status = None + self.provisioning_state = None + self.schema_name = schema_name self.sql_server_resource_id = sql_server_resource_id self.table_name = table_name self.kind = 'SqlDBTable' class SqlDWTableDataSet(DataSet): - """A SQL DW table dataset. + """A SQL DW table data set. Variables are only populated by the server, and will be ignored when sending a request. @@ -2502,11 +2940,14 @@ class SqlDWTableDataSet(DataSet): :type kind: str :ivar data_set_id: Unique id for identifying a data set resource :vartype data_set_id: str - :param data_warehouse_name: DataWarehouse name of the source data set + :param data_warehouse_name: Required. DataWarehouse name of the source + data set :type data_warehouse_name: str - :param sql_server_resource_id: Resource id of SQL server + :param schema_name: Required. Schema of the table. Default value is dbo. + :type schema_name: str + :param sql_server_resource_id: Required. Resource id of SQL server :type sql_server_resource_id: str - :param table_name: SQL DW table name. + :param table_name: Required. SQL DW table name. 
:type table_name: str """ @@ -2516,6 +2957,10 @@ class SqlDWTableDataSet(DataSet): 'type': {'readonly': True}, 'kind': {'required': True}, 'data_set_id': {'readonly': True}, + 'data_warehouse_name': {'required': True}, + 'schema_name': {'required': True}, + 'sql_server_resource_id': {'required': True}, + 'table_name': {'required': True}, } _attribute_map = { @@ -2525,21 +2970,23 @@ class SqlDWTableDataSet(DataSet): 'kind': {'key': 'kind', 'type': 'str'}, 'data_set_id': {'key': 'properties.dataSetId', 'type': 'str'}, 'data_warehouse_name': {'key': 'properties.dataWarehouseName', 'type': 'str'}, + 'schema_name': {'key': 'properties.schemaName', 'type': 'str'}, 'sql_server_resource_id': {'key': 'properties.sqlServerResourceId', 'type': 'str'}, 'table_name': {'key': 'properties.tableName', 'type': 'str'}, } - def __init__(self, *, data_warehouse_name: str=None, sql_server_resource_id: str=None, table_name: str=None, **kwargs) -> None: + def __init__(self, *, data_warehouse_name: str, schema_name: str, sql_server_resource_id: str, table_name: str, **kwargs) -> None: super(SqlDWTableDataSet, self).__init__(**kwargs) self.data_set_id = None self.data_warehouse_name = data_warehouse_name + self.schema_name = schema_name self.sql_server_resource_id = sql_server_resource_id self.table_name = table_name self.kind = 'SqlDWTable' class SqlDWTableDataSetMapping(DataSetMapping): - """A SQL DW Table dataset mapping. + """A SQL DW Table data set mapping. Variables are only populated by the server, and will be ignored when sending a request. @@ -2554,15 +3001,22 @@ class SqlDWTableDataSetMapping(DataSetMapping): :vartype type: str :param kind: Required. Constant filled by server. :type kind: str - :param data_set_id: Gets the id of source dataset. + :param data_set_id: Required. The id of the source data set. :type data_set_id: str - :param data_set_mapping_status: Gets the status of the dataset mapping. + :ivar data_set_mapping_status: Gets the status of the data set mapping. Possible values include: 'Ok', 'Broken' - :type data_set_mapping_status: str or + :vartype data_set_mapping_status: str or ~azure.mgmt.datashare.models.DataSetMappingStatus :param data_warehouse_name: Required. DataWarehouse name of the source data set :type data_warehouse_name: str + :ivar provisioning_state: Provisioning state of the data set mapping. + Possible values include: 'Succeeded', 'Creating', 'Deleting', 'Moving', + 'Failed' + :vartype provisioning_state: str or + ~azure.mgmt.datashare.models.ProvisioningState + :param schema_name: Required. Schema of the table. Default value is dbo. + :type schema_name: str :param sql_server_resource_id: Required. Resource id of SQL server :type sql_server_resource_id: str :param table_name: Required. SQL DW table name. 
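The mapping models tightened in the same way: per the docstring above (and the validation and __init__ hunks that follow), data_set_id is now a required input, while data_set_mapping_status and provisioning_state become read-only, server-populated fields. A hedged sketch of a consumer-side mapping; every value is a placeholder:

    from azure.mgmt.datashare.models import SqlDWTableDataSetMapping

    mapping = SqlDWTableDataSetMapping(
        data_set_id="<id of the provider's data set>",  # required as of this version
        data_warehouse_name="ConsumerDW",               # placeholder
        schema_name="dbo",
        sql_server_resource_id="<ARM id of the consumer's SQL server>",
        table_name="ConsumerTable",
    )

    # The status fields can no longer be set by the caller; they start as
    # None and are filled in only when deserializing a service response.
    assert mapping.data_set_mapping_status is None
    assert mapping.provisioning_state is None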
@@ -2574,7 +3028,11 @@ class SqlDWTableDataSetMapping(DataSetMapping): 'name': {'readonly': True}, 'type': {'readonly': True}, 'kind': {'required': True}, + 'data_set_id': {'required': True}, + 'data_set_mapping_status': {'readonly': True}, 'data_warehouse_name': {'required': True}, + 'provisioning_state': {'readonly': True}, + 'schema_name': {'required': True}, 'sql_server_resource_id': {'required': True}, 'table_name': {'required': True}, } @@ -2587,60 +3045,64 @@ class SqlDWTableDataSetMapping(DataSetMapping): 'data_set_id': {'key': 'properties.dataSetId', 'type': 'str'}, 'data_set_mapping_status': {'key': 'properties.dataSetMappingStatus', 'type': 'str'}, 'data_warehouse_name': {'key': 'properties.dataWarehouseName', 'type': 'str'}, + 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, + 'schema_name': {'key': 'properties.schemaName', 'type': 'str'}, 'sql_server_resource_id': {'key': 'properties.sqlServerResourceId', 'type': 'str'}, 'table_name': {'key': 'properties.tableName', 'type': 'str'}, } - def __init__(self, *, data_warehouse_name: str, sql_server_resource_id: str, table_name: str, data_set_id: str=None, data_set_mapping_status=None, **kwargs) -> None: + def __init__(self, *, data_set_id: str, data_warehouse_name: str, schema_name: str, sql_server_resource_id: str, table_name: str, **kwargs) -> None: super(SqlDWTableDataSetMapping, self).__init__(**kwargs) self.data_set_id = data_set_id - self.data_set_mapping_status = data_set_mapping_status + self.data_set_mapping_status = None self.data_warehouse_name = data_warehouse_name + self.provisioning_state = None + self.schema_name = schema_name self.sql_server_resource_id = sql_server_resource_id self.table_name = table_name self.kind = 'SqlDWTable' class SynchronizationDetails(Model): - """Synchronization details at dataset level. + """Synchronization details at data set level. Variables are only populated by the server, and will be ignored when sending a request. - :ivar data_set_id: id of dataSet + :ivar data_set_id: Id of data set :vartype data_set_id: str - :ivar data_set_type: type of DataSet. Possible values include: 'Blob', - 'Container', 'BlobFolder', 'AdlsGen2FileSystem', 'AdlsGen2Folder', - 'AdlsGen2File', 'AdlsGen1Folder', 'AdlsGen1File', 'SqlDBTable', - 'SqlDWTable' + :ivar data_set_type: Type of the data set. Possible values include: + 'Blob', 'Container', 'BlobFolder', 'AdlsGen2FileSystem', 'AdlsGen2Folder', + 'AdlsGen2File', 'AdlsGen1Folder', 'AdlsGen1File', 'KustoCluster', + 'KustoDatabase', 'SqlDBTable', 'SqlDWTable' :vartype data_set_type: str or ~azure.mgmt.datashare.models.DataSetType - :ivar duration_ms: duration of dataset level copy + :ivar duration_ms: Duration of data set level copy :vartype duration_ms: int - :ivar end_time: End time of dataset level copy + :ivar end_time: End time of data set level copy :vartype end_time: datetime - :ivar files_read: The number of files read from the source dataset. + :ivar files_read: The number of files read from the source data set :vartype files_read: long - :ivar files_written: The number of files written into the sink dataset. + :ivar files_written: The number of files written into the sink data set :vartype files_written: long - :ivar message: Error Message if any + :ivar message: Error message if any :vartype message: str - :ivar name: name of dataSet + :ivar name: Name of the data set :vartype name: str - :ivar rows_copied: The number of files copied into the sink dataset. 
+ :ivar rows_copied: The number of rows copied into the sink data set :vartype rows_copied: long - :ivar rows_read: The number of rows read from the source dataset. + :ivar rows_read: The number of rows read from the source data set. :vartype rows_read: long - :ivar size_read: The size of the data read from the source dataset in - bytes. + :ivar size_read: The size of the data read from the source data set in + bytes :vartype size_read: long - :ivar size_written: The size of the data written into the sink dataset in - bytes. + :ivar size_written: The size of the data written into the sink data set in + bytes :vartype size_written: long - :ivar start_time: start time of dataset level copy + :ivar start_time: Start time of data set level copy :vartype start_time: datetime :ivar status: Raw Status :vartype status: str - :ivar v_core: The vCore units consumed for the dataset synchronization. + :ivar v_core: The vCore units consumed for the data set synchronization :vartype v_core: long """ diff --git a/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/models/_paged_models.py b/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/models/_paged_models.py index 25b47dc1b6d7..ade4b8a64811 100644 --- a/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/models/_paged_models.py +++ b/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/models/_paged_models.py @@ -142,45 +142,45 @@ class ProviderShareSubscriptionPaged(Paged): def __init__(self, *args, **kwargs): super(ProviderShareSubscriptionPaged, self).__init__(*args, **kwargs) -class SourceShareSynchronizationSettingPaged(Paged): +class ShareSubscriptionPaged(Paged): """ - A paging container for iterating over a list of :class:`SourceShareSynchronizationSetting ` object + A paging container for iterating over a list of :class:`ShareSubscription ` object """ _attribute_map = { 'next_link': {'key': 'nextLink', 'type': 'str'}, - 'current_page': {'key': 'value', 'type': '[SourceShareSynchronizationSetting]'} + 'current_page': {'key': 'value', 'type': '[ShareSubscription]'} } def __init__(self, *args, **kwargs): - super(SourceShareSynchronizationSettingPaged, self).__init__(*args, **kwargs) -class ShareSubscriptionSynchronizationPaged(Paged): + super(ShareSubscriptionPaged, self).__init__(*args, **kwargs) +class SourceShareSynchronizationSettingPaged(Paged): """ - A paging container for iterating over a list of :class:`ShareSubscriptionSynchronization ` object + A paging container for iterating over a list of :class:`SourceShareSynchronizationSetting ` object """ _attribute_map = { 'next_link': {'key': 'nextLink', 'type': 'str'}, - 'current_page': {'key': 'value', 'type': '[ShareSubscriptionSynchronization]'} + 'current_page': {'key': 'value', 'type': '[SourceShareSynchronizationSetting]'} } def __init__(self, *args, **kwargs): - super(ShareSubscriptionSynchronizationPaged, self).__init__(*args, **kwargs) -class ShareSubscriptionPaged(Paged): + super(SourceShareSynchronizationSettingPaged, self).__init__(*args, **kwargs) +class ShareSubscriptionSynchronizationPaged(Paged): """ - A paging container for iterating over a list of :class:`ShareSubscription ` object + A paging container for iterating over a list of :class:`ShareSubscriptionSynchronization ` object """ _attribute_map = { 'next_link': {'key': 'nextLink', 'type': 'str'}, - 'current_page': {'key': 'value', 'type': '[ShareSubscription]'} + 'current_page': {'key': 'value', 'type': '[ShareSubscriptionSynchronization]'} } def __init__(self, *args, **kwargs): -
super(ShareSubscriptionPaged, self).__init__(*args, **kwargs) + super(ShareSubscriptionSynchronizationPaged, self).__init__(*args, **kwargs) class ConsumerSourceDataSetPaged(Paged): """ A paging container for iterating over a list of :class:`ConsumerSourceDataSet ` object diff --git a/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/operations/_accounts_operations.py b/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/operations/_accounts_operations.py index 041eeb9f054d..247999b1aad7 100644 --- a/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/operations/_accounts_operations.py +++ b/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/operations/_accounts_operations.py @@ -26,7 +26,7 @@ class AccountsOperations(object): :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. - :ivar api_version: The api version to use. Constant value: "2018-11-01-preview". + :ivar api_version: The api version to use. Constant value: "2019-11-01". """ models = models @@ -36,7 +36,7 @@ def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer - self.api_version = "2018-11-01-preview" + self.api_version = "2019-11-01" self.config = config diff --git a/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/operations/_consumer_invitations_operations.py b/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/operations/_consumer_invitations_operations.py index c0c6d93723e7..babbd197d2b7 100644 --- a/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/operations/_consumer_invitations_operations.py +++ b/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/operations/_consumer_invitations_operations.py @@ -24,7 +24,7 @@ class ConsumerInvitationsOperations(object): :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. - :ivar api_version: The api version to use. Constant value: "2018-11-01-preview". + :ivar api_version: The api version to use. Constant value: "2019-11-01". """ models = models @@ -34,7 +34,7 @@ def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer - self.api_version = "2018-11-01-preview" + self.api_version = "2019-11-01" self.config = config diff --git a/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/operations/_consumer_source_data_sets_operations.py b/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/operations/_consumer_source_data_sets_operations.py index 2cb4202b633c..e88759d10625 100644 --- a/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/operations/_consumer_source_data_sets_operations.py +++ b/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/operations/_consumer_source_data_sets_operations.py @@ -24,7 +24,7 @@ class ConsumerSourceDataSetsOperations(object): :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. - :ivar api_version: The api version to use. Constant value: "2018-11-01-preview". + :ivar api_version: The api version to use. Constant value: "2019-11-01". 
""" models = models @@ -34,7 +34,7 @@ def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer - self.api_version = "2018-11-01-preview" + self.api_version = "2019-11-01" self.config = config diff --git a/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/operations/_data_set_mappings_operations.py b/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/operations/_data_set_mappings_operations.py index bd36b6b4ae08..c8cd5cf2906f 100644 --- a/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/operations/_data_set_mappings_operations.py +++ b/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/operations/_data_set_mappings_operations.py @@ -24,7 +24,7 @@ class DataSetMappingsOperations(object): :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. - :ivar api_version: The api version to use. Constant value: "2018-11-01-preview". + :ivar api_version: The api version to use. Constant value: "2019-11-01". """ models = models @@ -34,7 +34,7 @@ def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer - self.api_version = "2018-11-01-preview" + self.api_version = "2019-11-01" self.config = config @@ -121,8 +121,8 @@ def create( :param share_subscription_name: The name of the share subscription which will hold the data set sink. :type share_subscription_name: str - :param data_set_mapping_name: The Id of the source data set being - mapped. + :param data_set_mapping_name: The name of the data set mapping to be + created. :type data_set_mapping_name: str :param data_set_mapping: Destination data set configuration details. :type data_set_mapping: ~azure.mgmt.datashare.models.DataSetMapping diff --git a/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/operations/_data_sets_operations.py b/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/operations/_data_sets_operations.py index c2be6f2ce06f..df50352d19d3 100644 --- a/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/operations/_data_sets_operations.py +++ b/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/operations/_data_sets_operations.py @@ -11,6 +11,8 @@ import uuid from msrest.pipeline import ClientRawResponse +from msrest.polling import LROPoller, NoPolling +from msrestazure.polling.arm_polling import ARMPolling from .. import models @@ -24,7 +26,7 @@ class DataSetsOperations(object): :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. - :ivar api_version: The api version to use. Constant value: "2018-11-01-preview". + :ivar api_version: The api version to use. Constant value: "2019-11-01". """ models = models @@ -34,7 +36,7 @@ def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer - self.api_version = "2018-11-01-preview" + self.api_version = "2019-11-01" self.config = config @@ -108,7 +110,7 @@ def get( def create( self, resource_group_name, account_name, share_name, data_set_name, data_set, custom_headers=None, raw=False, **operation_config): - """Adds a new data set to an existing share or updates it if existing. + """Adds a new data set to an existing share. Create a DataSet . 
@@ -182,30 +184,9 @@ def create( return deserialized create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataShare/accounts/{accountName}/shares/{shareName}/dataSets/{dataSetName}'} - def delete( - self, resource_group_name, account_name, share_name, data_set_name, custom_headers=None, raw=False, **operation_config): - """Delete DataSet in a share. - - Delete a DataSet in a share. - :param resource_group_name: The resource group name. - :type resource_group_name: str - :param account_name: The name of the share account. - :type account_name: str - :param share_name: The name of the share. - :type share_name: str - :param data_set_name: The name of the dataSet. - :type data_set_name: str - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse - :raises: - :class:`DataShareErrorException` - """ + def _delete_initial( + self, resource_group_name, account_name, share_name, data_set_name, custom_headers=None, raw=False, **operation_config): # Construct URL url = self.delete.metadata['url'] path_format_arguments = { @@ -234,12 +215,61 @@ def delete( request = self._client.delete(url, query_parameters, header_parameters) response = self._client.send(request, stream=False, **operation_config) - if response.status_code not in [200, 204]: + if response.status_code not in [200, 202, 204]: raise models.DataShareErrorException(self._deserialize, response) if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + + def delete( + self, resource_group_name, account_name, share_name, data_set_name, custom_headers=None, raw=False, polling=True, **operation_config): + """Delete DataSet in a share. + + Delete a DataSet in a share. + + :param resource_group_name: The resource group name. + :type resource_group_name: str + :param account_name: The name of the share account. + :type account_name: str + :param share_name: The name of the share. + :type share_name: str + :param data_set_name: The name of the dataSet. 
+ :type data_set_name: str + :param dict custom_headers: headers that will be added to the request + :param bool raw: The poller return type is ClientRawResponse, the + direct response alongside the deserialized response + :param polling: True for ARMPolling, False for no polling, or a + polling object for personal polling strategy + :return: An instance of LROPoller that returns None or + ClientRawResponse if raw==True + :rtype: ~msrest.polling.LROPoller[None] or + ~msrest.polling.LROPoller[~msrest.pipeline.ClientRawResponse[None]] + :raises: + :class:`DataShareErrorException` + """ + raw_result = self._delete_initial( + resource_group_name=resource_group_name, + account_name=account_name, + share_name=share_name, + data_set_name=data_set_name, + custom_headers=custom_headers, + raw=True, + **operation_config + ) + + def get_long_running_output(response): + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + + lro_delay = operation_config.get( + 'long_running_operation_timeout', + self.config.long_running_operation_timeout) + if polling is True: polling_method = ARMPolling(lro_delay, **operation_config) + elif polling is False: polling_method = NoPolling() + else: polling_method = polling + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataShare/accounts/{accountName}/shares/{shareName}/dataSets/{dataSetName}'} def list_by_share( diff --git a/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/operations/_invitations_operations.py b/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/operations/_invitations_operations.py index ad724a72d10e..19d8707745e6 100644 --- a/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/operations/_invitations_operations.py +++ b/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/operations/_invitations_operations.py @@ -24,7 +24,7 @@ class InvitationsOperations(object): :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. - :ivar api_version: The api version to use. Constant value: "2018-11-01-preview". + :ivar api_version: The api version to use. Constant value: "2019-11-01". """ models = models @@ -34,7 +34,7 @@ def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer - self.api_version = "2018-11-01-preview" + self.api_version = "2019-11-01" self.config = config diff --git a/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/operations/_operations.py b/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/operations/_operations.py index 6e1c462d5f96..0adc6b8c25e2 100644 --- a/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/operations/_operations.py +++ b/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/operations/_operations.py @@ -24,7 +24,7 @@ class Operations(object): :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. - :ivar api_version: The api version to use. Constant value: "2018-11-01-preview". + :ivar api_version: The api version to use. Constant value: "2019-11-01".
""" models = models @@ -34,7 +34,7 @@ def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer - self.api_version = "2018-11-01-preview" + self.api_version = "2019-11-01" self.config = config diff --git a/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/operations/_provider_share_subscriptions_operations.py b/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/operations/_provider_share_subscriptions_operations.py index 974f567e0f9b..e7ee3aec5a69 100644 --- a/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/operations/_provider_share_subscriptions_operations.py +++ b/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/operations/_provider_share_subscriptions_operations.py @@ -26,7 +26,7 @@ class ProviderShareSubscriptionsOperations(object): :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. - :ivar api_version: The api version to use. Constant value: "2018-11-01-preview". + :ivar api_version: The api version to use. Constant value: "2019-11-01". """ models = models @@ -36,7 +36,7 @@ def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer - self.api_version = "2018-11-01-preview" + self.api_version = "2019-11-01" self.config = config diff --git a/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/operations/_share_subscriptions_operations.py b/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/operations/_share_subscriptions_operations.py index 20d62b7af51b..c78cc7dab0c3 100644 --- a/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/operations/_share_subscriptions_operations.py +++ b/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/operations/_share_subscriptions_operations.py @@ -26,7 +26,7 @@ class ShareSubscriptionsOperations(object): :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. - :ivar api_version: The api version to use. Constant value: "2018-11-01-preview". + :ivar api_version: The api version to use. Constant value: "2019-11-01". """ models = models @@ -36,7 +36,7 @@ def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer - self.api_version = "2018-11-01-preview" + self.api_version = "2019-11-01" self.config = config @@ -106,7 +106,7 @@ def get( get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataShare/accounts/{accountName}/shareSubscriptions/{shareSubscriptionName}'} def create( - self, resource_group_name, account_name, share_subscription_name, invitation_id, custom_headers=None, raw=False, **operation_config): + self, resource_group_name, account_name, share_subscription_name, invitation_id, source_share_location, custom_headers=None, raw=False, **operation_config): """Create shareSubscription in an account. Create a shareSubscription in an account. @@ -119,6 +119,8 @@ def create( :type share_subscription_name: str :param invitation_id: The invitation id. :type invitation_id: str + :param source_share_location: Source share location. 
+ :type source_share_location: str :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response @@ -130,7 +132,7 @@ def create( :raises: :class:`DataShareErrorException` """ - share_subscription = models.ShareSubscription(invitation_id=invitation_id) + share_subscription = models.ShareSubscription(invitation_id=invitation_id, source_share_location=source_share_location) # Construct URL url = self.create.metadata['url'] @@ -278,6 +280,83 @@ def get_long_running_output(response): return LROPoller(self._client, raw_result, get_long_running_output, polling_method) delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataShare/accounts/{accountName}/shareSubscriptions/{shareSubscriptionName}'} + def list_by_account( + self, resource_group_name, account_name, skip_token=None, custom_headers=None, raw=False, **operation_config): + """List of available share subscriptions under an account. + + List share subscriptions in an account. + + :param resource_group_name: The resource group name. + :type resource_group_name: str + :param account_name: The name of the share account. + :type account_name: str + :param skip_token: Continuation Token + :type skip_token: str + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: An iterator like instance of ShareSubscription + :rtype: + ~azure.mgmt.datashare.models.ShareSubscriptionPaged[~azure.mgmt.datashare.models.ShareSubscription] + :raises: + :class:`DataShareErrorException` + """ + def prepare_request(next_link=None): + if not next_link: + # Construct URL + url = self.list_by_account.metadata['url'] + path_format_arguments = { + 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'), + 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), + 'accountName': self._serialize.url("account_name", account_name, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') + if skip_token is not None: + query_parameters['$skipToken'] = self._serialize.query("skip_token", skip_token, 'str') + + else: + url = next_link + query_parameters = {} + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if self.config.generate_client_request_id: + header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) + if custom_headers: + header_parameters.update(custom_headers) + if self.config.accept_language is not None: + header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + return request + + def internal_paging(next_link=None): + request = prepare_request(next_link) + + response = self._client.send(request, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.DataShareErrorException(self._deserialize, response) + + return response + + # Deserialize response + header_dict = None + if raw: + header_dict = {} + deserialized = 
models.ShareSubscriptionPaged(internal_paging, self._deserialize.dependencies, header_dict) + + return deserialized + list_by_account.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataShare/accounts/{accountName}/shareSubscriptions'} + def list_source_share_synchronization_settings( self, resource_group_name, account_name, share_subscription_name, skip_token=None, custom_headers=None, raw=False, **operation_config): """Get source share synchronization settings for a shareSubscription. @@ -749,80 +828,3 @@ def get_long_running_output(response): else: polling_method = polling return LROPoller(self._client, raw_result, get_long_running_output, polling_method) cancel_synchronization.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataShare/accounts/{accountName}/shareSubscriptions/{shareSubscriptionName}/cancelSynchronization'} - - def list_by_account( - self, resource_group_name, account_name, skip_token=None, custom_headers=None, raw=False, **operation_config): - """List of available share subscriptions under an account. - - List share subscriptions in an account. - - :param resource_group_name: The resource group name. - :type resource_group_name: str - :param account_name: The name of the share account. - :type account_name: str - :param skip_token: Continuation Token - :type skip_token: str - :param dict custom_headers: headers that will be added to the request - :param bool raw: returns the direct response alongside the - deserialized response - :param operation_config: :ref:`Operation configuration - overrides`. - :return: An iterator like instance of ShareSubscription - :rtype: - ~azure.mgmt.datashare.models.ShareSubscriptionPaged[~azure.mgmt.datashare.models.ShareSubscription] - :raises: - :class:`DataShareErrorException` - """ - def prepare_request(next_link=None): - if not next_link: - # Construct URL - url = self.list_by_account.metadata['url'] - path_format_arguments = { - 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'), - 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), - 'accountName': self._serialize.url("account_name", account_name, 'str') - } - url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} - query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') - if skip_token is not None: - query_parameters['$skipToken'] = self._serialize.query("skip_token", skip_token, 'str') - - else: - url = next_link - query_parameters = {} - - # Construct headers - header_parameters = {} - header_parameters['Accept'] = 'application/json' - if self.config.generate_client_request_id: - header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) - if custom_headers: - header_parameters.update(custom_headers) - if self.config.accept_language is not None: - header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') - - # Construct and send request - request = self._client.get(url, query_parameters, header_parameters) - return request - - def internal_paging(next_link=None): - request = prepare_request(next_link) - - response = self._client.send(request, stream=False, **operation_config) - - if response.status_code not in [200]: - raise models.DataShareErrorException(self._deserialize, response) - - return 
response - - # Deserialize response - header_dict = None - if raw: - header_dict = {} - deserialized = models.ShareSubscriptionPaged(internal_paging, self._deserialize.dependencies, header_dict) - - return deserialized - list_by_account.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataShare/accounts/{accountName}/shareSubscriptions'} diff --git a/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/operations/_shares_operations.py b/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/operations/_shares_operations.py index dd320f8c8334..4d00b63fb7ad 100644 --- a/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/operations/_shares_operations.py +++ b/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/operations/_shares_operations.py @@ -26,7 +26,7 @@ class SharesOperations(object): :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. - :ivar api_version: The api version to use. Constant value: "2018-11-01-preview". + :ivar api_version: The api version to use. Constant value: "2019-11-01". """ models = models @@ -36,7 +36,7 @@ def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer - self.api_version = "2018-11-01-preview" + self.api_version = "2019-11-01" self.config = config diff --git a/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/operations/_synchronization_settings_operations.py b/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/operations/_synchronization_settings_operations.py index d7ebbb9438d4..460a240b6bd5 100644 --- a/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/operations/_synchronization_settings_operations.py +++ b/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/operations/_synchronization_settings_operations.py @@ -26,7 +26,7 @@ class SynchronizationSettingsOperations(object): :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. - :ivar api_version: The api version to use. Constant value: "2018-11-01-preview". + :ivar api_version: The api version to use. Constant value: "2019-11-01". """ models = models @@ -36,7 +36,7 @@ def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer - self.api_version = "2018-11-01-preview" + self.api_version = "2019-11-01" self.config = config @@ -111,8 +111,7 @@ def get( def create( self, resource_group_name, account_name, share_name, synchronization_setting_name, synchronization_setting, custom_headers=None, raw=False, **operation_config): - """Adds a new synchronization setting to an existing share or updates it - if existing. + """Adds a new synchronization setting to an existing share. Create or update a synchronizationSetting . diff --git a/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/operations/_triggers_operations.py b/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/operations/_triggers_operations.py index 1c0b64e563e0..f9e8118cb828 100644 --- a/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/operations/_triggers_operations.py +++ b/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/operations/_triggers_operations.py @@ -26,7 +26,7 @@ class TriggersOperations(object): :param config: Configuration of service client. 
:param serializer: An object model serializer. :param deserializer: An object model deserializer. - :ivar api_version: The api version to use. Constant value: "2018-11-01-preview". + :ivar api_version: The api version to use. Constant value: "2019-11-01". """ models = models @@ -36,7 +36,7 @@ def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer - self.api_version = "2018-11-01-preview" + self.api_version = "2019-11-01" self.config = config diff --git a/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/version.py b/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/version.py index 3266861fd6b3..c995f7836cef 100644 --- a/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/version.py +++ b/sdk/datashare/azure-mgmt-datashare/azure/mgmt/datashare/version.py @@ -9,4 +9,4 @@ # regenerated. # -------------------------------------------------------------------------- -VERSION = "0.1.0rc1" +VERSION = "0.2.0" diff --git a/sdk/datashare/azure-mgmt-datashare/setup.py b/sdk/datashare/azure-mgmt-datashare/setup.py index fc81c7556c2e..2b1786cda2b6 100644 --- a/sdk/datashare/azure-mgmt-datashare/setup.py +++ b/sdk/datashare/azure-mgmt-datashare/setup.py @@ -36,7 +36,9 @@ pass # Version extraction inspired from 'requests' -with open(os.path.join(package_folder_path, 'version.py'), 'r') as fd: +with open(os.path.join(package_folder_path, 'version.py') + if os.path.exists(os.path.join(package_folder_path, 'version.py')) + else os.path.join(package_folder_path, '_version.py'), 'r') as fd: version = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]', fd.read(), re.MULTILINE).group(1) @@ -67,6 +69,7 @@ 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', 'License :: OSI Approved :: MIT License', ], zip_safe=False, From 197c0c57178daf9decebb5ca8ba7dd6987ea6bfe Mon Sep 17 00:00:00 2001 From: Krista Pratico Date: Wed, 20 May 2020 08:39:47 -0700 Subject: [PATCH 19/28] update tests (#11534) --- .../azure-ai-formrecognizer/tests/test_receipt.py | 3 +-- .../azure-ai-formrecognizer/tests/test_receipt_async.py | 3 +-- .../azure-ai-formrecognizer/tests/test_receipt_from_url.py | 3 +-- .../tests/test_receipt_from_url_async.py | 3 +-- 4 files changed, 4 insertions(+), 8 deletions(-) diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_receipt.py b/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_receipt.py index 5273ddb654bc..84e8f5131a53 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_receipt.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_receipt.py @@ -344,8 +344,7 @@ def test_receipt_multipage(self, resource_group, location, form_recognizer_accou self.assertEqual(receipt.merchant_name.value, 'Bilbo Baggins') self.assertEqual(receipt.merchant_phone_number.value, '+15555555555') self.assertEqual(receipt.subtotal.value, 300.0) - # TODO: revert after service side fix - self.assertIsNotNone(receipt.total.value) + self.assertEqual(receipt.total.value, 100.0) self.assertEqual(receipt.page_range.first_page, 1) self.assertEqual(receipt.page_range.last_page, 1) self.assertFormPagesHasValues(receipt.pages) diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_receipt_async.py b/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_receipt_async.py index f66a582a1ef9..58dd1c85c8f4 100644 --- 
a/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_receipt_async.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_receipt_async.py @@ -338,8 +338,7 @@ async def test_receipt_multipage(self, resource_group, location, form_recognizer self.assertEqual(receipt.merchant_name.value, 'Bilbo Baggins') self.assertEqual(receipt.merchant_phone_number.value, '+15555555555') self.assertEqual(receipt.subtotal.value, 300.0) - # TODO: revert after service side fix - self.assertIsNotNone(receipt.total.value) + self.assertEqual(receipt.total.value, 100.0) self.assertEqual(receipt.page_range.first_page, 1) self.assertEqual(receipt.page_range.last_page, 1) self.assertFormPagesHasValues(receipt.pages) diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_receipt_from_url.py b/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_receipt_from_url.py index 241715bd34ad..d8cbed2bce51 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_receipt_from_url.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_receipt_from_url.py @@ -263,8 +263,7 @@ def test_receipt_multipage_url(self, resource_group, location, form_recognizer_a self.assertEqual(receipt.merchant_name.value, 'Bilbo Baggins') self.assertEqual(receipt.merchant_phone_number.value, '+15555555555') self.assertEqual(receipt.subtotal.value, 300.0) - # TODO: revert after service side fix - self.assertIsNotNone(receipt.total.value) + self.assertEqual(receipt.total.value, 100.0) self.assertEqual(receipt.page_range.first_page, 1) self.assertEqual(receipt.page_range.last_page, 1) self.assertFormPagesHasValues(receipt.pages) diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_receipt_from_url_async.py b/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_receipt_from_url_async.py index 69530227aa9e..022573333ecc 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_receipt_from_url_async.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_receipt_from_url_async.py @@ -269,8 +269,7 @@ async def test_receipt_multipage_url(self, resource_group, location, form_recogn self.assertEqual(receipt.merchant_name.value, 'Bilbo Baggins') self.assertEqual(receipt.merchant_phone_number.value, '+15555555555') self.assertEqual(receipt.subtotal.value, 300.0) - # TODO: revert after service side fix - self.assertIsNotNone(receipt.total.value) + self.assertEqual(receipt.total.value, 100.0) self.assertEqual(receipt.page_range.first_page, 1) self.assertEqual(receipt.page_range.last_page, 1) self.assertFormPagesHasValues(receipt.pages) From 1ff82495248fd865967860ad2e8c1647713d8f9d Mon Sep 17 00:00:00 2001 From: Charles Lowell Date: Wed, 20 May 2020 08:52:44 -0700 Subject: [PATCH 20/28] Prevent Key Vault test failure due to operation timing (#11552) --- .../tests/test_certificates_client.py | 14 ++++++++------ .../tests/test_certificates_client_async.py | 14 ++++++++------ 2 files changed, 16 insertions(+), 12 deletions(-) diff --git a/sdk/keyvault/azure-keyvault-certificates/tests/test_certificates_client.py b/sdk/keyvault/azure-keyvault-certificates/tests/test_certificates_client.py index e87772695944..061911fbdd8c 100644 --- a/sdk/keyvault/azure-keyvault-certificates/tests/test_certificates_client.py +++ b/sdk/keyvault/azure-keyvault-certificates/tests/test_certificates_client.py @@ -208,13 +208,15 @@ def test_crud_operations(self, client, **kwargs): cert = client.get_certificate(certificate_name=cert_name) self._validate_certificate_bundle(cert=cert, cert_name=cert_name, 
cert_policy=cert_policy) - # update certificate + # update certificate, ensuring the new updated_on value is at least one second later than the original + if self.is_live: + time.sleep(1) tags = {"tag1": "updated_value1"} - cert_bundle = client.update_certificate_properties(cert_name, tags=tags) - self._validate_certificate_bundle(cert=cert_bundle, cert_name=cert_name, cert_policy=cert_policy) - self.assertEqual(tags, cert_bundle.properties.tags) - self.assertEqual(cert.id, cert_bundle.id) - self.assertNotEqual(cert.properties.updated_on, cert_bundle.properties.updated_on) + updated_cert = client.update_certificate_properties(cert_name, tags=tags) + self._validate_certificate_bundle(cert=updated_cert, cert_name=cert_name, cert_policy=cert_policy) + self.assertEqual(tags, updated_cert.properties.tags) + self.assertEqual(cert.id, updated_cert.id) + self.assertNotEqual(cert.properties.updated_on, updated_cert.properties.updated_on) # delete certificate delete_cert_poller = client.begin_delete_certificate(cert_name) diff --git a/sdk/keyvault/azure-keyvault-certificates/tests/test_certificates_client_async.py b/sdk/keyvault/azure-keyvault-certificates/tests/test_certificates_client_async.py index 92512130ad06..4b074f5fa8a0 100644 --- a/sdk/keyvault/azure-keyvault-certificates/tests/test_certificates_client_async.py +++ b/sdk/keyvault/azure-keyvault-certificates/tests/test_certificates_client_async.py @@ -205,13 +205,15 @@ async def test_crud_operations(self, client, **kwargs): cert = await client.get_certificate(certificate_name=cert_name) self._validate_certificate_bundle(cert=cert, cert_name=cert_name, cert_policy=cert_policy) - # update certificate + # update certificate, ensuring the new updated_on value is at least one second later than the original + if self.is_live: + await asyncio.sleep(1) tags = {"tag1": "updated_value1"} - cert_bundle = await client.update_certificate_properties(cert_name, tags=tags) - self._validate_certificate_bundle(cert=cert_bundle, cert_name=cert_name, cert_policy=cert_policy) - self.assertEqual(tags, cert_bundle.properties.tags) - self.assertEqual(cert.id, cert_bundle.id) - self.assertNotEqual(cert.properties.updated_on, cert_bundle.properties.updated_on) + updated_cert = await client.update_certificate_properties(cert_name, tags=tags) + self._validate_certificate_bundle(cert=updated_cert, cert_name=cert_name, cert_policy=cert_policy) + self.assertEqual(tags, updated_cert.properties.tags) + self.assertEqual(cert.id, updated_cert.id) + self.assertNotEqual(cert.properties.updated_on, updated_cert.properties.updated_on) # delete certificate deleted_cert_bundle = await client.delete_certificate(certificate_name=cert_name) From 1e941657d1ad8c2ba3d9058820aef04ee2ef6834 Mon Sep 17 00:00:00 2001 From: Scott Beddall <45376673+scbedd@users.noreply.github.com> Date: Wed, 20 May 2020 10:47:31 -0700 Subject: [PATCH 21/28] Reduce Build Matrix (#11539) * reduce build matrix. remove 3.6 and 3.7. 
move pylint and mypy into analyze step --- .../templates/jobs/archetype-sdk-client.yml | 51 +------------------ eng/pipelines/templates/steps/analyze.yml | 17 ++++++- .../dev_requirements.txt | 1 - 3 files changed, 17 insertions(+), 52 deletions(-) diff --git a/eng/pipelines/templates/jobs/archetype-sdk-client.yml b/eng/pipelines/templates/jobs/archetype-sdk-client.yml index bc102d47af74..cee5e7406355 100644 --- a/eng/pipelines/templates/jobs/archetype-sdk-client.yml +++ b/eng/pipelines/templates/jobs/archetype-sdk-client.yml @@ -20,18 +20,6 @@ parameters: PythonVersion: '3.5' CoverageArg: '' RunForPR: false - Linux_Python36: - OSName: 'Linux' - OSVmImage: 'ubuntu-18.04' - PythonVersion: '3.6' - CoverageArg: '' - RunForPR: false - Linux_Python37: - OSName: 'Linux' - OSVmImage: 'ubuntu-18.04' - PythonVersion: '3.7' - CoverageArg: '' - RunForPR: false Linux_Python38: OSName: 'Linux' OSVmImage: 'ubuntu-18.04' @@ -90,6 +78,7 @@ jobs: parameters: ServiceDirectory: ${{ parameters.ServiceDirectory }} BuildTargetingString: ${{ parameters.BuildTargetingString }} + TestMarkArgument: ${{ parameters.TestMarkArgument }} - job: 'Test' condition: and(succeededOrFailed(), ne(variables['Skip.Test'], 'true')) @@ -146,44 +135,6 @@ jobs: ServiceDirectory: ${{ parameters.ServiceDirectory }} BuildTargetingString: ${{ parameters.BuildTargetingString }} - - job: 'RunMyPy' - condition: and(succeededOrFailed(), ne(variables['Skip.MyPy'], 'true')) - displayName: 'Run MyPy' - variables: - - template: ../variables/globals.yml - - dependsOn: - - 'Build' - - pool: - vmImage: 'ubuntu-18.04' - - steps: - - template: ../steps/run_mypy.yml - parameters: - ServiceDirectory: ${{ parameters.ServiceDirectory }} - BuildTargetingString: ${{ parameters.BuildTargetingString }} - TestMarkArgument: ${{ parameters.TestMarkArgument }} - - - job: 'RunPyLint' - condition: and(succeededOrFailed(), ne(variables['Skip.Pylint'], 'true')) - displayName: 'Run Pylint' - variables: - - template: ../variables/globals.yml - - dependsOn: - - 'Build' - - pool: - vmImage: 'ubuntu-18.04' - - steps: - - template: ../steps/run_pylint.yml - parameters: - ServiceDirectory: ${{ parameters.ServiceDirectory }} - BuildTargetingString: ${{ parameters.BuildTargetingString }} - TestMarkArgument: ${{ parameters.TestMarkArgument }} - - job: 'RunRegression' condition: and(succeededOrFailed(), or(eq(variables['Run.Regression'], 'true'), and(eq(variables['Build.Reason'], 'Schedule'), eq(variables['System.TeamProject'],'internal')))) displayName: 'Run Regression' diff --git a/eng/pipelines/templates/steps/analyze.yml b/eng/pipelines/templates/steps/analyze.yml index a6b06bc6aef9..c4e56dc0c2d4 100644 --- a/eng/pipelines/templates/steps/analyze.yml +++ b/eng/pipelines/templates/steps/analyze.yml @@ -1,6 +1,7 @@ parameters: BuildTargetingString: 'azure-*' ServiceDirectory: '' + TestMarkArgument: '' steps: - task: UsePythonVersion@0 @@ -69,4 +70,18 @@ steps: condition: ne(variables['Skip.VerifyWhl'],'true') inputs: scriptPath: 'scripts/devops_tasks/setup_execute_tests.py' - arguments: '"${{ parameters.BuildTargetingString }}" --service=${{parameters.ServiceDirectory}} --toxenv=verifywhl' \ No newline at end of file + arguments: '"${{ parameters.BuildTargetingString }}" --service=${{parameters.ServiceDirectory}} --toxenv=verifywhl' + + - ${{if ne(variables['Skip.MyPy'], 'true') }}: + - template: run_mypy.yml + parameters: + ServiceDirectory: ${{ parameters.ServiceDirectory }} + BuildTargetingString: ${{ parameters.BuildTargetingString }} + TestMarkArgument: ${{ 
parameters.TestMarkArgument }} + + - ${{if ne(variables['Skip.Pylint'], 'true') }}: + - template: run_pylint.yml + parameters: + ServiceDirectory: ${{ parameters.ServiceDirectory }} + BuildTargetingString: ${{ parameters.BuildTargetingString }} + TestMarkArgument: ${{ parameters.TestMarkArgument }} \ No newline at end of file diff --git a/sdk/appconfiguration/azure-appconfiguration/dev_requirements.txt b/sdk/appconfiguration/azure-appconfiguration/dev_requirements.txt index 7b777172dce0..2a41134e0e0d 100644 --- a/sdk/appconfiguration/azure-appconfiguration/dev_requirements.txt +++ b/sdk/appconfiguration/azure-appconfiguration/dev_requirements.txt @@ -5,4 +5,3 @@ aiohttp>=3.0; python_version >= '3.5' aiodns>=2.0; python_version >= '3.5' msrest>=0.6.10 - From f5bc51ac51e935a64c6fc24e8979c35595104a5f Mon Sep 17 00:00:00 2001 From: Charles Lowell Date: Wed, 20 May 2020 10:51:13 -0700 Subject: [PATCH 22/28] Add user authentication API to UsernamePasswordCredential (#11528) --- sdk/identity/azure-identity/CHANGELOG.md | 4 ++ .../identity/_credentials/user_password.py | 61 ++++++------------- sdk/identity/azure-identity/tests/helpers.py | 4 ++ .../tests/test_browser_credential.py | 11 ++-- .../tests/test_device_code_credential.py | 10 +-- .../test_username_password_credential.py | 57 ++++++++++++++--- 6 files changed, 84 insertions(+), 63 deletions(-) diff --git a/sdk/identity/azure-identity/CHANGELOG.md b/sdk/identity/azure-identity/CHANGELOG.md index fbd248f8fd85..7b2bef081a3c 100644 --- a/sdk/identity/azure-identity/CHANGELOG.md +++ b/sdk/identity/azure-identity/CHANGELOG.md @@ -1,6 +1,10 @@ # Release History ## 1.4.0b4 (Unreleased) +- The user authentication API added to `DeviceCodeCredential` and + `InteractiveBrowserCredential` in 1.4.0b3 is available on + `UsernamePasswordCredential` as well. + ([#11449](https://github.com/Azure/azure-sdk-for-python/issues/11449)) - The optional persistent cache for `DeviceCodeCredential` and `InteractiveBrowserCredential` added in 1.4.0b3 is now available on Linux and macOS as well as Windows. diff --git a/sdk/identity/azure-identity/azure/identity/_credentials/user_password.py b/sdk/identity/azure-identity/azure/identity/_credentials/user_password.py index e502b01bca85..2a5edf281a5d 100644 --- a/sdk/identity/azure-identity/azure/identity/_credentials/user_password.py +++ b/sdk/identity/azure-identity/azure/identity/_credentials/user_password.py @@ -2,19 +2,15 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. # ------------------------------------ -import time from typing import TYPE_CHECKING -from azure.core.credentials import AccessToken -from azure.core.exceptions import ClientAuthenticationError - -from .._internal import wrap_exceptions, PublicClientCredential +from .._internal import InteractiveCredential, wrap_exceptions if TYPE_CHECKING: from typing import Any -class UsernamePasswordCredential(PublicClientCredential): +class UsernamePasswordCredential(InteractiveCredential): """Authenticates a user with a username and password. In general, Microsoft doesn't recommend this kind of authentication, because it's less secure than other @@ -37,50 +33,29 @@ class UsernamePasswordCredential(PublicClientCredential): defines authorities for other clouds. :keyword str tenant_id: tenant ID or a domain associated with a tenant. If not provided, defaults to the 'organizations' tenant, which supports only Azure Active Directory work or school accounts. 
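For orientation, here is a minimal sketch of how the user authentication API described in the identity changelog entry above is meant to be exercised on `UsernamePasswordCredential`; the client ID, account, and scope are placeholders, not values taken from this patch:

```python
from azure.identity import UsernamePasswordCredential

# Hypothetical app registration and work/school account -- placeholders only.
credential = UsernamePasswordCredential(
    client_id="<client ID>",
    username="user@contoso.com",
    password="<password>",
)

# authenticate() performs the first, deliberately non-silent authentication
# (validating the password) and returns an AuthenticationRecord describing
# the signed-in user.
record = credential.authenticate(scopes=["https://vault.azure.net/.default"])
print(record.username, record.tenant_id)

# Subsequent requests for the same scope can be served silently from the cache.
token = credential.get_token("https://vault.azure.net/.default")
```

The same flow is what the `test_authenticate` cases added later in this commit assert against.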
+ :keyword bool enable_persistent_cache: if True, the credential will store tokens in a persistent cache shared by
+ other user credentials. Defaults to False.
+ :keyword bool allow_unencrypted_cache: if True, the credential will fall back to a plaintext cache on platforms
+ where encryption is unavailable. Defaults to False. Has no effect when `enable_persistent_cache` is False.
 """

 def __init__(self, client_id, username, password, **kwargs):
 # type: (str, str, str, Any) -> None
+
+ # The base class will accept an AuthenticationRecord, allowing this credential to authenticate silently the
+ # first time it's asked for a token. However, we want to ensure this first authentication is not silent, to
+ # validate the given password. This class therefore doesn't document the authentication_record argument, and we
+ # discard it here.
+ kwargs.pop("authentication_record", None)
 super(UsernamePasswordCredential, self).__init__(client_id=client_id, **kwargs)
 self._username = username
 self._password = password

 @wrap_exceptions
- def get_token(self, *scopes, **kwargs): # pylint:disable=unused-argument
- # type: (*str, **Any) -> AccessToken
- """Request an access token for `scopes`.
-
- .. note:: This method is called by Azure SDK clients. It isn't intended for use in application code.
-
- :param str scopes: desired scopes for the access token. This method requires at least one scope.
- :rtype: :class:`azure.core.credentials.AccessToken`
- :raises ~azure.core.exceptions.ClientAuthenticationError: authentication failed. The error's ``message``
- attribute gives a reason. Any error response from Azure Active Directory is available as the error's
- ``response`` attribute.
- """
- if not scopes:
- raise ValueError("'get_token' requires at least one scope")
-
- # MSAL requires scopes be a list
- scopes = list(scopes) # type: ignore
- now = int(time.time())
-
+ def _request_token(self, *scopes, **kwargs):
+ # type: (*str, **Any) -> dict
 app = self._get_app()
- accounts = app.get_accounts(username=self._username)
- result = None
- for account in accounts:
- result = app.acquire_token_silent(scopes, account=account)
- if result:
- break
-
- if not result:
- # cache miss -> request a new token
- with self._adapter:
- result = app.acquire_token_by_username_password(
- username=self._username, password=self._password, scopes=scopes
- )
-
- if "access_token" not in result:
- raise ClientAuthenticationError(message="authentication failed: {}".format(result.get("error_description")))
-
- return AccessToken(result["access_token"], now + int(result["expires_in"]))
+ with self._adapter:
+ return app.acquire_token_by_username_password(
+ username=self._username, password=self._password, scopes=list(scopes)
+ )
 diff --git a/sdk/identity/azure-identity/tests/helpers.py b/sdk/identity/azure-identity/tests/helpers.py
 index f31998fe2833..994cdbf873bd 100644
 --- a/sdk/identity/azure-identity/tests/helpers.py
 +++ b/sdk/identity/azure-identity/tests/helpers.py
 @@ -145,6 +145,10 @@ def mock_response(status_code=200, headers=None, json_payload=None):
 response.text = lambda encoding=None: json.dumps(json_payload)
 response.headers["content-type"] = "application/json"
 response.content_type = "application/json"
+ else:
+ response.text = lambda encoding=None: ""
+ response.headers["content-type"] = "text/plain"
+ response.content_type = "text/plain"
 return response
 diff --git a/sdk/identity/azure-identity/tests/test_browser_credential.py b/sdk/identity/azure-identity/tests/test_browser_credential.py
 index 5dd7ed4fbfee..88fb33e35cd0
100644 --- a/sdk/identity/azure-identity/tests/test_browser_credential.py +++ b/sdk/identity/azure-identity/tests/test_browser_credential.py @@ -83,16 +83,17 @@ def test_authenticate(): ) record = credential.authenticate(scopes=(scope,)) + for auth_record in (record, credential.authentication_record): + assert auth_record.authority == environment + assert auth_record.home_account_id == object_id + "." + home_tenant + assert auth_record.tenant_id == home_tenant + assert auth_record.username == username + # credential should have a cached access token for the scope used in authenticate with patch(WEBBROWSER_OPEN, Mock(side_effect=Exception("credential should authenticate silently"))): token = credential.get_token(scope) assert token.token == access_token - assert record.authority == environment - assert record.home_account_id == object_id + "." + home_tenant - assert record.tenant_id == home_tenant - assert record.username == username - def test_disable_automatic_authentication(): """When configured for strict silent auth, the credential should raise when silent auth fails""" diff --git a/sdk/identity/azure-identity/tests/test_device_code_credential.py b/sdk/identity/azure-identity/tests/test_device_code_credential.py index 2b09a68f58ab..729b3f41143a 100644 --- a/sdk/identity/azure-identity/tests/test_device_code_credential.py +++ b/sdk/identity/azure-identity/tests/test_device_code_credential.py @@ -78,16 +78,16 @@ def test_authenticate(): _cache=TokenCache(), ) record = credential.authenticate(scopes=(scope,)) + for auth_record in (record, credential.authentication_record): + assert auth_record.authority == environment + assert auth_record.home_account_id == object_id + "." + home_tenant + assert auth_record.tenant_id == home_tenant + assert auth_record.username == username # credential should have a cached access token for the scope used in authenticate token = credential.get_token(scope) assert token.token == access_token - assert record.authority == environment - assert record.home_account_id == object_id + "." + home_tenant - assert record.tenant_id == home_tenant - assert record.username == username - def test_disable_automatic_authentication(): """When configured for strict silent auth, the credential should raise when silent auth fails""" diff --git a/sdk/identity/azure-identity/tests/test_username_password_credential.py b/sdk/identity/azure-identity/tests/test_username_password_credential.py index a3e154c89fae..88a04cfaeb62 100644 --- a/sdk/identity/azure-identity/tests/test_username_password_credential.py +++ b/sdk/identity/azure-identity/tests/test_username_password_credential.py @@ -2,7 +2,6 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. 
# ------------------------------------ -from azure.core.exceptions import ClientAuthenticationError from azure.core.pipeline.policies import SansIOHTTPPolicy from azure.identity import UsernamePasswordCredential from azure.identity._internal.user_agent import USER_AGENT @@ -10,6 +9,7 @@ from helpers import ( build_aad_response, + build_id_token, get_discovery_response, mock_response, Request, @@ -26,7 +26,7 @@ def test_no_scopes(): """The credential should raise when get_token is called with no scopes""" credential = UsernamePasswordCredential("client-id", "username", "password") - with pytest.raises(ClientAuthenticationError): + with pytest.raises(ValueError): credential.get_token() @@ -88,13 +88,50 @@ def test_username_password_credential(): assert token.token == expected_token -def test_cache_persistence(): - """The credential should cache only in memory""" +def test_authenticate(): + client_id = "client-id" + environment = "localhost" + issuer = "https://" + environment + tenant_id = "some-tenant" + authority = issuer + "/" + tenant_id - expected_cache = Mock() - raise_when_called = Mock(side_effect=Exception("credential shouldn't attempt to load a persistent cache")) - with patch.multiple("msal_extensions.token_cache", WindowsTokenCache=raise_when_called): - with patch("msal.TokenCache", Mock(return_value=expected_cache)): - credential = UsernamePasswordCredential("...", "...", "...") + access_token = "***" + scope = "scope" - assert credential._cache is expected_cache + # mock AAD response with id token + object_id = "object-id" + home_tenant = "home-tenant-id" + username = "me@work.com" + id_token = build_id_token(aud=client_id, iss=issuer, object_id=object_id, tenant_id=home_tenant, username=username) + auth_response = build_aad_response( + uid=object_id, utid=home_tenant, access_token=access_token, refresh_token="**", id_token=id_token + ) + + transport = validating_transport( + requests=[Request(url_substring=issuer)] * 4, + responses=[ + get_discovery_response(authority), # instance discovery + get_discovery_response(authority), # tenant discovery + mock_response(status_code=404), # user realm discovery + mock_response(json_payload=auth_response), # token request following authenticate() + ], + ) + + credential = UsernamePasswordCredential( + username=username, + password="1234", + authority=environment, + client_id=client_id, + tenant_id=tenant_id, + transport=transport, + ) + record = credential.authenticate(scopes=(scope,)) + for auth_record in (record, credential.authentication_record): + assert auth_record.authority == environment + assert auth_record.home_account_id == object_id + "." 
+ home_tenant + assert auth_record.tenant_id == home_tenant + assert auth_record.username == username + + # credential should have a cached access token for the scope passed to authenticate + token = credential.get_token(scope) + assert token.token == access_token From 01e026aea7eb5c914d4459e4f5c96b5939afdf47 Mon Sep 17 00:00:00 2001 From: Scott Beddall <45376673+scbedd@users.noreply.github.com> Date: Wed, 20 May 2020 11:30:25 -0700 Subject: [PATCH 23/28] update pinned versions in autorest_req.txt (#11557) * update pinned versions in autorest_req.txt --- eng/autorest_req.txt | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/eng/autorest_req.txt b/eng/autorest_req.txt index b9f608c81eaa..a407809e9c9a 100644 --- a/eng/autorest_req.txt +++ b/eng/autorest_req.txt @@ -1,8 +1,8 @@ -pytest==5.4.1; python_version >= '3.5' +pytest==5.4.2; python_version >= '3.5' pytest==4.6.9; python_version == '2.7' pytest-cov==2.8.1 -pytest-asyncio==0.10.0; python_version >= '3.5' +pytest-asyncio==0.12.0; python_version >= '3.5' isodate==0.6.0 -msrest==0.6.13 +msrest==0.6.14 aiohttp==3.6.2 -wheel \ No newline at end of file +wheel==0.34.2 \ No newline at end of file From 2e0b286721a882fc13050bd103cccbecfa99e4b1 Mon Sep 17 00:00:00 2001 From: annatisch Date: Wed, 20 May 2020 13:45:13 -0700 Subject: [PATCH 24/28] Updated changelog (#11554) --- sdk/core/azure-core/CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/sdk/core/azure-core/CHANGELOG.md b/sdk/core/azure-core/CHANGELOG.md index dbd114975faa..553066035cde 100644 --- a/sdk/core/azure-core/CHANGELOG.md +++ b/sdk/core/azure-core/CHANGELOG.md @@ -7,6 +7,10 @@ - Fix AttributeException in StreamDownloadGenerator #11462 +### Features + +- Added support for changesets as part of multipart message support #10485 + ## 1.5.0 (2020-05-04) ### Features From ef37aa684c51e4e6a3d11d66692d3761ab9a4e4a Mon Sep 17 00:00:00 2001 From: Azure SDK Bot <53356347+azure-sdk@users.noreply.github.com> Date: Wed, 20 May 2020 14:05:24 -0700 Subject: [PATCH 25/28] Sync eng/common directory with azure-sdk-tools repository (#11562) --- eng/common/TestResources/remove-test-resources.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/eng/common/TestResources/remove-test-resources.yml b/eng/common/TestResources/remove-test-resources.yml index e66933b60cc6..1565e7d4bd0f 100644 --- a/eng/common/TestResources/remove-test-resources.yml +++ b/eng/common/TestResources/remove-test-resources.yml @@ -32,4 +32,5 @@ steps: -Force ` -Verbose displayName: Remove test resources + condition: and(ne(variables['AZURE_RESOURCEGROUP_NAME'], ''), succeededOrFailed()) continueOnError: true From 0667e822bee50c3e727611cacaa74d2223e1d268 Mon Sep 17 00:00:00 2001 From: Scott Beddall <45376673+scbedd@users.noreply.github.com> Date: Wed, 20 May 2020 14:56:42 -0700 Subject: [PATCH 26/28] Artifact Powered Docs.MS Release (#11395) * abstracting staging of filtered artifacts * adding docs.ms release steps --- .../templates/steps/docs-metadata-release.yml | 2 +- .../stages/archetype-python-release.yml | 85 ++++++++++++++----- .../templates/stages/archetype-sdk-client.yml | 7 +- .../steps/stage-filtered-artifacts.yml | 14 +++ sdk/template/azure-template/CHANGELOG.md | 7 ++ .../azure-template/azure/template/_version.py | 2 +- 6 files changed, 90 insertions(+), 27 deletions(-) create mode 100644 eng/pipelines/templates/steps/stage-filtered-artifacts.yml diff --git a/eng/common/pipelines/templates/steps/docs-metadata-release.yml 
b/eng/common/pipelines/templates/steps/docs-metadata-release.yml index 89d12d4ac1d1..fc9afd1417ce 100644 --- a/eng/common/pipelines/templates/steps/docs-metadata-release.yml +++ b/eng/common/pipelines/templates/steps/docs-metadata-release.yml @@ -55,4 +55,4 @@ steps: PRTitle: "Docs.MS Readme Update." BaseBranchName: smoke-test WorkingDirectory: ${{parameters.WorkingDirectory}}/repo - ScriptDirectory: ${{parameters.WorkingDirectory}}/${{parameters.ScriptDirectory}} + ScriptDirectory: ${{parameters.WorkingDirectory}}/${{parameters.ScriptDirectory}} \ No newline at end of file diff --git a/eng/pipelines/templates/stages/archetype-python-release.yml b/eng/pipelines/templates/stages/archetype-python-release.yml index 0f26e8b47bd9..33262577352a 100644 --- a/eng/pipelines/templates/stages/archetype-python-release.yml +++ b/eng/pipelines/templates/stages/archetype-python-release.yml @@ -4,6 +4,8 @@ parameters: DependsOn: Build DocArtifact: 'documentation' DevFeedName: public/azure-sdk-for-python + TargetDocRepoOwner: '' + TargetDocRepoName: '' stages: - ${{if and(eq(variables['Build.Reason'], 'Manual'), eq(variables['System.TeamProject'], 'internal'))}}: @@ -26,15 +28,15 @@ stages: deploy: steps: - checkout: self + - template: /eng/pipelines/templates/steps/stage-filtered-artifacts.yml + parameters: + SourceFolder: ${{parameters.ArtifactName}} + TargetFolder: ${{artifact.safeName}} + PackageName: ${{artifact.name}} - pwsh: | - Get-ChildItem $(Pipeline.Workspace)/${{parameters.ArtifactName}} - New-Item -Type Directory -Name ${{artifact.safeName}} -Path $(Pipeline.Workspace) - $underscorePrefix = "${{artifact.name}}" - $dashPrefix = "${{artifact.name}}".Replace("_", "-") - Copy-Item $(Pipeline.Workspace)/${{parameters.ArtifactName}}/$dashPrefix-[0-9]*.[0-9]*.[0-9]* $(Pipeline.Workspace)/${{artifact.safeName}} - Copy-Item $(Pipeline.Workspace)/${{parameters.ArtifactName}}/$underscorePrefix-[0-9]*.[0-9]*.[0-9]* $(Pipeline.Workspace)/${{artifact.safeName}} - Get-ChildItem $(Pipeline.Workspace)/${{artifact.safeName}} - displayName: Stage artifacts + Get-ChildItem -Recurse $(Pipeline.Workspace)/${{artifact.safeName}} + workingDirectory: $(Pipeline.Workspace) + displayName: Output Visible Artifacts - template: /eng/common/pipelines/templates/steps/create-tags-and-git-release.yml parameters: ArtifactLocation: $(Pipeline.Workspace)/${{artifact.safeName}} @@ -62,14 +64,11 @@ stages: artifact: ${{parameters.ArtifactName}} timeoutInMinutes: 5 - - pwsh: | - Get-ChildItem $(Pipeline.Workspace)/${{parameters.ArtifactName}} - New-Item -Type Directory -Name ${{artifact.safeName}} -Path $(Pipeline.Workspace) - $underscorePrefix = "${{artifact.name}}" - $dashPrefix = "${{artifact.name}}".Replace("_", "-") - Copy-Item $(Pipeline.Workspace)/${{parameters.ArtifactName}}/$dashPrefix-[0-9]*.[0-9]*.[0-9]* $(Pipeline.Workspace)/${{artifact.safeName}} - Copy-Item $(Pipeline.Workspace)/${{parameters.ArtifactName}}/$underscorePrefix-[0-9]*.[0-9]*.[0-9]* $(Pipeline.Workspace)/${{artifact.safeName}} - Get-ChildItem $(Pipeline.Workspace)/${{artifact.safeName}} + - template: /eng/pipelines/templates/steps/stage-filtered-artifacts.yml + parameters: + SourceFolder: ${{parameters.ArtifactName}} + TargetFolder: ${{artifact.safeName}} + PackageName: ${{artifact.name}} - task: UsePythonVersion@0 @@ -105,7 +104,7 @@ stages: displayName: 'Publish package to feed: ${{parameters.DevFeedName}}' - ${{if ne(artifact.options.skipPublishDocs, 'true')}}: - - deployment: PublishDocs + - deployment: PublishGitHubIODocs displayName: Publish Docs to 
GitHubIO Blob Storage condition: and(succeeded(), ne(variables['Skip.PublishDocs'], 'true')) environment: githubio @@ -119,12 +118,12 @@ stages: deploy: steps: - checkout: self - - pwsh: | - Get-ChildItem $(Pipeline.Workspace)/${{parameters.DocArtifact}} - New-Item -Type Directory -Name ${{artifact.safeName}} -Path $(Pipeline.Workspace) - $dashPrefix = "${{artifact.name}}".Replace("_", "-") - Copy-Item $(Pipeline.Workspace)/${{parameters.DocArtifact}}/$dashPrefix.zip $(Pipeline.Workspace)/${{artifact.safeName}} - displayName: Stage artifacts + - template: /eng/pipelines/templates/steps/stage-filtered-artifacts.yml + parameters: + SourceFolder: ${{parameters.DocArtifact}} + TargetFolder: ${{artifact.safeName}} + PackageName: ${{artifact.name}} + AdditionalRegex: '.zip' - pwsh: | Get-ChildItem -Recurse $(Pipeline.Workspace)/${{artifact.safeName}} workingDirectory: $(Pipeline.Workspace) @@ -138,6 +137,46 @@ stages: # we override the regular script path because we have cloned the build tools repo as a separate artifact. ScriptPath: 'eng/common/scripts/copy-docs-to-blobstorage.ps1' + - ${{if ne(artifact.options.skipPublishDocs, 'true')}}: + - deployment: PublishDocs + displayName: "Docs.MS Release" + condition: and(succeeded(), ne(variables['Skip.PublishDocs'], 'true')) + environment: githubio + dependsOn: PublishPackage + + pool: + vmImage: ubuntu-18.04 + + strategy: + runOnce: + deploy: + steps: + - checkout: self + - template: /eng/pipelines/templates/steps/stage-filtered-artifacts.yml + parameters: + SourceFolder: ${{parameters.ArtifactName}} + TargetFolder: ${{artifact.safeName}} + PackageName: ${{artifact.name}} + - pwsh: | + Get-ChildItem -Recurse $(Pipeline.Workspace)/${{artifact.safeName}} + workingDirectory: $(Pipeline.Workspace) + displayName: Output Visible Artifacts + - template: /eng/common/pipelines/templates/steps/docs-metadata-release.yml + parameters: + ArtifactLocation: $(Pipeline.Workspace)/${{artifact.safeName}} + PackageRepository: PyPI + ReleaseSha: $(Build.SourceVersion) + RepoId: Azure/azure-sdk-for-python + WorkingDirectory: $(System.DefaultWorkingDirectory) + TargetDocRepo: 'MicrosoftDocs/azure-docs-sdk-python' + TargetDocRepoOwner: ${{parameters.TargetDocRepoOwner}} + TargetDocRepoName: ${{parameters.TargetDocRepoName}} + PRBranchName: 'smoke-test-rdme' + ArtifactName: ${{parameters.ArtifactName}} + Language: 'python' + ServiceDirectory: ${{ parameters.ServiceDirectory }} + DocRepoDestinationPath: 'docs-ref-services/' + - ${{if ne(artifact.options.skipUpdatePackageVersion, 'true')}}: - deployment: UpdatePackageVersion displayName: "Update Package Version" diff --git a/eng/pipelines/templates/stages/archetype-sdk-client.yml b/eng/pipelines/templates/stages/archetype-sdk-client.yml index deee8176614a..aaf3e3809a99 100644 --- a/eng/pipelines/templates/stages/archetype-sdk-client.yml +++ b/eng/pipelines/templates/stages/archetype-sdk-client.yml @@ -4,7 +4,8 @@ parameters: ToxEnvParallel: '--tenvparallel' InjectedPackages: '' BuildDocs: true - + TargetDocRepoOwner: 'MicrosoftDocs' + TargetDocRepoName: 'azure-docs-sdk-python' stages: - stage: Build @@ -24,4 +25,6 @@ stages: ServiceDirectory: ${{parameters.ServiceDirectory}} Artifacts: ${{parameters.Artifacts}} ArtifactName: packages - DocArtifact: documentation \ No newline at end of file + DocArtifact: documentation + TargetDocRepoOwner: ${{parameters.TargetDocRepoOwner}} + TargetDocRepoName: ${{parameters.TargetDocRepoName}} \ No newline at end of file diff --git 
a/eng/pipelines/templates/steps/stage-filtered-artifacts.yml b/eng/pipelines/templates/steps/stage-filtered-artifacts.yml new file mode 100644 index 000000000000..05a82b966a62 --- /dev/null +++ b/eng/pipelines/templates/steps/stage-filtered-artifacts.yml @@ -0,0 +1,14 @@ +parameters: + SourceFolder: '' # ArtifactName (aka "packages") + TargetFolder: '' # artifact.safename (azuretemplate) + PackageName: '' # artifact.name (azure-template) + AdditionalRegex: '-[0-9]*.[0-9]*.[0-9]*' + +steps: + - pwsh: | + New-Item -Type Directory -Name ${{parameters.TargetFolder}} -Path $(Pipeline.Workspace) + $underscorePrefix = "${{parameters.PackageName}}" + $dashPrefix = "${{parameters.PackageName}}".Replace("_", "-") + Copy-Item $(Pipeline.Workspace)/${{parameters.SourceFolder}}/$dashPrefix${{parameters.AdditionalRegex}} $(Pipeline.Workspace)/${{parameters.TargetFolder}} + Copy-Item $(Pipeline.Workspace)/${{parameters.SourceFolder}}/$underscorePrefix${{parameters.AdditionalRegex}} $(Pipeline.Workspace)/${{parameters.TargetFolder}} -ErrorAction SilentlyContinue + displayName: Stage artifacts diff --git a/sdk/template/azure-template/CHANGELOG.md b/sdk/template/azure-template/CHANGELOG.md index b891c3650b57..05c01874c785 100644 --- a/sdk/template/azure-template/CHANGELOG.md +++ b/sdk/template/azure-template/CHANGELOG.md @@ -1,4 +1,11 @@ # Release History + +## 0.0.6 (2020-05-20) +- Test a successful Release + +## 0.0.5 (2020-05-20) +- Test a successful Release + ## 0.0.3 (Unreleased) ## 0.0.2 (2020-03-24) diff --git a/sdk/template/azure-template/azure/template/_version.py b/sdk/template/azure-template/azure/template/_version.py index a03b1098e2b0..36f525b597c0 100644 --- a/sdk/template/azure-template/azure/template/_version.py +++ b/sdk/template/azure-template/azure/template/_version.py @@ -1,2 +1,2 @@ # matches SEMVER -VERSION = "0.0.3" \ No newline at end of file +VERSION = "0.0.6" \ No newline at end of file From 8f5fb1068b8704695865ca2decf9ebfc223b8472 Mon Sep 17 00:00:00 2001 From: Krista Pratico Date: Wed, 20 May 2020 15:59:30 -0700 Subject: [PATCH 27/28] [formrecognizer] support Copy API (#11372) * add copy feature gen code * add copy implementation (wip) * update copy implementation * add copy model samples * add back get copy auth method and update samples * add CopyAuthorization to init * changes from feedback on design * raise better error messages in polling for failed copying * update readme/changelog * updating sample snippets * renames to align with .net * change copy target to dict * add copy sync/async tests * make mypy happy * update docstrings * review feedback * rename authorize_copy_target -> get_copy_authorization * feedback + add test for copy authorization * fix to testcase * change description in samples * hardcode region * construct resource id in testcase --- .../azure-ai-formrecognizer/CHANGELOG.md | 1 + .../azure-ai-formrecognizer/README.md | 4 + .../formrecognizer/_form_training_client.py | 108 ++++- .../ai/formrecognizer/_generated/__init__.py | 2 +- .../_generated/_configuration.py | 3 +- .../_generated/_form_recognizer_client.py | 2 +- .../ai/formrecognizer/_generated/_version.py | 4 +- .../formrecognizer/_generated/aio/__init__.py | 2 +- .../_generated/aio/_configuration_async.py | 3 +- .../aio/_form_recognizer_client_async.py | 2 +- .../aio/operations_async/__init__.py | 2 +- ...form_recognizer_client_operations_async.py | 250 ++++++++++- .../_generated/models/__init__.py | 14 +- .../models/_form_recognizer_client_enums.py | 2 +- .../_generated/models/_models.py | 144 
++++++- .../_generated/models/_models_py3.py | 160 ++++++- .../_generated/operations/__init__.py | 2 +- .../_form_recognizer_client_operations.py | 254 ++++++++++- .../azure/ai/formrecognizer/_models.py | 6 +- .../azure/ai/formrecognizer/_polling.py | 33 ++ .../aio/_form_training_client_async.py | 111 ++++- .../azure-ai-formrecognizer/samples/README.md | 4 + .../async_samples/sample_copy_model_async.py | 81 ++++ .../samples/sample_copy_model.py | 73 ++++ ...st_copy_model.test_copy_authorization.yaml | 42 ++ .../test_copy_model.test_copy_model_fail.yaml | 361 ++++++++++++++++ ...copy_model.test_copy_model_successful.yaml | 402 ++++++++++++++++++ ..._copy_model.test_copy_model_transform.yaml | 244 +++++++++++ ...y_model_async.test_copy_authorization.yaml | 29 ++ ...copy_model_async.test_copy_model_fail.yaml | 252 +++++++++++ ...odel_async.test_copy_model_successful.yaml | 208 +++++++++ ...model_async.test_copy_model_transform.yaml | 246 +++++++++++ .../tests/test_copy_model.py | 92 ++++ .../tests/test_copy_model_async.py | 87 ++++ .../tests/test_mgmt.py | 4 +- .../tests/test_mgmt_async.py | 4 +- .../azure-ai-formrecognizer/tests/testcase.py | 67 ++- 37 files changed, 3238 insertions(+), 67 deletions(-) create mode 100644 sdk/formrecognizer/azure-ai-formrecognizer/samples/async_samples/sample_copy_model_async.py create mode 100644 sdk/formrecognizer/azure-ai-formrecognizer/samples/sample_copy_model.py create mode 100644 sdk/formrecognizer/azure-ai-formrecognizer/tests/recordings/test_copy_model.test_copy_authorization.yaml create mode 100644 sdk/formrecognizer/azure-ai-formrecognizer/tests/recordings/test_copy_model.test_copy_model_fail.yaml create mode 100644 sdk/formrecognizer/azure-ai-formrecognizer/tests/recordings/test_copy_model.test_copy_model_successful.yaml create mode 100644 sdk/formrecognizer/azure-ai-formrecognizer/tests/recordings/test_copy_model.test_copy_model_transform.yaml create mode 100644 sdk/formrecognizer/azure-ai-formrecognizer/tests/recordings/test_copy_model_async.test_copy_authorization.yaml create mode 100644 sdk/formrecognizer/azure-ai-formrecognizer/tests/recordings/test_copy_model_async.test_copy_model_fail.yaml create mode 100644 sdk/formrecognizer/azure-ai-formrecognizer/tests/recordings/test_copy_model_async.test_copy_model_successful.yaml create mode 100644 sdk/formrecognizer/azure-ai-formrecognizer/tests/recordings/test_copy_model_async.test_copy_model_transform.yaml create mode 100644 sdk/formrecognizer/azure-ai-formrecognizer/tests/test_copy_model.py create mode 100644 sdk/formrecognizer/azure-ai-formrecognizer/tests/test_copy_model_async.py diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/CHANGELOG.md b/sdk/formrecognizer/azure-ai-formrecognizer/CHANGELOG.md index c0bb755487c9..bc38b349e889 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/CHANGELOG.md +++ b/sdk/formrecognizer/azure-ai-formrecognizer/CHANGELOG.md @@ -21,6 +21,7 @@ For recognize receipt methods, parameters have been renamed to `receipt` and `re **New features** +- Support to copy a custom model from one Form Recognizer resource to another - Authentication using `azure-identity` credentials now supported - see the [Azure Identity documentation](https://github.com/Azure/azure-sdk-for-python/blob/master/sdk/identity/azure-identity/README.md) for more information diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/README.md b/sdk/formrecognizer/azure-ai-formrecognizer/README.md index d166e8ee201a..e9ed7bd36344 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/README.md 
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/README.md @@ -131,6 +131,7 @@ See the full details regarding [authentication][cognitive_authentication] of cog - Training custom models to recognize all fields and values found in your custom forms. A `CustomFormModel` is returned indicating the form types the model will recognize, and the fields it will extract for each form type. See the [service's documents][fr-train-without-labels] for a more detailed explanation. - Training custom models to recognize specific fields and values you specify by labeling your custom forms. A `CustomFormModel` is returned indicating the fields the model will extract, as well as the estimated accuracy for each field. See the [service's documents][fr-train-with-labels] for a more detailed explanation. - Managing models created in your account. +- Copying a custom model from one Form Recognizer resource to another. Please note that models can also be trained using a graphical user interface such as the [Form Recognizer Labeling Tool][fr-labeling-tool]. @@ -389,6 +390,7 @@ with Form Recognizer and require Python 3.5 or later. * Train a model without labels: [sample_train_model_without_labels.py][sample_train_model_without_labels] ([async version][sample_train_model_without_labels_async]) * Train a model with labels: [sample_train_model_with_labels.py][sample_train_model_with_labels] ([async version][sample_train_model_with_labels_async]) * Manage custom models: [sample_manage_custom_models.py][sample_manage_custom_models] ([async_version][sample_manage_custom_models_async]) +* Copy a model between Form Recognizer resources: [sample_copy_model.py][sample_copy_model] ([async_version][sample_copy_model_async]) ### Additional documentation @@ -459,3 +461,5 @@ This project has adopted the [Microsoft Open Source Code of Conduct][code_of_con [sample_train_model_with_labels_async]: https://github.com/Azure/azure-sdk-for-python/tree/master/sdk/formrecognizer/azure-ai-formrecognizer/samples/async_samples/sample_train_model_with_labels_async.py [sample_train_model_without_labels]: https://github.com/Azure/azure-sdk-for-python/tree/master/sdk/formrecognizer/azure-ai-formrecognizer/samples/sample_train_model_without_labels.py [sample_train_model_without_labels_async]: https://github.com/Azure/azure-sdk-for-python/tree/master/sdk/formrecognizer/azure-ai-formrecognizer/samples/async_samples/sample_train_model_without_labels_async.py +[sample_copy_model]: https://github.com/Azure/azure-sdk-for-python/tree/master/sdk/formrecognizer/azure-ai-formrecognizer/samples/sample_copy_model.py +[sample_copy_model_async]: https://github.com/Azure/azure-sdk-for-python/tree/master/sdk/formrecognizer/azure-ai-formrecognizer/samples/async_samples/sample_copy_model_async.py diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_form_training_client.py b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_form_training_client.py index ffca7b40c457..0905b310bbff 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_form_training_client.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_form_training_client.py @@ -6,30 +6,39 @@ # pylint: disable=protected-access +import json from typing import ( Optional, Any, Iterable, + Dict, Union, TYPE_CHECKING, ) from azure.core.tracing.decorator import distributed_trace from azure.core.polling import LROPoller from azure.core.polling.base_polling import LROBasePolling -from ._generated.models import Model from 
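Before the hunk that adds the new client methods, a rough sketch of the end-to-end copy flow that the docstrings below describe; the endpoints, keys, and IDs are placeholders rather than values from this patch:

```python
from azure.core.credentials import AzureKeyCredential
from azure.ai.formrecognizer import FormTrainingClient

# Two hypothetical Form Recognizer resources: the source owns the model,
# the target is where the copy should land.
source_client = FormTrainingClient("<source endpoint>", AzureKeyCredential("<source key>"))
target_client = FormTrainingClient("<target endpoint>", AzureKeyCredential("<target key>"))

# Step 1: the target resource authorizes receiving a copy of a model.
target = target_client.get_copy_authorization(
    resource_id="<target resource ID>",
    resource_region="<target region, e.g. westus2>",
)

# Step 2: the source resource starts the copy and polls it to completion.
poller = source_client.begin_copy_model(model_id="<source model ID>", target=target)
copied_model = poller.result()  # CustomFormModelInfo for the model in the target resource
print(copied_model.model_id, copied_model.status)
```

Splitting the flow across the two clients is what lets the target resource, not the source, mint the authorization token.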
._generated._form_recognizer_client import FormRecognizerClient as FormRecognizer -from ._generated.models import TrainRequest, TrainSourceFilter +from ._generated.models import ( + TrainRequest, + TrainSourceFilter, + CopyRequest, + Model, + CopyOperationResult, + CopyAuthorizationResult +) from ._helpers import error_map, get_authentication_policy, POLLING_INTERVAL from ._models import ( CustomFormModelInfo, AccountProperties, CustomFormModel ) -from ._polling import TrainingPolling +from ._polling import TrainingPolling, CopyPolling from ._user_agent import USER_AGENT from ._form_recognizer_client import FormRecognizerClient if TYPE_CHECKING: from azure.core.credentials import AzureKeyCredential, TokenCredential + from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import HttpResponse PipelineResponseType = HttpResponse @@ -239,6 +248,99 @@ def get_custom_model(self, model_id, **kwargs): response = self._client.get_custom_model(model_id=model_id, include_keys=True, error_map=error_map, **kwargs) return CustomFormModel._from_generated(response) + @distributed_trace + def get_copy_authorization(self, resource_id, resource_region, **kwargs): + # type: (str, str, Any) -> Dict[str, Union[str, int]] + """Generate authorization for copying a custom model into the target Form Recognizer resource. + This should be called by the target resource (where the model will be copied to) + and the output can be passed as the `target` parameter into :func:`~begin_copy_model()`. + + :param str resource_id: Azure Resource Id of the target Form Recognizer resource + where the model will be copied to. + :param str resource_region: Location of the target Form Recognizer resource. A valid Azure + region name supported by Cognitive Services. + :return: A dictionary with values for the copy authorization - + "modelId", "accessToken", "resourceId", "resourceRegion", and "expirationDateTimeTicks". + :rtype: Dict[str, Union[str, int]] + :raises ~azure.core.exceptions.HttpResponseError: + + .. admonition:: Example: + + .. literalinclude:: ../samples/sample_copy_model.py + :start-after: [START get_copy_authorization] + :end-before: [END get_copy_authorization] + :language: python + :dedent: 8 + :caption: Authorize the target resource to receive the copied model + """ + + response = self._client.generate_model_copy_authorization( # type: ignore + cls=lambda pipeline_response, deserialized, response_headers: pipeline_response, + error_map=error_map, + **kwargs + ) # type: PipelineResponse + target = json.loads(response.http_response.text()) + target["resourceId"] = resource_id + target["resourceRegion"] = resource_region + return target + + @distributed_trace + def begin_copy_model( + self, + model_id, # type: str + target, # type: Dict + **kwargs # type: Any + ): + # type: (...) -> LROPoller + """Copy a custom model stored in this resource (the source) to the user specified + target Form Recognizer resource. This should be called with the source Form Recognizer resource + (with the model that is intended to be copied). The `target` parameter should be supplied from the + target resource's output from calling the :func:`~get_copy_authorization()` method. + + :param str model_id: Model identifier of the model to copy to target resource. + :param dict target: + The copy authorization generated from the target resource's call to + :func:`~get_copy_authorization()`. 
+ :keyword int polling_interval: Default waiting time between two polls for LRO operations if + no Retry-After header is present. + :return: An instance of an LROPoller. Call `result()` on the poller + object to return a :class:`~azure.ai.formrecognizer.CustomFormModelInfo`. + :rtype: ~azure.core.polling.LROPoller[~azure.ai.formrecognizer.CustomFormModelInfo] + :raises ~azure.core.exceptions.HttpResponseError: + + .. admonition:: Example: + + .. literalinclude:: ../samples/sample_copy_model.py + :start-after: [START begin_copy_model] + :end-before: [END begin_copy_model] + :language: python + :dedent: 8 + :caption: Copy a model from the source resource to the target resource + """ + + polling_interval = kwargs.pop("polling_interval", POLLING_INTERVAL) + + def _copy_callback(raw_response, _, headers): # pylint: disable=unused-argument + copy_result = self._client._deserialize(CopyOperationResult, raw_response) + return CustomFormModelInfo._from_generated(copy_result, target["modelId"]) + + return self._client.begin_copy_custom_model( # type: ignore + model_id=model_id, + copy_request=CopyRequest( + target_resource_id=target["resourceId"], + target_resource_region=target["resourceRegion"], + copy_authorization=CopyAuthorizationResult( + access_token=target["accessToken"], + model_id=target["modelId"], + expiration_date_time_ticks=target["expirationDateTimeTicks"] + ) + ), + cls=kwargs.pop("cls", _copy_callback), + polling=LROBasePolling(timeout=polling_interval, lro_algorithms=[CopyPolling()], **kwargs), + error_map=error_map, + **kwargs + ) + def get_form_recognizer_client(self, **kwargs): # type: (Any) -> FormRecognizerClient """Get an instance of a FormRecognizerClient from FormTrainingClient. diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_generated/__init__.py b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_generated/__init__.py index cddb554367aa..bbb35174dae1 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_generated/__init__.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_generated/__init__.py @@ -1,6 +1,6 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6246, generator: {generator}) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6282, generator: {generator}) # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_generated/_configuration.py b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_generated/_configuration.py index 2a292d8c068a..db0a5e6f5c48 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_generated/_configuration.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_generated/_configuration.py @@ -1,6 +1,6 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6246, generator: {generator}) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6282, generator: {generator}) # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- @@ -46,6 +46,7 @@ def __init__( self.credential = credential self.endpoint = endpoint self.credential_scopes = ['https://cognitiveservices.azure.com/.default'] + self.credential_scopes.extend(kwargs.pop('credential_scopes', [])) kwargs.setdefault('sdk_moniker', 'ai-formrecognizer/{}'.format(VERSION)) self._configure(**kwargs) diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_generated/_form_recognizer_client.py b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_generated/_form_recognizer_client.py index be5f79b99ff9..ad4fc9939ad0 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_generated/_form_recognizer_client.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_generated/_form_recognizer_client.py @@ -1,6 +1,6 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6246, generator: {generator}) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6282, generator: {generator}) # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_generated/_version.py b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_generated/_version.py index 257ed871fc93..4b19902c2480 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_generated/_version.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_generated/_version.py @@ -1,7 +1,7 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6246, generator: {generator}) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6282, generator: {generator}) # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- -VERSION = "1.0.0b1" +VERSION = "1.0.0b2" diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_generated/aio/__init__.py b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_generated/aio/__init__.py index f37952aa7a51..8e5a144b5cf0 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_generated/aio/__init__.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_generated/aio/__init__.py @@ -1,6 +1,6 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6246, generator: {generator}) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6282, generator: {generator}) # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_generated/aio/_configuration_async.py b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_generated/aio/_configuration_async.py index 44cffe657f45..5a4c7f8cb2eb 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_generated/aio/_configuration_async.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_generated/aio/_configuration_async.py @@ -1,6 +1,6 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6246, generator: {generator}) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6282, generator: {generator}) # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- @@ -43,6 +43,7 @@ def __init__( self.credential = credential self.endpoint = endpoint self.credential_scopes = ['https://cognitiveservices.azure.com/.default'] + self.credential_scopes.extend(kwargs.pop('credential_scopes', [])) kwargs.setdefault('sdk_moniker', 'ai-formrecognizer/{}'.format(VERSION)) self._configure(**kwargs) diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_generated/aio/_form_recognizer_client_async.py b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_generated/aio/_form_recognizer_client_async.py index 5ede3dadd72c..c084748c0821 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_generated/aio/_form_recognizer_client_async.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_generated/aio/_form_recognizer_client_async.py @@ -1,6 +1,6 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6246, generator: {generator}) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6282, generator: {generator}) # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_generated/aio/operations_async/__init__.py b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_generated/aio/operations_async/__init__.py index 8d1731ea865d..6711b796ba93 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_generated/aio/operations_async/__init__.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_generated/aio/operations_async/__init__.py @@ -1,6 +1,6 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6246, generator: {generator}) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6282, generator: {generator}) # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
# -------------------------------------------------------------------------- diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_generated/aio/operations_async/_form_recognizer_client_operations_async.py b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_generated/aio/operations_async/_form_recognizer_client_operations_async.py index cf80151ed32b..64ce71c09954 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_generated/aio/operations_async/_form_recognizer_client_operations_async.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_generated/aio/operations_async/_form_recognizer_client_operations_async.py @@ -1,6 +1,6 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6246, generator: {generator}) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6282, generator: {generator}) # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union @@ -40,7 +40,8 @@ async def train_custom_model_async( :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) content_type = kwargs.pop("content_type", "application/json") # Construct URL @@ -100,7 +101,8 @@ async def get_custom_model( :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.Model"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) # Construct URL url = self.get_custom_model.metadata['url'] # type: ignore @@ -155,7 +157,8 @@ async def delete_custom_model( :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) # Construct URL url = self.delete_custom_model.metadata['url'] # type: ignore @@ -194,7 +197,8 @@ async def _analyze_with_custom_model_initial( **kwargs ) -> None: cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) content_type = kwargs.pop("content_type", "application/json") # Construct URL @@ -318,7 +322,8 @@ async def get_analyze_form_result( :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.AnalyzeOperationResult"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) # Construct URL url = self.get_analyze_form_result.metadata['url'] # type: ignore @@ -354,6 +359,211 
@@ async def get_analyze_form_result( return deserialized get_analyze_form_result.metadata = {'url': '/custom/models/{modelId}/analyzeResults/{resultId}'} # type: ignore + async def _copy_custom_model_initial( + self, + model_id: str, + copy_request: "models.CopyRequest", + **kwargs + ) -> None: + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) + content_type = kwargs.pop("content_type", "application/json") + + # Construct URL + url = self._copy_custom_model_initial.metadata['url'] # type: ignore + path_format_arguments = { + 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + 'modelId': self._serialize.url("model_id", model_id, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + + # Construct and send request + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(copy_request, 'CopyRequest') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(models.ErrorResponse, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Operation-Location']=self._deserialize('str', response.headers.get('Operation-Location')) + + if cls: + return cls(pipeline_response, None, response_headers) + + _copy_custom_model_initial.metadata = {'url': '/custom/models/{modelId}/copy'} # type: ignore + + @distributed_trace_async + async def copy_custom_model( + self, + model_id: str, + copy_request: "models.CopyRequest", + **kwargs + ) -> None: + """Copy custom model stored in this resource (the source) to user specified target Form Recognizer resource. + + Copy Custom Model. + + :param model_id: Model identifier. + :type model_id: str + :param copy_request: Copy request parameters. + :type copy_request: ~azure.ai.formrecognizer.models.CopyRequest + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword polling: True for ARMPolling, False for no polling, or a + polling object for personal polling strategy + :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
+ :return: None + :rtype: None + :raises ~azure.core.exceptions.HttpResponseError: + """ + polling = kwargs.pop('polling', False) # type: Union[bool, AsyncPollingMethod] + cls = kwargs.pop('cls', None) # type: ClsType[None] + lro_delay = kwargs.pop( + 'polling_interval', + self._config.polling_interval + ) + raw_result = await self._copy_custom_model_initial( + model_id=model_id, + copy_request=copy_request, + cls=lambda x,y,z: x, + **kwargs + ) + + def get_long_running_output(pipeline_response): + if cls: + return cls(pipeline_response, None, {}) + + if polling is True: polling_method = AsyncLROBasePolling(lro_delay, **kwargs) + elif polling is False: polling_method = AsyncNoPolling() + else: polling_method = polling + return await async_poller(self._client, raw_result, get_long_running_output, polling_method) + copy_custom_model.metadata = {'url': '/custom/models/{modelId}/copy'} # type: ignore + + @distributed_trace_async + async def get_custom_model_copy_result( + self, + model_id: str, + result_id: str, + **kwargs + ) -> "models.CopyOperationResult": + """Obtain current status and the result of a custom model copy operation. + + Get Custom Model Copy Result. + + :param model_id: Model identifier. + :type model_id: str + :param result_id: Copy operation result identifier. + :type result_id: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: CopyOperationResult or the result of cls(response) + :rtype: ~azure.ai.formrecognizer.models.CopyOperationResult + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.CopyOperationResult"] + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) + + # Construct URL + url = self.get_custom_model_copy_result.metadata['url'] # type: ignore + path_format_arguments = { + 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + 'modelId': self._serialize.url("model_id", model_id, 'str'), + 'resultId': self._serialize.url("result_id", result_id, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = 'application/json' + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(models.ErrorResponse, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('CopyOperationResult', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_custom_model_copy_result.metadata = {'url': '/custom/models/{modelId}/copyResults/{resultId}'} # type: ignore + + @distributed_trace_async + async def generate_model_copy_authorization( + self, + **kwargs + ) -> "models.CopyAuthorizationResult": + """Generate authorization to copy a model into the target Form Recognizer resource. + + Generate Copy Authorization. 
+ + :keyword callable cls: A custom type or function that will be passed the direct response + :return: CopyAuthorizationResult or the result of cls(response) + :rtype: ~azure.ai.formrecognizer.models.CopyAuthorizationResult + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.CopyAuthorizationResult"] + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) + + # Construct URL + url = self.generate_model_copy_authorization.metadata['url'] # type: ignore + path_format_arguments = { + 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = 'application/json' + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(models.ErrorResponse, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Location']=self._deserialize('str', response.headers.get('Location')) + deserialized = self._deserialize('CopyAuthorizationResult', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + generate_model_copy_authorization.metadata = {'url': '/custom/models/copyAuthorization'} # type: ignore + async def _analyze_receipt_async_initial( self, include_text_details: Optional[bool] = False, @@ -361,7 +571,8 @@ async def _analyze_receipt_async_initial( **kwargs ) -> None: cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) content_type = kwargs.pop("content_type", "application/json") # Construct URL @@ -477,7 +688,8 @@ async def get_analyze_receipt_result( :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.AnalyzeOperationResult"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) # Construct URL url = self.get_analyze_receipt_result.metadata['url'] # type: ignore @@ -518,7 +730,8 @@ async def _analyze_layout_async_initial( **kwargs ) -> None: cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) content_type = kwargs.pop("content_type", "application/json") # Construct URL @@ -628,7 +841,8 @@ async def get_analyze_layout_result( :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.AnalyzeOperationResult"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: 
ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) # Construct URL url = self.get_analyze_layout_result.metadata['url'] # type: ignore @@ -678,7 +892,8 @@ def list_custom_models( :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.Models"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) op = "full" def prepare_request(next_link=None): @@ -689,17 +904,17 @@ def prepare_request(next_link=None): 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } url = self._client.format_url(url, **path_format_arguments) + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['op'] = self._serialize.query("op", op, 'str') + else: url = next_link + query_parameters = {} # type: Dict[str, Any] path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['op'] = self._serialize.query("op", op, 'str') - # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = 'application/json' @@ -748,7 +963,8 @@ async def get_custom_models( :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.Models"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) op = "summary" # Construct URL diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_generated/models/__init__.py b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_generated/models/__init__.py index 5b6db4e8f600..9a57f152e316 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_generated/models/__init__.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_generated/models/__init__.py @@ -1,12 +1,16 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6246, generator: {generator}) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6282, generator: {generator}) # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
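The recurring `error_map` edit in these hunks is behavioral, not cosmetic: previously a caller-supplied `error_map` replaced the 404/409 defaults wholesale, whereas the regenerated code merges caller entries on top of the defaults. A minimal standalone sketch of the difference (illustrative helper names, not SDK code):

```python
# Illustrative-only comparison of the two error_map patterns in the hunks above.
from azure.core.exceptions import ResourceNotFoundError, ResourceExistsError

def old_style(**kwargs):
    # Before: a caller-supplied error_map silently replaced the defaults.
    return kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})

def new_style(**kwargs):
    # After: the 404/409 defaults always apply; caller entries merge on top.
    error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))
    return error_map

custom = {400: ValueError}
assert 404 not in old_style(error_map=custom)   # defaults were dropped
assert 404 in new_style(error_map=custom)       # defaults preserved
assert 400 in new_style(error_map=custom)       # caller entry added
```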
# -------------------------------------------------------------------------- try: from ._models_py3 import AnalyzeOperationResult from ._models_py3 import AnalyzeResult + from ._models_py3 import CopyAuthorizationResult + from ._models_py3 import CopyOperationResult + from ._models_py3 import CopyRequest + from ._models_py3 import CopyResult from ._models_py3 import DataTable from ._models_py3 import DataTableCell from ._models_py3 import DocumentResult @@ -33,6 +37,10 @@ except (SyntaxError, ImportError): from ._models import AnalyzeOperationResult # type: ignore from ._models import AnalyzeResult # type: ignore + from ._models import CopyAuthorizationResult # type: ignore + from ._models import CopyOperationResult # type: ignore + from ._models import CopyRequest # type: ignore + from ._models import CopyResult # type: ignore from ._models import DataTable # type: ignore from ._models import DataTableCell # type: ignore from ._models import DocumentResult # type: ignore @@ -70,6 +78,10 @@ __all__ = [ 'AnalyzeOperationResult', 'AnalyzeResult', + 'CopyAuthorizationResult', + 'CopyOperationResult', + 'CopyRequest', + 'CopyResult', 'DataTable', 'DataTableCell', 'DocumentResult', diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_generated/models/_form_recognizer_client_enums.py b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_generated/models/_form_recognizer_client_enums.py index 2bc1b585740d..cded7e466b2a 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_generated/models/_form_recognizer_client_enums.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_generated/models/_form_recognizer_client_enums.py @@ -1,6 +1,6 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6246, generator: {generator}) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6282, generator: {generator}) # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_generated/models/_models.py b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_generated/models/_models.py index 44ae84dc803a..1f3be66bb217 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_generated/models/_models.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_generated/models/_models.py @@ -1,6 +1,6 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6246, generator: {generator}) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6282, generator: {generator}) # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- @@ -91,6 +91,148 @@ def __init__( self.errors = kwargs.get('errors', None) +class CopyAuthorizationResult(msrest.serialization.Model): + """Request parameter that contains authorization claims for copy operation. + + All required parameters must be populated in order to send to Azure. + + :param model_id: Required. Model identifier. + :type model_id: str + :param access_token: Required. 
Token claim used to authorize the request. + :type access_token: str + :param expiration_date_time_ticks: Required. The time when the access token expires. The date + is represented as the number of seconds from 1970-01-01T0:0:0Z UTC until the expiration time. + :type expiration_date_time_ticks: long + """ + + _validation = { + 'model_id': {'required': True}, + 'access_token': {'required': True}, + 'expiration_date_time_ticks': {'required': True}, + } + + _attribute_map = { + 'model_id': {'key': 'modelId', 'type': 'str'}, + 'access_token': {'key': 'accessToken', 'type': 'str'}, + 'expiration_date_time_ticks': {'key': 'expirationDateTimeTicks', 'type': 'long'}, + } + + def __init__( + self, + **kwargs + ): + super(CopyAuthorizationResult, self).__init__(**kwargs) + self.model_id = kwargs['model_id'] + self.access_token = kwargs['access_token'] + self.expiration_date_time_ticks = kwargs['expiration_date_time_ticks'] + + +class CopyOperationResult(msrest.serialization.Model): + """Status and result of the queued copy operation. + + All required parameters must be populated in order to send to Azure. + + :param status: Required. Operation status. Possible values include: "notStarted", "running", + "succeeded", "failed". + :type status: str or ~azure.ai.formrecognizer.models.OperationStatus + :param created_date_time: Required. Date and time (UTC) when the copy operation was submitted. + :type created_date_time: ~datetime.datetime + :param last_updated_date_time: Required. Date and time (UTC) when the status was last updated. + :type last_updated_date_time: ~datetime.datetime + :param copy_result: Results of the copy operation. + :type copy_result: ~azure.ai.formrecognizer.models.CopyResult + """ + + _validation = { + 'status': {'required': True}, + 'created_date_time': {'required': True}, + 'last_updated_date_time': {'required': True}, + } + + _attribute_map = { + 'status': {'key': 'status', 'type': 'str'}, + 'created_date_time': {'key': 'createdDateTime', 'type': 'iso-8601'}, + 'last_updated_date_time': {'key': 'lastUpdatedDateTime', 'type': 'iso-8601'}, + 'copy_result': {'key': 'copyResult', 'type': 'CopyResult'}, + } + + def __init__( + self, + **kwargs + ): + super(CopyOperationResult, self).__init__(**kwargs) + self.status = kwargs['status'] + self.created_date_time = kwargs['created_date_time'] + self.last_updated_date_time = kwargs['last_updated_date_time'] + self.copy_result = kwargs.get('copy_result', None) + + +class CopyRequest(msrest.serialization.Model): + """Request parameter to copy an existing custom model from the source resource to a target resource referenced by the resource ID. + + All required parameters must be populated in order to send to Azure. + + :param target_resource_id: Required. Azure Resource Id of the target Form Recognizer resource + where the model is copied to. + :type target_resource_id: str + :param target_resource_region: Required. Location of the target Azure resource. A valid Azure + region name supported by Cognitive Services. + :type target_resource_region: str + :param copy_authorization: Required. Entity that encodes claims to authorize the copy request. 
+ :type copy_authorization: ~azure.ai.formrecognizer.models.CopyAuthorizationResult + """ + + _validation = { + 'target_resource_id': {'required': True, 'max_length': 1024, 'min_length': 0, 'pattern': r'^/subscriptions/[^/]*/resourceGroups/[^/]*/providers/Microsoft.CognitiveServices/accounts/[^/]*$'}, + 'target_resource_region': {'required': True, 'max_length': 24, 'min_length': 1, 'pattern': r'^[a-z0-9]+$'}, + 'copy_authorization': {'required': True}, + } + + _attribute_map = { + 'target_resource_id': {'key': 'targetResourceId', 'type': 'str'}, + 'target_resource_region': {'key': 'targetResourceRegion', 'type': 'str'}, + 'copy_authorization': {'key': 'copyAuthorization', 'type': 'CopyAuthorizationResult'}, + } + + def __init__( + self, + **kwargs + ): + super(CopyRequest, self).__init__(**kwargs) + self.target_resource_id = kwargs['target_resource_id'] + self.target_resource_region = kwargs['target_resource_region'] + self.copy_authorization = kwargs['copy_authorization'] + + +class CopyResult(msrest.serialization.Model): + """Custom model copy result. + + All required parameters must be populated in order to send to Azure. + + :param model_id: Required. Identifier of the target model. + :type model_id: str + :param errors: Errors returned during the copy operation. + :type errors: list[~azure.ai.formrecognizer.models.ErrorInformation] + """ + + _validation = { + 'model_id': {'required': True}, + } + + _attribute_map = { + 'model_id': {'key': 'modelId', 'type': 'str'}, + 'errors': {'key': 'errors', 'type': '[ErrorInformation]'}, + } + + def __init__( + self, + **kwargs + ): + super(CopyResult, self).__init__(**kwargs) + self.model_id = kwargs['model_id'] + self.errors = kwargs.get('errors', None) + + class DataTable(msrest.serialization.Model): """Information about the extracted table contained in a page. diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_generated/models/_models_py3.py b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_generated/models/_models_py3.py index ca0f1f97dce0..6f7110ece583 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_generated/models/_models_py3.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_generated/models/_models_py3.py @@ -1,6 +1,6 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6246, generator: {generator}) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6282, generator: {generator}) # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- @@ -107,6 +107,164 @@ def __init__( self.errors = errors +class CopyAuthorizationResult(msrest.serialization.Model): + """Request parameter that contains authorization claims for copy operation. + + All required parameters must be populated in order to send to Azure. + + :param model_id: Required. Model identifier. + :type model_id: str + :param access_token: Required. Token claim used to authorize the request. + :type access_token: str + :param expiration_date_time_ticks: Required. The time when the access token expires. The date + is represented as the number of seconds from 1970-01-01T0:0:0Z UTC until the expiration time. 
+ :type expiration_date_time_ticks: long + """ + + _validation = { + 'model_id': {'required': True}, + 'access_token': {'required': True}, + 'expiration_date_time_ticks': {'required': True}, + } + + _attribute_map = { + 'model_id': {'key': 'modelId', 'type': 'str'}, + 'access_token': {'key': 'accessToken', 'type': 'str'}, + 'expiration_date_time_ticks': {'key': 'expirationDateTimeTicks', 'type': 'long'}, + } + + def __init__( + self, + *, + model_id: str, + access_token: str, + expiration_date_time_ticks: int, + **kwargs + ): + super(CopyAuthorizationResult, self).__init__(**kwargs) + self.model_id = model_id + self.access_token = access_token + self.expiration_date_time_ticks = expiration_date_time_ticks + + +class CopyOperationResult(msrest.serialization.Model): + """Status and result of the queued copy operation. + + All required parameters must be populated in order to send to Azure. + + :param status: Required. Operation status. Possible values include: "notStarted", "running", + "succeeded", "failed". + :type status: str or ~azure.ai.formrecognizer.models.OperationStatus + :param created_date_time: Required. Date and time (UTC) when the copy operation was submitted. + :type created_date_time: ~datetime.datetime + :param last_updated_date_time: Required. Date and time (UTC) when the status was last updated. + :type last_updated_date_time: ~datetime.datetime + :param copy_result: Results of the copy operation. + :type copy_result: ~azure.ai.formrecognizer.models.CopyResult + """ + + _validation = { + 'status': {'required': True}, + 'created_date_time': {'required': True}, + 'last_updated_date_time': {'required': True}, + } + + _attribute_map = { + 'status': {'key': 'status', 'type': 'str'}, + 'created_date_time': {'key': 'createdDateTime', 'type': 'iso-8601'}, + 'last_updated_date_time': {'key': 'lastUpdatedDateTime', 'type': 'iso-8601'}, + 'copy_result': {'key': 'copyResult', 'type': 'CopyResult'}, + } + + def __init__( + self, + *, + status: Union[str, "OperationStatus"], + created_date_time: datetime.datetime, + last_updated_date_time: datetime.datetime, + copy_result: Optional["CopyResult"] = None, + **kwargs + ): + super(CopyOperationResult, self).__init__(**kwargs) + self.status = status + self.created_date_time = created_date_time + self.last_updated_date_time = last_updated_date_time + self.copy_result = copy_result + + +class CopyRequest(msrest.serialization.Model): + """Request parameter to copy an existing custom model from the source resource to a target resource referenced by the resource ID. + + All required parameters must be populated in order to send to Azure. + + :param target_resource_id: Required. Azure Resource Id of the target Form Recognizer resource + where the model is copied to. + :type target_resource_id: str + :param target_resource_region: Required. Location of the target Azure resource. A valid Azure + region name supported by Cognitive Services. + :type target_resource_region: str + :param copy_authorization: Required. Entity that encodes claims to authorize the copy request. 
+ :type copy_authorization: ~azure.ai.formrecognizer.models.CopyAuthorizationResult + """ + + _validation = { + 'target_resource_id': {'required': True, 'max_length': 1024, 'min_length': 0, 'pattern': r'^/subscriptions/[^/]*/resourceGroups/[^/]*/providers/Microsoft.CognitiveServices/accounts/[^/]*$'}, + 'target_resource_region': {'required': True, 'max_length': 24, 'min_length': 1, 'pattern': r'^[a-z0-9]+$'}, + 'copy_authorization': {'required': True}, + } + + _attribute_map = { + 'target_resource_id': {'key': 'targetResourceId', 'type': 'str'}, + 'target_resource_region': {'key': 'targetResourceRegion', 'type': 'str'}, + 'copy_authorization': {'key': 'copyAuthorization', 'type': 'CopyAuthorizationResult'}, + } + + def __init__( + self, + *, + target_resource_id: str, + target_resource_region: str, + copy_authorization: "CopyAuthorizationResult", + **kwargs + ): + super(CopyRequest, self).__init__(**kwargs) + self.target_resource_id = target_resource_id + self.target_resource_region = target_resource_region + self.copy_authorization = copy_authorization + + +class CopyResult(msrest.serialization.Model): + """Custom model copy result. + + All required parameters must be populated in order to send to Azure. + + :param model_id: Required. Identifier of the target model. + :type model_id: str + :param errors: Errors returned during the copy operation. + :type errors: list[~azure.ai.formrecognizer.models.ErrorInformation] + """ + + _validation = { + 'model_id': {'required': True}, + } + + _attribute_map = { + 'model_id': {'key': 'modelId', 'type': 'str'}, + 'errors': {'key': 'errors', 'type': '[ErrorInformation]'}, + } + + def __init__( + self, + *, + model_id: str, + errors: Optional[List["ErrorInformation"]] = None, + **kwargs + ): + super(CopyResult, self).__init__(**kwargs) + self.model_id = model_id + self.errors = errors + + class DataTable(msrest.serialization.Model): """Information about the extracted table contained in a page. diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_generated/operations/__init__.py b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_generated/operations/__init__.py index fb98aab6d184..b0169450f22b 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_generated/operations/__init__.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_generated/operations/__init__.py @@ -1,6 +1,6 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6246, generator: {generator}) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6282, generator: {generator}) # Changes may cause incorrect behavior and will be lost if the code is regenerated. 
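For orientation, the four `Copy*` models added above form one chain: a `CopyAuthorizationResult` (issued by the target resource) is embedded in a `CopyRequest` (posted to the source resource), and the outcome is reported as a `CopyOperationResult` carrying a `CopyResult`. A hypothetical construction with placeholder values, respecting the validation rules declared above:

```python
# Placeholder values throughout; the import path is the package-private
# generated namespace shown in this diff.
from azure.ai.formrecognizer._generated.models import CopyAuthorizationResult, CopyRequest

auth = CopyAuthorizationResult(
    model_id="00000000-0000-0000-0000-000000000000",
    access_token="<access-token>",
    expiration_date_time_ticks=637255865516657423,
)
copy_request = CopyRequest(
    # Must match the declared pattern:
    # ^/subscriptions/.../providers/Microsoft.CognitiveServices/accounts/...$
    target_resource_id="/subscriptions/<sub-id>/resourceGroups/<rg>"
                       "/providers/Microsoft.CognitiveServices/accounts/<account>",
    # Lowercase alphanumeric region name, 1-24 characters, per the validation above.
    target_resource_region="westus2",
    copy_authorization=auth,
)
```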
# -------------------------------------------------------------------------- diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_generated/operations/_form_recognizer_client_operations.py b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_generated/operations/_form_recognizer_client_operations.py index 57731e914413..6a93c83008f9 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_generated/operations/_form_recognizer_client_operations.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_generated/operations/_form_recognizer_client_operations.py @@ -1,6 +1,6 @@ # coding=utf-8 # -------------------------------------------------------------------------- -# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6246, generator: {generator}) +# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.0.6282, generator: {generator}) # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from typing import TYPE_CHECKING @@ -44,7 +44,8 @@ def train_custom_model_async( :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) content_type = kwargs.pop("content_type", "application/json") # Construct URL @@ -105,7 +106,8 @@ def get_custom_model( :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.Model"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) # Construct URL url = self.get_custom_model.metadata['url'] # type: ignore @@ -161,7 +163,8 @@ def delete_custom_model( :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) # Construct URL url = self.delete_custom_model.metadata['url'] # type: ignore @@ -201,7 +204,8 @@ def _analyze_with_custom_model_initial( ): # type: (...) 
-> None cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) content_type = kwargs.pop("content_type", "application/json") # Construct URL @@ -327,7 +331,8 @@ def get_analyze_form_result( :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.AnalyzeOperationResult"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) # Construct URL url = self.get_analyze_form_result.metadata['url'] # type: ignore @@ -363,6 +368,215 @@ def get_analyze_form_result( return deserialized get_analyze_form_result.metadata = {'url': '/custom/models/{modelId}/analyzeResults/{resultId}'} # type: ignore + def _copy_custom_model_initial( + self, + model_id, # type: str + copy_request, # type: "models.CopyRequest" + **kwargs # type: Any + ): + # type: (...) -> None + cls = kwargs.pop('cls', None) # type: ClsType[None] + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) + content_type = kwargs.pop("content_type", "application/json") + + # Construct URL + url = self._copy_custom_model_initial.metadata['url'] # type: ignore + path_format_arguments = { + 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + 'modelId': self._serialize.url("model_id", model_id, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') + + # Construct and send request + body_content_kwargs = {} # type: Dict[str, Any] + body_content = self._serialize.body(copy_request, 'CopyRequest') + body_content_kwargs['content'] = body_content + request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs) + + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [202]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(models.ErrorResponse, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Operation-Location']=self._deserialize('str', response.headers.get('Operation-Location')) + + if cls: + return cls(pipeline_response, None, response_headers) + + _copy_custom_model_initial.metadata = {'url': '/custom/models/{modelId}/copy'} # type: ignore + + @distributed_trace + def begin_copy_custom_model( + self, + model_id, # type: str + copy_request, # type: "models.CopyRequest" + **kwargs # type: Any + ): + # type: (...) -> LROPoller + """Copy custom model stored in this resource (the source) to user specified target Form Recognizer resource. + + Copy Custom Model. + + :param model_id: Model identifier. + :type model_id: str + :param copy_request: Copy request parameters. 
+ :type copy_request: ~azure.ai.formrecognizer.models.CopyRequest + :keyword callable cls: A custom type or function that will be passed the direct response + :keyword polling: True for ARMPolling, False for no polling, or a + polling object for personal polling strategy + :paramtype polling: bool or ~azure.core.polling.PollingMethod + :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. + :return: An instance of LROPoller that returns None + :rtype: ~azure.core.polling.LROPoller[None] + :raises ~azure.core.exceptions.HttpResponseError: + """ + polling = kwargs.pop('polling', False) # type: Union[bool, PollingMethod] + cls = kwargs.pop('cls', None) # type: ClsType[None] + lro_delay = kwargs.pop( + 'polling_interval', + self._config.polling_interval + ) + raw_result = self._copy_custom_model_initial( + model_id=model_id, + copy_request=copy_request, + cls=lambda x,y,z: x, + **kwargs + ) + + def get_long_running_output(pipeline_response): + if cls: + return cls(pipeline_response, None, {}) + + if polling is True: polling_method = LROBasePolling(lro_delay, **kwargs) + elif polling is False: polling_method = NoPolling() + else: polling_method = polling + return LROPoller(self._client, raw_result, get_long_running_output, polling_method) + begin_copy_custom_model.metadata = {'url': '/custom/models/{modelId}/copy'} # type: ignore + + @distributed_trace + def get_custom_model_copy_result( + self, + model_id, # type: str + result_id, # type: str + **kwargs # type: Any + ): + # type: (...) -> "models.CopyOperationResult" + """Obtain current status and the result of a custom model copy operation. + + Get Custom Model Copy Result. + + :param model_id: Model identifier. + :type model_id: str + :param result_id: Copy operation result identifier. 
+ :type result_id: str + :keyword callable cls: A custom type or function that will be passed the direct response + :return: CopyOperationResult or the result of cls(response) + :rtype: ~azure.ai.formrecognizer.models.CopyOperationResult + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.CopyOperationResult"] + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) + + # Construct URL + url = self.get_custom_model_copy_result.metadata['url'] # type: ignore + path_format_arguments = { + 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + 'modelId': self._serialize.url("model_id", model_id, 'str'), + 'resultId': self._serialize.url("result_id", result_id, 'str'), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = 'application/json' + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(models.ErrorResponse, response) + raise HttpResponseError(response=response, model=error) + + deserialized = self._deserialize('CopyOperationResult', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, {}) + + return deserialized + get_custom_model_copy_result.metadata = {'url': '/custom/models/{modelId}/copyResults/{resultId}'} # type: ignore + + @distributed_trace + def generate_model_copy_authorization( + self, + **kwargs # type: Any + ): + # type: (...) -> "models.CopyAuthorizationResult" + """Generate authorization to copy a model into the target Form Recognizer resource. + + Generate Copy Authorization. 
+ + :keyword callable cls: A custom type or function that will be passed the direct response + :return: CopyAuthorizationResult or the result of cls(response) + :rtype: ~azure.ai.formrecognizer.models.CopyAuthorizationResult + :raises: ~azure.core.exceptions.HttpResponseError + """ + cls = kwargs.pop('cls', None) # type: ClsType["models.CopyAuthorizationResult"] + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) + + # Construct URL + url = self.generate_model_copy_authorization.metadata['url'] # type: ignore + path_format_arguments = { + 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + + # Construct headers + header_parameters = {} # type: Dict[str, Any] + header_parameters['Accept'] = 'application/json' + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + error = self._deserialize(models.ErrorResponse, response) + raise HttpResponseError(response=response, model=error) + + response_headers = {} + response_headers['Location']=self._deserialize('str', response.headers.get('Location')) + deserialized = self._deserialize('CopyAuthorizationResult', pipeline_response) + + if cls: + return cls(pipeline_response, deserialized, response_headers) + + return deserialized + generate_model_copy_authorization.metadata = {'url': '/custom/models/copyAuthorization'} # type: ignore + def _analyze_receipt_async_initial( self, include_text_details=False, # type: Optional[bool] @@ -371,7 +585,8 @@ def _analyze_receipt_async_initial( ): # type: (...) -> None cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) content_type = kwargs.pop("content_type", "application/json") # Construct URL @@ -489,7 +704,8 @@ def get_analyze_receipt_result( :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.AnalyzeOperationResult"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) # Construct URL url = self.get_analyze_receipt_result.metadata['url'] # type: ignore @@ -531,7 +747,8 @@ def _analyze_layout_async_initial( ): # type: (...) 
-> None cls = kwargs.pop('cls', None) # type: ClsType[None] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) content_type = kwargs.pop("content_type", "application/json") # Construct URL @@ -643,7 +860,8 @@ def get_analyze_layout_result( :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.AnalyzeOperationResult"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) # Construct URL url = self.get_analyze_layout_result.metadata['url'] # type: ignore @@ -694,7 +912,8 @@ def list_custom_models( :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.Models"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) op = "full" def prepare_request(next_link=None): @@ -705,17 +924,17 @@ def prepare_request(next_link=None): 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } url = self._client.format_url(url, **path_format_arguments) + # Construct parameters + query_parameters = {} # type: Dict[str, Any] + query_parameters['op'] = self._serialize.query("op", op, 'str') + else: url = next_link + query_parameters = {} # type: Dict[str, Any] path_format_arguments = { 'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } url = self._client.format_url(url, **path_format_arguments) - - # Construct parameters - query_parameters = {} # type: Dict[str, Any] - query_parameters['op'] = self._serialize.query("op", op, 'str') - # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = 'application/json' @@ -765,7 +984,8 @@ def get_custom_models( :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.Models"] - error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError}) + error_map = {404: ResourceNotFoundError, 409: ResourceExistsError} + error_map.update(kwargs.pop('error_map', {})) op = "summary" # Construct URL diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_models.py b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_models.py index 59e8f2e97c30..3b8bf933a30e 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_models.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_models.py @@ -306,7 +306,6 @@ def _from_generated(cls, field, value, read_result): confidence=adjust_confidence(value.confidence) if value else None, ) - @classmethod def _from_generated_unlabeled(cls, field, idx, page, read_result): return cls( @@ -474,6 +473,7 @@ def __repr__(self): self.text, self.bounding_box, repr(self.words), self.page_number )[:1024] + class FormWord(FormContent): """Represents a word recognized from the input document. 
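The `polling` keyword accepted by the async `copy_custom_model` and sync `begin_copy_custom_model` methods above is a three-way switch, and its default in this generated code is `False`, so callers only get real long-running polling by opting in. A condensed, illustrative restatement of the dispatch (sync flavor; the helper name is not from the SDK):

```python
# Condensed sketch of the polling dispatch used by the copy LRO methods above.
from azure.core.polling import NoPolling
from azure.core.polling.base_polling import LROBasePolling

def pick_polling_method(polling, lro_delay, **kwargs):
    if polling is True:
        return LROBasePolling(lro_delay, **kwargs)  # poll the Operation-Location URL
    if polling is False:
        return NoPolling()  # resolve immediately with the initial 202 response
    return polling  # caller supplied their own PollingMethod instance
```

The hand-written client layer opts in explicitly, as seen further down where `copy_model` passes an `AsyncLROBasePolling(..., lro_algorithms=[CopyPolling()])`.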
@@ -866,9 +866,9 @@ def __init__(self, **kwargs): self.last_modified = kwargs.get("last_modified", None) @classmethod - def _from_generated(cls, model): + def _from_generated(cls, model, model_id=None): return cls( - model_id=model.model_id, + model_id=model_id if model_id else model.model_id, status=model.status, created_on=model.created_date_time, last_modified=model.last_updated_date_time diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_polling.py b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_polling.py index 07717e108c8e..f1ef5267a283 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_polling.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/_polling.py @@ -92,3 +92,36 @@ def get_status(self, pipeline_response): # pylint: disable=no-self-use if errors: raise_error(response, errors, message="") return status + + +class CopyPolling(OperationResourcePolling): + """Polling method overrides for copy endpoint. + + """ + + def get_status(self, pipeline_response): # pylint: disable=no-self-use + # type: (PipelineResponse) -> str + """Process the latest status update retrieved from an "Operation-Location" header. + Raise errors for issues occurring during the copy model operation. + + :param azure.core.pipeline.PipelineResponse pipeline_response: The response to extract the status. + :raises: BadResponse if response has no body, or body does not contain status. + HttpResponseError if there is an error with the input document. + """ + response = pipeline_response.http_response + if _is_empty(response): + raise BadResponse( + "The response from long running operation does not contain a body." + ) + + body = _as_json(response) + status = body.get("status") + if not status: + raise BadResponse("No status found in body") + if status.lower() == "failed": + copy_result = body.get("copyResult") + if copy_result: + errors = copy_result.get("errors") + if errors: + raise_error(response, errors, message="") + return status diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/aio/_form_training_client_async.py b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/aio/_form_training_client_async.py index 54ef2982ffdc..d12010ea13e5 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/aio/_form_training_client_async.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/azure/ai/formrecognizer/aio/_form_training_client_async.py @@ -6,11 +6,13 @@ # pylint: disable=protected-access +import json from typing import ( Optional, Any, - Union, AsyncIterable, + Dict, + Union, TYPE_CHECKING, ) from azure.core.polling import async_poller @@ -19,8 +21,14 @@ from azure.core.tracing.decorator_async import distributed_trace_async from ._form_recognizer_client_async import FormRecognizerClient from .._generated.aio._form_recognizer_client_async import FormRecognizerClient as FormRecognizer -from .._generated.models import TrainRequest, TrainSourceFilter -from .._generated.models import Model +from .._generated.models import ( + TrainRequest, + TrainSourceFilter, + Model, + CopyRequest, + CopyOperationResult, + CopyAuthorizationResult +) from .._helpers import error_map, get_authentication_policy, POLLING_INTERVAL from .._models import ( CustomFormModelInfo, @@ -28,8 +36,9 @@ CustomFormModel ) from .._user_agent import USER_AGENT -from .._polling import TrainingPolling +from .._polling import TrainingPolling, CopyPolling if TYPE_CHECKING: + from 
azure.core.pipeline import PipelineResponse from azure.core.credentials import AzureKeyCredential from azure.core.credentials_async import AsyncTokenCredential @@ -247,6 +256,100 @@ async def get_custom_model(self, model_id: str, **kwargs: Any) -> CustomFormMode ) return CustomFormModel._from_generated(response) + @distributed_trace_async + async def get_copy_authorization( + self, + resource_id: str, + resource_region: str, + **kwargs: Any + ) -> Dict[str, Union[str, int]]: + """Generate authorization for copying a custom model into the target Form Recognizer resource. + This should be called by the target resource (where the model will be copied to) + and the output can be passed as the `target` parameter into :func:`~copy_model()`. + + :param str resource_id: Azure Resource Id of the target Form Recognizer resource + where the model will be copied to. + :param str resource_region: Location of the target Form Recognizer resource. A valid Azure + region name supported by Cognitive Services. + :return: A dictionary with values for the copy authorization - + "modelId", "accessToken", "resourceId", "resourceRegion", and "expirationDateTimeTicks". + :rtype: Dict[str, Union[str, int]] + :raises ~azure.core.exceptions.HttpResponseError: + + .. admonition:: Example: + + .. literalinclude:: ../samples/async_samples/sample_copy_model_async.py + :start-after: [START get_copy_authorization_async] + :end-before: [END get_copy_authorization_async] + :language: python + :dedent: 8 + :caption: Authorize the target resource to receive the copied model + """ + + response = await self._client.generate_model_copy_authorization( # type: ignore + cls=lambda pipeline_response, deserialized, response_headers: pipeline_response, + error_map=error_map, + **kwargs + ) # type: PipelineResponse + target = json.loads(response.http_response.text()) + target["resourceId"] = resource_id + target["resourceRegion"] = resource_region + return target + + @distributed_trace_async + async def copy_model( + self, + model_id: str, + target: dict, + **kwargs: Any + ) -> CustomFormModelInfo: + """Copy a custom model stored in this resource (the source) to the user specified + target Form Recognizer resource. This should be called with the source Form Recognizer resource + (with the model that is intended to be copied). The `target` parameter should be supplied from the + target resource's output from calling the :func:`~get_copy_authorization()` method. + + :param str model_id: Model identifier of the model to copy to target resource. + :param dict target: + The copy authorization generated from the target resource's call to + :func:`~get_copy_authorization()`. + :keyword int polling_interval: Default waiting time between two polls for LRO operations if + no Retry-After header is present. + :return: CustomFormModelInfo + :rtype: ~azure.ai.formrecognizer.CustomFormModelInfo + :raises ~azure.core.exceptions.HttpResponseError: + + .. admonition:: Example: + + .. 
literalinclude:: ../samples/async_samples/sample_copy_model_async.py + :start-after: [START copy_model_async] + :end-before: [END copy_model_async] + :language: python + :dedent: 8 + :caption: Copy a model from the source resource to the target resource + """ + polling_interval = kwargs.pop("polling_interval", POLLING_INTERVAL) + + def _copy_callback(raw_response, _, headers): # pylint: disable=unused-argument + copy_result = self._client._deserialize(CopyOperationResult, raw_response) + return CustomFormModelInfo._from_generated(copy_result, target["modelId"]) + + return await self._client.copy_custom_model( # type: ignore + model_id=model_id, + copy_request=CopyRequest( + target_resource_id=target["resourceId"], + target_resource_region=target["resourceRegion"], + copy_authorization=CopyAuthorizationResult( + access_token=target["accessToken"], + model_id=target["modelId"], + expiration_date_time_ticks=target["expirationDateTimeTicks"] + ) + ), + cls=kwargs.pop("cls", _copy_callback), + polling=AsyncLROBasePolling(timeout=polling_interval, lro_algorithms=[CopyPolling()], **kwargs), + error_map=error_map, + **kwargs + ) + def get_form_recognizer_client(self, **kwargs: Any) -> FormRecognizerClient: """Get an instance of a FormRecognizerClient from FormTrainingClient. diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/README.md b/sdk/formrecognizer/azure-ai-formrecognizer/samples/README.md index 8bee2aa4d46e..22312586dfbf 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/samples/README.md +++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/README.md @@ -27,6 +27,8 @@ All of these samples need the endpoint to your Form Recognizer resource ([instru |[sample_train_model_without_labels.py][sample_train_model_without_labels] and [sample_train_model_without_labels_async.py][sample_train_model_without_labels_async]|Train a custom model with unlabeled data| |[sample_train_model_with_labels.py][sample_train_model_with_labels] and [sample_train_model_with_labels_async.py][sample_train_model_with_labels_async]|Train a custom model with labeled data| |[sample_manage_custom_models.py][sample_manage_custom_models] and [sample_manage_custom_models_async.py][sample_manage_custom_models_async]|Manage the custom models in your account| +|[sample_copy_model.py][sample_copy_model] and [sample_copy_model_async.py][sample_copy_model_async]|Copy a custom model from one Form Recognizer resource to another| + ## Prerequisites * Python 2.7, or 3.5 or later is required to use this package (3.5 or later if using asyncio) @@ -90,3 +92,5 @@ what you can do with the Azure Form Recognizer client library. 
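As the docstrings above spell out, `get_copy_authorization()` deliberately returns a plain dictionary rather than a model type: three keys come from the service's copy-authorization response, two are stamped on client-side from the caller's arguments, and `copy_model()` later unpacks all five into a `CopyRequest`. Its shape, with placeholder values:

```python
# Shape of the `target` dict passed between get_copy_authorization() and
# copy_model(); every value below is a placeholder.
target = {
    # Returned by the service's generate_model_copy_authorization call:
    "modelId": "00000000-0000-0000-0000-000000000000",
    "accessToken": "<access-token>",
    "expirationDateTimeTicks": 637255865516657423,
    # Added client-side from the caller's arguments:
    "resourceId": "/subscriptions/<sub-id>/resourceGroups/<rg>"
                  "/providers/Microsoft.CognitiveServices/accounts/<account>",
    "resourceRegion": "westus2",
}
```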
[sample_train_model_with_labels_async]: https://github.com/Azure/azure-sdk-for-python/tree/master/sdk/formrecognizer/azure-ai-formrecognizer/samples/async_samples/sample_train_model_with_labels_async.py [sample_train_model_without_labels]: https://github.com/Azure/azure-sdk-for-python/tree/master/sdk/formrecognizer/azure-ai-formrecognizer/samples/sample_train_model_without_labels.py [sample_train_model_without_labels_async]: https://github.com/Azure/azure-sdk-for-python/tree/master/sdk/formrecognizer/azure-ai-formrecognizer/samples/async_samples/sample_train_model_without_labels_async.py +[sample_copy_model]: https://github.com/Azure/azure-sdk-for-python/tree/master/sdk/formrecognizer/azure-ai-formrecognizer/samples/sample_copy_model.py +[sample_copy_model_async]: https://github.com/Azure/azure-sdk-for-python/tree/master/sdk/formrecognizer/azure-ai-formrecognizer/samples/async_samples/sample_copy_model_async.py \ No newline at end of file diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/async_samples/sample_copy_model_async.py b/sdk/formrecognizer/azure-ai-formrecognizer/samples/async_samples/sample_copy_model_async.py new file mode 100644 index 000000000000..90b0c50ad8b6 --- /dev/null +++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/async_samples/sample_copy_model_async.py @@ -0,0 +1,81 @@ +# coding: utf-8 + +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +""" +FILE: sample_copy_model_async.py + +DESCRIPTION: + This sample demonstrates how to copy a custom model from a source Form Recognizer resource + to a target Form Recognizer resource. + +USAGE: + python sample_copy_model_async.py + + Set the environment variables with your own values before running the sample: + 1) AZURE_FORM_RECOGNIZER_SOURCE_ENDPOINT - the endpoint to your source Form Recognizer resource. + 2) AZURE_FORM_RECOGNIZER_SOURCE_KEY - your source Form Recognizer API key + 3) AZURE_FORM_RECOGNIZER_TARGET_ENDPOINT - the endpoint to your target Form Recognizer resource. + 4) AZURE_FORM_RECOGNIZER_TARGET_KEY - your target Form Recognizer API key + 5) AZURE_SOURCE_MODEL_ID - the model ID from the source resource to be copied over to the target resource. 
+    6) AZURE_FORM_RECOGNIZER_TARGET_REGION - the region the target resource was created in
+    7) AZURE_FORM_RECOGNIZER_TARGET_RESOURCE_ID - the entire resource ID to the target resource
+"""
+
+import os
+import asyncio
+
+
+class CopyModelSampleAsync(object):
+
+    async def copy_model_async(self):
+        from azure.core.credentials import AzureKeyCredential
+        from azure.ai.formrecognizer.aio import FormTrainingClient
+
+        source_endpoint = os.environ["AZURE_FORM_RECOGNIZER_SOURCE_ENDPOINT"]
+        source_key = os.environ["AZURE_FORM_RECOGNIZER_SOURCE_KEY"]
+        target_endpoint = os.environ["AZURE_FORM_RECOGNIZER_TARGET_ENDPOINT"]
+        target_key = os.environ["AZURE_FORM_RECOGNIZER_TARGET_KEY"]
+        source_model_id = os.environ["AZURE_SOURCE_MODEL_ID"]
+        target_region = os.environ["AZURE_FORM_RECOGNIZER_TARGET_REGION"]
+        target_resource_id = os.environ["AZURE_FORM_RECOGNIZER_TARGET_RESOURCE_ID"]
+
+        # [START get_copy_authorization_async]
+        target_client = FormTrainingClient(endpoint=target_endpoint, credential=AzureKeyCredential(target_key))
+
+        async with target_client:
+            target = await target_client.get_copy_authorization(
+                resource_region=target_region,
+                resource_id=target_resource_id
+            )
+        # [END get_copy_authorization_async]
+
+        # [START copy_model_async]
+        source_client = FormTrainingClient(endpoint=source_endpoint, credential=AzureKeyCredential(source_key))
+        target_client = FormTrainingClient(endpoint=target_endpoint, credential=AzureKeyCredential(target_key))
+
+        async with source_client:
+            copy = await source_client.copy_model(
+                model_id=source_model_id,
+                target=target
+            )
+
+        async with target_client:
+            copied_over_model = await target_client.get_custom_model(copy.model_id)
+            print("Model ID: {}".format(copied_over_model.model_id))
+            print("Status: {}".format(copied_over_model.status))
+        # [END copy_model_async]
+
+
+async def main():
+    sample = CopyModelSampleAsync()
+    await sample.copy_model_async()
+
+
+if __name__ == '__main__':
+    loop = asyncio.get_event_loop()
+    loop.run_until_complete(main())
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/sample_copy_model.py b/sdk/formrecognizer/azure-ai-formrecognizer/samples/sample_copy_model.py
new file mode 100644
index 000000000000..23901a65e856
--- /dev/null
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/sample_copy_model.py
@@ -0,0 +1,73 @@
+# coding: utf-8
+
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+"""
+FILE: sample_copy_model.py
+
+DESCRIPTION:
+    This sample demonstrates how to copy a custom model from a source Form Recognizer resource
+    to a target Form Recognizer resource.
+
+USAGE:
+    python sample_copy_model.py
+
+    Set the environment variables with your own values before running the sample:
+    1) AZURE_FORM_RECOGNIZER_SOURCE_ENDPOINT - the endpoint to your source Form Recognizer resource.
+    2) AZURE_FORM_RECOGNIZER_SOURCE_KEY - your source Form Recognizer API key
+    3) AZURE_FORM_RECOGNIZER_TARGET_ENDPOINT - the endpoint to your target Form Recognizer resource.
+    4) AZURE_FORM_RECOGNIZER_TARGET_KEY - your target Form Recognizer API key
+    5) AZURE_SOURCE_MODEL_ID - the model ID from the source resource to be copied over to the target resource.
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/samples/sample_copy_model.py b/sdk/formrecognizer/azure-ai-formrecognizer/samples/sample_copy_model.py
new file mode 100644
index 000000000000..23901a65e856
--- /dev/null
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/samples/sample_copy_model.py
@@ -0,0 +1,73 @@
+# coding: utf-8
+
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+"""
+FILE: sample_copy_model.py
+
+DESCRIPTION:
+    This sample demonstrates how to copy a custom model from a source Form Recognizer resource
+    to a target Form Recognizer resource.
+
+USAGE:
+    python sample_copy_model.py
+
+    Set the environment variables with your own values before running the sample:
+    1) AZURE_FORM_RECOGNIZER_SOURCE_ENDPOINT - the endpoint to your source Form Recognizer resource.
+    2) AZURE_FORM_RECOGNIZER_SOURCE_KEY - your source Form Recognizer API key
+    3) AZURE_FORM_RECOGNIZER_TARGET_ENDPOINT - the endpoint to your target Form Recognizer resource.
+    4) AZURE_FORM_RECOGNIZER_TARGET_KEY - your target Form Recognizer API key
+    5) AZURE_SOURCE_MODEL_ID - the model ID from the source resource to be copied over to the target resource.
+    6) AZURE_FORM_RECOGNIZER_TARGET_REGION - the region the target resource was created in
+    7) AZURE_FORM_RECOGNIZER_TARGET_RESOURCE_ID - the entire resource ID to the target resource
+"""
+
+import os
+
+
+class CopyModelSample(object):
+
+    def copy_model(self):
+        from azure.core.credentials import AzureKeyCredential
+        from azure.ai.formrecognizer import FormTrainingClient
+
+        source_endpoint = os.environ["AZURE_FORM_RECOGNIZER_SOURCE_ENDPOINT"]
+        source_key = os.environ["AZURE_FORM_RECOGNIZER_SOURCE_KEY"]
+        target_endpoint = os.environ["AZURE_FORM_RECOGNIZER_TARGET_ENDPOINT"]
+        target_key = os.environ["AZURE_FORM_RECOGNIZER_TARGET_KEY"]
+        source_model_id = os.environ["AZURE_SOURCE_MODEL_ID"]
+        target_region = os.environ["AZURE_FORM_RECOGNIZER_TARGET_REGION"]
+        target_resource_id = os.environ["AZURE_FORM_RECOGNIZER_TARGET_RESOURCE_ID"]
+
+        # [START get_copy_authorization]
+        target_client = FormTrainingClient(endpoint=target_endpoint, credential=AzureKeyCredential(target_key))
+
+        target = target_client.get_copy_authorization(
+            resource_region=target_region,
+            resource_id=target_resource_id
+        )
+        # [END get_copy_authorization]
+
+        # [START begin_copy_model]
+        source_client = FormTrainingClient(endpoint=source_endpoint, credential=AzureKeyCredential(source_key))
+        target_client = FormTrainingClient(endpoint=target_endpoint, credential=AzureKeyCredential(target_key))
+
+        poller = source_client.begin_copy_model(
+            model_id=source_model_id,
+            target=target
+        )
+        copy = poller.result()
+
+        copied_over_model = target_client.get_custom_model(copy.model_id)
+        print("Model ID: {}".format(copied_over_model.model_id))
+        print("Status: {}".format(copied_over_model.status))
+        # [END begin_copy_model]
+
+
+if __name__ == '__main__':
+    sample = CopyModelSample()
+    sample.copy_model()
diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/tests/recordings/test_copy_model.test_copy_authorization.yaml b/sdk/formrecognizer/azure-ai-formrecognizer/tests/recordings/test_copy_model.test_copy_authorization.yaml
new file mode 100644
index 000000000000..956b96dc8b97
--- /dev/null
+++ b/sdk/formrecognizer/azure-ai-formrecognizer/tests/recordings/test_copy_model.test_copy_authorization.yaml
@@ -0,0 +1,42 @@
+interactions:
+- request:
+    body: null
+    headers:
+      Accept:
+      - application/json
+      Accept-Encoding:
+      - gzip, deflate
+      Connection:
+      - keep-alive
+      Content-Length:
+      - '0'
+      User-Agent:
+      - azsdk-python-ai-formrecognizer/1.0.0b3 Python/3.7.3 (Windows-10-10.0.18362-SP0)
+        Python/3.7.3 (Windows-10-10.0.18362-SP0)
+    method: POST
+    uri: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/copyAuthorization
+  response:
+    body:
+      string: '{"modelId": "3a1af284-1187-402d-9869-39ad09181bf5", "accessToken":
+        "redacted", "expirationDateTimeTicks": 637255865516657423}'
+    headers:
+      apim-request-id:
+      - be03b750-1177-47fa-9473-028489a9ce67
+      content-type:
+      - application/json; charset=utf-8
+      date:
+      - Tue, 19 May 2020 15:49:11 GMT
+      location:
+      - https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/3a1af284-1187-402d-9869-39ad09181bf5
+      strict-transport-security:
+      - max-age=31536000; includeSubDomains; preload
+      transfer-encoding:
+      - chunked
+      x-content-type-options:
+      - nosniff
+      x-envoy-upstream-service-time:
+      - '231'
+    status:
+      code: 201
+      message: Created
+version: 1
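The synchronous client reports the same failure through the poller returned by begin_copy_model. A minimal sketch, assuming poller.result() raises azure.core.exceptions.HttpResponseError when the copy operation ends in a failed status; the copy_and_verify helper name and its parameters are illustrative only:

from azure.core.exceptions import HttpResponseError


def copy_and_verify(source_client, target_client, source_model_id, target):
    # source_client and target_client are FormTrainingClient instances and
    # target is the authorization returned by get_copy_authorization, as in
    # sample_copy_model.py above. This helper is a hypothetical illustration.
    poller = source_client.begin_copy_model(model_id=source_model_id, target=target)
    try:
        copy = poller.result()  # blocks until the copy operation completes
    except HttpResponseError as e:
        # Assumed failure surface for a failed copy, e.g. an unresolvable
        # target resource ID as in the recording that follows.
        print("Copy failed: {}".format(e.message))
        return None
    # Confirm the copied model is now retrievable from the target resource.
    return target_client.get_custom_model(copy.model_id)

diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/tests/recordings/test_copy_model.test_copy_model_fail.yaml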
b/sdk/formrecognizer/azure-ai-formrecognizer/tests/recordings/test_copy_model.test_copy_model_fail.yaml new file mode 100644 index 000000000000..0120b1902049 --- /dev/null +++ b/sdk/formrecognizer/azure-ai-formrecognizer/tests/recordings/test_copy_model.test_copy_model_fail.yaml @@ -0,0 +1,361 @@ +interactions: +- request: + body: 'b''b\''{"source": "containersasurl", "sourceFilter": {"prefix": "", "includeSubFolders": + false}, "useLabelFile": false}\''''' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '288' + Content-Type: + - application/json + User-Agent: + - azsdk-python-ai-formrecognizer/1.0.0b3 Python/3.7.3 (Windows-10-10.0.18362-SP0) + Python/3.7.3 (Windows-10-10.0.18362-SP0) + method: POST + uri: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models + response: + body: + string: '' + headers: + apim-request-id: + - 4380f150-98ac-4265-8275-9e7e2c23a149 + content-length: + - '0' + date: + - Thu, 14 May 2020 01:01:51 GMT + location: + - https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/b7cc00ef-0ead-4f28-8619-445cba528b91 + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + x-content-type-options: + - nosniff + x-envoy-upstream-service-time: + - '5205' + status: + code: 201 + message: Created +- request: + body: null + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-ai-formrecognizer/1.0.0b3 Python/3.7.3 (Windows-10-10.0.18362-SP0) + Python/3.7.3 (Windows-10-10.0.18362-SP0) + method: GET + uri: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/b7cc00ef-0ead-4f28-8619-445cba528b91?includeKeys=true + response: + body: + string: '{"modelInfo": {"modelId": "b7cc00ef-0ead-4f28-8619-445cba528b91", "status": + "creating", "createdDateTime": "2020-05-14T01:01:52Z", "lastUpdatedDateTime": + "2020-05-14T01:01:52Z"}, "accessToken": "redacted"}' + headers: + apim-request-id: + - f54580b2-8b7d-4320-a0eb-595204ece538 + content-type: + - application/json; charset=utf-8 + date: + - Thu, 14 May 2020 01:01:57 GMT + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + x-content-type-options: + - nosniff + x-envoy-upstream-service-time: + - '112' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-ai-formrecognizer/1.0.0b3 Python/3.7.3 (Windows-10-10.0.18362-SP0) + Python/3.7.3 (Windows-10-10.0.18362-SP0) + method: GET + uri: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/b7cc00ef-0ead-4f28-8619-445cba528b91?includeKeys=true + response: + body: + string: '{"modelInfo": {"modelId": "b7cc00ef-0ead-4f28-8619-445cba528b91", "status": + "ready", "createdDateTime": "2020-05-14T01:01:52Z", "lastUpdatedDateTime": + "2020-05-14T01:02:01Z"}, "keys": {"clusters": {"0": ["Additional Notes:", + "Address:", "Address:", "Company Name:", "Company Phone:", "Dated As:", "Email:", + "Hero Limited", "Name:", "Phone:", "Phone:", "Purchase Order", "Purchase Order", + "Purchase Order #:", "SUBTOTAL", "Seattle, WA 93849 Phone:", "Shipped From", + "Shipped To", "TAX", "TOTAL", "Vendor Name:", "Website:"]}}, "trainResult": + {"trainingDocuments": [{"documentName": "Form_1.jpg", "pages": 1, "errors": + [], "status": 
"succeeded"}, {"documentName": "Form_2.jpg", "pages": 1, "errors": + [], "status": "succeeded"}, {"documentName": "Form_3.jpg", "pages": 1, "errors": + [], "status": "succeeded"}, {"documentName": "Form_4.jpg", "pages": 1, "errors": + [], "status": "succeeded"}, {"documentName": "Form_5.jpg", "pages": 1, "errors": + [], "status": "succeeded"}], "errors": []}, "accessToken": "redacted"}' + headers: + apim-request-id: + - abc1d480-5568-4110-9a34-9e1162ee3dc2 + content-type: + - application/json; charset=utf-8 + date: + - Thu, 14 May 2020 01:02:02 GMT + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + x-content-type-options: + - nosniff + x-envoy-upstream-service-time: + - '128' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '0' + User-Agent: + - azsdk-python-ai-formrecognizer/1.0.0b3 Python/3.7.3 (Windows-10-10.0.18362-SP0) + Python/3.7.3 (Windows-10-10.0.18362-SP0) + method: POST + uri: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/copyAuthorization + response: + body: + string: '{"modelId": "a15b3499-7d6b-4b2c-9dc8-8e57b5b6ee51", "accessToken": + "redacted", "expirationDateTimeTicks": 637251013234333452}' + headers: + apim-request-id: + - ad78e1d1-162d-469b-93d8-4d2eb47b1f0f + content-type: + - application/json; charset=utf-8 + date: + - Thu, 14 May 2020 01:02:03 GMT + location: + - https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/a15b3499-7d6b-4b2c-9dc8-8e57b5b6ee51 + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + x-content-type-options: + - nosniff + x-envoy-upstream-service-time: + - '98' + status: + code: 201 + message: Created +- request: + body: 'b''b\''{"targetResourceId": "resource_id", "targetResourceRegion": "eastus", + "copyAuthorization": {"modelId": "a15b3499-7d6b-4b2c-9dc8-8e57b5b6ee51", "accessToken": + 00000000-0000-0000-0000-000000000000, "expirationDateTimeTicks": 637251013234333452}}\''''' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '448' + Content-Type: + - application/json + User-Agent: + - azsdk-python-ai-formrecognizer/1.0.0b3 Python/3.7.3 (Windows-10-10.0.18362-SP0) + Python/3.7.3 (Windows-10-10.0.18362-SP0) + method: POST + uri: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/b7cc00ef-0ead-4f28-8619-445cba528b91/copy + response: + body: + string: '' + headers: + apim-request-id: + - 682fa9dc-1b3b-4def-9f38-e98d1a04c7e0 + content-length: + - '0' + date: + - Thu, 14 May 2020 01:02:03 GMT + operation-location: + - https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/b7cc00ef-0ead-4f28-8619-445cba528b91/copyresults/cd9acbeb-46aa-40b2-ab14-c1c35749167c + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + x-content-type-options: + - nosniff + x-envoy-upstream-service-time: + - '155' + status: + code: 202 + message: Accepted +- request: + body: null + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-ai-formrecognizer/1.0.0b3 Python/3.7.3 (Windows-10-10.0.18362-SP0) + Python/3.7.3 (Windows-10-10.0.18362-SP0) + method: GET + uri: 
https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/b7cc00ef-0ead-4f28-8619-445cba528b91/copyresults/cd9acbeb-46aa-40b2-ab14-c1c35749167c + response: + body: + string: '{"status": "notStarted", "createdDateTime": "2020-05-14T01:02:03Z", + "lastUpdatedDateTime": "2020-05-14T01:02:03Z", "copyResult": {"modelId": "a15b3499-7d6b-4b2c-9dc8-8e57b5b6ee51"}, + "accessToken": "redacted"}' + headers: + apim-request-id: + - 23c7f328-3010-4212-abf0-41b5a14cb219 + content-type: + - application/json; charset=utf-8 + date: + - Thu, 14 May 2020 01:02:08 GMT + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + x-content-type-options: + - nosniff + x-envoy-upstream-service-time: + - '99' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-ai-formrecognizer/1.0.0b3 Python/3.7.3 (Windows-10-10.0.18362-SP0) + Python/3.7.3 (Windows-10-10.0.18362-SP0) + method: GET + uri: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/b7cc00ef-0ead-4f28-8619-445cba528b91/copyresults/cd9acbeb-46aa-40b2-ab14-c1c35749167c + response: + body: + string: '{"status": "notStarted", "createdDateTime": "2020-05-14T01:02:03Z", + "lastUpdatedDateTime": "2020-05-14T01:02:03Z", "copyResult": {"modelId": "a15b3499-7d6b-4b2c-9dc8-8e57b5b6ee51"}, + "accessToken": "redacted"}' + headers: + apim-request-id: + - fc7531d8-e8e2-4856-ae6e-32118966e56b + content-type: + - application/json; charset=utf-8 + date: + - Thu, 14 May 2020 01:02:18 GMT + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + x-content-type-options: + - nosniff + x-envoy-upstream-service-time: + - '5303' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-ai-formrecognizer/1.0.0b3 Python/3.7.3 (Windows-10-10.0.18362-SP0) + Python/3.7.3 (Windows-10-10.0.18362-SP0) + method: GET + uri: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/b7cc00ef-0ead-4f28-8619-445cba528b91/copyresults/cd9acbeb-46aa-40b2-ab14-c1c35749167c + response: + body: + string: '{"status": "notStarted", "createdDateTime": "2020-05-14T01:02:03Z", + "lastUpdatedDateTime": "2020-05-14T01:02:03Z", "copyResult": {"modelId": "a15b3499-7d6b-4b2c-9dc8-8e57b5b6ee51"}, + "accessToken": "redacted"}' + headers: + apim-request-id: + - 2c3c4c73-01a7-4399-8071-e15db1044a20 + content-type: + - application/json; charset=utf-8 + date: + - Thu, 14 May 2020 01:02:23 GMT + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + x-content-type-options: + - nosniff + x-envoy-upstream-service-time: + - '41' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-ai-formrecognizer/1.0.0b3 Python/3.7.3 (Windows-10-10.0.18362-SP0) + Python/3.7.3 (Windows-10-10.0.18362-SP0) + method: GET + uri: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/b7cc00ef-0ead-4f28-8619-445cba528b91/copyresults/cd9acbeb-46aa-40b2-ab14-c1c35749167c + response: + body: + string: '{"status": "failed", "createdDateTime": "2020-05-14T01:02:30.7732121Z", + 
"lastUpdatedDateTime": "2020-05-14T01:02:30.7732123Z", "copyResult": {"modelId": + "a15b3499-7d6b-4b2c-9dc8-8e57b5b6ee51", "errors": [{"code": "ResourceResolverError", + "message": "{\"ResourceReferenceProperty\":null,\"ClassName\":\"Microsoft.CloudAI.Containers.ResourceResolverServiceException\",\"Message\":\"One + or more errors occurred. (Could not fetch resource information. Ensure the + resource identifier ''resource_id'' is valid and exists in the specified region + eastus. Error: {\\\"error\\\":{\\\"code\\\":\\\"ResourceNotFound\\\",\\\"message\\\":\\\"The + resource with identifier ''resource_id'' is not found.\\\"}}).\",\"Data\":null,\"InnerException\":null,\"HelpURL\":null,\"StackTraceString\":null,\"RemoteStackTraceString\":null,\"RemoteStackIndex\":0,\"ExceptionMethod\":null,\"HResult\":-2146233088,\"Source\":null,\"WatsonBuckets\":null}"}]}, + "accessToken": "redacted"}' + headers: + apim-request-id: + - 16da8022-2949-4c9a-93e0-22b0488ee5a6 + content-type: + - application/json; charset=utf-8 + date: + - Thu, 14 May 2020 01:02:34 GMT + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + x-content-type-options: + - nosniff + x-envoy-upstream-service-time: + - '5044' + status: + code: 200 + message: OK +version: 1 diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/tests/recordings/test_copy_model.test_copy_model_successful.yaml b/sdk/formrecognizer/azure-ai-formrecognizer/tests/recordings/test_copy_model.test_copy_model_successful.yaml new file mode 100644 index 000000000000..4410de1124d7 --- /dev/null +++ b/sdk/formrecognizer/azure-ai-formrecognizer/tests/recordings/test_copy_model.test_copy_model_successful.yaml @@ -0,0 +1,402 @@ +interactions: +- request: + body: 'b''b\''{"source": "containersasurl", "sourceFilter": {"prefix": "", "includeSubFolders": + false}, "useLabelFile": false}\''''' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '288' + Content-Type: + - application/json + User-Agent: + - azsdk-python-ai-formrecognizer/1.0.0b3 Python/3.7.3 (Windows-10-10.0.18362-SP0) + Python/3.7.3 (Windows-10-10.0.18362-SP0) + method: POST + uri: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models + response: + body: + string: '' + headers: + apim-request-id: + - 1f4f2813-350c-4c4f-8c19-a9a731f90145 + content-length: + - '0' + date: + - Thu, 14 May 2020 00:41:50 GMT + location: + - https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/c885a510-785f-402b-a69b-650d581331db + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + x-content-type-options: + - nosniff + x-envoy-upstream-service-time: + - '5225' + status: + code: 201 + message: Created +- request: + body: null + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-ai-formrecognizer/1.0.0b3 Python/3.7.3 (Windows-10-10.0.18362-SP0) + Python/3.7.3 (Windows-10-10.0.18362-SP0) + method: GET + uri: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/c885a510-785f-402b-a69b-650d581331db?includeKeys=true + response: + body: + string: '{"modelInfo": {"modelId": "c885a510-785f-402b-a69b-650d581331db", "status": + "ready", "createdDateTime": "2020-05-14T00:41:51Z", "lastUpdatedDateTime": + "2020-05-14T00:41:58Z"}, "keys": {"clusters": {"0": ["Additional Notes:", + "Address:", "Address:", "Company Name:", "Company 
Phone:", "Dated As:", "Email:", + "Hero Limited", "Name:", "Phone:", "Phone:", "Purchase Order", "Purchase Order", + "Purchase Order #:", "SUBTOTAL", "Seattle, WA 93849 Phone:", "Shipped From", + "Shipped To", "TAX", "TOTAL", "Vendor Name:", "Website:"]}}, "trainResult": + {"trainingDocuments": [{"documentName": "Form_1.jpg", "pages": 1, "errors": + [], "status": "succeeded"}, {"documentName": "Form_2.jpg", "pages": 1, "errors": + [], "status": "succeeded"}, {"documentName": "Form_3.jpg", "pages": 1, "errors": + [], "status": "succeeded"}, {"documentName": "Form_4.jpg", "pages": 1, "errors": + [], "status": "succeeded"}, {"documentName": "Form_5.jpg", "pages": 1, "errors": + [], "status": "succeeded"}], "errors": []}, "accessToken": "redacted"}' + headers: + apim-request-id: + - 09abe2a5-a145-4d23-b606-155d2e8c2e0d + content-type: + - application/json; charset=utf-8 + date: + - Thu, 14 May 2020 00:42:01 GMT + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + x-content-type-options: + - nosniff + x-envoy-upstream-service-time: + - '5243' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '0' + User-Agent: + - azsdk-python-ai-formrecognizer/1.0.0b3 Python/3.7.3 (Windows-10-10.0.18362-SP0) + Python/3.7.3 (Windows-10-10.0.18362-SP0) + method: POST + uri: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/copyAuthorization + response: + body: + string: '{"modelId": "a0490d1a-e0d9-4e18-96ed-3bc4ad1df34c", "accessToken": + "redacted", "expirationDateTimeTicks": 637251001221004515}' + headers: + apim-request-id: + - c223d84e-9ac5-4204-8348-3341c4d2be27 + content-type: + - application/json; charset=utf-8 + date: + - Thu, 14 May 2020 00:42:02 GMT + location: + - https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/a0490d1a-e0d9-4e18-96ed-3bc4ad1df34c + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + x-content-type-options: + - nosniff + x-envoy-upstream-service-time: + - '178' + status: + code: 201 + message: Created +- request: + body: 'b''b\''{"targetResourceId": "resource_id", "targetResourceRegion": "westus", + "copyAuthorization": {"modelId": "a0490d1a-e0d9-4e18-96ed-3bc4ad1df34c", "accessToken": + 00000000-0000-0000-0000-000000000000, "expirationDateTimeTicks": 637251001221004515}}\''''' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '448' + Content-Type: + - application/json + User-Agent: + - azsdk-python-ai-formrecognizer/1.0.0b3 Python/3.7.3 (Windows-10-10.0.18362-SP0) + Python/3.7.3 (Windows-10-10.0.18362-SP0) + method: POST + uri: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/c885a510-785f-402b-a69b-650d581331db/copy + response: + body: + string: '' + headers: + apim-request-id: + - 9d30186d-72a3-4371-9b87-6fa2c7aacfab + content-length: + - '0' + date: + - Thu, 14 May 2020 00:42:02 GMT + operation-location: + - https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/c885a510-785f-402b-a69b-650d581331db/copyresults/5fb67559-3e16-4b09-9295-1fe0b3cfec94 + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + x-content-type-options: + - nosniff + x-envoy-upstream-service-time: + - '425' + status: + code: 202 
+ message: Accepted +- request: + body: null + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-ai-formrecognizer/1.0.0b3 Python/3.7.3 (Windows-10-10.0.18362-SP0) + Python/3.7.3 (Windows-10-10.0.18362-SP0) + method: GET + uri: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/c885a510-785f-402b-a69b-650d581331db/copyresults/5fb67559-3e16-4b09-9295-1fe0b3cfec94 + response: + body: + string: '{"status": "notStarted", "createdDateTime": "2020-05-14T00:42:02Z", + "lastUpdatedDateTime": "2020-05-14T00:42:02Z", "copyResult": {"modelId": "a0490d1a-e0d9-4e18-96ed-3bc4ad1df34c"}, + "accessToken": "redacted"}' + headers: + apim-request-id: + - 63d27f20-441a-410b-8abb-3c24159f4b9f + content-type: + - application/json; charset=utf-8 + date: + - Thu, 14 May 2020 00:42:07 GMT + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + x-content-type-options: + - nosniff + x-envoy-upstream-service-time: + - '108' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-ai-formrecognizer/1.0.0b3 Python/3.7.3 (Windows-10-10.0.18362-SP0) + Python/3.7.3 (Windows-10-10.0.18362-SP0) + method: GET + uri: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/c885a510-785f-402b-a69b-650d581331db/copyresults/5fb67559-3e16-4b09-9295-1fe0b3cfec94 + response: + body: + string: '{"status": "notStarted", "createdDateTime": "2020-05-14T00:42:02Z", + "lastUpdatedDateTime": "2020-05-14T00:42:02Z", "copyResult": {"modelId": "a0490d1a-e0d9-4e18-96ed-3bc4ad1df34c"}, + "accessToken": "redacted"}' + headers: + apim-request-id: + - 40ecd2db-65fc-48c6-9196-850c68391b58 + content-type: + - application/json; charset=utf-8 + date: + - Thu, 14 May 2020 00:42:12 GMT + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + x-content-type-options: + - nosniff + x-envoy-upstream-service-time: + - '101' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-ai-formrecognizer/1.0.0b3 Python/3.7.3 (Windows-10-10.0.18362-SP0) + Python/3.7.3 (Windows-10-10.0.18362-SP0) + method: GET + uri: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/c885a510-785f-402b-a69b-650d581331db/copyresults/5fb67559-3e16-4b09-9295-1fe0b3cfec94 + response: + body: + string: '{"status": "notStarted", "createdDateTime": "2020-05-14T00:42:02Z", + "lastUpdatedDateTime": "2020-05-14T00:42:02Z", "copyResult": {"modelId": "a0490d1a-e0d9-4e18-96ed-3bc4ad1df34c"}, + "accessToken": "redacted"}' + headers: + apim-request-id: + - 07b8bdfa-5f42-40d5-b21c-4f73b068f8e5 + content-type: + - application/json; charset=utf-8 + date: + - Thu, 14 May 2020 00:42:17 GMT + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + x-content-type-options: + - nosniff + x-envoy-upstream-service-time: + - '274' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-ai-formrecognizer/1.0.0b3 Python/3.7.3 (Windows-10-10.0.18362-SP0) + Python/3.7.3 
(Windows-10-10.0.18362-SP0) + method: GET + uri: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/c885a510-785f-402b-a69b-650d581331db/copyresults/5fb67559-3e16-4b09-9295-1fe0b3cfec94 + response: + body: + string: '{"status": "notStarted", "createdDateTime": "2020-05-14T00:42:02Z", + "lastUpdatedDateTime": "2020-05-14T00:42:02Z", "copyResult": {"modelId": "a0490d1a-e0d9-4e18-96ed-3bc4ad1df34c"}, + "accessToken": "redacted"}' + headers: + apim-request-id: + - 2a498014-f8d1-45e3-9438-ba2589b05278 + content-type: + - application/json; charset=utf-8 + date: + - Thu, 14 May 2020 00:42:23 GMT + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + x-content-type-options: + - nosniff + x-envoy-upstream-service-time: + - '79' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-ai-formrecognizer/1.0.0b3 Python/3.7.3 (Windows-10-10.0.18362-SP0) + Python/3.7.3 (Windows-10-10.0.18362-SP0) + method: GET + uri: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/c885a510-785f-402b-a69b-650d581331db/copyresults/5fb67559-3e16-4b09-9295-1fe0b3cfec94 + response: + body: + string: '{"status": "succeeded", "createdDateTime": "2020-05-14T00:42:28.4618648Z", + "lastUpdatedDateTime": "2020-05-14T00:42:28.4618651Z", "copyResult": {}, "accessToken": + "redacted"}' + headers: + apim-request-id: + - 50324af7-81b5-4cca-aacc-b32470591d74 + content-type: + - application/json; charset=utf-8 + date: + - Thu, 14 May 2020 00:42:33 GMT + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + x-content-type-options: + - nosniff + x-envoy-upstream-service-time: + - '5172' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-ai-formrecognizer/1.0.0b3 Python/3.7.3 (Windows-10-10.0.18362-SP0) + Python/3.7.3 (Windows-10-10.0.18362-SP0) + method: GET + uri: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/a0490d1a-e0d9-4e18-96ed-3bc4ad1df34c?includeKeys=true + response: + body: + string: '{"modelInfo": {"modelId": "a0490d1a-e0d9-4e18-96ed-3bc4ad1df34c", "status": + "ready", "createdDateTime": "2020-05-14T00:41:51Z", "lastUpdatedDateTime": + "2020-05-14T00:41:58Z"}, "keys": {"clusters": {"0": ["Additional Notes:", + "Address:", "Address:", "Company Name:", "Company Phone:", "Dated As:", "Email:", + "Hero Limited", "Name:", "Phone:", "Phone:", "Purchase Order", "Purchase Order", + "Purchase Order #:", "SUBTOTAL", "Seattle, WA 93849 Phone:", "Shipped From", + "Shipped To", "TAX", "TOTAL", "Vendor Name:", "Website:"]}}, "trainResult": + {"trainingDocuments": [{"documentName": "Form_1.jpg", "pages": 1, "errors": + [], "status": "succeeded"}, {"documentName": "Form_2.jpg", "pages": 1, "errors": + [], "status": "succeeded"}, {"documentName": "Form_3.jpg", "pages": 1, "errors": + [], "status": "succeeded"}, {"documentName": "Form_4.jpg", "pages": 1, "errors": + [], "status": "succeeded"}, {"documentName": "Form_5.jpg", "pages": 1, "errors": + [], "status": "succeeded"}], "errors": []}, "accessToken": "redacted"}' + headers: + apim-request-id: + - 3a2ce69b-bbbc-4e55-90b0-f4f6c7851093 + content-type: + - application/json; charset=utf-8 + date: + - 
Thu, 14 May 2020 00:42:33 GMT + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + x-content-type-options: + - nosniff + x-envoy-upstream-service-time: + - '406' + status: + code: 200 + message: OK +version: 1 diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/tests/recordings/test_copy_model.test_copy_model_transform.yaml b/sdk/formrecognizer/azure-ai-formrecognizer/tests/recordings/test_copy_model.test_copy_model_transform.yaml new file mode 100644 index 000000000000..a03d4b0fe3be --- /dev/null +++ b/sdk/formrecognizer/azure-ai-formrecognizer/tests/recordings/test_copy_model.test_copy_model_transform.yaml @@ -0,0 +1,244 @@ +interactions: +- request: + body: 'b''b\''{"source": "containersasurl", "sourceFilter": {"prefix": "", "includeSubFolders": + false}, "useLabelFile": false}\''''' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '288' + Content-Type: + - application/json + User-Agent: + - azsdk-python-ai-formrecognizer/1.0.0b3 Python/3.7.3 (Windows-10-10.0.18362-SP0) + Python/3.7.3 (Windows-10-10.0.18362-SP0) + method: POST + uri: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models + response: + body: + string: '' + headers: + apim-request-id: + - 04323b91-2063-4e81-b458-428e7833eee2 + content-length: + - '0' + date: + - Thu, 14 May 2020 00:50:41 GMT + location: + - https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/cf566ace-71ef-4331-9110-82734edf1bc8 + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + x-content-type-options: + - nosniff + x-envoy-upstream-service-time: + - '187' + status: + code: 201 + message: Created +- request: + body: null + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-ai-formrecognizer/1.0.0b3 Python/3.7.3 (Windows-10-10.0.18362-SP0) + Python/3.7.3 (Windows-10-10.0.18362-SP0) + method: GET + uri: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/cf566ace-71ef-4331-9110-82734edf1bc8?includeKeys=true + response: + body: + string: '{"modelInfo": {"modelId": "cf566ace-71ef-4331-9110-82734edf1bc8", "status": + "creating", "createdDateTime": "2020-05-14T00:50:42Z", "lastUpdatedDateTime": + "2020-05-14T00:50:42Z"}, "accessToken": "redacted"}' + headers: + apim-request-id: + - f12d5552-2242-4242-9cf8-450b8ee09fc4 + content-type: + - application/json; charset=utf-8 + date: + - Thu, 14 May 2020 00:50:46 GMT + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + x-content-type-options: + - nosniff + x-envoy-upstream-service-time: + - '148' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-ai-formrecognizer/1.0.0b3 Python/3.7.3 (Windows-10-10.0.18362-SP0) + Python/3.7.3 (Windows-10-10.0.18362-SP0) + method: GET + uri: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/cf566ace-71ef-4331-9110-82734edf1bc8?includeKeys=true + response: + body: + string: '{"modelInfo": {"modelId": "cf566ace-71ef-4331-9110-82734edf1bc8", "status": + "ready", "createdDateTime": "2020-05-14T00:50:42Z", "lastUpdatedDateTime": + "2020-05-14T00:50:51Z"}, "keys": {"clusters": {"0": ["Additional Notes:", + "Address:", "Address:", 
"Company Name:", "Company Phone:", "Dated As:", "Email:", + "Hero Limited", "Name:", "Phone:", "Phone:", "Purchase Order", "Purchase Order", + "Purchase Order #:", "SUBTOTAL", "Seattle, WA 93849 Phone:", "Shipped From", + "Shipped To", "TAX", "TOTAL", "Vendor Name:", "Website:"]}}, "trainResult": + {"trainingDocuments": [{"documentName": "Form_1.jpg", "pages": 1, "errors": + [], "status": "succeeded"}, {"documentName": "Form_2.jpg", "pages": 1, "errors": + [], "status": "succeeded"}, {"documentName": "Form_3.jpg", "pages": 1, "errors": + [], "status": "succeeded"}, {"documentName": "Form_4.jpg", "pages": 1, "errors": + [], "status": "succeeded"}, {"documentName": "Form_5.jpg", "pages": 1, "errors": + [], "status": "succeeded"}], "errors": []}, "accessToken": "redacted"}' + headers: + apim-request-id: + - 0c4bf41c-3ba8-4c9c-b5d4-65b1fe553382 + content-type: + - application/json; charset=utf-8 + date: + - Thu, 14 May 2020 00:50:53 GMT + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + x-content-type-options: + - nosniff + x-envoy-upstream-service-time: + - '130' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '0' + User-Agent: + - azsdk-python-ai-formrecognizer/1.0.0b3 Python/3.7.3 (Windows-10-10.0.18362-SP0) + Python/3.7.3 (Windows-10-10.0.18362-SP0) + method: POST + uri: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/copyAuthorization + response: + body: + string: '{"modelId": "459db417-84b4-4588-aa3b-d64762c08200", "accessToken": + "redacted", "expirationDateTimeTicks": 637251006533045470}' + headers: + apim-request-id: + - 2cc2c9ee-a999-4756-a820-ef6aee622d48 + content-type: + - application/json; charset=utf-8 + date: + - Thu, 14 May 2020 00:50:53 GMT + location: + - https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/459db417-84b4-4588-aa3b-d64762c08200 + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + x-content-type-options: + - nosniff + x-envoy-upstream-service-time: + - '124' + status: + code: 201 + message: Created +- request: + body: 'b''b\''{"targetResourceId": "resource_id", "targetResourceRegion": "westus", + "copyAuthorization": {"modelId": "459db417-84b4-4588-aa3b-d64762c08200", "accessToken": + 00000000-0000-0000-0000-000000000000, "expirationDateTimeTicks": 637251006533045470}}\''''' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '448' + Content-Type: + - application/json + User-Agent: + - azsdk-python-ai-formrecognizer/1.0.0b3 Python/3.7.3 (Windows-10-10.0.18362-SP0) + Python/3.7.3 (Windows-10-10.0.18362-SP0) + method: POST + uri: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/cf566ace-71ef-4331-9110-82734edf1bc8/copy + response: + body: + string: '' + headers: + apim-request-id: + - eb1736f2-df7e-4545-96a0-befea8de6f80 + content-length: + - '0' + date: + - Thu, 14 May 2020 00:50:53 GMT + operation-location: + - https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/cf566ace-71ef-4331-9110-82734edf1bc8/copyresults/6e206af9-5eae-4540-a80f-d0f8faf24ead + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + x-content-type-options: + - nosniff + x-envoy-upstream-service-time: + - 
'73' + status: + code: 202 + message: Accepted +- request: + body: null + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-ai-formrecognizer/1.0.0b3 Python/3.7.3 (Windows-10-10.0.18362-SP0) + Python/3.7.3 (Windows-10-10.0.18362-SP0) + method: GET + uri: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/cf566ace-71ef-4331-9110-82734edf1bc8/copyresults/6e206af9-5eae-4540-a80f-d0f8faf24ead + response: + body: + string: '{"status": "succeeded", "createdDateTime": "2020-05-14T00:51:00.1376588Z", + "lastUpdatedDateTime": "2020-05-14T00:51:00.1376594Z", "copyResult": {}, "accessToken": + "redacted"}' + headers: + apim-request-id: + - 54152a3e-1267-4401-9f5e-6f3bddded6ab + content-type: + - application/json; charset=utf-8 + date: + - Thu, 14 May 2020 00:51:03 GMT + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + x-content-type-options: + - nosniff + x-envoy-upstream-service-time: + - '5168' + status: + code: 200 + message: OK +version: 1 diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/tests/recordings/test_copy_model_async.test_copy_authorization.yaml b/sdk/formrecognizer/azure-ai-formrecognizer/tests/recordings/test_copy_model_async.test_copy_authorization.yaml new file mode 100644 index 000000000000..01ff0c8827f0 --- /dev/null +++ b/sdk/formrecognizer/azure-ai-formrecognizer/tests/recordings/test_copy_model_async.test_copy_authorization.yaml @@ -0,0 +1,29 @@ +interactions: +- request: + body: null + headers: + Accept: + - application/json + User-Agent: + - azsdk-python-ai-formrecognizer/1.0.0b3 Python/3.7.3 (Windows-10-10.0.18362-SP0) + Python/3.7.3 (Windows-10-10.0.18362-SP0) + method: POST + uri: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/copyAuthorization + response: + body: + string: '{"modelId": "adcd2786-9aa5-4098-9a55-7d68eaeaaf2f", "accessToken": + "redacted", "expirationDateTimeTicks": 637255866617034918}' + headers: + apim-request-id: 99a50a5e-a423-47ae-8e9e-9369a4ac8d90 + content-type: application/json; charset=utf-8 + date: Tue, 19 May 2020 15:51:01 GMT + location: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/adcd2786-9aa5-4098-9a55-7d68eaeaaf2f + strict-transport-security: max-age=31536000; includeSubDomains; preload + transfer-encoding: chunked + x-content-type-options: nosniff + x-envoy-upstream-service-time: '226' + status: + code: 201 + message: Created + url: https://westus.api.cognitive.microsoft.com//formrecognizer/v2.0-preview/custom/models/copyAuthorization +version: 1 diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/tests/recordings/test_copy_model_async.test_copy_model_fail.yaml b/sdk/formrecognizer/azure-ai-formrecognizer/tests/recordings/test_copy_model_async.test_copy_model_fail.yaml new file mode 100644 index 000000000000..fc41403ca42e --- /dev/null +++ b/sdk/formrecognizer/azure-ai-formrecognizer/tests/recordings/test_copy_model_async.test_copy_model_fail.yaml @@ -0,0 +1,252 @@ +interactions: +- request: + body: 'b''b\''{"source": "containersasurl", "sourceFilter": {"prefix": "", "includeSubFolders": + false}, "useLabelFile": false}\''''' + headers: + Content-Length: + - '288' + Content-Type: + - application/json + User-Agent: + - azsdk-python-ai-formrecognizer/1.0.0b3 Python/3.7.3 (Windows-10-10.0.18362-SP0) + Python/3.7.3 (Windows-10-10.0.18362-SP0) + method: POST + uri: 
https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models + response: + body: + string: '' + headers: + apim-request-id: 0ef35015-e98c-4980-9b4f-bb9b64c05678 + content-length: '0' + date: Thu, 14 May 2020 01:12:40 GMT + location: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/aa33d092-934e-4af9-b835-122492610f5a + strict-transport-security: max-age=31536000; includeSubDomains; preload + x-content-type-options: nosniff + x-envoy-upstream-service-time: '146' + status: + code: 201 + message: Created + url: https://westus.api.cognitive.microsoft.com//formrecognizer/v2.0-preview/custom/models +- request: + body: null + headers: + User-Agent: + - azsdk-python-ai-formrecognizer/1.0.0b3 Python/3.7.3 (Windows-10-10.0.18362-SP0) + Python/3.7.3 (Windows-10-10.0.18362-SP0) + method: GET + uri: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/aa33d092-934e-4af9-b835-122492610f5a?includeKeys=true + response: + body: + string: '{"modelInfo": {"modelId": "aa33d092-934e-4af9-b835-122492610f5a", "status": + "creating", "createdDateTime": "2020-05-14T01:12:40Z", "lastUpdatedDateTime": + "2020-05-14T01:12:40Z"}, "accessToken": "redacted"}' + headers: + apim-request-id: 8f251d26-3003-4f06-8974-a94cde8802ce + content-type: application/json; charset=utf-8 + date: Thu, 14 May 2020 01:12:45 GMT + strict-transport-security: max-age=31536000; includeSubDomains; preload + transfer-encoding: chunked + x-content-type-options: nosniff + x-envoy-upstream-service-time: '194' + status: + code: 200 + message: OK + url: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/aa33d092-934e-4af9-b835-122492610f5a?includeKeys=true +- request: + body: null + headers: + User-Agent: + - azsdk-python-ai-formrecognizer/1.0.0b3 Python/3.7.3 (Windows-10-10.0.18362-SP0) + Python/3.7.3 (Windows-10-10.0.18362-SP0) + method: GET + uri: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/aa33d092-934e-4af9-b835-122492610f5a?includeKeys=true + response: + body: + string: '{"modelInfo": {"modelId": "aa33d092-934e-4af9-b835-122492610f5a", "status": + "ready", "createdDateTime": "2020-05-14T01:12:40Z", "lastUpdatedDateTime": + "2020-05-14T01:12:48Z"}, "keys": {"clusters": {"0": ["Additional Notes:", + "Address:", "Address:", "Company Name:", "Company Phone:", "Dated As:", "Email:", + "Hero Limited", "Name:", "Phone:", "Phone:", "Purchase Order", "Purchase Order", + "Purchase Order #:", "SUBTOTAL", "Seattle, WA 93849 Phone:", "Shipped From", + "Shipped To", "TAX", "TOTAL", "Vendor Name:", "Website:"]}}, "trainResult": + {"trainingDocuments": [{"documentName": "Form_1.jpg", "pages": 1, "errors": + [], "status": "succeeded"}, {"documentName": "Form_2.jpg", "pages": 1, "errors": + [], "status": "succeeded"}, {"documentName": "Form_3.jpg", "pages": 1, "errors": + [], "status": "succeeded"}, {"documentName": "Form_4.jpg", "pages": 1, "errors": + [], "status": "succeeded"}, {"documentName": "Form_5.jpg", "pages": 1, "errors": + [], "status": "succeeded"}], "errors": []}, "accessToken": "redacted"}' + headers: + apim-request-id: 6fe0f81a-aa4b-4922-b8e9-f4d29ead547c + content-type: application/json; charset=utf-8 + date: Thu, 14 May 2020 01:12:50 GMT + strict-transport-security: max-age=31536000; includeSubDomains; preload + transfer-encoding: chunked + x-content-type-options: nosniff + x-envoy-upstream-service-time: '150' + status: + code: 200 + message: OK + url: 
https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/aa33d092-934e-4af9-b835-122492610f5a?includeKeys=true +- request: + body: null + headers: + Accept: + - application/json + User-Agent: + - azsdk-python-ai-formrecognizer/1.0.0b3 Python/3.7.3 (Windows-10-10.0.18362-SP0) + Python/3.7.3 (Windows-10-10.0.18362-SP0) + method: POST + uri: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/copyAuthorization + response: + body: + string: '{"modelId": "9c8a6245-d918-4507-b29e-6c0b53e6b22f", "accessToken": + "redacted", "expirationDateTimeTicks": 637251019711245795}' + headers: + apim-request-id: 4e64bee8-c932-4391-972c-a95e2aa0dd4f + content-type: application/json; charset=utf-8 + date: Thu, 14 May 2020 01:12:50 GMT + location: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/9c8a6245-d918-4507-b29e-6c0b53e6b22f + strict-transport-security: max-age=31536000; includeSubDomains; preload + transfer-encoding: chunked + x-content-type-options: nosniff + x-envoy-upstream-service-time: '175' + status: + code: 201 + message: Created + url: https://westus.api.cognitive.microsoft.com//formrecognizer/v2.0-preview/custom/models/copyAuthorization +- request: + body: 'b''b\''{"targetResourceId": "resource_id", "targetResourceRegion": "eastus", + "copyAuthorization": {"modelId": "9c8a6245-d918-4507-b29e-6c0b53e6b22f", "accessToken": + 00000000-0000-0000-0000-000000000000, "expirationDateTimeTicks": 637251019711245795}}\''''' + headers: + Content-Length: + - '448' + Content-Type: + - application/json + User-Agent: + - azsdk-python-ai-formrecognizer/1.0.0b3 Python/3.7.3 (Windows-10-10.0.18362-SP0) + Python/3.7.3 (Windows-10-10.0.18362-SP0) + method: POST + uri: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/aa33d092-934e-4af9-b835-122492610f5a/copy + response: + body: + string: '' + headers: + apim-request-id: d9805313-ec0a-46e9-afab-750164b505c9 + content-length: '0' + date: Thu, 14 May 2020 01:12:50 GMT + operation-location: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/aa33d092-934e-4af9-b835-122492610f5a/copyresults/097d8fa2-25cf-4cef-9dea-8657edc95f3f + strict-transport-security: max-age=31536000; includeSubDomains; preload + x-content-type-options: nosniff + x-envoy-upstream-service-time: '167' + status: + code: 202 + message: Accepted + url: https://westus.api.cognitive.microsoft.com//formrecognizer/v2.0-preview/custom/models/aa33d092-934e-4af9-b835-122492610f5a/copy +- request: + body: null + headers: + User-Agent: + - azsdk-python-ai-formrecognizer/1.0.0b3 Python/3.7.3 (Windows-10-10.0.18362-SP0) + Python/3.7.3 (Windows-10-10.0.18362-SP0) + method: GET + uri: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/aa33d092-934e-4af9-b835-122492610f5a/copyresults/097d8fa2-25cf-4cef-9dea-8657edc95f3f + response: + body: + string: '{"status": "notStarted", "createdDateTime": "2020-05-14T01:12:51Z", + "lastUpdatedDateTime": "2020-05-14T01:12:51Z", "copyResult": {"modelId": "9c8a6245-d918-4507-b29e-6c0b53e6b22f"}, + "accessToken": "redacted"}' + headers: + apim-request-id: 43f437fc-fcfb-40c1-b2ed-ab84436f79b3 + content-type: application/json; charset=utf-8 + date: Thu, 14 May 2020 01:12:55 GMT + strict-transport-security: max-age=31536000; includeSubDomains; preload + transfer-encoding: chunked + x-content-type-options: nosniff + x-envoy-upstream-service-time: '173' + status: + code: 200 + message: OK + 
url: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/aa33d092-934e-4af9-b835-122492610f5a/copyresults/097d8fa2-25cf-4cef-9dea-8657edc95f3f +- request: + body: null + headers: + User-Agent: + - azsdk-python-ai-formrecognizer/1.0.0b3 Python/3.7.3 (Windows-10-10.0.18362-SP0) + Python/3.7.3 (Windows-10-10.0.18362-SP0) + method: GET + uri: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/aa33d092-934e-4af9-b835-122492610f5a/copyresults/097d8fa2-25cf-4cef-9dea-8657edc95f3f + response: + body: + string: '{"status": "notStarted", "createdDateTime": "2020-05-14T01:12:51Z", + "lastUpdatedDateTime": "2020-05-14T01:12:51Z", "copyResult": {"modelId": "9c8a6245-d918-4507-b29e-6c0b53e6b22f"}, + "accessToken": "redacted"}' + headers: + apim-request-id: 26f78b70-03b0-4560-93db-91d4725c0611 + content-type: application/json; charset=utf-8 + date: Thu, 14 May 2020 01:13:00 GMT + strict-transport-security: max-age=31536000; includeSubDomains; preload + transfer-encoding: chunked + x-content-type-options: nosniff + x-envoy-upstream-service-time: '45' + status: + code: 200 + message: OK + url: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/aa33d092-934e-4af9-b835-122492610f5a/copyresults/097d8fa2-25cf-4cef-9dea-8657edc95f3f +- request: + body: null + headers: + User-Agent: + - azsdk-python-ai-formrecognizer/1.0.0b3 Python/3.7.3 (Windows-10-10.0.18362-SP0) + Python/3.7.3 (Windows-10-10.0.18362-SP0) + method: GET + uri: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/aa33d092-934e-4af9-b835-122492610f5a/copyresults/097d8fa2-25cf-4cef-9dea-8657edc95f3f + response: + body: + string: '{"status": "notStarted", "createdDateTime": "2020-05-14T01:12:51Z", + "lastUpdatedDateTime": "2020-05-14T01:12:51Z", "copyResult": {"modelId": "9c8a6245-d918-4507-b29e-6c0b53e6b22f"}, + "accessToken": "redacted"}' + headers: + apim-request-id: 08126cce-50a2-42a4-ab03-2c42b9835324 + content-type: application/json; charset=utf-8 + date: Thu, 14 May 2020 01:13:06 GMT + strict-transport-security: max-age=31536000; includeSubDomains; preload + transfer-encoding: chunked + x-content-type-options: nosniff + x-envoy-upstream-service-time: '84' + status: + code: 200 + message: OK + url: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/aa33d092-934e-4af9-b835-122492610f5a/copyresults/097d8fa2-25cf-4cef-9dea-8657edc95f3f +- request: + body: null + headers: + User-Agent: + - azsdk-python-ai-formrecognizer/1.0.0b3 Python/3.7.3 (Windows-10-10.0.18362-SP0) + Python/3.7.3 (Windows-10-10.0.18362-SP0) + method: GET + uri: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/aa33d092-934e-4af9-b835-122492610f5a/copyresults/097d8fa2-25cf-4cef-9dea-8657edc95f3f + response: + body: + string: '{"status": "failed", "createdDateTime": "2020-05-14T01:13:07.0471807Z", + "lastUpdatedDateTime": "2020-05-14T01:13:07.047181Z", "copyResult": {"modelId": + "9c8a6245-d918-4507-b29e-6c0b53e6b22f", "errors": [{"code": "ResourceResolverError", + "message": "{\"ResourceReferenceProperty\":null,\"ClassName\":\"Microsoft.CloudAI.Containers.ResourceResolverServiceException\",\"Message\":\"One + or more errors occurred. (Could not fetch resource information. Ensure the + resource identifier ''resource_id'' is valid and exists in the specified region + eastus. 
Error: {\\\"error\\\":{\\\"code\\\":\\\"ResourceNotFound\\\",\\\"message\\\":\\\"The + resource with identifier ''resource_id'' is not found.\\\"}}).\",\"Data\":null,\"InnerException\":null,\"HelpURL\":null,\"StackTraceString\":null,\"RemoteStackTraceString\":null,\"RemoteStackIndex\":0,\"ExceptionMethod\":null,\"HResult\":-2146233088,\"Source\":null,\"WatsonBuckets\":null}"}]}, + "accessToken": "redacted"}' + headers: + apim-request-id: 53b90555-e176-4745-88ef-9719ab61f13e + content-type: application/json; charset=utf-8 + date: Thu, 14 May 2020 01:13:11 GMT + strict-transport-security: max-age=31536000; includeSubDomains; preload + transfer-encoding: chunked + x-content-type-options: nosniff + x-envoy-upstream-service-time: '163' + status: + code: 200 + message: OK + url: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/aa33d092-934e-4af9-b835-122492610f5a/copyresults/097d8fa2-25cf-4cef-9dea-8657edc95f3f +version: 1 diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/tests/recordings/test_copy_model_async.test_copy_model_successful.yaml b/sdk/formrecognizer/azure-ai-formrecognizer/tests/recordings/test_copy_model_async.test_copy_model_successful.yaml new file mode 100644 index 000000000000..d0052131b670 --- /dev/null +++ b/sdk/formrecognizer/azure-ai-formrecognizer/tests/recordings/test_copy_model_async.test_copy_model_successful.yaml @@ -0,0 +1,208 @@ +interactions: +- request: + body: 'b''b\''{"source": "containersasurl", "sourceFilter": {"prefix": "", "includeSubFolders": + false}, "useLabelFile": false}\''''' + headers: + Content-Length: + - '288' + Content-Type: + - application/json + User-Agent: + - azsdk-python-ai-formrecognizer/1.0.0b3 Python/3.7.3 (Windows-10-10.0.18362-SP0) + Python/3.7.3 (Windows-10-10.0.18362-SP0) + method: POST + uri: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models + response: + body: + string: '' + headers: + apim-request-id: d86b82bd-c06f-4e15-803d-a91436f38900 + content-length: '0' + date: Thu, 14 May 2020 01:09:10 GMT + location: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/a9b558b2-152e-43fc-b060-2f97d0a2fcc6 + strict-transport-security: max-age=31536000; includeSubDomains; preload + x-content-type-options: nosniff + x-envoy-upstream-service-time: '181' + status: + code: 201 + message: Created + url: https://westus.api.cognitive.microsoft.com//formrecognizer/v2.0-preview/custom/models +- request: + body: null + headers: + User-Agent: + - azsdk-python-ai-formrecognizer/1.0.0b3 Python/3.7.3 (Windows-10-10.0.18362-SP0) + Python/3.7.3 (Windows-10-10.0.18362-SP0) + method: GET + uri: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/a9b558b2-152e-43fc-b060-2f97d0a2fcc6?includeKeys=true + response: + body: + string: '{"modelInfo": {"modelId": "a9b558b2-152e-43fc-b060-2f97d0a2fcc6", "status": + "creating", "createdDateTime": "2020-05-14T01:09:11Z", "lastUpdatedDateTime": + "2020-05-14T01:09:11Z"}, "accessToken": "redacted"}' + headers: + apim-request-id: 28a8f5c5-0e8e-46a5-814f-829bfebc2dcf + content-type: application/json; charset=utf-8 + date: Thu, 14 May 2020 01:09:16 GMT + strict-transport-security: max-age=31536000; includeSubDomains; preload + transfer-encoding: chunked + x-content-type-options: nosniff + x-envoy-upstream-service-time: '51' + status: + code: 200 + message: OK + url: 
https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/a9b558b2-152e-43fc-b060-2f97d0a2fcc6?includeKeys=true +- request: + body: null + headers: + User-Agent: + - azsdk-python-ai-formrecognizer/1.0.0b3 Python/3.7.3 (Windows-10-10.0.18362-SP0) + Python/3.7.3 (Windows-10-10.0.18362-SP0) + method: GET + uri: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/a9b558b2-152e-43fc-b060-2f97d0a2fcc6?includeKeys=true + response: + body: + string: '{"modelInfo": {"modelId": "a9b558b2-152e-43fc-b060-2f97d0a2fcc6", "status": + "ready", "createdDateTime": "2020-05-14T01:09:11Z", "lastUpdatedDateTime": + "2020-05-14T01:09:18Z"}, "keys": {"clusters": {"0": ["Additional Notes:", + "Address:", "Address:", "Company Name:", "Company Phone:", "Dated As:", "Email:", + "Hero Limited", "Name:", "Phone:", "Purchase Order", "Purchase Order", "Purchase + Order #:", "SUBTOTAL", "Seattle, WA 93849 Phone:", "Shipped From", "Shipped + To", "TAX", "TOTAL", "Vendor Name:", "Website:"]}}, "trainResult": {"trainingDocuments": + [{"documentName": "Form_1.jpg", "pages": 1, "errors": [], "status": "succeeded"}, + {"documentName": "Form_2.jpg", "pages": 1, "errors": [], "status": "succeeded"}, + {"documentName": "Form_3.jpg", "pages": 1, "errors": [], "status": "succeeded"}, + {"documentName": "Form_4.jpg", "pages": 1, "errors": [], "status": "succeeded"}, + {"documentName": "Form_5.jpg", "pages": 1, "errors": [], "status": "succeeded"}], + "errors": []}, "accessToken": "redacted"}' + headers: + apim-request-id: 82ac1744-b0f0-4aee-9ed3-1a99b48800ef + content-type: application/json; charset=utf-8 + date: Thu, 14 May 2020 01:09:21 GMT + strict-transport-security: max-age=31536000; includeSubDomains; preload + transfer-encoding: chunked + x-content-type-options: nosniff + x-envoy-upstream-service-time: '109' + status: + code: 200 + message: OK + url: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/a9b558b2-152e-43fc-b060-2f97d0a2fcc6?includeKeys=true +- request: + body: null + headers: + Accept: + - application/json + User-Agent: + - azsdk-python-ai-formrecognizer/1.0.0b3 Python/3.7.3 (Windows-10-10.0.18362-SP0) + Python/3.7.3 (Windows-10-10.0.18362-SP0) + method: POST + uri: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/copyAuthorization + response: + body: + string: '{"modelId": "7c88c4f8-053f-4e37-bc03-ce80e81f49e5", "accessToken": + "redacted", "expirationDateTimeTicks": 637251017619338551}' + headers: + apim-request-id: 5a07d519-1cf2-466c-bcd1-6eaaa771a366 + content-type: application/json; charset=utf-8 + date: Thu, 14 May 2020 01:09:21 GMT + location: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/7c88c4f8-053f-4e37-bc03-ce80e81f49e5 + strict-transport-security: max-age=31536000; includeSubDomains; preload + transfer-encoding: chunked + x-content-type-options: nosniff + x-envoy-upstream-service-time: '112' + status: + code: 201 + message: Created + url: https://westus.api.cognitive.microsoft.com//formrecognizer/v2.0-preview/custom/models/copyAuthorization +- request: + body: 'b''b\''{"targetResourceId": "resource_id", "targetResourceRegion": "westus", + "copyAuthorization": {"modelId": "7c88c4f8-053f-4e37-bc03-ce80e81f49e5", "accessToken": + 00000000-0000-0000-0000-000000000000, "expirationDateTimeTicks": 637251017619338551}}\''''' + headers: + Content-Length: + - '448' + Content-Type: + - application/json + User-Agent: + - 
azsdk-python-ai-formrecognizer/1.0.0b3 Python/3.7.3 (Windows-10-10.0.18362-SP0) + Python/3.7.3 (Windows-10-10.0.18362-SP0) + method: POST + uri: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/a9b558b2-152e-43fc-b060-2f97d0a2fcc6/copy + response: + body: + string: '' + headers: + apim-request-id: ce3c30f8-8011-4245-8a27-1c447e5072b8 + content-length: '0' + date: Thu, 14 May 2020 01:09:21 GMT + operation-location: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/a9b558b2-152e-43fc-b060-2f97d0a2fcc6/copyresults/fe00dde8-61e6-4fb0-9712-6c3e0b5d61ce + strict-transport-security: max-age=31536000; includeSubDomains; preload + x-content-type-options: nosniff + x-envoy-upstream-service-time: '159' + status: + code: 202 + message: Accepted + url: https://westus.api.cognitive.microsoft.com//formrecognizer/v2.0-preview/custom/models/a9b558b2-152e-43fc-b060-2f97d0a2fcc6/copy +- request: + body: null + headers: + User-Agent: + - azsdk-python-ai-formrecognizer/1.0.0b3 Python/3.7.3 (Windows-10-10.0.18362-SP0) + Python/3.7.3 (Windows-10-10.0.18362-SP0) + method: GET + uri: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/a9b558b2-152e-43fc-b060-2f97d0a2fcc6/copyresults/fe00dde8-61e6-4fb0-9712-6c3e0b5d61ce + response: + body: + string: '{"status": "succeeded", "createdDateTime": "2020-05-14T01:09:31.2445585Z", + "lastUpdatedDateTime": "2020-05-14T01:09:31.2445588Z", "copyResult": {}, "accessToken": + "redacted"}' + headers: + apim-request-id: 93992072-32c8-4057-87a0-64a470cb1d6e + content-type: application/json; charset=utf-8 + date: Thu, 14 May 2020 01:09:31 GMT + strict-transport-security: max-age=31536000; includeSubDomains; preload + transfer-encoding: chunked + x-content-type-options: nosniff + x-envoy-upstream-service-time: '5281' + status: + code: 200 + message: OK + url: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/a9b558b2-152e-43fc-b060-2f97d0a2fcc6/copyresults/fe00dde8-61e6-4fb0-9712-6c3e0b5d61ce +- request: + body: null + headers: + Accept: + - application/json + User-Agent: + - azsdk-python-ai-formrecognizer/1.0.0b3 Python/3.7.3 (Windows-10-10.0.18362-SP0) + Python/3.7.3 (Windows-10-10.0.18362-SP0) + method: GET + uri: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/7c88c4f8-053f-4e37-bc03-ce80e81f49e5?includeKeys=true + response: + body: + string: '{"modelInfo": {"modelId": "7c88c4f8-053f-4e37-bc03-ce80e81f49e5", "status": + "ready", "createdDateTime": "2020-05-14T01:09:11Z", "lastUpdatedDateTime": + "2020-05-14T01:09:18Z"}, "keys": {"clusters": {"0": ["Additional Notes:", + "Address:", "Address:", "Company Name:", "Company Phone:", "Dated As:", "Email:", + "Hero Limited", "Name:", "Phone:", "Purchase Order", "Purchase Order", "Purchase + Order #:", "SUBTOTAL", "Seattle, WA 93849 Phone:", "Shipped From", "Shipped + To", "TAX", "TOTAL", "Vendor Name:", "Website:"]}}, "trainResult": {"trainingDocuments": + [{"documentName": "Form_1.jpg", "pages": 1, "errors": [], "status": "succeeded"}, + {"documentName": "Form_2.jpg", "pages": 1, "errors": [], "status": "succeeded"}, + {"documentName": "Form_3.jpg", "pages": 1, "errors": [], "status": "succeeded"}, + {"documentName": "Form_4.jpg", "pages": 1, "errors": [], "status": "succeeded"}, + {"documentName": "Form_5.jpg", "pages": 1, "errors": [], "status": "succeeded"}], + "errors": []}, "accessToken": "redacted"}' + headers: + apim-request-id: 
234ef1cd-09aa-49eb-823b-e899bc2daa18 + content-type: application/json; charset=utf-8 + date: Thu, 14 May 2020 01:09:31 GMT + strict-transport-security: max-age=31536000; includeSubDomains; preload + transfer-encoding: chunked + x-content-type-options: nosniff + x-envoy-upstream-service-time: '112' + status: + code: 200 + message: OK + url: https://westus.api.cognitive.microsoft.com//formrecognizer/v2.0-preview/custom/models/7c88c4f8-053f-4e37-bc03-ce80e81f49e5?includeKeys=true +version: 1 diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/tests/recordings/test_copy_model_async.test_copy_model_transform.yaml b/sdk/formrecognizer/azure-ai-formrecognizer/tests/recordings/test_copy_model_async.test_copy_model_transform.yaml new file mode 100644 index 000000000000..000b3bd9d538 --- /dev/null +++ b/sdk/formrecognizer/azure-ai-formrecognizer/tests/recordings/test_copy_model_async.test_copy_model_transform.yaml @@ -0,0 +1,246 @@ +interactions: +- request: + body: 'b''b\''{"source": "containersasurl", "sourceFilter": {"prefix": "", "includeSubFolders": + false}, "useLabelFile": false}\''''' + headers: + Content-Length: + - '288' + Content-Type: + - application/json + User-Agent: + - azsdk-python-ai-formrecognizer/1.0.0b3 Python/3.7.3 (Windows-10-10.0.18362-SP0) + Python/3.7.3 (Windows-10-10.0.18362-SP0) + method: POST + uri: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models + response: + body: + string: '' + headers: + apim-request-id: 79122024-c16c-4828-bb84-e3ec65eb7383 + content-length: '0' + date: Thu, 14 May 2020 01:09:32 GMT + location: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/f622d63d-9e3d-4a1d-a290-5e3949b932d8 + strict-transport-security: max-age=31536000; includeSubDomains; preload + x-content-type-options: nosniff + x-envoy-upstream-service-time: '76' + status: + code: 201 + message: Created + url: https://westus.api.cognitive.microsoft.com//formrecognizer/v2.0-preview/custom/models +- request: + body: null + headers: + User-Agent: + - azsdk-python-ai-formrecognizer/1.0.0b3 Python/3.7.3 (Windows-10-10.0.18362-SP0) + Python/3.7.3 (Windows-10-10.0.18362-SP0) + method: GET + uri: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/f622d63d-9e3d-4a1d-a290-5e3949b932d8?includeKeys=true + response: + body: + string: '{"modelInfo": {"modelId": "f622d63d-9e3d-4a1d-a290-5e3949b932d8", "status": + "creating", "createdDateTime": "2020-05-14T01:09:33Z", "lastUpdatedDateTime": + "2020-05-14T01:09:33Z"}, "accessToken": "redacted"}' + headers: + apim-request-id: 8a045cfe-d6d3-430d-8374-06ab1fad9bfd + content-type: application/json; charset=utf-8 + date: Thu, 14 May 2020 01:09:37 GMT + strict-transport-security: max-age=31536000; includeSubDomains; preload + transfer-encoding: chunked + x-content-type-options: nosniff + x-envoy-upstream-service-time: '139' + status: + code: 200 + message: OK + url: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/f622d63d-9e3d-4a1d-a290-5e3949b932d8?includeKeys=true +- request: + body: null + headers: + User-Agent: + - azsdk-python-ai-formrecognizer/1.0.0b3 Python/3.7.3 (Windows-10-10.0.18362-SP0) + Python/3.7.3 (Windows-10-10.0.18362-SP0) + method: GET + uri: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/f622d63d-9e3d-4a1d-a290-5e3949b932d8?includeKeys=true + response: + body: + string: '{"modelInfo": {"modelId": "f622d63d-9e3d-4a1d-a290-5e3949b932d8", "status": + "ready", 
"createdDateTime": "2020-05-14T01:09:33Z", "lastUpdatedDateTime": + "2020-05-14T01:09:40Z"}, "keys": {"clusters": {"0": ["Additional Notes:", + "Address:", "Address:", "Company Name:", "Company Phone:", "Dated As:", "Email:", + "Hero Limited", "Name:", "Phone:", "Phone:", "Purchase Order", "Purchase Order", + "Purchase Order #:", "SUBTOTAL", "Seattle, WA 93849 Phone:", "Shipped From", + "Shipped To", "TAX", "TOTAL", "Vendor Name:", "Website:"]}}, "trainResult": + {"trainingDocuments": [{"documentName": "Form_1.jpg", "pages": 1, "errors": + [], "status": "succeeded"}, {"documentName": "Form_2.jpg", "pages": 1, "errors": + [], "status": "succeeded"}, {"documentName": "Form_3.jpg", "pages": 1, "errors": + [], "status": "succeeded"}, {"documentName": "Form_4.jpg", "pages": 1, "errors": + [], "status": "succeeded"}, {"documentName": "Form_5.jpg", "pages": 1, "errors": + [], "status": "succeeded"}], "errors": []}, "accessToken": "redacted"}' + headers: + apim-request-id: 0cae8ba6-1d8d-40f2-aee7-53f0f6fd3c21 + content-type: application/json; charset=utf-8 + date: Thu, 14 May 2020 01:09:42 GMT + strict-transport-security: max-age=31536000; includeSubDomains; preload + transfer-encoding: chunked + x-content-type-options: nosniff + x-envoy-upstream-service-time: '138' + status: + code: 200 + message: OK + url: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/f622d63d-9e3d-4a1d-a290-5e3949b932d8?includeKeys=true +- request: + body: null + headers: + Accept: + - application/json + User-Agent: + - azsdk-python-ai-formrecognizer/1.0.0b3 Python/3.7.3 (Windows-10-10.0.18362-SP0) + Python/3.7.3 (Windows-10-10.0.18362-SP0) + method: POST + uri: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/copyAuthorization + response: + body: + string: '{"modelId": "2741351c-66be-4d9a-bffa-f51858f0a1e8", "accessToken": + "redacted", "expirationDateTimeTicks": 637251017835447315}' + headers: + apim-request-id: f5a567ea-38b3-4f3b-af4e-d39fd2294f66 + content-type: application/json; charset=utf-8 + date: Thu, 14 May 2020 01:09:43 GMT + location: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/2741351c-66be-4d9a-bffa-f51858f0a1e8 + strict-transport-security: max-age=31536000; includeSubDomains; preload + transfer-encoding: chunked + x-content-type-options: nosniff + x-envoy-upstream-service-time: '18' + status: + code: 201 + message: Created + url: https://westus.api.cognitive.microsoft.com//formrecognizer/v2.0-preview/custom/models/copyAuthorization +- request: + body: 'b''b\''{"targetResourceId": "resource_id", "targetResourceRegion": "westus", + "copyAuthorization": {"modelId": "2741351c-66be-4d9a-bffa-f51858f0a1e8", "accessToken": + 00000000-0000-0000-0000-000000000000, "expirationDateTimeTicks": 637251017835447315}}\''''' + headers: + Content-Length: + - '448' + Content-Type: + - application/json + User-Agent: + - azsdk-python-ai-formrecognizer/1.0.0b3 Python/3.7.3 (Windows-10-10.0.18362-SP0) + Python/3.7.3 (Windows-10-10.0.18362-SP0) + method: POST + uri: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/f622d63d-9e3d-4a1d-a290-5e3949b932d8/copy + response: + body: + string: '' + headers: + apim-request-id: 662bc4ad-d4ac-4e85-9942-0208926ea6b1 + content-length: '0' + date: Thu, 14 May 2020 01:09:43 GMT + operation-location: 
https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/f622d63d-9e3d-4a1d-a290-5e3949b932d8/copyresults/e3a252d8-f4d0-425b-b313-ebdb4c2aecff + strict-transport-security: max-age=31536000; includeSubDomains; preload + x-content-type-options: nosniff + x-envoy-upstream-service-time: '143' + status: + code: 202 + message: Accepted + url: https://westus.api.cognitive.microsoft.com//formrecognizer/v2.0-preview/custom/models/f622d63d-9e3d-4a1d-a290-5e3949b932d8/copy +- request: + body: null + headers: + User-Agent: + - azsdk-python-ai-formrecognizer/1.0.0b3 Python/3.7.3 (Windows-10-10.0.18362-SP0) + Python/3.7.3 (Windows-10-10.0.18362-SP0) + method: GET + uri: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/f622d63d-9e3d-4a1d-a290-5e3949b932d8/copyresults/e3a252d8-f4d0-425b-b313-ebdb4c2aecff + response: + body: + string: '{"status": "notStarted", "createdDateTime": "2020-05-14T01:09:43Z", + "lastUpdatedDateTime": "2020-05-14T01:09:43Z", "copyResult": {"modelId": "2741351c-66be-4d9a-bffa-f51858f0a1e8"}, + "accessToken": "redacted"}' + headers: + apim-request-id: 10ac066d-b0d9-49ba-847a-7c4744183dd8 + content-type: application/json; charset=utf-8 + date: Thu, 14 May 2020 01:09:48 GMT + strict-transport-security: max-age=31536000; includeSubDomains; preload + transfer-encoding: chunked + x-content-type-options: nosniff + x-envoy-upstream-service-time: '183' + status: + code: 200 + message: OK + url: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/f622d63d-9e3d-4a1d-a290-5e3949b932d8/copyresults/e3a252d8-f4d0-425b-b313-ebdb4c2aecff +- request: + body: null + headers: + User-Agent: + - azsdk-python-ai-formrecognizer/1.0.0b3 Python/3.7.3 (Windows-10-10.0.18362-SP0) + Python/3.7.3 (Windows-10-10.0.18362-SP0) + method: GET + uri: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/f622d63d-9e3d-4a1d-a290-5e3949b932d8/copyresults/e3a252d8-f4d0-425b-b313-ebdb4c2aecff + response: + body: + string: '{"status": "notStarted", "createdDateTime": "2020-05-14T01:09:43Z", + "lastUpdatedDateTime": "2020-05-14T01:09:43Z", "copyResult": {"modelId": "2741351c-66be-4d9a-bffa-f51858f0a1e8"}, + "accessToken": "redacted"}' + headers: + apim-request-id: 0b9eb162-f76a-4346-a07c-0ed7744bbdd1 + content-type: application/json; charset=utf-8 + date: Thu, 14 May 2020 01:09:53 GMT + strict-transport-security: max-age=31536000; includeSubDomains; preload + transfer-encoding: chunked + x-content-type-options: nosniff + x-envoy-upstream-service-time: '109' + status: + code: 200 + message: OK + url: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/f622d63d-9e3d-4a1d-a290-5e3949b932d8/copyresults/e3a252d8-f4d0-425b-b313-ebdb4c2aecff +- request: + body: null + headers: + User-Agent: + - azsdk-python-ai-formrecognizer/1.0.0b3 Python/3.7.3 (Windows-10-10.0.18362-SP0) + Python/3.7.3 (Windows-10-10.0.18362-SP0) + method: GET + uri: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/f622d63d-9e3d-4a1d-a290-5e3949b932d8/copyresults/e3a252d8-f4d0-425b-b313-ebdb4c2aecff + response: + body: + string: '{"status": "running", "createdDateTime": "2020-05-14T01:10:01.2858546Z", + "lastUpdatedDateTime": "2020-05-14T01:10:01.285855Z", "copyResult": {}, "accessToken": + "redacted"}' + headers: + apim-request-id: 59c62973-c5bc-4181-b50d-13f7868a4aff + content-type: application/json; charset=utf-8 + date: Thu, 14 May 2020 01:10:04 GMT + 
strict-transport-security: max-age=31536000; includeSubDomains; preload + transfer-encoding: chunked + x-content-type-options: nosniff + x-envoy-upstream-service-time: '5109' + status: + code: 200 + message: OK + url: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/f622d63d-9e3d-4a1d-a290-5e3949b932d8/copyresults/e3a252d8-f4d0-425b-b313-ebdb4c2aecff +- request: + body: null + headers: + User-Agent: + - azsdk-python-ai-formrecognizer/1.0.0b3 Python/3.7.3 (Windows-10-10.0.18362-SP0) + Python/3.7.3 (Windows-10-10.0.18362-SP0) + method: GET + uri: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/f622d63d-9e3d-4a1d-a290-5e3949b932d8/copyresults/e3a252d8-f4d0-425b-b313-ebdb4c2aecff + response: + body: + string: '{"status": "succeeded", "createdDateTime": "2020-05-14T01:10:06.5336275Z", + "lastUpdatedDateTime": "2020-05-14T01:10:06.5336279Z", "copyResult": {}, "accessToken": + "redacted"}' + headers: + apim-request-id: a0da10bb-1a98-458e-9a16-483ed276d406 + content-type: application/json; charset=utf-8 + date: Thu, 14 May 2020 01:10:13 GMT + strict-transport-security: max-age=31536000; includeSubDomains; preload + transfer-encoding: chunked + x-content-type-options: nosniff + x-envoy-upstream-service-time: '5104' + status: + code: 200 + message: OK + url: https://westus.api.cognitive.microsoft.com/formrecognizer/v2.0-preview/custom/models/f622d63d-9e3d-4a1d-a290-5e3949b932d8/copyresults/e3a252d8-f4d0-425b-b313-ebdb4c2aecff +version: 1 diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_copy_model.py b/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_copy_model.py new file mode 100644 index 000000000000..be9bce84721a --- /dev/null +++ b/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_copy_model.py @@ -0,0 +1,92 @@ +# coding=utf-8 +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+# ------------------------------------ + +import functools +from azure.core.exceptions import HttpResponseError +from azure.ai.formrecognizer._generated.models import CopyOperationResult +from azure.ai.formrecognizer import CustomFormModelInfo +from azure.ai.formrecognizer import FormTrainingClient +from testcase import FormRecognizerTest, GlobalFormRecognizerAccountPreparer +from testcase import GlobalTrainingAccountPreparer as _GlobalTrainingAccountPreparer + +GlobalTrainingAccountPreparer = functools.partial(_GlobalTrainingAccountPreparer, FormTrainingClient) + + +class TestCopyModel(FormRecognizerTest): + + @GlobalFormRecognizerAccountPreparer() + @GlobalTrainingAccountPreparer(copy=True) + def test_copy_model_successful(self, client, container_sas_url, location, resource_id): + + poller = client.begin_train_model(container_sas_url) + model = poller.result() + + target = client.get_copy_authorization(resource_region=location, resource_id=resource_id) + + poller = client.begin_copy_model(model.model_id, target=target) + copy = poller.result() + + copied_model = client.get_custom_model(copy.model_id) + + self.assertEqual(copy.status, "succeeded") + self.assertIsNotNone(copy.created_on) + self.assertIsNotNone(copy.last_modified) + self.assertEqual(target["modelId"], copy.model_id) + self.assertNotEqual(target["modelId"], model.model_id) + self.assertIsNotNone(copied_model) + + @GlobalFormRecognizerAccountPreparer() + @GlobalTrainingAccountPreparer(copy=True) + def test_copy_model_fail(self, client, container_sas_url, location, resource_id): + + poller = client.begin_train_model(container_sas_url) + model = poller.result() + + # give an incorrect region + target = client.get_copy_authorization(resource_region="eastus", resource_id=resource_id) + + with self.assertRaises(HttpResponseError): + poller = client.begin_copy_model(model.model_id, target=target) + copy = poller.result() + + @GlobalFormRecognizerAccountPreparer() + @GlobalTrainingAccountPreparer(copy=True) + def test_copy_model_transform(self, client, container_sas_url, location, resource_id): + + poller = client.begin_train_model(container_sas_url) + model = poller.result() + + target = client.get_copy_authorization(resource_region=location, resource_id=resource_id) + + raw_response = [] + + def callback(response, _, headers): + copy_result = client._client._deserialize(CopyOperationResult, response) + model_info = CustomFormModelInfo._from_generated(copy_result, target["modelId"]) + raw_response.append(copy_result) + raw_response.append(model_info) + + poller = client.begin_copy_model(model.model_id, target=target, cls=callback) + copy = poller.result() + + actual = raw_response[0] + copy = raw_response[1] + self.assertEqual(copy.created_on, actual.created_date_time) + self.assertEqual(copy.status, actual.status) + self.assertEqual(copy.last_modified, actual.last_updated_date_time) + self.assertEqual(copy.model_id, target["modelId"]) + + @GlobalFormRecognizerAccountPreparer() + @GlobalTrainingAccountPreparer(copy=True) + def test_copy_authorization(self, client, container_sas_url, location, resource_id): + + target = client.get_copy_authorization(resource_region="eastus", resource_id=resource_id) + + self.assertIsNotNone(target["modelId"]) + self.assertIsNotNone(target["accessToken"]) + self.assertIsNotNone(target["expirationDateTimeTicks"]) + self.assertEqual(target["resourceRegion"], "eastus") + self.assertEqual(target["resourceId"], resource_id) diff --git 
a/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_copy_model_async.py b/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_copy_model_async.py new file mode 100644 index 000000000000..0b7a2aa0422e --- /dev/null +++ b/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_copy_model_async.py @@ -0,0 +1,87 @@ +# coding=utf-8 +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +import functools +from azure.core.exceptions import HttpResponseError +from azure.ai.formrecognizer._generated.models import CopyOperationResult +from azure.ai.formrecognizer import CustomFormModelInfo +from azure.ai.formrecognizer.aio import FormTrainingClient +from testcase import GlobalFormRecognizerAccountPreparer +from testcase import GlobalTrainingAccountPreparer as _GlobalTrainingAccountPreparer +from asynctestcase import AsyncFormRecognizerTest + +GlobalTrainingAccountPreparer = functools.partial(_GlobalTrainingAccountPreparer, FormTrainingClient) + + +class TestCopyModelAsync(AsyncFormRecognizerTest): + + @GlobalFormRecognizerAccountPreparer() + @GlobalTrainingAccountPreparer(copy=True) + async def test_copy_model_successful(self, client, container_sas_url, location, resource_id): + + model = await client.train_model(container_sas_url) + + target = await client.get_copy_authorization(resource_region=location, resource_id=resource_id) + + copy = await client.copy_model(model.model_id, target=target) + + copied_model = await client.get_custom_model(copy.model_id) + + self.assertEqual(copy.status, "succeeded") + self.assertIsNotNone(copy.created_on) + self.assertIsNotNone(copy.last_modified) + self.assertEqual(target["modelId"], copy.model_id) + self.assertNotEqual(target["modelId"], model.model_id) + self.assertIsNotNone(copied_model) + + @GlobalFormRecognizerAccountPreparer() + @GlobalTrainingAccountPreparer(copy=True) + async def test_copy_model_fail(self, client, container_sas_url, location, resource_id): + + model = await client.train_model(container_sas_url) + + # give an incorrect region + target = await client.get_copy_authorization(resource_region="eastus", resource_id=resource_id) + + with self.assertRaises(HttpResponseError): + copy = await client.copy_model(model.model_id, target=target) + + @GlobalFormRecognizerAccountPreparer() + @GlobalTrainingAccountPreparer(copy=True) + async def test_copy_model_transform(self, client, container_sas_url, location, resource_id): + + model = await client.train_model(container_sas_url) + + target = await client.get_copy_authorization(resource_region=location, resource_id=resource_id) + + raw_response = [] + + def callback(response, _, headers): + copy_result = client._client._deserialize(CopyOperationResult, response) + model_info = CustomFormModelInfo._from_generated(copy_result, target["modelId"]) + raw_response.append(copy_result) + raw_response.append(model_info) + + copy = await client.copy_model(model.model_id, target=target, cls=callback) + + actual = raw_response[0] + copy = raw_response[1] + self.assertEqual(copy.created_on, actual.created_date_time) + self.assertEqual(copy.status, actual.status) + self.assertEqual(copy.last_modified, actual.last_updated_date_time) + self.assertEqual(copy.model_id, target["modelId"]) + + @GlobalFormRecognizerAccountPreparer() + @GlobalTrainingAccountPreparer(copy=True) + async def test_copy_authorization(self, client, container_sas_url, location, resource_id): + + target = await 
client.get_copy_authorization(resource_region="eastus", resource_id=resource_id) + + self.assertIsNotNone(target["modelId"]) + self.assertIsNotNone(target["accessToken"]) + self.assertIsNotNone(target["expirationDateTimeTicks"]) + self.assertEqual(target["resourceRegion"], "eastus") + self.assertEqual(target["resourceId"], resource_id) diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_mgmt.py b/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_mgmt.py index 6eb9be39cc22..4efe9d52e4d5 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_mgmt.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_mgmt.py @@ -79,7 +79,7 @@ def test_mgmt_model_labeled(self, client, container_sas_url): models_list = client.list_custom_models() for model in models_list: self.assertIsNotNone(model.model_id) - self.assertEqual(model.status, "ready") + self.assertIsNotNone(model.status) self.assertIsNotNone(model.created_on) self.assertIsNotNone(model.last_modified) @@ -114,7 +114,7 @@ def test_mgmt_model_unlabeled(self, client, container_sas_url): models_list = client.list_custom_models() for model in models_list: self.assertIsNotNone(model.model_id) - self.assertEqual(model.status, "ready") + self.assertIsNotNone(model.status) self.assertIsNotNone(model.created_on) self.assertIsNotNone(model.last_modified) diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_mgmt_async.py b/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_mgmt_async.py index 735875672cb5..2a51556f4a5a 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_mgmt_async.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/tests/test_mgmt_async.py @@ -79,7 +79,7 @@ async def test_mgmt_model_labeled(self, client, container_sas_url): models_list = client.list_custom_models() async for model in models_list: self.assertIsNotNone(model.model_id) - self.assertEqual(model.status, "ready") + self.assertIsNotNone(model.status) self.assertIsNotNone(model.created_on) self.assertIsNotNone(model.last_modified) @@ -112,7 +112,7 @@ async def test_mgmt_model_unlabeled(self, client, container_sas_url): models_list = client.list_custom_models() async for model in models_list: self.assertIsNotNone(model.model_id) - self.assertEqual(model.status, "ready") + self.assertIsNotNone(model.status) self.assertIsNotNone(model.created_on) self.assertIsNotNone(model.last_modified) diff --git a/sdk/formrecognizer/azure-ai-formrecognizer/tests/testcase.py b/sdk/formrecognizer/azure-ai-formrecognizer/tests/testcase.py index fdcfbb69d363..07581730cb8a 100644 --- a/sdk/formrecognizer/azure-ai-formrecognizer/tests/testcase.py +++ b/sdk/formrecognizer/azure-ai-formrecognizer/tests/testcase.py @@ -18,7 +18,36 @@ ResourceGroupPreparer, ) from devtools_testutils.cognitiveservices_testcase import CognitiveServicesAccountPreparer -from azure_devtools.scenario_tests import ReplayableTest +from azure_devtools.scenario_tests import ( + RecordingProcessor, + ReplayableTest +) +from azure_devtools.scenario_tests.utilities import is_text_payload + + +class AccessTokenReplacer(RecordingProcessor): + """Replace the access token in a request/response body.""" + + def __init__(self, replacement='redacted'): + self._replacement = replacement + + def process_request(self, request): + import re + if is_text_payload(request) and request.body: + body = str(request.body) + body = re.sub(r'"accessToken": "([0-9a-f-]{36})"', r'"accessToken": 00000000-0000-0000-0000-000000000000', body) + request.body = body + return request + + def 
process_response(self, response): + import json + try: + body = json.loads(response['body']['string']) + body['accessToken'] = self._replacement + except (KeyError, ValueError): + return response + response['body']['string'] = json.dumps(body) + return response class FakeTokenCredential(object): @@ -37,6 +66,8 @@ class FormRecognizerTest(AzureTestCase): def __init__(self, method_name): super(FormRecognizerTest, self).__init__(method_name) + self.recording_processors.append(AccessTokenReplacer()) + # URL samples self.receipt_url_jpg = "https://raw.githubusercontent.com/Azure/azure-sdk-for-python/master/sdk/formrecognizer/azure-ai-formrecognizer/tests/sample_forms/receipt/contoso-allinone.jpg" self.receipt_url_png = "https://raw.githubusercontent.com/Azure/azure-sdk-for-python/master/sdk/formrecognizer/azure-ai-formrecognizer/tests/sample_forms/receipt/contoso-receipt.png" @@ -348,7 +379,7 @@ def create_resource(self, name, **kwargs): ) return { - 'location': 'westus2', + 'location': 'westus', 'resource_group': rg, } @@ -363,10 +394,10 @@ def __init__(self): def create_resource(self, name, **kwargs): form_recognizer_account = FormRecognizerTest._FORM_RECOGNIZER_ACCOUNT return { - 'location': 'westus2', + 'location': 'westus', 'resource_group': FormRecognizerTest._RESOURCE_GROUP, 'form_recognizer_account': form_recognizer_account, - 'form_recognizer_account_key': FormRecognizerTest._FORM_RECOGNIZER_KEY, + 'form_recognizer_account_key': FormRecognizerTest._FORM_RECOGNIZER_KEY } @@ -380,6 +411,7 @@ def __init__(self, client_cls, client_kwargs={}, **kwargs): self.client_cls = client_cls self.multipage_test = kwargs.get("multipage", False) self.need_blob_sas_url = kwargs.get("blob_sas_url", False) + self.copy = kwargs.get("copy", False) def _load_settings(self): try: @@ -412,6 +444,30 @@ def create_resource(self, name, **kwargs): return {"client": client, "container_sas_url": container_sas_url, "blob_sas_url": blob_sas_url} + if self.copy: + if self.is_live: + resource_group = kwargs.get("resource_group") + subscription_id = self.get_settings_value("SUBSCRIPTION_ID") + form_recognizer_name = FormRecognizerTest._FORM_RECOGNIZER_NAME + + resource_id = "/subscriptions/" + subscription_id + "/resourceGroups/" + resource_group.name + \ + "/providers/Microsoft.CognitiveServices/accounts/" + form_recognizer_name + resource_location = "westus" + self.test_class_instance.scrubber.register_name_pair( + resource_id, + "resource_id" + ) + else: + resource_location = "westus" + resource_id = "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rgname/providers/Microsoft.CognitiveServices/accounts/frname" + + return { + "client": client, + "container_sas_url": container_sas_url, + "location": resource_location, + "resource_id": resource_id + } + else: return {"client": client, "container_sas_url": container_sas_url} @@ -458,7 +514,7 @@ def create_form_client_and_container_sas_url(self, **kwargs): @pytest.fixture(scope="session") def form_recognizer_account(): test_case = AzureTestCase("__init__") - rg_preparer = ResourceGroupPreparer(random_name_enabled=True, name_prefix='pycog') + rg_preparer = ResourceGroupPreparer(random_name_enabled=True, name_prefix='pycog', location="westus") form_recognizer_preparer = CognitiveServicesAccountPreparer( random_name_enabled=True, kind="formrecognizer", @@ -474,6 +530,7 @@ def form_recognizer_account(): test_case, **rg_kwargs) FormRecognizerTest._FORM_RECOGNIZER_ACCOUNT = form_recognizer_kwargs['cognitiveservices_account'] 
FormRecognizerTest._FORM_RECOGNIZER_KEY = form_recognizer_kwargs['cognitiveservices_account_key'] + FormRecognizerTest._FORM_RECOGNIZER_NAME = form_recognizer_name yield finally: form_recognizer_preparer.remove_resource( From b22a709260430d45f72e06665ec7e8f29b5d113b Mon Sep 17 00:00:00 2001 From: Zim Kalinowski Date: Thu, 21 May 2020 09:04:57 +0200 Subject: [PATCH 28/28] Kaihuis maps (#11574) * add test for maps * Update test_cli_mgmt_maps.test_maps.yaml * Packaging update of azure-mgmt-maps * updated coverage Co-authored-by: 00Kai0 Co-authored-by: Azure SDK Bot --- sdk/maps/azure-mgmt-maps/MANIFEST.in | 6 +- sdk/maps/azure-mgmt-maps/README.md | 48 +-- sdk/maps/azure-mgmt-maps/setup.py | 6 +- .../test_cli_mgmt_maps.test_maps.yaml | 317 ++++++++++++++++++ .../tests/test_cli_mgmt_maps.py | 97 ++++++ 5 files changed, 434 insertions(+), 40 deletions(-) create mode 100644 sdk/maps/azure-mgmt-maps/tests/recordings/test_cli_mgmt_maps.test_maps.yaml create mode 100644 sdk/maps/azure-mgmt-maps/tests/test_cli_mgmt_maps.py diff --git a/sdk/maps/azure-mgmt-maps/MANIFEST.in b/sdk/maps/azure-mgmt-maps/MANIFEST.in index 5b365d1fb18e..a3cb07df8765 100644 --- a/sdk/maps/azure-mgmt-maps/MANIFEST.in +++ b/sdk/maps/azure-mgmt-maps/MANIFEST.in @@ -1 +1,5 @@ -include *.rst *.md +recursive-include tests *.py *.yaml +include *.md +include azure/__init__.py +include azure/mgmt/__init__.py + diff --git a/sdk/maps/azure-mgmt-maps/README.md b/sdk/maps/azure-mgmt-maps/README.md index 4f21f0c6ddaf..53f2deb187b7 100644 --- a/sdk/maps/azure-mgmt-maps/README.md +++ b/sdk/maps/azure-mgmt-maps/README.md @@ -1,47 +1,21 @@ -## Microsoft Azure SDK for Python +# Microsoft Azure SDK for Python This is the Microsoft Azure Maps Client Library. +This package has been tested with Python 2.7, 3.5, 3.6, 3.7 and 3.8. +For a more complete view of Azure libraries, see the [Github repo](https://github.com/Azure/azure-sdk-for-python/) -Azure Resource Manager (ARM) is the next generation of management APIs -that replace the old Azure Service Management (ASM). -This package has been tested with Python 2.7, 3.4, 3.5, 3.6 and 3.7. +# Usage -For the older Azure Service Management (ASM) libraries, see -[azure-servicemanagement-legacy](https://pypi.python.org/pypi/azure-servicemanagement-legacy) -library. +For code examples, see [Maps](https://docs.microsoft.com/python/api/overview/azure/) +on docs.microsoft.com. -For a more complete set of Azure libraries, see the -[azure](https://pypi.python.org/pypi/azure) bundle package. -## Compatibility +# Provide Feedback -**IMPORTANT**: If you have an earlier version of the azure package -(version < 1.0), you should uninstall it before installing this -package. - -You can check the version using pip: - -``` shell -pip freeze -``` - -If you see azure==0.11.0 (or any version below 1.0), uninstall it first: - -``` shell -pip uninstall azure -``` - -## Usage - -For code examples, see -[Maps](https://docs.microsoft.com/python/api/overview/azure/) on -docs.microsoft.com. - -## Provide Feedback - -If you encounter any bugs or have suggestions, please file an issue in -the [Issues](https://github.com/Azure/azure-sdk-for-python/issues) +If you encounter any bugs or have suggestions, please file an issue in the +[Issues](https://github.com/Azure/azure-sdk-for-python/issues) section of the project. 
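+
+To make the usage section above concrete, here is a minimal sketch of client
+construction and one call, assuming service-principal credentials (every
+identifier below is a placeholder, not a real value):
+
+``` python
+from azure.common.credentials import ServicePrincipalCredentials
+from azure.mgmt.maps import MapsManagementClient
+
+# Placeholder AAD application values -- substitute your own.
+credentials = ServicePrincipalCredentials(
+    client_id="<client-id>",
+    secret="<client-secret>",
+    tenant="<tenant-id>",
+)
+client = MapsManagementClient(credentials, "<subscription-id>")
+
+# List every Maps account in the subscription.
+for account in client.accounts.list_by_subscription():
+    print(account.name)
+```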
-![image](https://azure-sdk-impressions.azurewebsites.net/api/impressions/azure-sdk-for-python%2Fazure-mgmt-maps%2FREADME.png) + +![Impressions](https://azure-sdk-impressions.azurewebsites.net/api/impressions/azure-sdk-for-python%2Fazure-mgmt-maps%2FREADME.png) diff --git a/sdk/maps/azure-mgmt-maps/setup.py b/sdk/maps/azure-mgmt-maps/setup.py index 230863918a61..d828a745206f 100644 --- a/sdk/maps/azure-mgmt-maps/setup.py +++ b/sdk/maps/azure-mgmt-maps/setup.py @@ -36,7 +36,9 @@ pass # Version extraction inspired from 'requests' -with open(os.path.join(package_folder_path, 'version.py'), 'r') as fd: +with open(os.path.join(package_folder_path, 'version.py') + if os.path.exists(os.path.join(package_folder_path, 'version.py')) + else os.path.join(package_folder_path, '_version.py'), 'r') as fd: version = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]', fd.read(), re.MULTILINE).group(1) @@ -64,10 +66,10 @@ 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', 'License :: OSI Approved :: MIT License', ], zip_safe=False, diff --git a/sdk/maps/azure-mgmt-maps/tests/recordings/test_cli_mgmt_maps.test_maps.yaml b/sdk/maps/azure-mgmt-maps/tests/recordings/test_cli_mgmt_maps.test_maps.yaml new file mode 100644 index 000000000000..be4dc70875b6 --- /dev/null +++ b/sdk/maps/azure-mgmt-maps/tests/recordings/test_cli_mgmt_maps.test_maps.yaml @@ -0,0 +1,317 @@ +interactions: +- request: + body: '{"location": "global", "tags": {"test": "true"}, "sku": {"name": "S0"}}' + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '71' + Content-Type: + - application/json; charset=utf-8 + User-Agent: + - python/3.6.9 (Linux-4.9.184-linuxkit-x86_64-with-Ubuntu-18.04-bionic) msrest/0.6.10 + msrest_azure/0.6.2 azure-mgmt-maps/0.1.0 Azure-SDK-For-Python + accept-language: + - en-US + method: PUT + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/test_cli_mgmt_maps_test_mapsa6f40b7a/providers/Microsoft.Maps/accounts/accountname?api-version=2018-05-01 + response: + body: + string: "{\r\n \"id\": \"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/test_cli_mgmt_maps_test_mapsa6f40b7a/providers/Microsoft.Maps/accounts/accountname\"\ + ,\r\n \"name\": \"accountname\",\r\n \"type\": \"Microsoft.Maps/accounts\"\ + ,\r\n \"location\": \"global\",\r\n \"tags\": {\r\n \"test\": \"true\"\ + \r\n },\r\n \"sku\": {\r\n \"name\": \"S0\",\r\n \"tier\": \"Standard\"\ + \r\n },\r\n \"properties\": {\r\n \"x-ms-client-id\": \"144cabaf-076d-4e9f-8d96-b5da49bcce07\"\ + \r\n }\r\n}" + headers: + cache-control: + - no-cache + content-length: + - '441' + content-type: + - application/json; charset=utf-8 + date: + - Tue, 18 Feb 2020 04:17:32 GMT + expires: + - '-1' + pragma: + - no-cache + server: + - Kestrel + strict-transport-security: + - max-age=31536000; includeSubDomains + x-content-type-options: + - nosniff + x-ms-ratelimit-remaining-subscription-writes: + - '1198' + status: + code: 201 + message: Created +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Type: + - application/json; charset=utf-8 + User-Agent: + - python/3.6.9 
(Linux-4.9.184-linuxkit-x86_64-with-Ubuntu-18.04-bionic) msrest/0.6.10 + msrest_azure/0.6.2 azure-mgmt-maps/0.1.0 Azure-SDK-For-Python + accept-language: + - en-US + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/test_cli_mgmt_maps_test_mapsa6f40b7a/providers/Microsoft.Maps/accounts/accountname?api-version=2018-05-01 + response: + body: + string: "{\r\n \"id\": \"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/test_cli_mgmt_maps_test_mapsa6f40b7a/providers/Microsoft.Maps/accounts/accountname\"\ + ,\r\n \"name\": \"accountname\",\r\n \"type\": \"Microsoft.Maps/accounts\"\ + ,\r\n \"location\": \"global\",\r\n \"tags\": {\r\n \"test\": \"true\"\ + \r\n },\r\n \"sku\": {\r\n \"name\": \"S0\",\r\n \"tier\": \"Standard\"\ + \r\n },\r\n \"properties\": {\r\n \"x-ms-client-id\": \"144cabaf-076d-4e9f-8d96-b5da49bcce07\"\ + \r\n }\r\n}" + headers: + cache-control: + - no-cache + content-length: + - '441' + content-type: + - application/json; charset=utf-8 + date: + - Tue, 18 Feb 2020 04:17:33 GMT + expires: + - '-1' + pragma: + - no-cache + server: + - Kestrel + strict-transport-security: + - max-age=31536000; includeSubDomains + transfer-encoding: + - chunked + vary: + - Accept-Encoding + x-content-type-options: + - nosniff + status: + code: 200 + message: OK +- request: + body: '{"keyType": "primary"}' + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '22' + Content-Type: + - application/json; charset=utf-8 + User-Agent: + - python/3.6.9 (Linux-4.9.184-linuxkit-x86_64-with-Ubuntu-18.04-bionic) msrest/0.6.10 + msrest_azure/0.6.2 azure-mgmt-maps/0.1.0 Azure-SDK-For-Python + accept-language: + - en-US + method: POST + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/test_cli_mgmt_maps_test_mapsa6f40b7a/providers/Microsoft.Maps/accounts/accountname/regenerateKey?api-version=2018-05-01 + response: + body: + string: "{\r\n \"id\": \"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/test_cli_mgmt_maps_test_mapsa6f40b7a/providers/Microsoft.Maps/accounts/accountname\"\ + ,\r\n \"primaryKey\": \"MYNPUe9z30krVAYcD4lUQo2wZrds6mfNxJMFK8Qabtc\",\r\n\ + \ \"secondaryKey\": \"ICubKt1Cjhyj69U4cvdggWkb5feV85I577UxSDRB65M\",\r\n\ + \ \"primaryKeyLastUpdated\": \"2020-02-18T04:17:35.298607Z\",\r\n \"secondaryKeyLastUpdated\"\ + : \"2020-02-18T04:17:31.9655787Z\"\r\n}" + headers: + cache-control: + - no-cache + content-length: + - '416' + content-type: + - application/json; charset=utf-8 + date: + - Tue, 18 Feb 2020 04:17:34 GMT + expires: + - '-1' + pragma: + - no-cache + server: + - Kestrel + strict-transport-security: + - max-age=31536000; includeSubDomains + transfer-encoding: + - chunked + vary: + - Accept-Encoding + x-content-type-options: + - nosniff + x-ms-ratelimit-remaining-subscription-writes: + - '1199' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '0' + Content-Type: + - application/json; charset=utf-8 + User-Agent: + - python/3.6.9 (Linux-4.9.184-linuxkit-x86_64-with-Ubuntu-18.04-bionic) msrest/0.6.10 + msrest_azure/0.6.2 azure-mgmt-maps/0.1.0 Azure-SDK-For-Python + accept-language: + - en-US + method: POST + uri: 
https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/test_cli_mgmt_maps_test_mapsa6f40b7a/providers/Microsoft.Maps/accounts/accountname/listKeys?api-version=2018-05-01 + response: + body: + string: "{\r\n \"id\": \"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/test_cli_mgmt_maps_test_mapsa6f40b7a/providers/Microsoft.Maps/accounts/accountname\"\ + ,\r\n \"primaryKey\": \"MYNPUe9z30krVAYcD4lUQo2wZrds6mfNxJMFK8Qabtc\",\r\n\ + \ \"secondaryKey\": \"ICubKt1Cjhyj69U4cvdggWkb5feV85I577UxSDRB65M\",\r\n\ + \ \"primaryKeyLastUpdated\": \"2020-02-18T04:17:35.298607Z\",\r\n \"secondaryKeyLastUpdated\"\ + : \"2020-02-18T04:17:31.9655787Z\"\r\n}" + headers: + cache-control: + - no-cache + content-length: + - '416' + content-type: + - application/json; charset=utf-8 + date: + - Tue, 18 Feb 2020 04:17:36 GMT + expires: + - '-1' + pragma: + - no-cache + server: + - Kestrel + strict-transport-security: + - max-age=31536000; includeSubDomains + transfer-encoding: + - chunked + vary: + - Accept-Encoding + x-content-type-options: + - nosniff + x-ms-ratelimit-remaining-subscription-writes: + - '1199' + status: + code: 200 + message: OK +- request: + body: '{"tags": {"tags": "{''special_tag'': ''true''}"}}' + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '45' + Content-Type: + - application/json; charset=utf-8 + User-Agent: + - python/3.6.9 (Linux-4.9.184-linuxkit-x86_64-with-Ubuntu-18.04-bionic) msrest/0.6.10 + msrest_azure/0.6.2 azure-mgmt-maps/0.1.0 Azure-SDK-For-Python + accept-language: + - en-US + method: PATCH + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/test_cli_mgmt_maps_test_mapsa6f40b7a/providers/Microsoft.Maps/accounts/accountname?api-version=2018-05-01 + response: + body: + string: "{\r\n \"id\": \"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/test_cli_mgmt_maps_test_mapsa6f40b7a/providers/Microsoft.Maps/accounts/accountname\"\ + ,\r\n \"name\": \"accountname\",\r\n \"type\": \"Microsoft.Maps/accounts\"\ + ,\r\n \"location\": \"global\",\r\n \"tags\": {\r\n \"tags\": \"{'special_tag':\ + \ 'true'}\"\r\n },\r\n \"sku\": {\r\n \"name\": \"S0\",\r\n \"tier\"\ + : \"Standard\"\r\n },\r\n \"properties\": {\r\n \"x-ms-client-id\": \"\ + 144cabaf-076d-4e9f-8d96-b5da49bcce07\"\r\n }\r\n}" + headers: + cache-control: + - no-cache + content-length: + - '460' + content-type: + - application/json; charset=utf-8 + date: + - Tue, 18 Feb 2020 04:17:43 GMT + expires: + - '-1' + pragma: + - no-cache + server: + - Kestrel + strict-transport-security: + - max-age=31536000; includeSubDomains + transfer-encoding: + - chunked + vary: + - Accept-Encoding + x-content-type-options: + - nosniff + x-ms-ratelimit-remaining-subscription-writes: + - '1198' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '0' + Content-Type: + - application/json; charset=utf-8 + User-Agent: + - python/3.6.9 (Linux-4.9.184-linuxkit-x86_64-with-Ubuntu-18.04-bionic) msrest/0.6.10 + msrest_azure/0.6.2 azure-mgmt-maps/0.1.0 Azure-SDK-For-Python + accept-language: + - en-US + method: DELETE + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/test_cli_mgmt_maps_test_mapsa6f40b7a/providers/Microsoft.Maps/accounts/accountname?api-version=2018-05-01 
+ response: + body: + string: '' + headers: + cache-control: + - no-cache + content-length: + - '0' + date: + - Tue, 18 Feb 2020 04:17:48 GMT + expires: + - '-1' + pragma: + - no-cache + server: + - Kestrel + strict-transport-security: + - max-age=31536000; includeSubDomains + x-content-type-options: + - nosniff + x-ms-ratelimit-remaining-subscription-deletes: + - '14998' + status: + code: 200 + message: OK +version: 1 diff --git a/sdk/maps/azure-mgmt-maps/tests/test_cli_mgmt_maps.py b/sdk/maps/azure-mgmt-maps/tests/test_cli_mgmt_maps.py new file mode 100644 index 000000000000..85c99b48a67e --- /dev/null +++ b/sdk/maps/azure-mgmt-maps/tests/test_cli_mgmt_maps.py @@ -0,0 +1,97 @@ +# coding: utf-8 + +#------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +#-------------------------------------------------------------------------- + + +# TEST SCENARIO COVERAGE +# ---------------------- +# Methods Total : 10 +# Methods Covered : 10 +# Examples Total : 10 +# Examples Tested : 9 +# Coverage % : 90 +# ---------------------- + +import unittest + +import azure.mgmt.maps +from devtools_testutils import AzureMgmtTestCase, ResourceGroupPreparer + +AZURE_LOCATION = 'eastus' + +class MgmtMapsManagementClientTest(AzureMgmtTestCase): + + def setUp(self): + super(MgmtMapsManagementClientTest, self).setUp() + self.mgmt_client = self.create_mgmt_client( + azure.mgmt.maps.MapsManagementClient + ) + + @ResourceGroupPreparer(location=AZURE_LOCATION) + def test_maps(self, resource_group): + + ACCOUNT_NAME = "accountname" + # CreateAccount[put] + BODY = { + "location": "global", + "sku": { + "name": "S0" + }, + "tags": { + "test": "true" + } + } + result = self.mgmt_client.accounts.create_or_update(resource_group.name, ACCOUNT_NAME, BODY) + + # GetAccount[get] + result = self.mgmt_client.accounts.get(resource_group.name, ACCOUNT_NAME) + + # ListAccountsByResourceGroup[get] + result = self.mgmt_client.accounts.list_by_resource_group(resource_group.name) + + # ListAccountsBySubscription[get] + result = self.mgmt_client.accounts.list_by_subscription() + + # GetOperations[get] + result = self.mgmt_client.accounts.list_operations() + + # RegenerateKey[post] + # BODY = { + # "key_type": "primary" + # } + key_type = "primary" + result = self.mgmt_client.accounts.regenerate_keys(resource_group.name, ACCOUNT_NAME, key_type) + + # ListKeys[post] + result = self.mgmt_client.accounts.list_keys(resource_group.name, ACCOUNT_NAME) + + # UpdateAccount[patch] + BODY = { + "tags": { + "special_tag": "true" + } + } + result = self.mgmt_client.accounts.update(resource_group.name, ACCOUNT_NAME, BODY) + + # TODO: Multiple resources involved + # # MoveAccounts[post] + # BODY = { + # "target_resource_group": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "", + # "resource_ids": [ + # "/subscriptions/21a9967a-e8a9-4656-a70b-96ff1c4d05a0/resourceGroups/myResourceGroup/providers/Microsoft.Maps/accounts/myMapsAccount", + # "/subscriptions/21a9967a-e8a9-4656-a70b-96ff1c4d05a0/resourceGroups/myResourceGroup/providers/Microsoft.Maps/accounts/myMapsAccount2" + # ] + # } + # result = self.mgmt_client.accounts.move(resource_group.name, BODY) + + # DeleteAccount[delete] + result = self.mgmt_client.accounts.delete(resource_group.name, ACCOUNT_NAME) + + +#------------------------------------------------------------------------------ +if 
__name__ == '__main__': + unittest.main()
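+
+# A hedged usage sketch, kept in comments so nothing executes on import: outside
+# the test harness, the key-rotation flow exercised above looks roughly like the
+# following. It assumes service-principal credentials and that the generated
+# keys model exposes snake_case primary_key/secondary_key attributes; names in
+# angle brackets are placeholders.
+#
+#     from azure.common.credentials import ServicePrincipalCredentials
+#     from azure.mgmt.maps import MapsManagementClient
+#
+#     credentials = ServicePrincipalCredentials(
+#         client_id="<client-id>", secret="<client-secret>", tenant="<tenant-id>")
+#     client = MapsManagementClient(credentials, "<subscription-id>")
+#     keys = client.accounts.regenerate_keys("<resource-group>", "accountname", "primary")
+#     keys = client.accounts.list_keys("<resource-group>", "accountname")
+#     print(keys.primary_key, keys.secondary_key)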