diff --git a/sdk/textanalytics/azure-ai-textanalytics/CHANGELOG.md b/sdk/textanalytics/azure-ai-textanalytics/CHANGELOG.md index b70e8b595ecf..34fa103b335b 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/CHANGELOG.md +++ b/sdk/textanalytics/azure-ai-textanalytics/CHANGELOG.md @@ -2,6 +2,12 @@ ## 5.1.0b7 (Unreleased) +**Breaking Changes** +- Renamed `begin_analyze_batch_actions` to `begin_analyze_actions`. +- Renamed `AnalyzeBatchActionsType` to `AnalyzeActionsType`. +- Renamed `AnalyzeBatchActionsResult` to `AnalyzeActionsResult`. +- Renamed `AnalyzeBatchActionsError` to `AnalyzeActionsError`. + **New Features** - Added enums `EntityConditionality`, `EntityCertainty`, and `EntityAssociation`. - Added `AnalyzeSentimentAction` as a supported action type for `begin_analyze_actions`. diff --git a/sdk/textanalytics/azure-ai-textanalytics/README.md b/sdk/textanalytics/azure-ai-textanalytics/README.md index 6fd6659cf01e..b1e69be921de 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/README.md +++ b/sdk/textanalytics/azure-ai-textanalytics/README.md @@ -8,7 +8,7 @@ Text Analytics is a cloud-based service that provides advanced natural language - Personally Identifiable Information (PII) Entity Recognition - Language Detection - Key Phrase Extraction -- Batch Analysis +- Multiple Analysis - Healthcare Entities Analysis (Gated Preview) [Source code][source_code] | [Package (PyPI)][ta_pypi] | [API reference documentation][ta_ref_docs]| [Product documentation][ta_product_documentation] | [Samples][ta_samples] @@ -209,7 +209,7 @@ Long-running operations are operations which consist of an initial request sent followed by polling the service at intervals to determine whether the operation has completed or failed, and if it has succeeded, to get the result. -Methods that support Healthcare Analysis or batch operations over multiple Text Analytics APIs are modeled as long-running operations. +Methods that support Healthcare Analysis or combine multiple Text Analytics APIs in a single request are modeled as long-running operations. The client exposes a `begin_` method that returns an `LROPoller` or `AsyncLROPoller`. Callers should wait for the operation to complete by calling `result()` on the poller object returned from the `begin_` method. Sample code snippets are provided to illustrate using long-running operations [below](#examples "Examples"). @@ -225,7 +225,7 @@ The following section provides several code snippets covering some of the most c - [Extract Key Phrases](#extract-key-phrases "Extract key phrases") - [Detect Language](#detect-language "Detect language") - [Healthcare Entities Analysis](#healthcare-entities-analysis "Healthcare Entities Analysis") -- [Batch Analysis](#batch-analysis "Batch analysis") +- [Multiple Analysis](#multiple-analysis "Multiple analysis") ### Analyze sentiment @@ -491,9 +491,9 @@ for idx, doc in enumerate(docs): Note: The Healthcare Entities Analysis service is currently available only in the API v3.1 preview versions and is in gated preview. Since this is a gated preview, AAD is not supported. More information [here](https://docs.microsoft.com/azure/cognitive-services/text-analytics/how-tos/text-analytics-for-health?tabs=ner#request-access-to-the-public-preview). -### Batch Analysis +### Multiple Analysis -[Long-running operation](#long-running-operations) [`begin_analyze_batch_actions`][analyze_batch_actions] performs multiple analyses over one set of documents in a single request. 
Currently batching is supported using any combination of the following Text Analytics APIs in a single request: +[Long-running operation](#long-running-operations) [`begin_analyze_actions`][analyze_actions] performs multiple analyses over one set of documents in a single request. Any combination of the following Text Analytics APIs is currently supported: - Entities Recognition - PII Entities Recognition @@ -519,7 +519,7 @@ text_analytics_client = TextAnalyticsClient(endpoint, credential) documents = ["Microsoft was founded by Bill Gates and Paul Allen."] -poller = text_analytics_client.begin_analyze_batch_actions( +poller = text_analytics_client.begin_analyze_actions( documents, display_name="Sample Text Analysis", actions=[ @@ -531,7 +531,7 @@ poller = text_analytics_client.begin_analyze_batch_actions( ] ) -# returns batch actions results in the same order as the inputted actions +# returns action results in the same order as the actions were passed in result = poller.result() first_action_result = next(result) @@ -604,7 +604,7 @@ for doc in docs: The returned response is an object encapsulating multiple iterables, each representing results of individual analyses. -Note: Batch analysis is currently available only in the v3.1-preview API version. +Note: Multiple analysis is currently available only in the v3.1-preview API version. ## Optional Configuration @@ -683,7 +683,7 @@ Common scenarios - Extract key phrases: [sample_extract_key_phrases.py][extract_key_phrases_sample] ([async version][extract_key_phrases_sample_async]) - Detect language: [sample_detect_language.py][detect_language_sample] ([async version][detect_language_sample_async]) - Healthcare Entities Analysis: [sample_analyze_healthcare_entities.py][analyze_healthcare_entities_sample] ([async version][analyze_healthcare_entities_sample_async]) -- Batch Analysis: [sample_analyze_batch_actions.py][analyze_sample] ([async version][analyze_sample_async]) +- Multiple Analysis: [sample_analyze_actions.py][analyze_sample] ([async version][analyze_sample_async]) Advanced scenarios @@ -739,7 +739,7 @@ This project has adopted the [Microsoft Open Source Code of Conduct][code_of_con [detect_language_input]: https://aka.ms/azsdk-python-textanalytics-detectlanguageinput [text_analytics_client]: https://aka.ms/azsdk-python-textanalytics-textanalyticsclient [analyze_sentiment]: https://aka.ms/azsdk-python-textanalytics-analyzesentiment -[analyze_batch_actions]: https://aka.ms/azsdk/python/docs/ref/textanalytics#azure.ai.textanalytics.TextAnalyticsClient.begin_analyze_batch_actions +[analyze_actions]: https://aka.ms/azsdk/python/docs/ref/textanalytics#azure.ai.textanalytics.TextAnalyticsClient.begin_analyze_actions [analyze_healthcare_entities]: https://aka.ms/azsdk/python/docs/ref/textanalytics#azure.ai.textanalytics.TextAnalyticsClient.begin_analyze_healthcare_entities [recognize_entities]: https://aka.ms/azsdk-python-textanalytics-recognizeentities [recognize_pii_entities]: https://aka.ms/azsdk-python-textanalytics-recognizepiientities @@ -775,8 +775,8 @@ This project has adopted the [Microsoft Open Source Code of Conduct][code_of_con [recognize_pii_entities_sample_async]: https://github.com/Azure/azure-sdk-for-python/blob/master/sdk/textanalytics/azure-ai-textanalytics/samples/async_samples/sample_recognize_pii_entities_async.py [analyze_healthcare_entities_sample]: 
https://github.com/Azure/azure-sdk-for-python/blob/master/sdk/textanalytics/azure-ai-textanalytics/samples/sample_analyze_healthcare_entities.py [analyze_healthcare_entities_sample_async]: https://github.com/Azure/azure-sdk-for-python/blob/master/sdk/textanalytics/azure-ai-textanalytics/samples/async_samples/sample_analyze_healthcare_entities_async.py -[analyze_sample]: https://github.com/Azure/azure-sdk-for-python/blob/master/sdk/textanalytics/azure-ai-textanalytics/samples/sample_analyze_batch_actions.py -[analyze_sample_async]: https://github.com/Azure/azure-sdk-for-python/blob/master/sdk/textanalytics/azure-ai-textanalytics/samples/async_samples/sample_analyze_batch_actions_async.py +[analyze_sample]: https://github.com/Azure/azure-sdk-for-python/blob/master/sdk/textanalytics/azure-ai-textanalytics/samples/sample_analyze_actions.py +[analyze_sample_async]: https://github.com/Azure/azure-sdk-for-python/blob/master/sdk/textanalytics/azure-ai-textanalytics/samples/async_samples/sample_analyze_actions_async.py [opinion_mining_sample]: https://github.com/Azure/azure-sdk-for-python/blob/master/sdk/textanalytics/azure-ai-textanalytics/samples/sample_analyze_sentiment_with_opinion_mining.py [opinion_mining_sample_async]: https://github.com/Azure/azure-sdk-for-python/blob/master/sdk/textanalytics/azure-ai-textanalytics/samples/async_samples/sample_analyze_sentiment_with_opinion_mining_async.py [cla]: https://cla.microsoft.com diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/__init__.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/__init__.py index 3c53b0622f65..753e2c49ecba 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/__init__.py +++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/__init__.py @@ -39,10 +39,10 @@ RecognizeLinkedEntitiesAction, RecognizePiiEntitiesAction, ExtractKeyPhrasesAction, - AnalyzeBatchActionsResult, + AnalyzeActionsResult, RequestStatistics, - AnalyzeBatchActionsType, - AnalyzeBatchActionsError, + AnalyzeActionsType, + AnalyzeActionsError, HealthcareEntityRelationRoleType, HealthcareRelation, HealthcareRelationRole, @@ -93,10 +93,10 @@ 'RecognizeLinkedEntitiesAction', 'RecognizePiiEntitiesAction', 'ExtractKeyPhrasesAction', - 'AnalyzeBatchActionsResult', + 'AnalyzeActionsResult', 'RequestStatistics', - 'AnalyzeBatchActionsType', - "AnalyzeBatchActionsError", + 'AnalyzeActionsType', + 'AnalyzeActionsError', "PiiEntityCategoryType", "HealthcareEntityRelationType", "HealthcareEntityRelationRoleType", diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_models.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_models.py index 3db9586a4257..dca3c7fb434c 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_models.py +++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_models.py @@ -1356,8 +1356,8 @@ def __repr__(self): .format(self.positive, self.neutral, self.negative)[:1024] -class AnalyzeBatchActionsType(str, Enum): - """The type of batch action that was applied to the documents +class AnalyzeActionsType(str, Enum): + """The type of action that was applied to the documents. """ RECOGNIZE_ENTITIES = "recognize_entities" #: Entities Recognition action. RECOGNIZE_PII_ENTITIES = "recognize_pii_entities" #: PII Entities Recognition action. @@ -1366,17 +1366,17 @@ class AnalyzeBatchActionsType(str, Enum): ANALYZE_SENTIMENT = "analyze_sentiment" #: Sentiment Analysis action. 
-class AnalyzeBatchActionsResult(DictMixin): - """AnalyzeBatchActionsResult contains the results of a recognize entities action - on a list of documents. Returned by `begin_analyze_batch_actions` +class AnalyzeActionsResult(DictMixin): + """AnalyzeActionsResult contains the results of a single action + on a list of documents. Returned by `begin_analyze_actions`. :ivar document_results: A list of objects containing results for all Entity Recognition actions included in the analysis. :vartype document_results: list[~azure.ai.textanalytics.RecognizeEntitiesResult] :ivar bool is_error: Boolean check for error item when iterating over list of - actions. Always False for an instance of a AnalyzeBatchActionsResult. - :ivar action_type: The type of batch action this class is a result of. - :vartype action_type: str or ~azure.ai.textanalytics.AnalyzeBatchActionsType + actions. Always False for an instance of an AnalyzeActionsResult. + :ivar action_type: The type of action this class is a result of. + :vartype action_type: str or ~azure.ai.textanalytics.AnalyzeActionsType :ivar ~datetime.datetime completed_on: Date and time (UTC) when the result completed on the service. :ivar statistics: Overall statistics for the action result. @@ -1390,7 +1390,7 @@ def __init__(self, **kwargs): self.statistics = kwargs.get("statistics") def __repr__(self): - return "AnalyzeBatchActionsResult(document_results={}, is_error={}, action_type={}, completed_on={}, " \ + return "AnalyzeActionsResult(document_results={}, is_error={}, action_type={}, completed_on={}, " \ "statistics={})".format( repr(self.document_results), self.is_error, @@ -1399,8 +1399,9 @@ def __repr__(self): repr(self.statistics) )[:1024] -class AnalyzeBatchActionsError(DictMixin): - """AnalyzeBatchActionsError is an error object which represents an an + +class AnalyzeActionsError(DictMixin): + """AnalyzeActionsError is an error object which represents an error response for an action. :ivar error: The action result error. @@ -1414,7 +1415,7 @@ def __init__(self, **kwargs): self.is_error = True def __repr__(self): - return "AnalyzeBatchActionsError(error={}, is_error={}".format( + return "AnalyzeActionsError(error={}, is_error={})".format( repr(self.error), self.is_error ) @@ -1428,8 +1429,8 @@ def _from_generated(cls, error): class RecognizeEntitiesAction(DictMixin): """RecognizeEntitiesAction encapsulates the parameters for starting a long-running Entities Recognition operation. - If you just want to recognize entities in a list of documents, and not perform a batch - of long running actions on the input of documents, call method `recognize_entities` instead + If you just want to recognize entities in a list of documents, and not perform multiple + long-running actions on the input documents, call method `recognize_entities` instead of interfacing with this model. :keyword str model_version: The model version to use for the analysis. @@ -1465,8 +1466,8 @@ class AnalyzeSentimentAction(DictMixin): """AnalyzeSentimentAction encapsulates the parameters for starting a long-running Sentiment Analysis operation. - If you just want to analyze sentiment in a list of documents, and not perform a batch - of long running actions on the input of documents, call method `analyze_sentiment` instead + If you just want to analyze sentiment in a list of documents, and not perform multiple + long-running actions on the input documents, call method `analyze_sentiment` instead of interfacing with this model. 
:keyword str model_version: The model version to use for the analysis. @@ -1517,8 +1518,8 @@ class RecognizePiiEntitiesAction(DictMixin): """RecognizePiiEntitiesAction encapsulates the parameters for starting a long-running PII Entities Recognition operation. - If you just want to recognize pii entities in a list of documents, and not perform a batch - of long running actions on the input of documents, call method `recognize_pii_entities` instead + If you just want to recognize PII entities in a list of documents, and not perform multiple + long-running actions on the input documents, call method `recognize_pii_entities` instead of interfacing with this model. :keyword str model_version: The model version to use for the analysis. @@ -1563,8 +1564,8 @@ class ExtractKeyPhrasesAction(DictMixin): """ExtractKeyPhrasesAction encapsulates the parameters for starting a long-running key phrase extraction operation. - If you just want to extract key phrases from a list of documents, and not perform a batch - of long running actions on the input of documents, call method `extract_key_phrases` instead + If you just want to extract key phrases from a list of documents, and not perform multiple + long-running actions on the input documents, call method `extract_key_phrases` instead of interfacing with this model. :keyword str model_version: The model version to use for the analysis. @@ -1590,8 +1591,8 @@ class RecognizeLinkedEntitiesAction(DictMixin): """RecognizeLinkedEntitiesAction encapsulates the parameters for starting a long-running Linked Entities Recognition operation. - If you just want to recognize linked entities in a list of documents, and not perform a batch - of long running actions on the input of documents, call method `recognize_linked_entities` instead + If you just want to recognize linked entities in a list of documents, and not perform multiple + long-running actions on the input documents, call method `recognize_linked_entities` instead of interfacing with this model. :keyword str model_version: The model version to use for the analysis. 
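Reviewer context, not part of the diff: a minimal sketch of how the renamed surface composes end to end, following the README sample in this PR. The endpoint environment variable is the one the samples read; the key variable name and the two-action combination are assumptions for illustration, and the error branch mirrors the `is_error` handling that `AnalyzeActionsError` provides.

import os

from azure.core.credentials import AzureKeyCredential
from azure.ai.textanalytics import (
    TextAnalyticsClient,
    RecognizeEntitiesAction,
    ExtractKeyPhrasesAction,
    AnalyzeActionsType,
)

# AZURE_TEXT_ANALYTICS_ENDPOINT matches the samples; the key variable name is assumed.
endpoint = os.environ["AZURE_TEXT_ANALYTICS_ENDPOINT"]
key = os.environ["AZURE_TEXT_ANALYTICS_KEY"]
text_analytics_client = TextAnalyticsClient(endpoint, AzureKeyCredential(key))

poller = text_analytics_client.begin_analyze_actions(
    ["Microsoft was founded by Bill Gates and Paul Allen."],
    actions=[RecognizeEntitiesAction(), ExtractKeyPhrasesAction()],
)

# Action results are yielded in the same order the actions were passed in;
# a failed action surfaces as an AnalyzeActionsError with is_error=True.
for action_result in poller.result():
    if action_result.is_error:
        print("Action failed: {} - {}".format(
            action_result.error.code, action_result.error.message
        ))
    elif action_result.action_type == AnalyzeActionsType.RECOGNIZE_ENTITIES:
        for doc in action_result.document_results:
            print([entity.text for entity in doc.entities])
    else:  # AnalyzeActionsType.EXTRACT_KEY_PHRASES
        for doc in action_result.document_results:
            print(doc.key_phrases)

The iteration contract (ordered results, `is_error` sentinel objects) is untouched by this PR; only the names lose the `Batch` prefix.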
diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_request_handlers.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_request_handlers.py index bf429008cece..baf47085ba0b 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_request_handlers.py +++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_request_handlers.py @@ -14,7 +14,7 @@ RecognizePiiEntitiesAction, RecognizeLinkedEntitiesAction, AnalyzeSentimentAction, - AnalyzeBatchActionsType, + AnalyzeActionsType, ) def _validate_input(documents, hint, whole_input_hint): @@ -71,14 +71,14 @@ def _validate_input(documents, hint, whole_input_hint): def _determine_action_type(action): if isinstance(action, RecognizeEntitiesAction): - return AnalyzeBatchActionsType.RECOGNIZE_ENTITIES + return AnalyzeActionsType.RECOGNIZE_ENTITIES if isinstance(action, RecognizePiiEntitiesAction): - return AnalyzeBatchActionsType.RECOGNIZE_PII_ENTITIES + return AnalyzeActionsType.RECOGNIZE_PII_ENTITIES if isinstance(action, RecognizeLinkedEntitiesAction): - return AnalyzeBatchActionsType.RECOGNIZE_LINKED_ENTITIES + return AnalyzeActionsType.RECOGNIZE_LINKED_ENTITIES if isinstance(action, AnalyzeSentimentAction): - return AnalyzeBatchActionsType.ANALYZE_SENTIMENT - return AnalyzeBatchActionsType.EXTRACT_KEY_PHRASES + return AnalyzeActionsType.ANALYZE_SENTIMENT + return AnalyzeActionsType.EXTRACT_KEY_PHRASES def _check_string_index_type_arg(string_index_type_arg, api_version, string_index_type_default="UnicodeCodePoint"): string_index_type = None diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_response_handlers.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_response_handlers.py index a781c1cc74be..bba2c934d37c 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_response_handlers.py +++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_response_handlers.py @@ -31,10 +31,10 @@ RecognizePiiEntitiesResult, PiiEntity, AnalyzeHealthcareEntitiesResultItem, - AnalyzeBatchActionsResult, + AnalyzeActionsResult, RequestStatistics, - AnalyzeBatchActionsType, - AnalyzeBatchActionsError, + AnalyzeActionsType, + AnalyzeActionsError, _get_indices, ) from ._paging import AnalyzeHealthcareEntitiesResult, AnalyzeResult @@ -199,24 +199,24 @@ def healthcare_extract_page_data(doc_id_order, obj, response_headers, health_job healthcare_result(doc_id_order, health_job_state.results, response_headers, lro=True)) def _get_deserialization_callback_from_task_type(task_type): - if task_type == AnalyzeBatchActionsType.RECOGNIZE_ENTITIES: + if task_type == AnalyzeActionsType.RECOGNIZE_ENTITIES: return entities_result - if task_type == AnalyzeBatchActionsType.RECOGNIZE_PII_ENTITIES: + if task_type == AnalyzeActionsType.RECOGNIZE_PII_ENTITIES: return pii_entities_result - if task_type == AnalyzeBatchActionsType.RECOGNIZE_LINKED_ENTITIES: + if task_type == AnalyzeActionsType.RECOGNIZE_LINKED_ENTITIES: return linked_entities_result - if task_type == AnalyzeBatchActionsType.ANALYZE_SENTIMENT: + if task_type == AnalyzeActionsType.ANALYZE_SENTIMENT: return sentiment_result return key_phrases_result def _get_property_name_from_task_type(task_type): - if task_type == AnalyzeBatchActionsType.RECOGNIZE_ENTITIES: + if task_type == AnalyzeActionsType.RECOGNIZE_ENTITIES: return "entity_recognition_tasks" - if task_type == AnalyzeBatchActionsType.RECOGNIZE_PII_ENTITIES: + if task_type == AnalyzeActionsType.RECOGNIZE_PII_ENTITIES: 
return "entity_recognition_pii_tasks" - if task_type == AnalyzeBatchActionsType.RECOGNIZE_LINKED_ENTITIES: + if task_type == AnalyzeActionsType.RECOGNIZE_LINKED_ENTITIES: return "entity_linking_tasks" - if task_type == AnalyzeBatchActionsType.ANALYZE_SENTIMENT: + if task_type == AnalyzeActionsType.ANALYZE_SENTIMENT: return "sentiment_analysis_tasks" return "key_phrase_extraction_tasks" @@ -231,14 +231,14 @@ def _num_tasks_in_current_page(returned_tasks_object): def _get_task_type_from_error(error): if "pii" in error.target.lower(): - return AnalyzeBatchActionsType.RECOGNIZE_PII_ENTITIES + return AnalyzeActionsType.RECOGNIZE_PII_ENTITIES if "entityrecognition" in error.target.lower(): - return AnalyzeBatchActionsType.RECOGNIZE_ENTITIES + return AnalyzeActionsType.RECOGNIZE_ENTITIES if "entitylinking" in error.target.lower(): - return AnalyzeBatchActionsType.RECOGNIZE_LINKED_ENTITIES + return AnalyzeActionsType.RECOGNIZE_LINKED_ENTITIES if "sentiment" in error.target.lower(): - return AnalyzeBatchActionsType.ANALYZE_SENTIMENT - return AnalyzeBatchActionsType.EXTRACT_KEY_PHRASES + return AnalyzeActionsType.ANALYZE_SENTIMENT + return AnalyzeActionsType.EXTRACT_KEY_PHRASES def _get_mapped_errors(analyze_job_state): """ @@ -260,7 +260,7 @@ def _get_good_result(current_task_type, index_of_task_result, doc_id_order, resp document_results = deserialization_callback( doc_id_order, response_task_to_deserialize.results, response_headers, lro=True ) - return AnalyzeBatchActionsResult( + return AnalyzeActionsResult( document_results=document_results, statistics=RequestStatistics._from_generated( # pylint: disable=protected-access response_task_to_deserialize.results.statistics @@ -284,7 +284,7 @@ def get_iter_items(doc_id_order, task_order, response_headers, analyze_job_state current_task_type_errors = mapped_errors[current_task_type] error = next(err for err in current_task_type_errors if err[0] == index_of_task_result) - result = AnalyzeBatchActionsError._from_generated(error[1]) # pylint: disable=protected-access + result = AnalyzeActionsError._from_generated(error[1]) # pylint: disable=protected-access except StopIteration: result = _get_good_result( current_task_type, index_of_task_result, doc_id_order, response_headers, returned_tasks_object diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_text_analytics_client.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_text_analytics_client.py index 1e23b153d3a3..5479207f2f77 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_text_analytics_client.py +++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_text_analytics_client.py @@ -36,7 +36,7 @@ _get_deserialize ) -from ._models import AnalyzeBatchActionsType +from ._models import AnalyzeActionsType from ._lro import ( TextAnalyticsOperationResourcePolling, @@ -62,7 +62,7 @@ ExtractKeyPhrasesAction, AnalyzeSentimentAction, AnalyzeHealthcareEntitiesResultItem, - AnalyzeBatchActionsResult, + AnalyzeActionsResult, ) @@ -742,12 +742,12 @@ def _analyze_result_callback(self, doc_id_order, task_order, raw_response, _, he ) @distributed_trace - def begin_analyze_batch_actions( # type: ignore + def begin_analyze_actions( # type: ignore self, documents, # type: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]] actions, # type: List[Union[RecognizeEntitiesAction, RecognizeLinkedEntitiesAction, RecognizePiiEntitiesAction, ExtractKeyPhrasesAction, AnalyzeSentimentAction]] # pylint: disable=line-too-long **kwargs # 
type: Any - ): # type: (...) -> LROPoller[ItemPaged[AnalyzeBatchActionsResult]] + ): # type: (...) -> LROPoller[ItemPaged[AnalyzeActionsResult]] """Start a long-running operation to perform a variety of text analysis actions over a batch of documents. :param documents: The set of documents to process as part of this batch. @@ -779,12 +779,12 @@ def begin_analyze_batch_actions( # type: ignore the actions were sent in this method. :rtype: ~azure.core.polling.LROPoller[~azure.core.paging.ItemPaged[ - ~azure.ai.textanalytics.AnalyzeBatchActionsResult]] + ~azure.ai.textanalytics.AnalyzeActionsResult]] :raises ~azure.core.exceptions.HttpResponseError or TypeError or ValueError or NotImplementedError: .. admonition:: Example: - .. literalinclude:: ../samples/sample_analyze_batch_actions.py + .. literalinclude:: ../samples/sample_analyze_actions.py :start-after: [START analyze] :end-before: [END analyze] :language: python @@ -810,26 +810,26 @@ def begin_analyze_batch_actions( # type: ignore analyze_tasks = self._client.models(api_version='v3.1-preview.5').JobManifestTasks( entity_recognition_tasks=[ t.to_generated() for t in - [a for a in actions if _determine_action_type(a) == AnalyzeBatchActionsType.RECOGNIZE_ENTITIES] + [a for a in actions if _determine_action_type(a) == AnalyzeActionsType.RECOGNIZE_ENTITIES] ], entity_recognition_pii_tasks=[ t.to_generated() for t in - [a for a in actions if _determine_action_type(a) == AnalyzeBatchActionsType.RECOGNIZE_PII_ENTITIES] + [a for a in actions if _determine_action_type(a) == AnalyzeActionsType.RECOGNIZE_PII_ENTITIES] ], key_phrase_extraction_tasks=[ t.to_generated() for t in - [a for a in actions if _determine_action_type(a) == AnalyzeBatchActionsType.EXTRACT_KEY_PHRASES] + [a for a in actions if _determine_action_type(a) == AnalyzeActionsType.EXTRACT_KEY_PHRASES] ], entity_linking_tasks=[ t.to_generated() for t in [ a for a in actions - if _determine_action_type(a) == AnalyzeBatchActionsType.RECOGNIZE_LINKED_ENTITIES + if _determine_action_type(a) == AnalyzeActionsType.RECOGNIZE_LINKED_ENTITIES ] ], sentiment_analysis_tasks=[ t.to_generated() for t in - [a for a in actions if _determine_action_type(a) == AnalyzeBatchActionsType.ANALYZE_SENTIMENT] + [a for a in actions if _determine_action_type(a) == AnalyzeActionsType.ANALYZE_SENTIMENT] ] ) analyze_body = self._client.models(api_version='v3.1-preview.5').AnalyzeBatchInput( @@ -855,7 +855,7 @@ def begin_analyze_batch_actions( # type: ignore except ValueError as error: if "API version v3.0 does not have operation 'begin_analyze'" in str(error): raise ValueError( - "'begin_analyze_batch_actions' endpoint is only available for API version V3_1_PREVIEW and up" + "'begin_analyze_actions' endpoint is only available for API version V3_1_PREVIEW and up" ) raise error diff --git a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/aio/_text_analytics_client_async.py b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/aio/_text_analytics_client_async.py index 209a8fefb266..10953341bb4b 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/aio/_text_analytics_client_async.py +++ b/sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/aio/_text_analytics_client_async.py @@ -43,8 +43,8 @@ RecognizeEntitiesAction, RecognizePiiEntitiesAction, ExtractKeyPhrasesAction, - AnalyzeBatchActionsResult, - AnalyzeBatchActionsType, + AnalyzeActionsResult, + AnalyzeActionsType, RecognizeLinkedEntitiesAction, AnalyzeSentimentAction ) @@ -726,12 +726,12 @@ def 
_analyze_result_callback(self, doc_id_order, task_order, raw_response, _, he ) @distributed_trace_async - async def begin_analyze_batch_actions( # type: ignore + async def begin_analyze_actions( # type: ignore self, documents, # type: Union[List[str], List[TextDocumentInput], List[Dict[str, str]]] actions, # type: List[Union[RecognizeEntitiesAction, RecognizeLinkedEntitiesAction, RecognizePiiEntitiesAction, ExtractKeyPhrasesAction, AnalyzeSentimentAction]] # pylint: disable=line-too-long **kwargs # type: Any - ): # type: (...) -> AsyncLROPoller[AsyncItemPaged[AnalyzeBatchActionsResult]] + ): # type: (...) -> AsyncLROPoller[AsyncItemPaged[AnalyzeActionsResult]] """Start a long-running operation to perform a variety of text analysis actions over a batch of documents. :param documents: The set of documents to process as part of this batch. @@ -763,17 +763,17 @@ async def begin_analyze_batch_actions( # type: ignore the actions were sent in this method. :rtype: ~azure.core.polling.AsyncLROPoller[~azure.core.async_paging.AsyncItemPaged[ - ~azure.ai.textanalytics.AnalyzeBatchActionsResult]] + ~azure.ai.textanalytics.AnalyzeActionsResult]] :raises ~azure.core.exceptions.HttpResponseError or TypeError or ValueError or NotImplementedError: .. admonition:: Example: - .. literalinclude:: ../samples/async_samples/sample_analyze_batch_actions_async.py + .. literalinclude:: ../samples/async_samples/sample_analyze_actions_async.py :start-after: [START analyze_async] :end-before: [END analyze_async] :language: python :dedent: 8 - :caption: Start a long-running operation to perform a variety of text analysis tasks over + :caption: Start a long-running operation to perform a variety of text analysis actions over a batch of documents. """ @@ -794,26 +794,26 @@ async def begin_analyze_batch_actions( # type: ignore analyze_tasks = self._client.models(api_version='v3.1-preview.5').JobManifestTasks( entity_recognition_tasks=[ t.to_generated() for t in - [a for a in actions if _determine_action_type(a) == AnalyzeBatchActionsType.RECOGNIZE_ENTITIES] + [a for a in actions if _determine_action_type(a) == AnalyzeActionsType.RECOGNIZE_ENTITIES] ], entity_recognition_pii_tasks=[ t.to_generated() for t in - [a for a in actions if _determine_action_type(a) == AnalyzeBatchActionsType.RECOGNIZE_PII_ENTITIES] + [a for a in actions if _determine_action_type(a) == AnalyzeActionsType.RECOGNIZE_PII_ENTITIES] ], key_phrase_extraction_tasks=[ t.to_generated() for t in - [a for a in actions if _determine_action_type(a) == AnalyzeBatchActionsType.EXTRACT_KEY_PHRASES] + [a for a in actions if _determine_action_type(a) == AnalyzeActionsType.EXTRACT_KEY_PHRASES] ], entity_linking_tasks=[ t.to_generated() for t in [ a for a in actions if \ - _determine_action_type(a) == AnalyzeBatchActionsType.RECOGNIZE_LINKED_ENTITIES + _determine_action_type(a) == AnalyzeActionsType.RECOGNIZE_LINKED_ENTITIES ] ], sentiment_analysis_tasks=[ t.to_generated() for t in - [a for a in actions if _determine_action_type(a) == AnalyzeBatchActionsType.ANALYZE_SENTIMENT] + [a for a in actions if _determine_action_type(a) == AnalyzeActionsType.ANALYZE_SENTIMENT] ] ) analyze_body = self._client.models(api_version='v3.1-preview.5').AnalyzeBatchInput( @@ -839,7 +839,7 @@ async def begin_analyze_batch_actions( # type: ignore except ValueError as error: if "API version v3.0 does not have operation 'begin_analyze'" in str(error): raise ValueError( - "'begin_analyze_batch_actions' endpoint is only available for API version V3_1_PREVIEW and up" + "'begin_analyze_actions' 
endpoint is only available for API version V3_1_PREVIEW and up" ) raise error diff --git a/sdk/textanalytics/azure-ai-textanalytics/samples/README.md b/sdk/textanalytics/azure-ai-textanalytics/samples/README.md index 5746a4ff7e55..dc77ca5f6127 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/samples/README.md +++ b/sdk/textanalytics/azure-ai-textanalytics/samples/README.md @@ -29,7 +29,7 @@ These sample programs show common scenarios for the Text Analytics client's offe |[sample_analyze_sentiment.py][analyze_sentiment] and [sample_analyze_sentiment_async.py][analyze_sentiment_async]|Analyze the sentiment of documents| |[sample_alternative_document_input.py][sample_alternative_document_input] and [sample_alternative_document_input_async.py][sample_alternative_document_input_async]|Pass documents to an endpoint using dicts| |[sample_analyze_healthcare_entities.py][analyze_healthcare_entities_sample] and [sample_analyze_healthcare_entities_async.py][analyze_healthcare_entities_sample_async]|Analyze healthcare entities| -|[sample_analyze_batch_actions.py][analyze_sample] and [sample_analyze_batch_actions_async.py][analyze_sample_async]|Batch multiple analyses together in a single request| +|[sample_analyze_actions.py][analyze_sample] and [sample_analyze_actions_async.py][analyze_sample_async]|Run multiple analyses together in a single request| ## Prerequisites * Python 2.7, or 3.5 or later is required to use this package (3.5 or later if using asyncio) @@ -93,8 +93,8 @@ what you can do with the Azure Text Analytics client library. [sample_analyze_sentiment_with_opinion_mining_async]: https://github.com/Azure/azure-sdk-for-python/tree/master/sdk/textanalytics/azure-ai-textanalytics/samples/async_samples/sample_analyze_sentiment_with_opinion_mining_async.py [analyze_healthcare_entities_sample]: https://github.com/Azure/azure-sdk-for-python/blob/master/sdk/textanalytics/azure-ai-textanalytics/samples/sample_analyze_healthcare_entities.py [analyze_healthcare_entities_sample_async]: https://github.com/Azure/azure-sdk-for-python/blob/master/sdk/textanalytics/azure-ai-textanalytics/samples/async_samples/sample_analyze_healthcare_entities_async.py -[analyze_sample]: https://github.com/Azure/azure-sdk-for-python/blob/master/sdk/textanalytics/azure-ai-textanalytics/samples/sample_analyze_batch_actions.py -[analyze_sample_async]: https://github.com/Azure/azure-sdk-for-python/blob/master/sdk/textanalytics/azure-ai-textanalytics/samples/async_samples/sample_analyze_batch_actions_async.py +[analyze_sample]: https://github.com/Azure/azure-sdk-for-python/blob/master/sdk/textanalytics/azure-ai-textanalytics/samples/sample_analyze_actions.py +[analyze_sample_async]: https://github.com/Azure/azure-sdk-for-python/blob/master/sdk/textanalytics/azure-ai-textanalytics/samples/async_samples/sample_analyze_actions_async.py [pip]: https://pypi.org/project/pip/ [azure_subscription]: https://azure.microsoft.com/free/ [azure_text_analytics_account]: https://docs.microsoft.com/azure/cognitive-services/cognitive-services-apis-create-account?tabs=singleservice%2Cwindows diff --git a/sdk/textanalytics/azure-ai-textanalytics/samples/async_samples/sample_analyze_batch_actions_async.py b/sdk/textanalytics/azure-ai-textanalytics/samples/async_samples/sample_analyze_actions_async.py similarity index 91% rename from sdk/textanalytics/azure-ai-textanalytics/samples/async_samples/sample_analyze_batch_actions_async.py rename to sdk/textanalytics/azure-ai-textanalytics/samples/async_samples/sample_analyze_actions_async.py index 
ad7c551ad92b..d8894f025c51 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/samples/async_samples/sample_analyze_batch_actions_async.py +++ b/sdk/textanalytics/azure-ai-textanalytics/samples/async_samples/sample_analyze_actions_async.py @@ -7,7 +7,7 @@ # -------------------------------------------------------------------------- """ -FILE: sample_analyze_batch_actions_async.py +FILE: sample_analyze_actions_async.py DESCRIPTION: This sample demonstrates how to submit a collection of text documents for analysis, which consists of a variety @@ -16,7 +16,7 @@ actions specified in the request. USAGE: - python sample_analyze_batch_actions_async.py + python sample_analyze_actions_async.py Set the environment variables with your own values before running the sample: 1) AZURE_TEXT_ANALYTICS_ENDPOINT - the endpoint to your Cognitive Services resource. @@ -39,7 +39,7 @@ async def analyze_async(self): RecognizePiiEntitiesAction, ExtractKeyPhrasesAction, AnalyzeSentimentAction, - AnalyzeBatchActionsType + AnalyzeActionsType ) endpoint = os.environ["AZURE_TEXT_ANALYTICS_ENDPOINT"] @@ -60,7 +60,7 @@ async def analyze_async(self): ] async with text_analytics_client: - poller = await text_analytics_client.begin_analyze_batch_actions( + poller = await text_analytics_client.begin_analyze_actions( documents, display_name="Sample Text Analysis", actions=[ @@ -81,7 +81,7 @@ async def analyze_async(self): action_result.error.message ) ) - if action_result.action_type == AnalyzeBatchActionsType.RECOGNIZE_ENTITIES: + if action_result.action_type == AnalyzeActionsType.RECOGNIZE_ENTITIES: print("Results of Entities Recognition action:") for idx, doc in enumerate(action_result.document_results): print("\nDocument text: {}".format(documents[idx])) @@ -92,7 +92,7 @@ async def analyze_async(self): print("...Offset: {}".format(entity.offset)) print("------------------------------------------") - if action_result.action_type == AnalyzeBatchActionsType.RECOGNIZE_PII_ENTITIES: + if action_result.action_type == AnalyzeActionsType.RECOGNIZE_PII_ENTITIES: print("Results of PII Entities Recognition action:") for idx, doc in enumerate(action_result.document_results): print("Document text: {}".format(documents[idx])) @@ -102,14 +102,14 @@ async def analyze_async(self): print("Confidence Score: {}\n".format(entity.confidence_score)) print("------------------------------------------") - if action_result.action_type == AnalyzeBatchActionsType.EXTRACT_KEY_PHRASES: + if action_result.action_type == AnalyzeActionsType.EXTRACT_KEY_PHRASES: print("Results of Key Phrase Extraction action:") for idx, doc in enumerate(action_result.document_results): print("Document text: {}\n".format(documents[idx])) print("Key Phrases: {}\n".format(doc.key_phrases)) print("------------------------------------------") - if action_result.action_type == AnalyzeBatchActionsType.RECOGNIZE_LINKED_ENTITIES: + if action_result.action_type == AnalyzeActionsType.RECOGNIZE_LINKED_ENTITIES: print("Results of Linked Entities Recognition action:") for idx, doc in enumerate(action_result.document_results): print("Document text: {}\n".format(documents[idx])) @@ -127,7 +127,7 @@ async def analyze_async(self): print(".........Length: {}".format(match.length)) print("------------------------------------------") - if action_result.action_type == AnalyzeBatchActionsType.ANALYZE_SENTIMENT: + if action_result.action_type == AnalyzeActionsType.ANALYZE_SENTIMENT: print("Results of Sentiment Analysis action:") for doc in action_result.document_results: print("Overall 
sentiment: {}".format(doc.sentiment)) diff --git a/sdk/textanalytics/azure-ai-textanalytics/samples/sample_analyze_batch_actions.py b/sdk/textanalytics/azure-ai-textanalytics/samples/sample_analyze_actions.py similarity index 97% rename from sdk/textanalytics/azure-ai-textanalytics/samples/sample_analyze_batch_actions.py rename to sdk/textanalytics/azure-ai-textanalytics/samples/sample_analyze_actions.py index 5639557f832e..1ffc0be170b9 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/samples/sample_analyze_batch_actions.py +++ b/sdk/textanalytics/azure-ai-textanalytics/samples/sample_analyze_actions.py @@ -7,7 +7,7 @@ # -------------------------------------------------------------------------- """ -FILE: sample_analyze_batch_actions.py +FILE: sample_analyze_actions.py DESCRIPTION: This sample demonstrates how to submit a collection of text documents for analysis, which consists of a variety @@ -16,7 +16,7 @@ actions specified in the request. USAGE: - python sample_analyze_batch_actions.py + python sample_analyze_actions.py Set the environment variables with your own values before running the sample: 1) AZURE_TEXT_ANALYTICS_ENDPOINT - the endpoint to your Cognitive Services resource. @@ -59,7 +59,7 @@ def analyze(self): The only complaint I have is the food didn't come fast enough. Overall I highly recommend it!" ] - poller = text_analytics_client.begin_analyze_batch_actions( + poller = text_analytics_client.begin_analyze_actions( documents, display_name="Sample Text Analysis", actions=[ diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/test_analyze.py b/sdk/textanalytics/azure-ai-textanalytics/tests/test_analyze.py index 01f4a67f88b4..1e22d815418e 100644 --- a/sdk/textanalytics/azure-ai-textanalytics/tests/test_analyze.py +++ b/sdk/textanalytics/azure-ai-textanalytics/tests/test_analyze.py @@ -25,7 +25,7 @@ TextDocumentInput, VERSION, TextAnalyticsApiVersion, - AnalyzeBatchActionsType, + AnalyzeActionsType, ) # pre-apply the client_cls positional argument so it needn't be explicitly passed below @@ -41,7 +41,7 @@ def _interval(self): @TextAnalyticsClientPreparer() def test_no_single_input(self, client): with self.assertRaises(TypeError): - response = client.begin_analyze_batch_actions("hello world", actions=[], polling_interval=self._interval()) + response = client.begin_analyze_actions("hello world", actions=[], polling_interval=self._interval()) @GlobalTextAnalyticsAccountPreparer() @TextAnalyticsClientPreparer() @@ -49,7 +49,7 @@ def test_all_successful_passing_dict_key_phrase_task(self, client): docs = [{"id": "1", "language": "en", "text": "Microsoft was founded by Bill Gates and Paul Allen"}, {"id": "2", "language": "es", "text": "Microsoft fue fundado por Bill Gates y Paul Allen"}] - response = client.begin_analyze_batch_actions( + response = client.begin_analyze_actions( docs, actions=[ExtractKeyPhrasesAction()], show_stats=True, @@ -61,7 +61,7 @@ def test_all_successful_passing_dict_key_phrase_task(self, client): assert len(action_results) == 1 action_result = action_results[0] - assert action_result.action_type == AnalyzeBatchActionsType.EXTRACT_KEY_PHRASES + assert action_result.action_type == AnalyzeActionsType.EXTRACT_KEY_PHRASES assert len(action_result.document_results) == len(docs) for doc in action_result.document_results: @@ -77,7 +77,7 @@ def test_all_successful_passing_dict_sentiment_task(self, client): {"id": "2", "language": "en", "text": "I did not like the hotel we stayed at. 
It was too expensive."}, {"id": "3", "language": "en", "text": "The restaurant had really good food. I recommend you try it."}] - response = client.begin_analyze_batch_actions( + response = client.begin_analyze_actions( docs, actions=[AnalyzeSentimentAction()], show_stats=True, @@ -89,7 +89,7 @@ def test_all_successful_passing_dict_sentiment_task(self, client): assert len(action_results) == 1 action_result = action_results[0] - assert action_result.action_type == AnalyzeBatchActionsType.ANALYZE_SENTIMENT + assert action_result.action_type == AnalyzeActionsType.ANALYZE_SENTIMENT assert len(action_result.document_results) == len(docs) self.assertEqual(action_result.document_results[0].sentiment, "neutral") @@ -120,7 +120,7 @@ def test_sentiment_analysis_task_with_opinion_mining(self, client): "The food and service is not good" ] - response = client.begin_analyze_batch_actions( + response = client.begin_analyze_actions( documents, actions=[AnalyzeSentimentAction(show_opinion_mining=True)], show_stats=True, @@ -132,7 +132,7 @@ def test_sentiment_analysis_task_with_opinion_mining(self, client): assert len(action_results) == 1 action_result = action_results[0] - assert action_result.action_type == AnalyzeBatchActionsType.ANALYZE_SENTIMENT + assert action_result.action_type == AnalyzeActionsType.ANALYZE_SENTIMENT assert len(action_result.document_results) == len(documents) for idx, doc in enumerate(action_result.document_results): @@ -198,7 +198,7 @@ def test_all_successful_passing_text_document_input_entities_task(self, client): TextDocumentInput(id="3", text="Microsoft wurde am 4. April 1975 von Bill Gates und Paul Allen gegründet.", language="de"), ] - response = client.begin_analyze_batch_actions( + response = client.begin_analyze_actions( docs, actions=[RecognizeEntitiesAction()], show_stats=True, @@ -210,7 +210,7 @@ def test_all_successful_passing_text_document_input_entities_task(self, client): assert len(action_results) == 1 action_result = action_results[0] - assert action_result.action_type == AnalyzeBatchActionsType.RECOGNIZE_ENTITIES + assert action_result.action_type == AnalyzeActionsType.RECOGNIZE_ENTITIES assert len(action_result.document_results) == len(docs) for doc in action_result.document_results: @@ -231,7 +231,7 @@ def test_all_successful_passing_string_pii_entities_task(self, client): "Is 998.214.865-68 your Brazilian CPF number?" 
] - response = client.begin_analyze_batch_actions( + response = client.begin_analyze_actions( docs, actions=[RecognizePiiEntitiesAction()], show_stats=True, @@ -243,7 +243,7 @@ def test_all_successful_passing_string_pii_entities_task(self, client): assert len(action_results) == 1 action_result = action_results[0] - assert action_result.action_type == AnalyzeBatchActionsType.RECOGNIZE_PII_ENTITIES + assert action_result.action_type == AnalyzeActionsType.RECOGNIZE_PII_ENTITIES assert len(action_result.document_results) == len(docs) self.assertEqual(action_result.document_results[0].entities[0].text, "859-98-0987") @@ -268,7 +268,7 @@ def test_bad_request_on_empty_document(self, client): docs = [u""] with self.assertRaises(HttpResponseError): - response = client.begin_analyze_batch_actions( + response = client.begin_analyze_actions( docs, actions=[ExtractKeyPhrasesAction()], polling_interval=self._interval(), @@ -280,7 +280,7 @@ def test_bad_request_on_empty_document(self, client): }) def test_empty_credential_class(self, client): with self.assertRaises(ClientAuthenticationError): - response = client.begin_analyze_batch_actions( + response = client.begin_analyze_actions( ["This is written in English."], actions=[ RecognizeEntitiesAction(), @@ -298,7 +298,7 @@ def test_empty_credential_class(self, client): }) def test_bad_credentials(self, client): with self.assertRaises(ClientAuthenticationError): - response = client.begin_analyze_batch_actions( + response = client.begin_analyze_actions( ["This is written in English."], actions=[ RecognizeEntitiesAction(), @@ -320,7 +320,7 @@ def test_out_of_order_ids_multiple_tasks(self, client): {"id": "19", "text": ":P"}, {"id": "1", "text": ":D"}] - response = client.begin_analyze_batch_actions( + response = client.begin_analyze_actions( docs, actions=[ RecognizeEntitiesAction(model_version="bad"), @@ -336,8 +336,8 @@ def test_out_of_order_ids_multiple_tasks(self, client): assert len(action_results) == 3 assert action_results[0].is_error - assert action_results[1].action_type == AnalyzeBatchActionsType.EXTRACT_KEY_PHRASES - assert action_results[2].action_type == AnalyzeBatchActionsType.RECOGNIZE_PII_ENTITIES + assert action_results[1].action_type == AnalyzeActionsType.EXTRACT_KEY_PHRASES + assert action_results[2].action_type == AnalyzeActionsType.RECOGNIZE_PII_ENTITIES action_results = [r for r in action_results if not r.is_error] assert all([action_result for action_result in action_results if len(action_result.document_results) == len(docs)]) @@ -361,7 +361,7 @@ def callback(resp): {"id": "19", "text": ":P"}, {"id": "1", "text": ":D"}] - poller = client.begin_analyze_batch_actions( + poller = client.begin_analyze_actions( docs, actions=[ RecognizeEntitiesAction(model_version="latest"), @@ -379,11 +379,11 @@ def callback(resp): action_results = list(response) assert len(action_results) == 5 - assert action_results[0].action_type == AnalyzeBatchActionsType.RECOGNIZE_ENTITIES - assert action_results[1].action_type == AnalyzeBatchActionsType.EXTRACT_KEY_PHRASES - assert action_results[2].action_type == AnalyzeBatchActionsType.RECOGNIZE_PII_ENTITIES - assert action_results[3].action_type == AnalyzeBatchActionsType.RECOGNIZE_LINKED_ENTITIES - assert action_results[4].action_type == AnalyzeBatchActionsType.ANALYZE_SENTIMENT + assert action_results[0].action_type == AnalyzeActionsType.RECOGNIZE_ENTITIES + assert action_results[1].action_type == AnalyzeActionsType.EXTRACT_KEY_PHRASES + assert action_results[2].action_type == 
AnalyzeActionsType.RECOGNIZE_PII_ENTITIES + assert action_results[3].action_type == AnalyzeActionsType.RECOGNIZE_LINKED_ENTITIES + assert action_results[4].action_type == AnalyzeActionsType.ANALYZE_SENTIMENT assert all([action_result for action_result in action_results if len(action_result.document_results) == len(docs)]) @@ -397,7 +397,7 @@ def callback(resp): def test_poller_metadata(self, client): docs = [{"id": "56", "text": ":)"}] - poller = client.begin_analyze_batch_actions( + poller = client.begin_analyze_actions( docs, actions=[ RecognizeEntitiesAction(model_version="latest") @@ -435,7 +435,7 @@ def test_poller_metadata(self, client): # u"The restaurant was not as good as I hoped." # ] - # response = list(client.begin_analyze_batch_actions( + # response = list(client.begin_analyze_actions( # docs, # actions=[ # RecognizeEntitiesAction(), @@ -472,7 +472,7 @@ def test_poller_metadata(self, client): # TextDocumentInput(id="3", text="猫は幸せ"), # ] - # response = list(client.begin_analyze_batch_actions( + # response = list(client.begin_analyze_actions( # docs, # actions=[ # RecognizeEntitiesAction(), @@ -489,7 +489,7 @@ def test_poller_metadata(self, client): @GlobalTextAnalyticsAccountPreparer() @TextAnalyticsClientPreparer() def test_invalid_language_hint_method(self, client): - response = list(client.begin_analyze_batch_actions( + response = list(client.begin_analyze_actions( ["This should fail because we're passing in an invalid language hint"], language="notalanguage", actions=[ @@ -513,7 +513,7 @@ def test_invalid_language_hint_method(self, client): def test_bad_model_version_error_multiple_tasks(self, client): # TODO: verify behavior of service docs = [{"id": "1", "language": "english", "text": "I did not like the hotel we stayed at."}] - response = client.begin_analyze_batch_actions( + response = client.begin_analyze_actions( docs, actions=[ RecognizeEntitiesAction(model_version="latest"), @@ -527,7 +527,7 @@ def test_bad_model_version_error_multiple_tasks(self, client): # TODO: verify b action_results = list(response) assert action_results[0].is_error == False - assert action_results[0].action_type == AnalyzeBatchActionsType.RECOGNIZE_ENTITIES + assert action_results[0].action_type == AnalyzeActionsType.RECOGNIZE_ENTITIES assert action_results[1].is_error == True assert action_results[1].error.code == "InvalidRequest" assert action_results[2].is_error == True @@ -543,7 +543,7 @@ def test_bad_model_version_error_all_tasks(self, client): # TODO: verify behavi docs = [{"id": "1", "language": "english", "text": "I did not like the hotel we stayed at."}] with self.assertRaises(HttpResponseError): - response = client.begin_analyze_batch_actions( + response = client.begin_analyze_actions( docs, actions=[ RecognizeEntitiesAction(model_version="bad"), @@ -560,7 +560,7 @@ def test_bad_model_version_error_all_tasks(self, client): # TODO: verify behavi def test_missing_input_records_error(self, client): docs = [] with pytest.raises(ValueError) as excinfo: - client.begin_analyze_batch_actions( + client.begin_analyze_actions( docs, actions=[ RecognizeEntitiesAction(), @@ -577,7 +577,7 @@ def test_missing_input_records_error(self, client): @TextAnalyticsClientPreparer() def test_passing_none_docs(self, client): with pytest.raises(ValueError) as excinfo: - client.begin_analyze_batch_actions(None, None) + client.begin_analyze_actions(None, None) assert "Input documents can not be empty or None" in str(excinfo.value) @GlobalTextAnalyticsAccountPreparer() @@ -585,7 +585,7 @@ def 
test_passing_none_docs(self, client):
     def test_pass_cls(self, client):
         def callback(pipeline_response, deserialized, _):
             return "cls result"
-        res = client.begin_analyze_batch_actions(
+        res = client.begin_analyze_actions(
             documents=["Test passing cls to endpoint"],
             actions=[
                 RecognizeEntitiesAction(),
@@ -601,7 +601,7 @@ def test_multiple_pages_of_results_returned_successfully(self, client):
         single_doc = "hello world"
         docs = [{"id": str(idx), "text": val} for (idx, val) in enumerate(list(itertools.repeat(single_doc, 25)))]  # max number of documents is 25

-        result = client.begin_analyze_batch_actions(
+        result = client.begin_analyze_actions(
             docs,
             actions=[
                 RecognizeEntitiesAction(),
@@ -625,19 +625,19 @@ def test_multiple_pages_of_results_returned_successfully(self, client):
         # do 2 pages of 5 task results
         for idx, action_result in enumerate(action_results):
             if idx % 5 == 0:
-                assert action_result.action_type == AnalyzeBatchActionsType.RECOGNIZE_ENTITIES
+                assert action_result.action_type == AnalyzeActionsType.RECOGNIZE_ENTITIES
                 recognize_entities_results.append(action_result)
             elif idx % 5 == 1:
-                assert action_result.action_type == AnalyzeBatchActionsType.EXTRACT_KEY_PHRASES
+                assert action_result.action_type == AnalyzeActionsType.EXTRACT_KEY_PHRASES
                 extract_key_phrases_results.append(action_result)
             elif idx % 5 == 2:
-                assert action_result.action_type == AnalyzeBatchActionsType.RECOGNIZE_PII_ENTITIES
+                assert action_result.action_type == AnalyzeActionsType.RECOGNIZE_PII_ENTITIES
                 recognize_pii_entities_results.append(action_result)
             elif idx % 5 == 3:
-                assert action_result.action_type == AnalyzeBatchActionsType.RECOGNIZE_LINKED_ENTITIES
+                assert action_result.action_type == AnalyzeActionsType.RECOGNIZE_LINKED_ENTITIES
                 recognize_linked_entities_results.append(action_result)
             else:
-                assert action_result.action_type == AnalyzeBatchActionsType.ANALYZE_SENTIMENT
+                assert action_result.action_type == AnalyzeActionsType.ANALYZE_SENTIMENT
                 analyze_sentiment_results.append(action_result)
             if idx < 5:  # first page of task results
                 assert len(action_result.document_results) == 20
@@ -656,7 +656,7 @@ def test_too_many_documents(self, client):
         docs = list(itertools.repeat("input document", 26))  # Maximum number of documents per request is 25

         with pytest.raises(HttpResponseError) as excinfo:
-            client.begin_analyze_batch_actions(
+            client.begin_analyze_actions(
                 docs,
                 actions=[
                     RecognizeEntitiesAction(),
diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/test_analyze_async.py b/sdk/textanalytics/azure-ai-textanalytics/tests/test_analyze_async.py
index 9db031370843..d46298c172b3 100644
--- a/sdk/textanalytics/azure-ai-textanalytics/tests/test_analyze_async.py
+++ b/sdk/textanalytics/azure-ai-textanalytics/tests/test_analyze_async.py
@@ -27,7 +27,7 @@
     RecognizePiiEntitiesAction,
     ExtractKeyPhrasesAction,
     AnalyzeSentimentAction,
-    AnalyzeBatchActionsType
+    AnalyzeActionsType
 )

 # pre-apply the client_cls positional argument so it needn't be explicitly passed below
@@ -55,7 +55,7 @@ def _interval(self):
     @TextAnalyticsClientPreparer()
     async def test_no_single_input(self, client):
         with self.assertRaises(TypeError):
-            response = await client.begin_analyze_batch_actions("hello world", actions=[], polling_interval=self._interval())
+            response = await client.begin_analyze_actions("hello world", actions=[], polling_interval=self._interval())

     @GlobalTextAnalyticsAccountPreparer()
     @TextAnalyticsClientPreparer()
@@ -64,7 +64,7 @@ async def test_all_successful_passing_dict_key_phrase_task(self, client):
                 {"id": "2", "language": "es", "text": "Microsoft fue fundado por Bill Gates y Paul Allen"}]

         async with client:
-            response = await (await client.begin_analyze_batch_actions(
+            response = await (await client.begin_analyze_actions(
                 docs,
                 actions=[ExtractKeyPhrasesAction()],
                 show_stats=True,
@@ -77,7 +77,7 @@ async def test_all_successful_passing_dict_key_phrase_task(self, client):
         assert len(action_results) == 1
         action_result = action_results[0]
-        assert action_result.action_type == AnalyzeBatchActionsType.EXTRACT_KEY_PHRASES
+        assert action_result.action_type == AnalyzeActionsType.EXTRACT_KEY_PHRASES
         assert len(action_result.document_results) == len(docs)

         for doc in action_result.document_results:
@@ -94,7 +94,7 @@ async def test_all_successful_passing_dict_sentiment_task(self, client):
                 {"id": "3", "language": "en", "text": "The restaurant had really good food. I recommend you try it."}]

         async with client:
-            response = await (await client.begin_analyze_batch_actions(
+            response = await (await client.begin_analyze_actions(
                 docs,
                 actions=[AnalyzeSentimentAction()],
                 show_stats=True,
@@ -108,7 +108,7 @@ async def test_all_successful_passing_dict_sentiment_task(self, client):
         assert len(action_results) == 1
         action_result = action_results[0]
-        assert action_result.action_type == AnalyzeBatchActionsType.ANALYZE_SENTIMENT
+        assert action_result.action_type == AnalyzeActionsType.ANALYZE_SENTIMENT
         assert len(action_result.document_results) == len(docs)

         self.assertEqual(action_result.document_results[0].sentiment, "neutral")
@@ -139,7 +139,7 @@ async def test_sentiment_analysis_task_with_opinion_mining(self, client):
         ]

         async with client:
-            response = await (await client.begin_analyze_batch_actions(
+            response = await (await client.begin_analyze_actions(
                 documents,
                 actions=[AnalyzeSentimentAction(show_opinion_mining=True)],
                 show_stats=True,
@@ -153,7 +153,7 @@ async def test_sentiment_analysis_task_with_opinion_mining(self, client):
         assert len(action_results) == 1
         action_result = action_results[0]
-        assert action_result.action_type == AnalyzeBatchActionsType.ANALYZE_SENTIMENT
+        assert action_result.action_type == AnalyzeActionsType.ANALYZE_SENTIMENT
         assert len(action_result.document_results) == len(documents)

         for idx, doc in enumerate(action_result.document_results):
@@ -220,7 +220,7 @@ async def test_all_successful_passing_text_document_input_entities_task(self, cl
         ]

         async with client:
-            poller = await client.begin_analyze_batch_actions(
+            poller = await client.begin_analyze_actions(
                 docs,
                 actions=[RecognizeEntitiesAction()],
                 show_stats=True,
@@ -234,7 +234,7 @@ async def test_all_successful_passing_text_document_input_entities_task(self, cl
         assert len(action_results) == 1
         action_result = action_results[0]
-        assert action_result.action_type == AnalyzeBatchActionsType.RECOGNIZE_ENTITIES
+        assert action_result.action_type == AnalyzeActionsType.RECOGNIZE_ENTITIES
         assert len(action_result.document_results) == len(docs)

         for doc in action_result.document_results:
@@ -256,7 +256,7 @@ async def test_all_successful_passing_string_pii_entities_task(self, client):
         ]

         async with client:
-            response = await (await client.begin_analyze_batch_actions(
+            response = await (await client.begin_analyze_actions(
                 docs,
                 actions=[RecognizePiiEntitiesAction()],
                 show_stats=True,
@@ -269,7 +269,7 @@ async def test_all_successful_passing_string_pii_entities_task(self, client):
         assert len(action_results) == 1
         action_result = action_results[0]
-        assert action_result.action_type == AnalyzeBatchActionsType.RECOGNIZE_PII_ENTITIES
+        assert action_result.action_type == AnalyzeActionsType.RECOGNIZE_PII_ENTITIES
         assert len(action_result.document_results) == len(docs)

         self.assertEqual(action_result.document_results[0].entities[0].text, "859-98-0987")
@@ -295,7 +295,7 @@ async def test_bad_request_on_empty_document(self, client):

         with self.assertRaises(HttpResponseError):
             async with client:
-                response = await (await client.begin_analyze_batch_actions(
+                response = await (await client.begin_analyze_actions(
                     docs,
                     actions=[ExtractKeyPhrasesAction()],
                     polling_interval=self._interval()
@@ -308,7 +308,7 @@ async def test_bad_request_on_empty_document(self, client):
     async def test_empty_credential_class(self, client):
         with self.assertRaises(ClientAuthenticationError):
             async with client:
-                response = await (await client.begin_analyze_batch_actions(
+                response = await (await client.begin_analyze_actions(
                     ["This is written in English."],
                     actions=[
                         RecognizeEntitiesAction(),
@@ -327,7 +327,7 @@ async def test_empty_credential_class(self, client):
     async def test_bad_credentials(self, client):
         with self.assertRaises(ClientAuthenticationError):
             async with client:
-                response = await (await client.begin_analyze_batch_actions(
+                response = await (await client.begin_analyze_actions(
                     ["This is written in English."],
                     actions=[
                         RecognizeEntitiesAction(),
@@ -350,7 +350,7 @@ async def test_out_of_order_ids_multiple_tasks(self, client):
                 {"id": "1", "text": ":D"}]

         async with client:
-            response = await (await client.begin_analyze_batch_actions(
+            response = await (await client.begin_analyze_actions(
                 docs,
                 actions=[
                     RecognizeEntitiesAction(model_version="bad"),
@@ -368,8 +368,8 @@ async def test_out_of_order_ids_multiple_tasks(self, client):
         assert len(action_results) == 3

         assert action_results[0].is_error
-        assert action_results[1].action_type == AnalyzeBatchActionsType.EXTRACT_KEY_PHRASES
-        assert action_results[2].action_type == AnalyzeBatchActionsType.RECOGNIZE_PII_ENTITIES
+        assert action_results[1].action_type == AnalyzeActionsType.EXTRACT_KEY_PHRASES
+        assert action_results[2].action_type == AnalyzeActionsType.RECOGNIZE_PII_ENTITIES

         action_results = [r for r in action_results if not r.is_error]
@@ -390,7 +390,7 @@ async def test_show_stats_and_model_version_multiple_tasks(self, client):
                 {"id": "1", "text": ":D"}]

         async with client:
-            response = await (await client.begin_analyze_batch_actions(
+            response = await (await client.begin_analyze_actions(
                 docs,
                 actions=[
                     RecognizeEntitiesAction(model_version="latest"),
@@ -407,11 +407,11 @@ async def test_show_stats_and_model_version_multiple_tasks(self, client):
             async for p in response:
                 action_results.append(p)
         assert len(action_results) == 5
-        assert action_results[0].action_type == AnalyzeBatchActionsType.RECOGNIZE_ENTITIES
-        assert action_results[1].action_type == AnalyzeBatchActionsType.EXTRACT_KEY_PHRASES
-        assert action_results[2].action_type == AnalyzeBatchActionsType.RECOGNIZE_PII_ENTITIES
-        assert action_results[3].action_type == AnalyzeBatchActionsType.RECOGNIZE_LINKED_ENTITIES
-        assert action_results[4].action_type == AnalyzeBatchActionsType.ANALYZE_SENTIMENT
+        assert action_results[0].action_type == AnalyzeActionsType.RECOGNIZE_ENTITIES
+        assert action_results[1].action_type == AnalyzeActionsType.EXTRACT_KEY_PHRASES
+        assert action_results[2].action_type == AnalyzeActionsType.RECOGNIZE_PII_ENTITIES
+        assert action_results[3].action_type == AnalyzeActionsType.RECOGNIZE_LINKED_ENTITIES
+        assert action_results[4].action_type == AnalyzeActionsType.ANALYZE_SENTIMENT

         assert all([action_result for action_result in action_results if len(action_result.document_results) == len(docs)])
@@ -426,7 +426,7 @@ async def test_poller_metadata(self, client):
         docs = [{"id": "56", "text": ":)"}]

         async with client:
-            poller = await client.begin_analyze_batch_actions(
+            poller = await client.begin_analyze_actions(
                 docs,
                 actions=[
                     RecognizeEntitiesAction(model_version="latest")
@@ -465,7 +465,7 @@ async def test_poller_metadata(self, client):
     #         ]

     #         async with client:
-    #             response = await (await client.begin_analyze_batch_actions(
+    #             response = await (await client.begin_analyze_actions(
     #                 docs,
     #                 actions=[
     #                     RecognizeEntitiesAction(),
@@ -503,7 +503,7 @@ async def test_poller_metadata(self, client):
     #         ]

     #         async with client:
-    #             response = await (await client.begin_analyze_batch_actions(
+    #             response = await (await client.begin_analyze_actions(
     #                 docs,
     #                 actions=[
     #                     RecognizeEntitiesAction(),
@@ -522,7 +522,7 @@ async def test_poller_metadata(self, client):
     # @TextAnalyticsClientPreparer()
     # async def test_invalid_language_hint_method(self, client):
     #     async with client:
-    #         response = await (await client.begin_analyze_batch_actions(
+    #         response = await (await client.begin_analyze_actions(
     #             ["This should fail because we're passing in an invalid language hint"],
     #             language="notalanguage",
     #             actions=[
@@ -546,7 +546,7 @@ async def test_bad_model_version_error_multiple_tasks(self, client):  # TODO: ve
         async with client:
             response = await (await
-            client.begin_analyze_batch_actions(
+            client.begin_analyze_actions(
                 docs,
                 actions=[
                     RecognizeEntitiesAction(model_version="latest"),
@@ -563,7 +563,7 @@ async def test_bad_model_version_error_multiple_tasks(self, client):  # TODO: ve
                 action_results.append(p)

         assert action_results[0].is_error == False
-        assert action_results[0].action_type == AnalyzeBatchActionsType.RECOGNIZE_ENTITIES
+        assert action_results[0].action_type == AnalyzeActionsType.RECOGNIZE_ENTITIES
         assert action_results[1].is_error == True
         assert action_results[1].error.code == "InvalidRequest"
         assert action_results[2].is_error == True
@@ -580,7 +580,7 @@ async def test_bad_model_version_error_all_tasks(self, client):  # TODO: verify

         with self.assertRaises(HttpResponseError):
             async with client:
-                result = await (await client.begin_analyze_batch_actions(
+                result = await (await client.begin_analyze_actions(
                     docs,
                     actions=[
                         RecognizeEntitiesAction(model_version="bad"),
@@ -598,7 +598,7 @@ async def test_missing_input_records_error(self, client):
         docs = []
         with pytest.raises(ValueError) as excinfo:
             async with client:
-                await (await client.begin_analyze_batch_actions(
+                await (await client.begin_analyze_actions(
                     docs,
                     actions=[
                         RecognizeEntitiesAction(),
@@ -616,7 +616,7 @@ async def test_missing_input_records_error(self, client):
     async def test_passing_none_docs(self, client):
         with pytest.raises(ValueError) as excinfo:
             async with client:
-                await client.begin_analyze_batch_actions(None, None, polling_interval=self._interval())
+                await client.begin_analyze_actions(None, None, polling_interval=self._interval())
         assert "Input documents can not be empty or None" in str(excinfo.value)

     @GlobalTextAnalyticsAccountPreparer()
@@ -626,7 +626,7 @@ def callback(pipeline_response, deserialized, _):
             return "cls result"

         async with client:
-            res = await (await client.begin_analyze_batch_actions(
+            res = await (await client.begin_analyze_actions(
                 documents=["Test passing cls to endpoint"],
                 actions=[
                     RecognizeEntitiesAction(),
@@ -644,7 +644,7 @@ async def test_multiple_pages_of_results_returned_successfully(self, client):
                 enumerate(list(itertools.repeat(single_doc, 25)))]  # max number of documents is 25

         async with client:
-            result = await (await client.begin_analyze_batch_actions(
+            result = await (await client.begin_analyze_actions(
                 docs,
                 actions=[
                     RecognizeEntitiesAction(),
@@ -669,19 +669,19 @@ async def test_multiple_pages_of_results_returned_successfully(self, client):

             for idx, action_result in enumerate(pages):
                 if idx % 5 == 0:
-                    assert action_result.action_type == AnalyzeBatchActionsType.RECOGNIZE_ENTITIES
+                    assert action_result.action_type == AnalyzeActionsType.RECOGNIZE_ENTITIES
                     recognize_entities_results.append(action_result)
                 elif idx % 5 == 1:
-                    assert action_result.action_type == AnalyzeBatchActionsType.EXTRACT_KEY_PHRASES
+                    assert action_result.action_type == AnalyzeActionsType.EXTRACT_KEY_PHRASES
                     extract_key_phrases_results.append(action_result)
                 elif idx % 5 == 2:
-                    assert action_result.action_type == AnalyzeBatchActionsType.RECOGNIZE_PII_ENTITIES
+                    assert action_result.action_type == AnalyzeActionsType.RECOGNIZE_PII_ENTITIES
                     recognize_pii_entities_results.append(action_result)
                 elif idx % 5 == 3:
-                    assert action_result.action_type == AnalyzeBatchActionsType.RECOGNIZE_LINKED_ENTITIES
+                    assert action_result.action_type == AnalyzeActionsType.RECOGNIZE_LINKED_ENTITIES
                     recognize_linked_entities_results.append(action_result)
                 else:
-                    assert action_result.action_type == AnalyzeBatchActionsType.ANALYZE_SENTIMENT
+                    assert action_result.action_type == AnalyzeActionsType.ANALYZE_SENTIMENT
                     analyze_sentiment_results.append(action_result)
                 if idx < 5:  # first page of task results
                     assert len(action_result.document_results) == 20
@@ -704,7 +704,7 @@ async def test_multiple_pages_of_results_with_errors_returned_successfully(self,
                 enumerate(list(itertools.repeat(single_doc, 25)))]  # max number of documents is 25

         async with client:
-            result = await (await client.begin_analyze_batch_actions(
+            result = await (await client.begin_analyze_actions(
                 docs,
                 actions=[
                     RecognizeEntitiesAction(model_version="bad"),
@@ -733,7 +733,7 @@ async def test_too_many_documents(self, client):

         with pytest.raises(HttpResponseError) as excinfo:
             async with client:
-                await (await client.begin_analyze_batch_actions(
+                await (await client.begin_analyze_actions(
                     docs,
                     actions=[
                         RecognizeEntitiesAction(),
diff --git a/sdk/textanalytics/azure-ai-textanalytics/tests/test_repr.py b/sdk/textanalytics/azure-ai-textanalytics/tests/test_repr.py
index e71a93e0c789..3a0843fd6f22 100644
--- a/sdk/textanalytics/azure-ai-textanalytics/tests/test_repr.py
+++ b/sdk/textanalytics/azure-ai-textanalytics/tests/test_repr.py
@@ -455,51 +455,51 @@ def test_inner_error_takes_precedence(self):
         assert error.code == "UnsupportedLanguageCode"
         assert error.message == "Supplied language not supported. Pass in one of: de,en,es,fr,it,ja,ko,nl,pt-PT,zh-Hans,zh-Hant"

-    def test_analyze_batch_actions_result_recognize_entities(self, recognize_entities_result, request_statistics):
-        model = _models.AnalyzeBatchActionsResult(
+    def test_analyze_actions_result_recognize_entities(self, recognize_entities_result, request_statistics):
+        model = _models.AnalyzeActionsResult(
             document_results=[recognize_entities_result[0]],
             statistics=request_statistics[0],
             is_error=False,
-            action_type=_models.AnalyzeBatchActionsType.RECOGNIZE_ENTITIES,
+            action_type=_models.AnalyzeActionsType.RECOGNIZE_ENTITIES,
             completed_on=datetime.datetime(1, 1, 1)
         )
         model_repr = (
-            "AnalyzeBatchActionsResult(document_results=[{}], is_error={}, action_type={}, completed_on={}, statistics={})".format(
+            "AnalyzeActionsResult(document_results=[{}], is_error={}, action_type={}, completed_on={}, statistics={})".format(
                 recognize_entities_result[1], False, "recognize_entities", datetime.datetime(1, 1, 1), request_statistics[1]
             )
         )
         assert repr(model) == model_repr

-    def test_analyze_batch_actions_result_recognize_pii_entities(self, recognize_pii_entities_result, request_statistics):
-        model = _models.AnalyzeBatchActionsResult(
+    def test_analyze_actions_result_recognize_pii_entities(self, recognize_pii_entities_result, request_statistics):
+        model = _models.AnalyzeActionsResult(
             document_results=[recognize_pii_entities_result[0]],
             statistics=request_statistics[0],
             is_error=False,
-            action_type=_models.AnalyzeBatchActionsType.RECOGNIZE_PII_ENTITIES,
+            action_type=_models.AnalyzeActionsType.RECOGNIZE_PII_ENTITIES,
             completed_on=datetime.datetime(1, 1, 1)
         )
         model_repr = (
-            "AnalyzeBatchActionsResult(document_results=[{}], is_error={}, action_type={}, completed_on={}, statistics={})".format(
+            "AnalyzeActionsResult(document_results=[{}], is_error={}, action_type={}, completed_on={}, statistics={})".format(
                 recognize_pii_entities_result[1], False, "recognize_pii_entities", datetime.datetime(1, 1, 1), request_statistics[1]
             )
         )
         assert repr(model) == model_repr

-    def test_analyze_batch_actions_result_extract_key_phrases(self, extract_key_phrases_result, request_statistics):
-        model = _models.AnalyzeBatchActionsResult(
+    def test_analyze_actions_result_extract_key_phrases(self, extract_key_phrases_result, request_statistics):
+        model = _models.AnalyzeActionsResult(
             document_results=[extract_key_phrases_result[0]],
             statistics=request_statistics[0],
             is_error=False,
-            action_type=_models.AnalyzeBatchActionsType.EXTRACT_KEY_PHRASES,
+            action_type=_models.AnalyzeActionsType.EXTRACT_KEY_PHRASES,
             completed_on=datetime.datetime(1, 1, 1)
         )
         model_repr = (
-            "AnalyzeBatchActionsResult(document_results=[{}], is_error={}, action_type={}, completed_on={}, statistics={})".format(
+            "AnalyzeActionsResult(document_results=[{}], is_error={}, action_type={}, completed_on={}, statistics={})".format(
                 extract_key_phrases_result[1], False, "extract_key_phrases", datetime.datetime(1, 1, 1), request_statistics[1]
             )
         )
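
For reference, a minimal sketch of the renamed call as the tests above exercise it. The snippet is illustrative and not part of the diff: the `<endpoint>`/`<api-key>` placeholders are hypothetical stand-ins for real service values, while `TextAnalyticsClient`, the action classes, and `AnalyzeActionsType` are the names the tests use.

# Illustrative sketch (not part of this diff), assuming placeholder credentials.
from azure.core.credentials import AzureKeyCredential
from azure.ai.textanalytics import (
    TextAnalyticsClient,
    RecognizeEntitiesAction,
    ExtractKeyPhrasesAction,
    AnalyzeActionsType,
)

client = TextAnalyticsClient("<endpoint>", AzureKeyCredential("<api-key>"))

# begin_analyze_actions is the new name for begin_analyze_batch_actions.
poller = client.begin_analyze_actions(
    documents=["This is written in English."],
    actions=[RecognizeEntitiesAction(), ExtractKeyPhrasesAction()],
)

# Action results come back in the same order the actions were submitted,
# tagged with the renamed AnalyzeActionsType enum, as the tests assert.
for action_result in poller.result():
    assert action_result.action_type in (
        AnalyzeActionsType.RECOGNIZE_ENTITIES,
        AnalyzeActionsType.EXTRACT_KEY_PHRASES,
    )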