[textanalytics] add ARM template + run samples in CI #19270

Merged: 5 commits, Jun 16, 2021
Changes from 1 commit
update tests
kristapratico committed Jun 15, 2021
commit 0531384126298415a0804a482907897029e36d69
@@ -6,6 +6,7 @@
# license information.
# --------------------------------------------------------------------------
import asyncio
import os
import functools
from azure_devtools.scenario_tests.utilities import trim_kwargs_from_test_function
from azure.core.credentials import AccessToken
@@ -29,9 +30,9 @@ def generate_oauth_token(self):
if self.is_live:
from azure.identity.aio import ClientSecretCredential
return ClientSecretCredential(
self.get_settings_value("TENANT_ID"),
self.get_settings_value("CLIENT_ID"),
self.get_settings_value("CLIENT_SECRET"),
os.getenv("TEXTANALYTICS_TENANT_ID"),
os.getenv("TEXTANALYTICS_CLIENT_ID"),
os.getenv("TEXTANALYTICS_CLIENT_SECRET"),
)
return self.generate_fake_token()

9 changes: 0 additions & 9 deletions sdk/textanalytics/azure-ai-textanalytics/tests/conftest.py
@@ -8,16 +8,7 @@

import sys

# fixture needs to be visible from conftest
from testcase import text_analytics_account

# Ignore async tests for Python < 3.5
collect_ignore_glob = []
if sys.version_info < (3, 5):
collect_ignore_glob.append("*_async.py")

def pytest_configure(config):
# register an additional marker
config.addinivalue_line(
"usefixtures", "text_analytics_account"
)
@@ -15,68 +15,90 @@ interactions:
Content-Type:
- application/json
User-Agent:
- azsdk-python-ai-textanalytics/5.1.0b7 Python/3.9.1 (macOS-10.16-x86_64-i386-64bit)
- azsdk-python-ai-textanalytics/5.1.0b8 Python/3.9.0 (Windows-10-10.0.19041-SP0)
Review comment from the PR author (kristapratico):
this test was missing an await so I rerecorded it.
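
A minimal, self-contained sketch of the bug class described in this comment, not the actual test from this PR; the function names below are illustrative stand-ins for the async SDK calls whose requests are captured in this recording.

import asyncio

async def cancel_job():
    # Stand-in for the async call that issues the cancel (DELETE) request.
    await asyncio.sleep(0)
    return "cancelled"

async def main():
    cancel_job()                 # bug: the coroutine object is created but never runs,
                                 # so no request is sent (or recorded)
    status = await cancel_job()  # fix: awaiting it actually executes the call
    print(status)                # prints "cancelled"

asyncio.run(main())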

method: POST
uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.5/entities/health/jobs?stringIndexType=UnicodeCodePoint
response:
body:
string: ''
headers:
apim-request-id: 244f5672-aebe-4342-b987-b785b606231d
date: Tue, 18 May 2021 17:47:30 GMT
operation-location: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.5/entities/health/jobs/53de7ea9-2780-4872-b4ce-6662ddbb726b
apim-request-id: c9489a83-1996-4e2b-974b-3d4650fa725d
date: Tue, 15 Jun 2021 20:39:45 GMT
operation-location: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.5/entities/health/jobs/6a725b61-4d45-4f1a-95c7-52c734eb73f5
strict-transport-security: max-age=31536000; includeSubDomains; preload
transfer-encoding: chunked
x-content-type-options: nosniff
x-envoy-upstream-service-time: '291'
x-envoy-upstream-service-time: '297'
status:
code: 202
message: Accepted
url: https://westus2.api.cognitive.microsoft.com//text/analytics/v3.1-preview.5/entities/health/jobs?stringIndexType=UnicodeCodePoint
url: https://textanalyticskrpratictextanalytics.cognitiveservices.azure.com//text/analytics/v3.1-preview.5/entities/health/jobs?stringIndexType=UnicodeCodePoint
- request:
body: null
headers:
User-Agent:
- azsdk-python-ai-textanalytics/5.1.0b7 Python/3.9.1 (macOS-10.16-x86_64-i386-64bit)
- azsdk-python-ai-textanalytics/5.1.0b8 Python/3.9.0 (Windows-10-10.0.19041-SP0)
method: GET
uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.5/entities/health/jobs/53de7ea9-2780-4872-b4ce-6662ddbb726b
uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.5/entities/health/jobs/6a725b61-4d45-4f1a-95c7-52c734eb73f5
response:
body:
string: '{"jobId":"53de7ea9-2780-4872-b4ce-6662ddbb726b","lastUpdateDateTime":"2021-05-18T17:47:31Z","createdDateTime":"2021-05-18T17:47:30Z","expirationDateTime":"2021-05-19T17:47:30Z","status":"notStarted","errors":[]}'
string: '{"jobId":"6a725b61-4d45-4f1a-95c7-52c734eb73f5","lastUpdateDateTime":"2021-06-15T20:39:45Z","createdDateTime":"2021-06-15T20:39:45Z","expirationDateTime":"2021-06-16T20:39:45Z","status":"notStarted","errors":[]}'
headers:
apim-request-id: 09bb3595-4bb9-489d-9b23-d828ef9f7805
apim-request-id: 49e4efce-0513-476d-ae96-50cce51a4c58
content-type: application/json; charset=utf-8
date: Tue, 18 May 2021 17:47:31 GMT
date: Tue, 15 Jun 2021 20:39:45 GMT
strict-transport-security: max-age=31536000; includeSubDomains; preload
transfer-encoding: chunked
x-content-type-options: nosniff
x-envoy-upstream-service-time: '19'
x-envoy-upstream-service-time: '6'
status:
code: 200
message: OK
url: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.5/entities/health/jobs/53de7ea9-2780-4872-b4ce-6662ddbb726b
url: https://textanalyticskrpratictextanalytics.cognitiveservices.azure.com/text/analytics/v3.1-preview.5/entities/health/jobs/6a725b61-4d45-4f1a-95c7-52c734eb73f5
- request:
body: null
headers:
Accept:
- application/json, text/json
User-Agent:
- azsdk-python-ai-textanalytics/5.1.0b7 Python/3.9.1 (macOS-10.16-x86_64-i386-64bit)
- azsdk-python-ai-textanalytics/5.1.0b8 Python/3.9.0 (Windows-10-10.0.19041-SP0)
method: DELETE
uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.5/entities/health/jobs/53de7ea9-2780-4872-b4ce-6662ddbb726b
uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.5/entities/health/jobs/6a725b61-4d45-4f1a-95c7-52c734eb73f5
response:
body:
string: ''
headers:
apim-request-id: 201eb5d5-2da2-4193-9369-af6a3f9594c5
date: Tue, 18 May 2021 17:47:31 GMT
operation-location: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.5/entities/health/jobs/53de7ea9-2780-4872-b4ce-6662ddbb726b
apim-request-id: 0df934d8-210c-4f46-8c20-ff11919686d4
date: Tue, 15 Jun 2021 20:39:45 GMT
operation-location: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.5/entities/health/jobs/6a725b61-4d45-4f1a-95c7-52c734eb73f5
strict-transport-security: max-age=31536000; includeSubDomains; preload
transfer-encoding: chunked
x-content-type-options: nosniff
x-envoy-upstream-service-time: '19'
x-envoy-upstream-service-time: '20'
status:
code: 202
message: Accepted
url: https://westus2.api.cognitive.microsoft.com//text/analytics/v3.1-preview.5/entities/health/jobs/53de7ea9-2780-4872-b4ce-6662ddbb726b
url: https://textanalyticskrpratictextanalytics.cognitiveservices.azure.com//text/analytics/v3.1-preview.5/entities/health/jobs/6a725b61-4d45-4f1a-95c7-52c734eb73f5
- request:
body: null
headers:
User-Agent:
- azsdk-python-ai-textanalytics/5.1.0b8 Python/3.9.0 (Windows-10-10.0.19041-SP0)
method: GET
uri: https://westus2.api.cognitive.microsoft.com/text/analytics/v3.1-preview.5/entities/health/jobs/6a725b61-4d45-4f1a-95c7-52c734eb73f5
response:
body:
string: '{"jobId":"6a725b61-4d45-4f1a-95c7-52c734eb73f5","lastUpdateDateTime":"2021-06-15T20:39:45Z","createdDateTime":"2021-06-15T20:39:45Z","expirationDateTime":"2021-06-16T20:39:45Z","status":"cancelled","errors":[]}'
headers:
apim-request-id: 095eef4b-c8fe-44dc-946c-c01510a079e4
content-type: application/json; charset=utf-8
date: Tue, 15 Jun 2021 20:39:49 GMT
strict-transport-security: max-age=31536000; includeSubDomains; preload
transfer-encoding: chunked
x-content-type-options: nosniff
x-envoy-upstream-service-time: '7'
status:
code: 200
message: OK
url: https://textanalyticskrpratictextanalytics.cognitiveservices.azure.com/text/analytics/v3.1-preview.5/entities/health/jobs/6a725b61-4d45-4f1a-95c7-52c734eb73f5
version: 1
48 changes: 24 additions & 24 deletions sdk/textanalytics/azure-ai-textanalytics/tests/test_analyze.py
@@ -15,7 +15,7 @@

from azure.core.exceptions import HttpResponseError, ClientAuthenticationError
from azure.core.credentials import AzureKeyCredential
from testcase import TextAnalyticsTest, GlobalTextAnalyticsAccountPreparer
from testcase import TextAnalyticsTest, TextAnalyticsPreparer
from testcase import TextAnalyticsClientPreparer as _TextAnalyticsClientPreparer
from azure.ai.textanalytics import (
TextAnalyticsClient,
@@ -44,13 +44,13 @@ class TestAnalyze(TextAnalyticsTest):
def _interval(self):
return 5 if self.is_live else 0

@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
def test_no_single_input(self, client):
with self.assertRaises(TypeError):
response = client.begin_analyze_actions("hello world", actions=[], polling_interval=self._interval())

@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
def test_all_successful_passing_dict_key_phrase_task(self, client):
docs = [{"id": "1", "language": "en", "text": "Microsoft was founded by Bill Gates and Paul Allen"},
@@ -75,7 +75,7 @@ def test_all_successful_passing_dict_key_phrase_task(self, client):
assert "Microsoft" in document_result.key_phrases
assert document_result.id is not None

@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
def test_all_successful_passing_dict_sentiment_task(self, client):
docs = [{"id": "1", "language": "en", "text": "Microsoft was founded by Bill Gates and Paul Allen."},
@@ -115,7 +115,7 @@ def test_all_successful_passing_dict_sentiment_task(self, client):
assert document_result.sentences[0].text == "The restaurant had really good food."
assert document_result.sentences[1].text == "I recommend you try it."

@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
def test_sentiment_analysis_task_with_opinion_mining(self, client):
documents = [
@@ -190,7 +190,7 @@ def test_sentiment_analysis_task_with_opinion_mining(self, client):
self.assertEqual('negative', food_target.sentiment)
self.assertEqual(0.0, food_target.confidence_scores.neutral)

@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
def test_all_successful_passing_text_document_input_entities_task(self, client):
docs = [
@@ -224,7 +224,7 @@ def test_all_successful_passing_text_document_input_entities_task(self, client):
self.assertIsNotNone(entity.offset)
self.assertIsNotNone(entity.confidence_score)

@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
def test_all_successful_passing_string_pii_entities_task(self, client):

@@ -258,7 +258,7 @@ def test_all_successful_passing_string_pii_entities_task(self, client):
assert entity.offset is not None
assert entity.confidence_score is not None

@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
def test_bad_request_on_empty_document(self, client):
docs = [u""]
@@ -270,7 +270,7 @@
polling_interval=self._interval(),
)

@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer(client_kwargs={
"text_analytics_account_key": "",
})
@@ -288,7 +288,7 @@ def test_empty_credential_class(self, client):
polling_interval=self._interval(),
)

@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer(client_kwargs={
"text_analytics_account_key": "xxxxxxxxxxxx",
})
@@ -306,7 +306,7 @@ def test_bad_credentials(self, client):
polling_interval=self._interval(),
)

@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
def test_out_of_order_ids_multiple_tasks(self, client):
docs = [{"id": "56", "text": ":)"},
@@ -343,7 +343,7 @@ def test_out_of_order_ids_multiple_tasks(self, client):
self.assertEqual(document_result.id, document_order[doc_idx])
self.assertEqual(self.document_result_to_action_type(document_result), action_order[action_idx])

@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
def test_show_stats_and_model_version_multiple_tasks(self, client):

@@ -403,7 +403,7 @@ def callback(resp):
assert document_result.statistics.character_count
assert document_result.statistics.transaction_count

@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
def test_poller_metadata(self, client):
docs = [{"id": "56", "text": ":)"}]
@@ -431,7 +431,7 @@ def test_poller_metadata(self, client):

### TODO: Commenting out language tests. Right now analyze only supports language 'en', so no point to these tests yet

# @GlobalTextAnalyticsAccountPreparer()
# @TextAnalyticsPreparer()
# @TextAnalyticsClientPreparer()
# def test_whole_batch_language_hint(self, client):
# def callback(resp):
@@ -462,7 +462,7 @@ def test_poller_metadata(self, client):
# for doc in document_result.document_results:
# self.assertFalse(doc.is_error)

# @GlobalTextAnalyticsAccountPreparer()
# @TextAnalyticsPreparer()
# @TextAnalyticsClientPreparer(client_kwargs={
# "default_language": "en"
# })
@@ -497,7 +497,7 @@ def test_poller_metadata(self, client):
# for doc in document_result.document_results:
# assert not doc.is_error

@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
def test_invalid_language_hint_method(self, client):
response = list(client.begin_analyze_actions(
@@ -517,7 +517,7 @@ def test_invalid_language_hint_method(self, client):
for doc in document_results:
assert doc.is_error

@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
def test_bad_model_version_error_multiple_tasks(self, client):
docs = [{"id": "1", "language": "english", "text": "I did not like the hotel we stayed at."}]
@@ -535,7 +535,7 @@ def test_bad_model_version_error_multiple_tasks(self, client):
polling_interval=self._interval(),
).result()

@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
def test_bad_model_version_error_all_tasks(self, client): # TODO: verify behavior of service
docs = [{"id": "1", "language": "english", "text": "I did not like the hotel we stayed at."}]
@@ -553,7 +553,7 @@ def test_bad_model_version_error_all_tasks(self, client): # TODO: verify behavi
polling_interval=self._interval(),
).result()

@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
def test_missing_input_records_error(self, client):
docs = []
@@ -571,14 +571,14 @@ def test_missing_input_records_error(self, client):
)
assert "Input documents can not be empty or None" in str(excinfo.value)

@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
def test_passing_none_docs(self, client):
with pytest.raises(ValueError) as excinfo:
client.begin_analyze_actions(None, None)
assert "Input documents can not be empty or None" in str(excinfo.value)

@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
def test_pass_cls(self, client):
def callback(pipeline_response, deserialized, _):
@@ -593,7 +593,7 @@ def callback(pipeline_response, deserialized, _):
).result()
assert res == "cls result"

@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
def test_multiple_pages_of_results_returned_successfully(self, client):
single_doc = "hello world"
@@ -634,7 +634,7 @@ def test_multiple_pages_of_results_returned_successfully(self, client):
for document_results in action_type_to_document_results.values():
assert len(document_results) == len(docs)

@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
def test_too_many_documents(self, client):
docs = list(itertools.repeat("input document", 26)) # Maximum number of documents per request is 25
@@ -653,7 +653,7 @@ def test_too_many_documents(self, client):
)
assert excinfo.value.status_code == 400

@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsPreparer()
@TextAnalyticsClientPreparer()
def test_disable_service_logs(self, client):
actions = [