Commit 1b20402

Natural Language GAPIC client library [(#1018)](GoogleCloudPlatform/p…)

dizcology authored and busunkim96 committed Sep 29, 2020
1 parent 5a1cc58 commit 1b20402
Showing 13 changed files with 216 additions and 110 deletions.

Binary file not shown.
```diff
@@ -5,6 +5,10 @@ Google Cloud Natural Language API Python Samples
 
 This directory contains samples for Google Cloud Natural Language API. The `Google Cloud Natural Language API`_ provides natural language understanding technologies to developers, including sentiment analysis, entity recognition, and syntax analysis. This API is part of the larger Cloud Machine Learning API.
 
+- See the `migration guide`_ for information about migrating to Python client library v0.26.1.
+
+.. _migration guide: https://cloud.google.com/natural-language/docs/python-client-migration
+
 
 
 
```
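For readers skimming the diff, the migrated call pattern that recurs throughout this commit boils down to the following minimal sketch (assembled from the hunks below; it assumes `google-cloud-language==0.26.1` is installed and Application Default Credentials are configured):

```python
# Minimal sketch of the post-migration (GAPIC) call pattern used in this
# commit: build a types.Document, then pass it to LanguageServiceClient.
from google.cloud import language
from google.cloud.language import enums
from google.cloud.language import types

client = language.LanguageServiceClient()
document = types.Document(
    content=u'Hello, world!',
    type=enums.Document.Type.PLAIN_TEXT)
sentiment = client.analyze_sentiment(document=document).document_sentiment
print(sentiment.score, sentiment.magnitude)
```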
```diff
@@ -10,6 +10,12 @@ product:
   entity recognition, and syntax analysis. This API is part of the larger
   Cloud Machine Learning API.
 
+
+  - See the `migration guide`_ for information about migrating to Python client library v0.26.1.
+
+
+  .. _migration guide: https://cloud.google.com/natural-language/docs/python-client-migration
+
 setup:
 - auth
 - install_deps
```
```diff
@@ -18,17 +18,25 @@
 def run_quickstart():
     # [START language_quickstart]
     # Imports the Google Cloud client library
+    # [START migration_import]
     from google.cloud import language
+    from google.cloud.language import enums
+    from google.cloud.language import types
+    # [END migration_import]
 
     # Instantiates a client
-    language_client = language.Client()
+    # [START migration_client]
+    client = language.LanguageServiceClient()
+    # [END migration_client]
 
     # The text to analyze
-    text = 'Hello, world!'
-    document = language_client.document_from_text(text)
+    text = u'Hello, world!'
+    document = types.Document(
+        content=text,
+        type=enums.Document.Type.PLAIN_TEXT)
 
     # Detects the sentiment of the text
-    sentiment = document.analyze_sentiment().sentiment
+    sentiment = client.analyze_sentiment(document=document).document_sentiment
 
     print('Text: {}'.format(text))
     print('Sentiment: {}, {}'.format(sentiment.score, sentiment.magnitude))
```
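The quickstart above prints only the document-level sentiment. The GAPIC response carries more; here is a hedged sketch of reading per-sentence sentiment as well (field names per the v1 `AnalyzeSentimentResponse`; an illustration, not part of this commit):

```python
# Sketch (assumes the client and document from the quickstart above):
# the full response also exposes per-sentence sentiment.
response = client.analyze_sentiment(document=document)
for sentence in response.sentences:
    print(u'{}: {}'.format(sentence.text.content,
                           sentence.sentiment.score))
```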
```diff
@@ -1 +1 @@
-google-cloud-language==0.25.0
+google-cloud-language==0.26.1
```
```diff
@@ -24,118 +24,164 @@
 import argparse
 
 from google.cloud import language
+from google.cloud.language import enums
+from google.cloud.language import types
 import six
 
 
 # [START def_sentiment_text]
 def sentiment_text(text):
     """Detects sentiment in the text."""
-    language_client = language.Client()
+    client = language.LanguageServiceClient()
 
     if isinstance(text, six.binary_type):
         text = text.decode('utf-8')
 
     # Instantiates a plain text document.
-    document = language_client.document_from_text(text)
+    # [START migration_document_text]
+    # [START migration_analyze_sentiment]
+    document = types.Document(
+        content=text,
+        type=enums.Document.Type.PLAIN_TEXT)
+    # [END migration_document_text]
 
     # Detects sentiment in the document. You can also analyze HTML with:
-    #   document.doc_type == language.Document.HTML
-    sentiment = document.analyze_sentiment().sentiment
+    #   document.type == enums.Document.Type.HTML
+    sentiment = client.analyze_sentiment(document).document_sentiment
 
     print('Score: {}'.format(sentiment.score))
     print('Magnitude: {}'.format(sentiment.magnitude))
+    # [END migration_analyze_sentiment]
 # [END def_sentiment_text]
 
 
 # [START def_sentiment_file]
 def sentiment_file(gcs_uri):
     """Detects sentiment in the file located in Google Cloud Storage."""
-    language_client = language.Client()
+    client = language.LanguageServiceClient()
 
     # Instantiates a plain text document.
-    document = language_client.document_from_url(gcs_uri)
+    # [START migration_document_gcs_uri]
+    document = types.Document(
+        gcs_content_uri=gcs_uri,
+        type=enums.Document.Type.PLAIN_TEXT)
+    # [END migration_document_gcs_uri]
 
     # Detects sentiment in the document. You can also analyze HTML with:
-    #   document.doc_type == language.Document.HTML
-    sentiment = document.analyze_sentiment().sentiment
+    #   document.type == enums.Document.Type.HTML
+    sentiment = client.analyze_sentiment(document).document_sentiment
 
     print('Score: {}'.format(sentiment.score))
     print('Magnitude: {}'.format(sentiment.magnitude))
 # [END def_sentiment_file]
 
 
 # [START def_entities_text]
 def entities_text(text):
     """Detects entities in the text."""
-    language_client = language.Client()
+    client = language.LanguageServiceClient()
 
     if isinstance(text, six.binary_type):
         text = text.decode('utf-8')
 
     # Instantiates a plain text document.
-    document = language_client.document_from_text(text)
+    # [START migration_analyze_entities]
+    document = types.Document(
+        content=text,
+        type=enums.Document.Type.PLAIN_TEXT)
 
     # Detects entities in the document. You can also analyze HTML with:
-    #   document.doc_type == language.Document.HTML
-    entities = document.analyze_entities().entities
+    #   document.type == enums.Document.Type.HTML
+    entities = client.analyze_entities(document).entities
 
     for entity in entities:
         print('=' * 20)
         print(u'{:<16}: {}'.format('name', entity.name))
-        print(u'{:<16}: {}'.format('type', entity.entity_type))
+        print(u'{:<16}: {}'.format('type', entity.type))
         print(u'{:<16}: {}'.format('metadata', entity.metadata))
         print(u'{:<16}: {}'.format('salience', entity.salience))
         print(u'{:<16}: {}'.format('wikipedia_url',
               entity.metadata.get('wikipedia_url', '-')))
+    # [END migration_analyze_entities]
 # [END def_entities_text]
 
 
 # [START def_entities_file]
 def entities_file(gcs_uri):
     """Detects entities in the file located in Google Cloud Storage."""
-    language_client = language.Client()
+    client = language.LanguageServiceClient()
 
     # Instantiates a plain text document.
-    document = language_client.document_from_url(gcs_uri)
+    document = types.Document(
+        gcs_content_uri=gcs_uri,
+        type=enums.Document.Type.PLAIN_TEXT)
 
     # Detects sentiment in the document. You can also analyze HTML with:
-    #   document.doc_type == language.Document.HTML
-    entities = document.analyze_entities().entities
+    #   document.type == enums.Document.Type.HTML
+    entities = client.analyze_entities(document).entities
 
     for entity in entities:
         print('=' * 20)
         print(u'{:<16}: {}'.format('name', entity.name))
-        print(u'{:<16}: {}'.format('type', entity.entity_type))
+        print(u'{:<16}: {}'.format('type', entity.type))
         print(u'{:<16}: {}'.format('metadata', entity.metadata))
         print(u'{:<16}: {}'.format('salience', entity.salience))
         print(u'{:<16}: {}'.format('wikipedia_url',
               entity.metadata.get('wikipedia_url', '-')))
 # [END def_entities_file]
 
 
 # [START def_syntax_text]
 def syntax_text(text):
     """Detects syntax in the text."""
-    language_client = language.Client()
+    client = language.LanguageServiceClient()
 
     if isinstance(text, six.binary_type):
         text = text.decode('utf-8')
 
     # Instantiates a plain text document.
-    document = language_client.document_from_text(text)
+    # [START migration_analyze_syntax]
+    document = types.Document(
+        content=text,
+        type=enums.Document.Type.PLAIN_TEXT)
 
     # Detects syntax in the document. You can also analyze HTML with:
-    #   document.doc_type == language.Document.HTML
-    tokens = document.analyze_syntax().tokens
+    #   document.type == enums.Document.Type.HTML
+    tokens = client.analyze_syntax(document).tokens
 
+    # part-of-speech tags from enums.PartOfSpeech.Tag
+    pos_tag = ('UNKNOWN', 'ADJ', 'ADP', 'ADV', 'CONJ', 'DET', 'NOUN', 'NUM',
+               'PRON', 'PRT', 'PUNCT', 'VERB', 'X', 'AFFIX')
+
     for token in tokens:
-        print(u'{}: {}'.format(token.part_of_speech.tag, token.text_content))
+        print(u'{}: {}'.format(pos_tag[token.part_of_speech.tag],
+                               token.text.content))
+    # [END migration_analyze_syntax]
 # [END def_syntax_text]
 
 
 # [START def_syntax_file]
 def syntax_file(gcs_uri):
     """Detects syntax in the file located in Google Cloud Storage."""
-    language_client = language.Client()
+    client = language.LanguageServiceClient()
 
     # Instantiates a plain text document.
-    document = language_client.document_from_url(gcs_uri)
+    document = types.Document(
+        gcs_content_uri=gcs_uri,
+        type=enums.Document.Type.PLAIN_TEXT)
 
     # Detects syntax in the document. You can also analyze HTML with:
-    #   document.doc_type == language.Document.HTML
-    tokens = document.analyze_syntax().tokens
+    #   document.type == enums.Document.Type.HTML
+    tokens = client.analyze_syntax(document).tokens
 
+    # part-of-speech tags from enums.PartOfSpeech.Tag
+    pos_tag = ('UNKNOWN', 'ADJ', 'ADP', 'ADV', 'CONJ', 'DET', 'NOUN', 'NUM',
+               'PRON', 'PRT', 'PUNCT', 'VERB', 'X', 'AFFIX')
+
     for token in tokens:
-        print(u'{}: {}'.format(token.part_of_speech.tag, token.text_content))
+        print(u'{}: {}'.format(pos_tag[token.part_of_speech.tag],
+                               token.text.content))
 # [END def_syntax_file]
 
 
 if __name__ == '__main__':
```
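Note that the migrated snippets print `entity.type` as a raw integer, the same situation `token.part_of_speech.tag` is handled with the `pos_tag` tuple. If a symbolic name is wanted, the same trick applies; a sketch (tuple order assumed to follow `enums.Entity.Type` in the v1 API, not part of this commit):

```python
# Sketch: map the integer entity.type to a readable name, mirroring the
# pos_tag tuple used by the syntax samples above.
entity_type = ('UNKNOWN', 'PERSON', 'LOCATION', 'ORGANIZATION', 'EVENT',
               'WORK_OF_ART', 'CONSUMER_GOOD', 'OTHER')
print(u'{:<16}: {}'.format('type', entity_type[entity.type]))
```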
The same README change, applied to a second sample directory:

```diff
@@ -5,6 +5,10 @@ Google Cloud Natural Language API Python Samples
 
 This directory contains samples for Google Cloud Natural Language API. The `Google Cloud Natural Language API`_ provides natural language understanding technologies to developers, including sentiment analysis, entity recognition, and syntax analysis. This API is part of the larger Cloud Machine Learning API.
 
+- See the `migration guide`_ for information about migrating to Python client library v0.26.1.
+
+.. _migration guide: https://cloud.google.com/natural-language/docs/python-client-migration
+
 
 
 
```
And the same README template change, applied to a second sample directory:

```diff
@@ -10,6 +10,12 @@ product:
   entity recognition, and syntax analysis. This API is part of the larger
   Cloud Machine Learning API.
 
+
+  - See the `migration guide`_ for information about migrating to Python client library v0.26.1.
+
+
+  .. _migration guide: https://cloud.google.com/natural-language/docs/python-client-migration
+
 setup:
 - auth
 - install_deps
```
```diff
@@ -18,17 +18,25 @@
 def run_quickstart():
     # [START language_quickstart]
     # Imports the Google Cloud client library
-    from google.cloud import language
+    # [START beta_import_client]
+    # [START beta_import]
+    from google.cloud import language_v1beta2
+    from google.cloud.language_v1beta2 import enums
+    from google.cloud.language_v1beta2 import types
+    # [END beta_import]
 
-    # Instantiates a client with they v1beta2 version
-    language_client = language.Client(api_version='v1beta2')
+    # Instantiates a client with the v1beta2 version
+    client = language_v1beta2.LanguageServiceClient()
+    # [END beta_import_client]
 
     # The text to analyze
-    text = 'Hallo Welt!'
-    document = language_client.document_from_text(text, language='DE')
-
+    text = u'Hallo Welt!'
+    document = types.Document(
+        content=text,
+        type=enums.Document.Type.PLAIN_TEXT,
+        language='de')
     # Detects the sentiment of the text
-    sentiment = document.analyze_sentiment().sentiment
+    sentiment = client.analyze_sentiment(document).document_sentiment
 
     print('Text: {}'.format(text))
     print('Sentiment: {}, {}'.format(sentiment.score, sentiment.magnitude))
```
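The v1beta2 quickstart pins `language='de'` on the `Document`. If the language field is omitted, the service detects it automatically and reports the detected code on the response; a sketch building on the quickstart's client (relies on the `language` field of `AnalyzeSentimentResponse`, not shown in this commit):

```python
# Sketch: omit Document.language and read the auto-detected language
# code back from the response instead.
document = types.Document(
    content=u'Hallo Welt!',
    type=enums.Document.Type.PLAIN_TEXT)
response = client.analyze_sentiment(document)
print(response.language)  # expected to be 'de' for German input
```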
```diff
@@ -1,2 +1 @@
-gapic-google-cloud-language-v1beta2==0.15.3
-google-cloud-language==0.25.0
+google-cloud-language==0.26.1
```