Vision support for batch processing part one. (#2967)
daspecster authored Jan 30, 2017
1 parent 50c8e88 commit f8ceb94
Showing 7 changed files with 88 additions and 37 deletions.
13 changes: 5 additions & 8 deletions vision/google/cloud/vision/_gax.py
@@ -39,21 +39,18 @@ def annotate(self, image, features):
:type features: list
:param features: List of :class:`~google.cloud.vision.feature.Feature`.
:rtype: :class:`~google.cloud.vision.annotations.Annotations`
:returns: Instance of ``Annotations`` with results or ``None``.
:rtype: list
:returns: List of
:class:`~google.cloud.vision.annotations.Annotations`.
"""
gapic_features = [_to_gapic_feature(feature) for feature in features]
gapic_image = _to_gapic_image(image)
request = image_annotator_pb2.AnnotateImageRequest(
image=gapic_image, features=gapic_features)
requests = [request]
annotator_client = self._annotator_client
images = annotator_client.batch_annotate_images(requests)
if len(images.responses) == 1:
return Annotations.from_pb(images.responses[0])
elif len(images.responses) > 1:
raise NotImplementedError(
'Multiple image processing is not yet supported.')
responses = annotator_client.batch_annotate_images(requests).responses
return [Annotations.from_pb(response) for response in responses]


def _to_gapic_feature(feature):
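
The net effect of this hunk: annotate() no longer special-cases zero or one responses, it always maps batch_annotate_images().responses onto a list of Annotations. A minimal standalone sketch of that pattern, using stand-in types rather than the real gRPC classes (not part of the commit):

# Sketch of the new batching contract with stub types; the real code uses
# google.cloud.grpc.vision.v1 protos and google.cloud.vision Annotations.
from collections import namedtuple

BatchResponse = namedtuple('BatchResponse', ['responses'])


class Annotations(object):
    """Stand-in for google.cloud.vision.annotations.Annotations."""

    def __init__(self, payload):
        self.payload = payload

    @classmethod
    def from_pb(cls, response):
        return cls(response)


def annotate(batch_response):
    # Mirrors the new implementation: always return a (possibly empty) list,
    # one Annotations per response -- no NotImplementedError path remains.
    return [Annotations.from_pb(r) for r in batch_response.responses]


print(len(annotate(BatchResponse(responses=[]))))          # 0
print(len(annotate(BatchResponse(responses=['a', 'b']))))  # 2
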
12 changes: 4 additions & 8 deletions vision/google/cloud/vision/_http.py
@@ -41,20 +41,16 @@ def annotate(self, image, features):
based on the number of Feature Types.
See: https://cloud.google.com/vision/docs/pricing
:rtype: dict
:returns: List of annotations.
:rtype: list
:returns: List of :class:`~google.cloud.vision.annotations.Annotations`.
"""
request = _make_request(image, features)

data = {'requests': [request]}
api_response = self._connection.api_request(
method='POST', path='/images:annotate', data=data)
images = api_response.get('responses')
if len(images) == 1:
return Annotations.from_api_repr(images[0])
elif len(images) > 1:
raise NotImplementedError(
'Multiple image processing is not yet supported.')
responses = api_response.get('responses')
return [Annotations.from_api_repr(response) for response in responses]


def _make_request(image, features):
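
The REST handler mirrors the change: it still POSTs {'requests': [request]} to /images:annotate, but now converts every entry of the responses array instead of raising on more than one. A hedged sketch of the JSON shapes involved (keys taken from the diff and the fixtures below; nothing is sent over the wire):

# Request/response shapes only; this does not call the Vision API.
import json

request = {
    'image': {'content': '<base64-encoded image bytes>'},
    'features': [{'type': 'LABEL_DETECTION', 'maxResults': 5}],
}
data = {'requests': [request]}  # body POSTed to /images:annotate

# The API returns one entry per request, as in MULTIPLE_RESPONSE below.
api_response = {
    'responses': [
        {'labelAnnotations': [{'description': 'automobile', 'score': 0.98}]},
    ],
}
responses = api_response.get('responses')
print(json.dumps(data, indent=2))
print(len(responses))  # one Annotations object is built per entry
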
14 changes: 7 additions & 7 deletions vision/google/cloud/vision/image.py
@@ -134,7 +134,7 @@ def detect_faces(self, limit=10):
"""
features = [Feature(FeatureTypes.FACE_DETECTION, limit)]
annotations = self._detect_annotation(features)
return annotations.faces
return annotations[0].faces

def detect_labels(self, limit=10):
"""Detect labels that describe objects in an image.
@@ -147,7 +147,7 @@ def detect_labels(self, limit=10):
"""
features = [Feature(FeatureTypes.LABEL_DETECTION, limit)]
annotations = self._detect_annotation(features)
return annotations.labels
return annotations[0].labels

def detect_landmarks(self, limit=10):
"""Detect landmarks in an image.
@@ -161,7 +161,7 @@
"""
features = [Feature(FeatureTypes.LANDMARK_DETECTION, limit)]
annotations = self._detect_annotation(features)
return annotations.landmarks
return annotations[0].landmarks

def detect_logos(self, limit=10):
"""Detect logos in an image.
@@ -175,7 +175,7 @@
"""
features = [Feature(FeatureTypes.LOGO_DETECTION, limit)]
annotations = self._detect_annotation(features)
return annotations.logos
return annotations[0].logos

def detect_properties(self, limit=10):
"""Detect the color properties of an image.
@@ -189,7 +189,7 @@
"""
features = [Feature(FeatureTypes.IMAGE_PROPERTIES, limit)]
annotations = self._detect_annotation(features)
return annotations.properties
return annotations[0].properties

def detect_safe_search(self, limit=10):
"""Retreive safe search properties from an image.
@@ -203,7 +203,7 @@
"""
features = [Feature(FeatureTypes.SAFE_SEARCH_DETECTION, limit)]
annotations = self._detect_annotation(features)
return annotations.safe_searches
return annotations[0].safe_searches

def detect_text(self, limit=10):
"""Detect text in an image.
@@ -217,4 +217,4 @@
"""
features = [Feature(FeatureTypes.TEXT_DETECTION, limit)]
annotations = self._detect_annotation(features)
return annotations.texts
return annotations[0].texts
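
Every detect_* helper above now unwraps annotations[0], so the public single-image surface is unchanged. A hedged usage sketch (assumes this revision of google-cloud-python is installed and application credentials are configured; the filename is illustrative):

# Single-image convenience calls still return per-image results, because
# the helpers index the first Annotations internally.
from google.cloud import vision

client = vision.Client()
with open('car.jpg', 'rb') as image_file:
    image = client.image(content=image_file.read())

labels = image.detect_labels(limit=10)  # annotations for this one image
faces = image.detect_faces(limit=10)
for label in labels:
    print(label.description, label.score)
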
33 changes: 33 additions & 0 deletions vision/unit_tests/_fixtures.py
@@ -1688,6 +1688,39 @@
}


MULTIPLE_RESPONSE = {
'responses': [
{
'labelAnnotations': [
{
'mid': '/m/0k4j',
'description': 'automobile',
'score': 0.9776855
},
{
'mid': '/m/07yv9',
'description': 'vehicle',
'score': 0.947987
},
{
'mid': '/m/07r04',
'description': 'truck',
'score': 0.88429511
},
],
},
{
'safeSearchAnnotation': {
'adult': 'VERY_UNLIKELY',
'spoof': 'UNLIKELY',
'medical': 'POSSIBLE',
'violence': 'VERY_UNLIKELY'
},
},
],
}


SAFE_SEARCH_DETECTION_RESPONSE = {
'responses': [
{
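
The new MULTIPLE_RESPONSE fixture carries two per-image entries: one with three labelAnnotations and one with only a safeSearchAnnotation. A sketch of how such a payload maps onto Annotations, the same way the HTTP backend consumes it (assumes the fixture module and this revision of the library are importable):

# Build Annotations from the fixture exactly as _http.py does.
from google.cloud.vision.annotations import Annotations
from unit_tests._fixtures import MULTIPLE_RESPONSE

first, second = (Annotations.from_api_repr(response)
                 for response in MULTIPLE_RESPONSE['responses'])

print(len(first.labels))           # 3 labels for the first image
print(second.safe_searches.adult)  # Likelihood.VERY_UNLIKELY for the second
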
25 changes: 17 additions & 8 deletions vision/unit_tests/test__gax.py
@@ -78,11 +78,15 @@ def test_annotate_no_results(self):
gax_api._annotator_client = mock.Mock(
spec_set=['batch_annotate_images'], **mock_response)
with mock.patch('google.cloud.vision._gax.Annotations'):
self.assertIsNone(gax_api.annotate(image, [feature]))
response = gax_api.annotate(image, [feature])
self.assertEqual(len(response), 0)
self.assertIsInstance(response, list)

gax_api._annotator_client.batch_annotate_images.assert_called()

def test_annotate_multiple_results(self):
from google.cloud.grpc.vision.v1 import image_annotator_pb2
from google.cloud.vision.annotations import Annotations
from google.cloud.vision.feature import Feature
from google.cloud.vision.feature import FeatureTypes
from google.cloud.vision.image import Image
@@ -95,16 +99,21 @@ def test_annotate_multiple_results(self):
'ImageAnnotatorClient'):
gax_api = self._make_one(client)

mock_response = {
'batch_annotate_images.return_value': mock.Mock(responses=[1, 2]),
}
responses = [
image_annotator_pb2.AnnotateImageResponse(),
image_annotator_pb2.AnnotateImageResponse(),
]
response = image_annotator_pb2.BatchAnnotateImagesResponse(
responses=responses)

gax_api._annotator_client = mock.Mock(
spec_set=['batch_annotate_images'], **mock_response)
with mock.patch('google.cloud.vision._gax.Annotations'):
with self.assertRaises(NotImplementedError):
gax_api.annotate(image, [feature])
spec_set=['batch_annotate_images'])
gax_api._annotator_client.batch_annotate_images.return_value = response
responses = gax_api.annotate(image, [feature])

self.assertEqual(len(responses), 2)
self.assertIsInstance(responses[0], Annotations)
self.assertIsInstance(responses[1], Annotations)
gax_api._annotator_client.batch_annotate_images.assert_called()


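
The rewritten GAX test feeds real protobuf messages through the mocked client instead of bare integers, so Annotations.from_pb() is actually exercised. A small hedged sketch of that setup in isolation (assumes the google.cloud.grpc.vision.v1 protos used at this revision are importable):

# Build an empty batch response proto the way the test does.
from google.cloud.grpc.vision.v1 import image_annotator_pb2

responses = [
    image_annotator_pb2.AnnotateImageResponse(),
    image_annotator_pb2.AnnotateImageResponse(),
]
batch = image_annotator_pb2.BatchAnnotateImagesResponse(responses=responses)
print(len(batch.responses))  # 2 empty per-image responses
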
20 changes: 16 additions & 4 deletions vision/unit_tests/test__http.py
@@ -44,12 +44,16 @@ def test_call_annotate_with_no_results(self):
http_api = self._make_one(client)
http_api._connection = mock.Mock(spec_set=['api_request'])
http_api._connection.api_request.return_value = {'responses': []}
self.assertIsNone(http_api.annotate(image, [feature]))
response = http_api.annotate(image, [feature])
self.assertEqual(len(response), 0)
self.assertIsInstance(response, list)

def test_call_annotate_with_more_than_one_result(self):
from google.cloud.vision.feature import Feature
from google.cloud.vision.feature import FeatureTypes
from google.cloud.vision.image import Image
from google.cloud.vision.likelihood import Likelihood
from unit_tests._fixtures import MULTIPLE_RESPONSE

client = mock.Mock(spec_set=['_connection'])
feature = Feature(FeatureTypes.LABEL_DETECTION, 5)
@@ -58,9 +62,17 @@ def test_call_annotate_with_more_than_one_result(self):

http_api = self._make_one(client)
http_api._connection = mock.Mock(spec_set=['api_request'])
http_api._connection.api_request.return_value = {'responses': [1, 2]}
with self.assertRaises(NotImplementedError):
http_api.annotate(image, [feature])
http_api._connection.api_request.return_value = MULTIPLE_RESPONSE
responses = http_api.annotate(image, [feature])

self.assertEqual(len(responses), 2)
image_one = responses[0]
image_two = responses[1]
self.assertEqual(len(image_one.labels), 3)
self.assertIsInstance(image_one.safe_searches, tuple)
self.assertEqual(image_two.safe_searches.adult,
Likelihood.VERY_UNLIKELY)
self.assertEqual(len(image_two.labels), 0)


class TestVisionRequest(unittest.TestCase):
8 changes: 6 additions & 2 deletions vision/unit_tests/test_client.py
@@ -104,8 +104,10 @@ def test_face_annotation(self):
features = [Feature(feature_type=FeatureTypes.FACE_DETECTION,
max_results=3)]
image = client.image(content=IMAGE_CONTENT)
response = client._vision_api.annotate(image, features)
api_response = client._vision_api.annotate(image, features)

self.assertEqual(len(api_response), 1)
response = api_response[0]
self.assertEqual(REQUEST,
client._connection._requested[0]['data'])
self.assertIsInstance(response, Annotations)
@@ -166,8 +168,10 @@ def test_multiple_detection_from_content(self):
logo_feature = Feature(FeatureTypes.LOGO_DETECTION, limit)
features = [label_feature, logo_feature]
image = client.image(content=IMAGE_CONTENT)
items = image.detect(features)
detected_items = image.detect(features)

self.assertEqual(len(detected_items), 1)
items = detected_items[0]
self.assertEqual(len(items.logos), 2)
self.assertEqual(len(items.labels), 3)
first_logo = items.logos[0]
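
Taken together, detect() and both annotate() backends now hand back a list with one Annotations entry per image, and single-image callers index [0], as the updated client tests show. A hedged end-to-end sketch (assumes an installed library, credentials, and a local image file; names are illustrative):

# Batch-aware detect(): one Annotations per image sent in the request.
from google.cloud import vision
from google.cloud.vision.feature import Feature, FeatureTypes

client = vision.Client()
with open('logo.jpg', 'rb') as image_file:
    image = client.image(content=image_file.read())

features = [
    Feature(FeatureTypes.LABEL_DETECTION, 3),
    Feature(FeatureTypes.LOGO_DETECTION, 2),
]
detected = image.detect(features)  # list of Annotations
annotations = detected[0]          # only one image was sent here
print(len(annotations.labels), len(annotations.logos))
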
