diff --git a/video/cloud-client/analyze/requirements.txt b/video/cloud-client/analyze/requirements.txt index ba966ee8deb7..cf61c0964c59 100644 --- a/video/cloud-client/analyze/requirements.txt +++ b/video/cloud-client/analyze/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-videointelligence==1.12.1 +google-cloud-videointelligence==1.13.0 google-cloud-storage==1.23.0 diff --git a/video/cloud-client/analyze/video_detect_faces_beta.py b/video/cloud-client/analyze/video_detect_faces_beta.py new file mode 100644 index 000000000000..064c6197d572 --- /dev/null +++ b/video/cloud-client/analyze/video_detect_faces_beta.py @@ -0,0 +1,85 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
# ---- video/cloud-client/analyze/video_detect_faces_beta.py ----
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0:
# https://www.apache.org/licenses/LICENSE-2.0

# [START video_detect_faces_beta]
import io

from google.cloud import videointelligence_v1p3beta1 as videointelligence


def detect_faces(local_file_path="path/to/your/video-file.mp4"):
    """Detects faces in a video from a local file."""
    client = videointelligence.VideoIntelligenceServiceClient()

    # The beta API accepts the raw video bytes directly.
    with io.open(local_file_path, "rb") as video_file:
        input_content = video_file.read()

    # Ask for bounding boxes and face attributes in the response.
    face_config = videointelligence.types.FaceDetectionConfig(
        include_bounding_boxes=True, include_attributes=True
    )
    video_context = videointelligence.types.VideoContext(
        face_detection_config=face_config
    )

    # Kick off the asynchronous (long-running) annotation request.
    operation = client.annotate_video(
        input_content=input_content,
        features=[videointelligence.enums.Feature.FACE_DETECTION],
        video_context=video_context,
    )

    print("\nProcessing video for face detection annotations.")
    result = operation.result(timeout=300)

    print("\nFinished processing.\n")

    # A single video was processed, so there is exactly one result.
    annotation_result = result.annotation_results[0]

    for face_annotation in annotation_result.face_detection_annotations:
        print("Face detected:")
        for track in face_annotation.tracks:
            start = track.segment.start_time_offset
            end = track.segment.end_time_offset
            print(
                "Segment: {}s to {}s".format(
                    start.seconds + start.nanos / 1e9,
                    end.seconds + end.nanos / 1e9,
                )
            )

            # Each segment includes timestamped faces carrying the
            # characteristics of the face detected; inspect the first one.
            timestamped_object = track.timestamped_objects[0]
            box = timestamped_object.normalized_bounding_box
            print("Bounding box:")
            print("\tleft : {}".format(box.left))
            print("\ttop : {}".format(box.top))
            print("\tright : {}".format(box.right))
            print("\tbottom: {}".format(box.bottom))

            # Attributes include glasses, headwear, facial hair, smiling,
            # direction of gaze, etc.
            print("Attributes:")
            for attribute in timestamped_object.attributes:
                print(
                    "\t{}:{} {}".format(
                        attribute.name, attribute.value, attribute.confidence
                    )
                )


# [END video_detect_faces_beta]

# ---- video/cloud-client/analyze/video_detect_faces_beta_test.py ----
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0:
# http://www.apache.org/licenses/LICENSE-2.0
import os

import video_detect_faces_beta

RESOURCES = os.path.join(os.path.dirname(__file__), "resources")


def test_detect_faces(capsys):
    # Run face detection on the bundled sample clip and verify the
    # printed annotations.
    local_file_path = os.path.join(RESOURCES, "googlework_short.mp4")
    video_detect_faces_beta.detect_faces(local_file_path=local_file_path)

    out, _ = capsys.readouterr()
    assert "Face detected:" in out
    assert "Attributes:" in out
# ---- video/cloud-client/analyze/video_detect_faces_gcs_beta.py ----
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0:
# https://www.apache.org/licenses/LICENSE-2.0

# [START video_detect_faces_gcs_beta]
from google.cloud import videointelligence_v1p3beta1 as videointelligence


def detect_faces(gcs_uri="gs://YOUR_BUCKET_ID/path/to/your/video.mp4"):
    """Detects faces in a video."""
    client = videointelligence.VideoIntelligenceServiceClient()

    # Ask for bounding boxes and face attributes in the response.
    face_config = videointelligence.types.FaceDetectionConfig(
        include_bounding_boxes=True, include_attributes=True
    )
    video_context = videointelligence.types.VideoContext(
        face_detection_config=face_config
    )

    # Kick off the asynchronous (long-running) annotation request,
    # reading the video straight from Cloud Storage.
    operation = client.annotate_video(
        input_uri=gcs_uri,
        features=[videointelligence.enums.Feature.FACE_DETECTION],
        video_context=video_context,
    )

    print("\nProcessing video for face detection annotations.")
    result = operation.result(timeout=300)

    print("\nFinished processing.\n")

    # A single video was processed, so there is exactly one result.
    annotation_result = result.annotation_results[0]

    for face_annotation in annotation_result.face_detection_annotations:
        print("Face detected:")
        for track in face_annotation.tracks:
            start = track.segment.start_time_offset
            end = track.segment.end_time_offset
            print(
                "Segment: {}s to {}s".format(
                    start.seconds + start.nanos / 1e9,
                    end.seconds + end.nanos / 1e9,
                )
            )

            # Each segment includes timestamped faces carrying the
            # characteristics of the face detected; inspect the first one.
            timestamped_object = track.timestamped_objects[0]
            box = timestamped_object.normalized_bounding_box
            print("Bounding box:")
            print("\tleft : {}".format(box.left))
            print("\ttop : {}".format(box.top))
            print("\tright : {}".format(box.right))
            print("\tbottom: {}".format(box.bottom))

            # Attributes include glasses, headwear, facial hair, smiling,
            # direction of gaze, etc.
            print("Attributes:")
            for attribute in timestamped_object.attributes:
                print(
                    "\t{}:{} {}".format(
                        attribute.name, attribute.value, attribute.confidence
                    )
                )


# [END video_detect_faces_gcs_beta]
# ---- video/cloud-client/analyze/video_detect_faces_gcs_beta_test.py ----
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0:
# http://www.apache.org/licenses/LICENSE-2.0
import os

import video_detect_faces_gcs_beta

RESOURCES = os.path.join(os.path.dirname(__file__), "resources")


def test_detect_faces(capsys):
    # Run face detection against the public sample video in GCS and
    # verify the printed annotations.
    input_uri = "gs://cloud-samples-data/video/googlework_short.mp4"
    video_detect_faces_gcs_beta.detect_faces(gcs_uri=input_uri)

    out, _ = capsys.readouterr()
    assert "Face detected:" in out
    assert "Attributes:" in out
# ---- video/cloud-client/analyze/video_detect_person_beta.py ----
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0:
# https://www.apache.org/licenses/LICENSE-2.0

# [START video_detect_person_beta]
import io

from google.cloud import videointelligence_v1p3beta1 as videointelligence


def detect_person(local_file_path="path/to/your/video-file.mp4"):
    """Detects people in a video from a local file."""
    client = videointelligence.VideoIntelligenceServiceClient()

    # The beta API accepts the raw video bytes directly.
    with io.open(local_file_path, "rb") as video_file:
        input_content = video_file.read()

    # Ask for bounding boxes, person attributes, and pose landmarks.
    person_config = videointelligence.types.PersonDetectionConfig(
        include_bounding_boxes=True,
        include_attributes=True,
        include_pose_landmarks=True,
    )
    video_context = videointelligence.types.VideoContext(
        person_detection_config=person_config
    )

    # Kick off the asynchronous (long-running) annotation request.
    operation = client.annotate_video(
        input_content=input_content,
        features=[videointelligence.enums.Feature.PERSON_DETECTION],
        video_context=video_context,
    )

    print("\nProcessing video for person detection annotations.")
    result = operation.result(timeout=300)

    print("\nFinished processing.\n")

    # A single video was processed, so there is exactly one result.
    annotation_result = result.annotation_results[0]

    for person_annotation in annotation_result.person_detection_annotations:
        print("Person detected:")
        for track in person_annotation.tracks:
            start = track.segment.start_time_offset
            end = track.segment.end_time_offset
            print(
                "Segment: {}s to {}s".format(
                    start.seconds + start.nanos / 1e9,
                    end.seconds + end.nanos / 1e9,
                )
            )

            # Each segment includes timestamped objects describing
            # characteristics — e.g. clothes, posture — of the person
            # detected; inspect the first one.
            timestamped_object = track.timestamped_objects[0]
            box = timestamped_object.normalized_bounding_box
            print("Bounding box:")
            print("\tleft : {}".format(box.left))
            print("\ttop : {}".format(box.top))
            print("\tright : {}".format(box.right))
            print("\tbottom: {}".format(box.bottom))

            # Attributes include unique pieces of clothing, poses, or
            # hair color.
            print("Attributes:")
            for attribute in timestamped_object.attributes:
                print(
                    "\t{}:{} {}".format(
                        attribute.name, attribute.value, attribute.confidence
                    )
                )

            # Landmarks in person detection are body parts such as
            # left_shoulder, right_ear, and right_ankle.
            print("Landmarks:")
            for landmark in timestamped_object.landmarks:
                print(
                    "\t{}: {} (x={}, y={})".format(
                        landmark.name,
                        landmark.confidence,
                        landmark.point.x,  # Normalized vertex
                        landmark.point.y,  # Normalized vertex
                    )
                )


# [END video_detect_person_beta]
# ---- video/cloud-client/analyze/video_detect_person_beta_test.py ----
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0:
# http://www.apache.org/licenses/LICENSE-2.0
import os

import video_detect_person_beta

RESOURCES = os.path.join(os.path.dirname(__file__), "resources")


def test_detect_person(capsys):
    # Run person detection on the bundled tiny sample clip and verify
    # the printed annotations, attributes, and landmark coordinates.
    local_file_path = os.path.join(RESOURCES, "googlework_tiny.mp4")
    video_detect_person_beta.detect_person(local_file_path=local_file_path)

    out, _ = capsys.readouterr()
    assert "Person detected:" in out
    assert "Attributes:" in out
    assert "x=" in out
    assert "y=" in out
# ---- video/cloud-client/analyze/video_detect_person_gcs_beta.py ----
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0:
# https://www.apache.org/licenses/LICENSE-2.0

# [START video_detect_person_gcs_beta]
from google.cloud import videointelligence_v1p3beta1 as videointelligence


def detect_person(gcs_uri="gs://YOUR_BUCKET_ID/path/to/your/video.mp4"):
    """Detects people in a video."""
    client = videointelligence.VideoIntelligenceServiceClient()

    # Ask for bounding boxes, person attributes, and pose landmarks.
    person_config = videointelligence.types.PersonDetectionConfig(
        include_bounding_boxes=True,
        include_attributes=True,
        include_pose_landmarks=True,
    )
    video_context = videointelligence.types.VideoContext(
        person_detection_config=person_config
    )

    # Kick off the asynchronous (long-running) annotation request,
    # reading the video straight from Cloud Storage.
    operation = client.annotate_video(
        input_uri=gcs_uri,
        features=[videointelligence.enums.Feature.PERSON_DETECTION],
        video_context=video_context,
    )

    print("\nProcessing video for person detection annotations.")
    result = operation.result(timeout=300)

    print("\nFinished processing.\n")

    # A single video was processed, so there is exactly one result.
    annotation_result = result.annotation_results[0]

    for person_annotation in annotation_result.person_detection_annotations:
        print("Person detected:")
        for track in person_annotation.tracks:
            start = track.segment.start_time_offset
            end = track.segment.end_time_offset
            print(
                "Segment: {}s to {}s".format(
                    start.seconds + start.nanos / 1e9,
                    end.seconds + end.nanos / 1e9,
                )
            )

            # Each segment includes timestamped objects describing
            # characteristics — e.g. clothes, posture — of the person
            # detected; inspect the first one.
            timestamped_object = track.timestamped_objects[0]
            box = timestamped_object.normalized_bounding_box
            print("Bounding box:")
            print("\tleft : {}".format(box.left))
            print("\ttop : {}".format(box.top))
            print("\tright : {}".format(box.right))
            print("\tbottom: {}".format(box.bottom))

            # Attributes include unique pieces of clothing, poses, or
            # hair color.
            print("Attributes:")
            for attribute in timestamped_object.attributes:
                print(
                    "\t{}:{} {}".format(
                        attribute.name, attribute.value, attribute.confidence
                    )
                )

            # Landmarks in person detection are body parts such as
            # left_shoulder, right_ear, and right_ankle.
            print("Landmarks:")
            for landmark in timestamped_object.landmarks:
                print(
                    "\t{}: {} (x={}, y={})".format(
                        landmark.name,
                        landmark.confidence,
                        landmark.point.x,  # Normalized vertex
                        landmark.point.y,  # Normalized vertex
                    )
                )


# [END video_detect_person_gcs_beta]

# ---- video/cloud-client/analyze/video_detect_person_gcs_beta_test.py ----
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0:
# http://www.apache.org/licenses/LICENSE-2.0
import os

import video_detect_person_gcs_beta

RESOURCES = os.path.join(os.path.dirname(__file__), "resources")


def test_detect_person(capsys):
    # Run person detection against the public sample video in GCS and
    # verify the printed annotations, attributes, and landmarks.
    input_uri = "gs://cloud-samples-data/video/googlework_tiny.mp4"
    video_detect_person_gcs_beta.detect_person(gcs_uri=input_uri)

    out, _ = capsys.readouterr()
    assert "Person detected:" in out
    assert "Attributes:" in out
    assert "x=" in out
    assert "y=" in out