From 292260288496787c9ffb8c14111693dd80686406 Mon Sep 17 00:00:00 2001
From: Yu-Han Liu
Date: Mon, 19 Mar 2018 13:29:14 -0700
Subject: [PATCH] Remove video face (#1348)

---
 video/cloud-client/analyze/README.rst      |  10 +-
 video/cloud-client/analyze/analyze.py      |  54 +---------
 video/cloud-client/analyze/analyze_test.py |  11 ---
 video/cloud-client/faces/README.rst        | 110 ---------------------
 video/cloud-client/faces/README.rst.in     |  22 -----
 video/cloud-client/faces/faces.py          |  77 ---------------
 video/cloud-client/faces/faces_test.py     |  35 -------
 video/cloud-client/faces/requirements.txt  |   1 -
 8 files changed, 5 insertions(+), 315 deletions(-)
 delete mode 100644 video/cloud-client/faces/README.rst
 delete mode 100644 video/cloud-client/faces/README.rst.in
 delete mode 100644 video/cloud-client/faces/faces.py
 delete mode 100644 video/cloud-client/faces/faces_test.py
 delete mode 100644 video/cloud-client/faces/requirements.txt

diff --git a/video/cloud-client/analyze/README.rst b/video/cloud-client/analyze/README.rst
index 519822257809..8c13affd92ce 100644
--- a/video/cloud-client/analyze/README.rst
+++ b/video/cloud-client/analyze/README.rst
@@ -12,7 +12,7 @@ This directory contains samples for Google Cloud Video Intelligence API. `Google
 
 
 
-.. _Google Cloud Video Intelligence API: https://cloud.google.com/video-intelligence/docs 
+.. _Google Cloud Video Intelligence API: https://cloud.google.com/video-intelligence/docs
 
 Setup
 -------------------------------------------------------------------------------
@@ -70,22 +70,20 @@ To run this sample:
     $ python analyze.py
 
-    usage: analyze.py [-h] {faces,labels,labels_file,explicit_content,shots} ...
+    usage: analyze.py [-h] {labels,labels_file,explicit_content,shots} ...
 
-    This application demonstrates face detection, label detection,
+    This application demonstrates label detection,
     explicit content, and shot change detection using the Google Cloud API.
 
     Usage Examples:
 
-        python analyze.py faces gs://demomaker/google_gmail.mp4
         python analyze.py labels gs://cloud-ml-sandbox/video/chicago.mp4
         python analyze.py labels_file resources/cat.mp4
         python analyze.py shots gs://demomaker/gbikes_dinosaur.mp4
         python analyze.py explicit_content gs://demomaker/gbikes_dinosaur.mp4
 
     positional arguments:
-      {faces,labels,labels_file,explicit_content,shots}
-        faces               Detects faces given a GCS path.
+      {labels,labels_file,explicit_content,shots}
         labels              Detects labels given a GCS path.
         labels_file         Detect labels given a file path.
         explicit_content    Detects explicit content from the GCS path to a
                             video.
diff --git a/video/cloud-client/analyze/analyze.py b/video/cloud-client/analyze/analyze.py
index 3cba9baefd7a..4042809b5cd6 100644
--- a/video/cloud-client/analyze/analyze.py
+++ b/video/cloud-client/analyze/analyze.py
@@ -14,12 +14,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-"""This application demonstrates face detection, label detection,
+"""This application demonstrates label detection,
 explicit content, and shot change detection using the Google Cloud API.
 
 Usage Examples:
 
-    python analyze.py faces gs://demomaker/google_gmail.mp4
     python analyze.py labels gs://cloud-ml-sandbox/video/chicago.mp4
     python analyze.py labels_file resources/cat.mp4
     python analyze.py shots gs://demomaker/gbikes_dinosaur.mp4
@@ -55,52 +54,6 @@ def analyze_explicit_content(path):
             likely_string[frame.pornography_likelihood]))
 
-
-def analyze_faces(path):
-    """ Detects faces given a GCS path. """
-    video_client = videointelligence.VideoIntelligenceServiceClient()
-    features = [videointelligence.enums.Feature.FACE_DETECTION]
-
-    config = videointelligence.types.FaceDetectionConfig(
-        include_bounding_boxes=True)
-    context = videointelligence.types.VideoContext(
-        face_detection_config=config)
-
-    operation = video_client.annotate_video(
-        path, features=features, video_context=context)
-    print('\nProcessing video for face annotations:')
-
-    result = operation.result(timeout=600)
-    print('\nFinished processing.')
-
-    # first result is retrieved because a single video was processed
-    faces = result.annotation_results[0].face_annotations
-    for face_id, face in enumerate(faces):
-        print('Face {}'.format(face_id))
-        print('Thumbnail size: {}'.format(len(face.thumbnail)))
-
-        for segment_id, segment in enumerate(face.segments):
-            start_time = (segment.segment.start_time_offset.seconds +
-                          segment.segment.start_time_offset.nanos / 1e9)
-            end_time = (segment.segment.end_time_offset.seconds +
-                        segment.segment.end_time_offset.nanos / 1e9)
-            positions = '{}s to {}s'.format(start_time, end_time)
-            print('\tSegment {}: {}'.format(segment_id, positions))
-
-        # There are typically many frames for each face,
-        # here we print information on only the first frame.
-        frame = face.frames[0]
-        time_offset = (frame.time_offset.seconds +
-                       frame.time_offset.nanos / 1e9)
-        box = frame.normalized_bounding_boxes[0]
-        print('First frame time offset: {}s'.format(time_offset))
-        print('First frame normalized bounding box:')
-        print('\tleft: {}'.format(box.left))
-        print('\ttop: {}'.format(box.top))
-        print('\tright: {}'.format(box.right))
-        print('\tbottom: {}'.format(box.bottom))
-        print('\n')
-
 
 def analyze_labels(path):
     """ Detects labels given a GCS path. """
     video_client = videointelligence.VideoIntelligenceServiceClient()
@@ -275,9 +228,6 @@ def analyze_shots(path):
         description=__doc__,
         formatter_class=argparse.RawDescriptionHelpFormatter)
     subparsers = parser.add_subparsers(dest='command')
-    analyze_faces_parser = subparsers.add_parser(
-        'faces', help=analyze_faces.__doc__)
-    analyze_faces_parser.add_argument('path')
     analyze_labels_parser = subparsers.add_parser(
         'labels', help=analyze_labels.__doc__)
     analyze_labels_parser.add_argument('path')
@@ -293,8 +243,6 @@ def analyze_shots(path):
 
     args = parser.parse_args()
 
-    if args.command == 'faces':
-        analyze_faces(args.path)
     if args.command == 'labels':
         analyze_labels(args.path)
     if args.command == 'labels_file':
diff --git a/video/cloud-client/analyze/analyze_test.py b/video/cloud-client/analyze/analyze_test.py
index ba5255db2bf0..60a01789a1dc 100644
--- a/video/cloud-client/analyze/analyze_test.py
+++ b/video/cloud-client/analyze/analyze_test.py
@@ -23,7 +23,6 @@
 
 BUCKET = os.environ['CLOUD_STORAGE_BUCKET']
 LABELS_FILE_PATH = '/video/cat.mp4'
-FACES_FILE_PATH = '/video/googlework.mp4'
 EXPLICIT_CONTENT_FILE_PATH = '/video/cat.mp4'
 SHOTS_FILE_PATH = '/video/gbikes_dinosaur.mp4'
@@ -36,16 +35,6 @@ def test_analyze_shots(capsys):
     assert 'Shot 1:' in out
 
-
-@pytest.mark.xfail(reason='This feature is currently \
-    not visible to all projects.')
-@pytest.mark.slow
-def test_analyze_faces(capsys):
-    analyze.analyze_faces(
-        'gs://{}{}'.format(BUCKET, FACES_FILE_PATH))
-    out, _ = capsys.readouterr()
-    assert 'Thumbnail' in out
-
 
 @pytest.mark.slow
 def test_analyze_labels(capsys):
     analyze.analyze_labels(
diff --git a/video/cloud-client/faces/README.rst b/video/cloud-client/faces/README.rst
deleted file mode 100644
index 1102e5f1e06f..000000000000
--- a/video/cloud-client/faces/README.rst
+++ /dev/null
@@ -1,110 +0,0 @@
-.. This file is automatically generated. Do not edit this file directly.
-
-Google Cloud Video Intelligence API Python Samples
-===============================================================================
-
-.. image:: https://gstatic.com/cloudssh/images/open-btn.png
-   :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=video/cloud-client/faces/README.rst
-
-
-This directory contains samples for Google Cloud Video Intelligence API. `Google Cloud Video Intelligence API`_ allows developers to easily integrate feature detection in video.
-
-
-
-
-.. _Google Cloud Video Intelligence API: https://cloud.google.com/video-intelligence/docs
-
-Setup
--------------------------------------------------------------------------------
-
-
-Authentication
-++++++++++++++
-
-This sample requires you to have authentication setup. Refer to the
-`Authentication Getting Started Guide`_ for instructions on setting up
-credentials for applications.
-
-.. _Authentication Getting Started Guide:
-    https://cloud.google.com/docs/authentication/getting-started
-
-Install Dependencies
-++++++++++++++++++++
-
-#. Install `pip`_ and `virtualenv`_ if you do not already have them. You may want to refer to the `Python Development Environment Setup Guide`_ for Google Cloud Platform for instructions.
-
-   .. _Python Development Environment Setup Guide:
-       https://cloud.google.com/python/setup
-
-#. Create a virtualenv. Samples are compatible with Python 2.7 and 3.4+.
-
-    .. code-block:: bash
-
-        $ virtualenv env
-        $ source env/bin/activate
-
-#. Install the dependencies needed to run the samples.
-
-    .. code-block:: bash
-
-        $ pip install -r requirements.txt
-
-.. _pip: https://pip.pypa.io/
-.. _virtualenv: https://virtualenv.pypa.io/
-
-Samples
--------------------------------------------------------------------------------
-
-faces
-+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-
-.. image:: https://gstatic.com/cloudssh/images/open-btn.png
-   :target: https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/GoogleCloudPlatform/python-docs-samples&page=editor&open_in_editor=video/cloud-client/faces/faces.py;video/cloud-client/faces/README.rst
-
-
-
-
-To run this sample:
-
-.. code-block:: bash
-
-    $ python faces.py
-
-    usage: faces.py [-h] path
-
-    This application demonstrates how to perform face
-    detection with the Google Cloud Video Intelligence API.
-
-    For more information, check out the documentation at
-    https://cloud.google.com/videointelligence/docs.
-
-    Usage Example:
-
-        python faces.py gs://demomaker/google_gmail.mp4
-
-    positional arguments:
-      path        GCS file path for face detection.
-
-    optional arguments:
-      -h, --help  show this help message and exit
-
-
-
-
-
-The client library
--------------------------------------------------------------------------------
-
-This sample uses the `Google Cloud Client Library for Python`_.
-You can read the documentation for more details on API usage and use GitHub
-to `browse the source`_ and `report issues`_.
-
-.. _Google Cloud Client Library for Python:
-   https://googlecloudplatform.github.io/google-cloud-python/
-.. _browse the source:
-   https://github.com/GoogleCloudPlatform/google-cloud-python
-.. _report issues:
-   https://github.com/GoogleCloudPlatform/google-cloud-python/issues
-
-
-.. _Google Cloud SDK: https://cloud.google.com/sdk/
\ No newline at end of file
diff --git a/video/cloud-client/faces/README.rst.in b/video/cloud-client/faces/README.rst.in
deleted file mode 100644
index 1eb41699c223..000000000000
--- a/video/cloud-client/faces/README.rst.in
+++ /dev/null
@@ -1,22 +0,0 @@
-# This file is used to generate README.rst
-
-product:
-  name: Google Cloud Video Intelligence API
-  short_name: Cloud Video Intelligence API
-  url: https://cloud.google.com/video-intelligence/docs
-  description: >
-    `Google Cloud Video Intelligence API`_ allows developers to easily
-    integrate feature detection in video.
-
-setup:
-- auth
-- install_deps
-
-samples:
-- name: faces
-  file: faces.py
-  show_help: True
-
-cloud_client_library: true
-
-folder: video/cloud-client/faces
\ No newline at end of file
diff --git a/video/cloud-client/faces/faces.py b/video/cloud-client/faces/faces.py
deleted file mode 100644
index 6f6be6cfafcd..000000000000
--- a/video/cloud-client/faces/faces.py
+++ /dev/null
@@ -1,77 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2017 Google Inc. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""This application demonstrates how to perform face
-detection with the Google Cloud Video Intelligence API.
-
-For more information, check out the documentation at
-https://cloud.google.com/videointelligence/docs.
-
-Usage Example:
-
-    python faces.py gs://demomaker/google_gmail.mp4
-
-"""
-
-# [START full_tutorial]
-# [START imports]
-import argparse
-
-from google.cloud import videointelligence
-# [END imports]
-
-
-def analyze_faces(path):
-    # [START construct_request]
-    """ Detects faces given a GCS path. """
-    video_client = videointelligence.VideoIntelligenceServiceClient()
-    features = [videointelligence.enums.Feature.FACE_DETECTION]
-    operation = video_client.annotate_video(path, features=features)
-    # [END construct_request]
-    print('\nProcessing video for face annotations:')
-
-    # [START check_operation]
-    result = operation.result(timeout=600)
-    print('\nFinished processing.')
-    # [END check_operation]
-
-    # [START parse_response]
-    # first result is retrieved because a single video was processed
-    faces = result.annotation_results[0].face_annotations
-    for face_id, face in enumerate(faces):
-        print('Thumbnail size: {}'.format(len(face.thumbnail)))
-
-        for segment_id, segment in enumerate(face.segments):
-            start_time = (segment.segment.start_time_offset.seconds +
-                          segment.segment.start_time_offset.nanos / 1e9)
-            end_time = (segment.segment.end_time_offset.seconds +
-                        segment.segment.end_time_offset.nanos / 1e9)
-            positions = '{}s to {}s'.format(start_time, end_time)
-            print('\tSegment {}: {}'.format(segment_id, positions))
-    # [END parse_response]
-
-
-if __name__ == '__main__':
-    # [START running_app]
-    parser = argparse.ArgumentParser(
-        description=__doc__,
-        formatter_class=argparse.RawDescriptionHelpFormatter)
-    parser.add_argument('path', help='GCS file path for face detection.')
-    args = parser.parse_args()
-
-    analyze_faces(args.path)
-    # [END running_app]
-# [END full_tutorial]
diff --git a/video/cloud-client/faces/faces_test.py b/video/cloud-client/faces/faces_test.py
deleted file mode 100644
index e85d8f26cdb5..000000000000
--- a/video/cloud-client/faces/faces_test.py
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/usr/bin/env python
-
-# Copyright 2017 Google, Inc
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-
-import pytest
-
-import faces
-
-
-BUCKET = os.environ['CLOUD_STORAGE_BUCKET']
-FACES_FILE_PATH = '/video/googlework.mp4'
-
-
-@pytest.mark.xfail(reason='This feature is currently \
-    not visible to all projects.')
-@pytest.mark.slow
-def test_work_video_faces(capsys):
-    faces.analyze_faces(
-        'gs://{}{}'.format(BUCKET, FACES_FILE_PATH))
-    out, _ = capsys.readouterr()
-    assert 'Thumbnail' in out
diff --git a/video/cloud-client/faces/requirements.txt b/video/cloud-client/faces/requirements.txt
deleted file mode 100644
index e2ad27b873e8..000000000000
--- a/video/cloud-client/faces/requirements.txt
+++ /dev/null
@@ -1 +0,0 @@
-google-cloud-videointelligence==1.0.1