diff --git a/videointelligence/samples/analyze/analyze.py b/videointelligence/samples/analyze/analyze.py index fcfa535c893f..e5e650a8daa9 100644 --- a/videointelligence/samples/analyze/analyze.py +++ b/videointelligence/samples/analyze/analyze.py @@ -35,16 +35,17 @@ import io from google.cloud import videointelligence -from google.cloud.videointelligence import enums def analyze_explicit_content(path): # [START video_analyze_explicit_content] """ Detects explicit content from the GCS path to a video. """ video_client = videointelligence.VideoIntelligenceServiceClient() - features = [videointelligence.enums.Feature.EXPLICIT_CONTENT_DETECTION] + features = [videointelligence.Feature.EXPLICIT_CONTENT_DETECTION] - operation = video_client.annotate_video(input_uri=path, features=features) + operation = video_client.annotate_video( + request={"features": features, "input_uri": path} + ) print("\nProcessing video for explicit content annotations:") result = operation.result(timeout=90) @@ -52,8 +53,8 @@ def analyze_explicit_content(path): # Retrieve first result because a single video was processed for frame in result.annotation_results[0].explicit_annotation.frames: - likelihood = enums.Likelihood(frame.pornography_likelihood) - frame_time = frame.time_offset.seconds + frame.time_offset.nanos / 1e9 + likelihood = videointelligence.Likelihood(frame.pornography_likelihood) + frame_time = frame.time_offset.seconds + frame.time_offset.microseconds / 1e6 print("Time: {}s".format(frame_time)) print("\tpornography: {}".format(likelihood.name)) # [END video_analyze_explicit_content] @@ -63,14 +64,14 @@ def analyze_labels(path): # [START video_analyze_labels_gcs] """ Detects labels given a GCS path. """ video_client = videointelligence.VideoIntelligenceServiceClient() - features = [videointelligence.enums.Feature.LABEL_DETECTION] + features = [videointelligence.Feature.LABEL_DETECTION] - mode = videointelligence.enums.LabelDetectionMode.SHOT_AND_FRAME_MODE - config = videointelligence.types.LabelDetectionConfig(label_detection_mode=mode) - context = videointelligence.types.VideoContext(label_detection_config=config) + mode = videointelligence.LabelDetectionMode.SHOT_AND_FRAME_MODE + config = videointelligence.LabelDetectionConfig(label_detection_mode=mode) + context = videointelligence.VideoContext(label_detection_config=config) operation = video_client.annotate_video( - input_uri=path, features=features, video_context=context + request={"features": features, "input_uri": path, "video_context": context} ) print("\nProcessing video for label annotations:") @@ -89,11 +90,11 @@ def analyze_labels(path): for i, segment in enumerate(segment_label.segments): start_time = ( segment.segment.start_time_offset.seconds - + segment.segment.start_time_offset.nanos / 1e9 + + segment.segment.start_time_offset.microseconds / 1e6 ) end_time = ( segment.segment.end_time_offset.seconds - + segment.segment.end_time_offset.nanos / 1e9 + + segment.segment.end_time_offset.microseconds / 1e6 ) positions = "{}s to {}s".format(start_time, end_time) confidence = segment.confidence @@ -113,11 +114,11 @@ def analyze_labels(path): for i, shot in enumerate(shot_label.segments): start_time = ( shot.segment.start_time_offset.seconds - + shot.segment.start_time_offset.nanos / 1e9 + + shot.segment.start_time_offset.microseconds / 1e6 ) end_time = ( shot.segment.end_time_offset.seconds - + shot.segment.end_time_offset.nanos / 1e9 + + shot.segment.end_time_offset.microseconds / 1e6 ) positions = "{}s to {}s".format(start_time, end_time) 
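The hunks above establish the migration pattern the rest of this patch repeats: enums and message types move from videointelligence.enums.* / videointelligence.types.* to the package root, annotate_video takes a single request dict, and Duration offsets become datetime.timedelta values read via seconds and microseconds instead of nanos. As a standalone sketch (not part of the patch; the GCS URI and timeout are placeholders), the 2.x call shape looks like this:

    from google.cloud import videointelligence

    def label_video(gcs_uri):
        """Minimal label detection against the 2.x surface (illustrative only)."""
        client = videointelligence.VideoIntelligenceServiceClient()

        # Enums hang off the package root and the call takes one request dict.
        operation = client.annotate_video(
            request={
                "features": [videointelligence.Feature.LABEL_DETECTION],
                "input_uri": gcs_uri,  # e.g. "gs://cloud-samples-data/video/cat.mp4"
            }
        )
        result = operation.result(timeout=120)

        for label in result.annotation_results[0].segment_label_annotations:
            for segment in label.segments:
                # Offsets are datetime.timedelta in 2.x, hence
                # seconds + microseconds / 1e6 instead of nanos / 1e9.
                start = segment.segment.start_time_offset
                end = segment.segment.end_time_offset
                print(
                    "{}: {:.3f}s to {:.3f}s".format(
                        label.entity.description,
                        start.seconds + start.microseconds / 1e6,
                        end.seconds + end.microseconds / 1e6,
                    )
                )

For offsets shorter than a day, timedelta.total_seconds() returns the same value and reads a little more directly.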
confidence = shot.confidence @@ -137,7 +138,7 @@ def analyze_labels(path): # Each frame_label_annotation has many frames, # here we print information only about the first frame. frame = frame_label.frames[0] - time_offset = frame.time_offset.seconds + frame.time_offset.nanos / 1e9 + time_offset = frame.time_offset.seconds + frame.time_offset.microseconds / 1e6 print("\tFirst frame time offset: {}s".format(time_offset)) print("\tFirst frame confidence: {}".format(frame.confidence)) print("\n") @@ -148,13 +149,13 @@ def analyze_labels_file(path): # [START video_analyze_labels] """Detect labels given a file path.""" video_client = videointelligence.VideoIntelligenceServiceClient() - features = [videointelligence.enums.Feature.LABEL_DETECTION] + features = [videointelligence.Feature.LABEL_DETECTION] with io.open(path, "rb") as movie: input_content = movie.read() operation = video_client.annotate_video( - features=features, input_content=input_content + request={"features": features, "input_content": input_content} ) print("\nProcessing video for label annotations:") @@ -173,11 +174,11 @@ def analyze_labels_file(path): for i, segment in enumerate(segment_label.segments): start_time = ( segment.segment.start_time_offset.seconds - + segment.segment.start_time_offset.nanos / 1e9 + + segment.segment.start_time_offset.microseconds / 1e6 ) end_time = ( segment.segment.end_time_offset.seconds - + segment.segment.end_time_offset.nanos / 1e9 + + segment.segment.end_time_offset.microseconds / 1e6 ) positions = "{}s to {}s".format(start_time, end_time) confidence = segment.confidence @@ -197,11 +198,11 @@ def analyze_labels_file(path): for i, shot in enumerate(shot_label.segments): start_time = ( shot.segment.start_time_offset.seconds - + shot.segment.start_time_offset.nanos / 1e9 + + shot.segment.start_time_offset.microseconds / 1e6 ) end_time = ( shot.segment.end_time_offset.seconds - + shot.segment.end_time_offset.nanos / 1e9 + + shot.segment.end_time_offset.microseconds / 1e6 ) positions = "{}s to {}s".format(start_time, end_time) confidence = shot.confidence @@ -221,7 +222,7 @@ def analyze_labels_file(path): # Each frame_label_annotation has many frames, # here we print information only about the first frame. frame = frame_label.frames[0] - time_offset = frame.time_offset.seconds + frame.time_offset.nanos / 1e9 + time_offset = frame.time_offset.seconds + frame.time_offset.microseconds / 1e6 print("\tFirst frame time offset: {}s".format(time_offset)) print("\tFirst frame confidence: {}".format(frame.confidence)) print("\n") @@ -232,8 +233,10 @@ def analyze_shots(path): # [START video_analyze_shots] """ Detects camera shot changes. 
""" video_client = videointelligence.VideoIntelligenceServiceClient() - features = [videointelligence.enums.Feature.SHOT_CHANGE_DETECTION] - operation = video_client.annotate_video(input_uri=path, features=features) + features = [videointelligence.Feature.SHOT_CHANGE_DETECTION] + operation = video_client.annotate_video( + request={"features": features, "input_uri": path} + ) print("\nProcessing video for shot change annotations:") result = operation.result(timeout=90) @@ -241,8 +244,12 @@ def analyze_shots(path): # first result is retrieved because a single video was processed for i, shot in enumerate(result.annotation_results[0].shot_annotations): - start_time = shot.start_time_offset.seconds + shot.start_time_offset.nanos / 1e9 - end_time = shot.end_time_offset.seconds + shot.end_time_offset.nanos / 1e9 + start_time = ( + shot.start_time_offset.seconds + shot.start_time_offset.microseconds / 1e6 + ) + end_time = ( + shot.end_time_offset.seconds + shot.end_time_offset.microseconds / 1e6 + ) print("\tShot {}: {} to {}".format(i, start_time, end_time)) # [END video_analyze_shots] @@ -253,17 +260,19 @@ def speech_transcription(path): from google.cloud import videointelligence video_client = videointelligence.VideoIntelligenceServiceClient() - features = [videointelligence.enums.Feature.SPEECH_TRANSCRIPTION] + features = [videointelligence.Feature.SPEECH_TRANSCRIPTION] - config = videointelligence.types.SpeechTranscriptionConfig( + config = videointelligence.SpeechTranscriptionConfig( language_code="en-US", enable_automatic_punctuation=True ) - video_context = videointelligence.types.VideoContext( - speech_transcription_config=config - ) + video_context = videointelligence.VideoContext(speech_transcription_config=config) operation = video_client.annotate_video( - input_uri=path, features=features, video_context=video_context + request={ + "features": features, + "input_uri": path, + "video_context": video_context, + } ) print("\nProcessing video for speech transcription.") @@ -292,8 +301,8 @@ def speech_transcription(path): end_time = word_info.end_time print( "\t{}s - {}s: {}".format( - start_time.seconds + start_time.nanos * 1e-9, - end_time.seconds + end_time.nanos * 1e-9, + start_time.seconds + start_time.microseconds * 1e-6, + end_time.seconds + end_time.microseconds * 1e-6, word, ) ) @@ -306,9 +315,11 @@ def video_detect_text_gcs(input_uri): from google.cloud import videointelligence video_client = videointelligence.VideoIntelligenceServiceClient() - features = [videointelligence.enums.Feature.TEXT_DETECTION] + features = [videointelligence.Feature.TEXT_DETECTION] - operation = video_client.annotate_video(input_uri=input_uri, features=features) + operation = video_client.annotate_video( + request={"features": features, "input_uri": input_uri} + ) print("\nProcessing video for text detection.") result = operation.result(timeout=600) @@ -325,8 +336,8 @@ def video_detect_text_gcs(input_uri): end_time = text_segment.segment.end_time_offset print( "start_time: {}, end_time: {}".format( - start_time.seconds + start_time.nanos * 1e-9, - end_time.seconds + end_time.nanos * 1e-9, + start_time.seconds + start_time.microseconds * 1e-6, + end_time.seconds + end_time.microseconds * 1e-6, ) ) @@ -337,7 +348,7 @@ def video_detect_text_gcs(input_uri): time_offset = frame.time_offset print( "Time offset for the first frame: {}".format( - time_offset.seconds + time_offset.nanos * 1e-9 + time_offset.seconds + time_offset.microseconds * 1e-6 ) ) print("Rotated Bounding Box Vertices:") @@ -352,16 +363,18 @@ 
def video_detect_text(path): from google.cloud import videointelligence video_client = videointelligence.VideoIntelligenceServiceClient() - features = [videointelligence.enums.Feature.TEXT_DETECTION] - video_context = videointelligence.types.VideoContext() + features = [videointelligence.Feature.TEXT_DETECTION] + video_context = videointelligence.VideoContext() with io.open(path, "rb") as file: input_content = file.read() operation = video_client.annotate_video( - input_content=input_content, # the bytes of the video file - features=features, - video_context=video_context, + request={ + "features": features, + "input_content": input_content, + "video_context": video_context, + } ) print("\nProcessing video for text detection.") @@ -379,8 +392,8 @@ def video_detect_text(path): end_time = text_segment.segment.end_time_offset print( "start_time: {}, end_time: {}".format( - start_time.seconds + start_time.nanos * 1e-9, - end_time.seconds + end_time.nanos * 1e-9, + start_time.seconds + start_time.microseconds * 1e-6, + end_time.seconds + end_time.microseconds * 1e-6, ) ) @@ -391,7 +404,7 @@ def video_detect_text(path): time_offset = frame.time_offset print( "Time offset for the first frame: {}".format( - time_offset.seconds + time_offset.nanos * 1e-9 + time_offset.seconds + time_offset.microseconds * 1e-6 ) ) print("Rotated Bounding Box Vertices:") @@ -406,8 +419,10 @@ def track_objects_gcs(gcs_uri): from google.cloud import videointelligence video_client = videointelligence.VideoIntelligenceServiceClient() - features = [videointelligence.enums.Feature.OBJECT_TRACKING] - operation = video_client.annotate_video(input_uri=gcs_uri, features=features) + features = [videointelligence.Feature.OBJECT_TRACKING] + operation = video_client.annotate_video( + request={"features": features, "input_uri": gcs_uri} + ) print("\nProcessing video for object annotations.") result = operation.result(timeout=300) @@ -424,9 +439,9 @@ def track_objects_gcs(gcs_uri): print( "Segment: {}s to {}s".format( object_annotation.segment.start_time_offset.seconds - + object_annotation.segment.start_time_offset.nanos / 1e9, + + object_annotation.segment.start_time_offset.microseconds / 1e6, object_annotation.segment.end_time_offset.seconds - + object_annotation.segment.end_time_offset.nanos / 1e9, + + object_annotation.segment.end_time_offset.microseconds / 1e6, ) ) @@ -437,7 +452,7 @@ def track_objects_gcs(gcs_uri): box = frame.normalized_bounding_box print( "Time offset of the first frame: {}s".format( - frame.time_offset.seconds + frame.time_offset.nanos / 1e9 + frame.time_offset.seconds + frame.time_offset.microseconds / 1e6 ) ) print("Bounding box position:") @@ -455,13 +470,13 @@ def track_objects(path): from google.cloud import videointelligence video_client = videointelligence.VideoIntelligenceServiceClient() - features = [videointelligence.enums.Feature.OBJECT_TRACKING] + features = [videointelligence.Feature.OBJECT_TRACKING] with io.open(path, "rb") as file: input_content = file.read() operation = video_client.annotate_video( - input_content=input_content, features=features + request={"features": features, "input_content": input_content} ) print("\nProcessing video for object annotations.") @@ -480,9 +495,9 @@ def track_objects(path): print( "Segment: {}s to {}s".format( object_annotation.segment.start_time_offset.seconds - + object_annotation.segment.start_time_offset.nanos / 1e9, + + object_annotation.segment.start_time_offset.microseconds / 1e6, object_annotation.segment.end_time_offset.seconds - + 
object_annotation.segment.end_time_offset.nanos / 1e9, + + object_annotation.segment.end_time_offset.microseconds / 1e6, ) ) @@ -493,7 +508,7 @@ def track_objects(path): box = frame.normalized_bounding_box print( "Time offset of the first frame: {}s".format( - frame.time_offset.seconds + frame.time_offset.nanos / 1e9 + frame.time_offset.seconds + frame.time_offset.microseconds / 1e6 ) ) print("Bounding box position:") diff --git a/videointelligence/samples/analyze/analyze_test.py b/videointelligence/samples/analyze/analyze_test.py index 6623fd326983..2da6cb760f1b 100644 --- a/videointelligence/samples/analyze/analyze_test.py +++ b/videointelligence/samples/analyze/analyze_test.py @@ -74,7 +74,7 @@ def test_speech_transcription(capsys): def test_detect_text_gcs(capsys): analyze.video_detect_text_gcs("gs://cloud-samples-data/video/googlework_tiny.mp4") out, _ = capsys.readouterr() - assert 'Text' in out + assert "Text" in out # Flaky timeout @@ -82,7 +82,7 @@ def test_detect_text_gcs(capsys): def test_detect_text(capsys): analyze.video_detect_text("resources/googlework_tiny.mp4") out, _ = capsys.readouterr() - assert 'Text' in out + assert "Text" in out # Flaky timeout diff --git a/videointelligence/samples/analyze/beta_snippets.py b/videointelligence/samples/analyze/beta_snippets.py index 33d221ce4b90..0e484f8a6293 100644 --- a/videointelligence/samples/analyze/beta_snippets.py +++ b/videointelligence/samples/analyze/beta_snippets.py @@ -52,17 +52,19 @@ def speech_transcription(input_uri): video_client = videointelligence.VideoIntelligenceServiceClient() - features = [videointelligence.enums.Feature.SPEECH_TRANSCRIPTION] + features = [videointelligence.Feature.SPEECH_TRANSCRIPTION] - config = videointelligence.types.SpeechTranscriptionConfig( + config = videointelligence.SpeechTranscriptionConfig( language_code="en-US", enable_automatic_punctuation=True ) - video_context = videointelligence.types.VideoContext( - speech_transcription_config=config - ) + video_context = videointelligence.VideoContext(speech_transcription_config=config) operation = video_client.annotate_video( - input_uri=input_uri, features=features, video_context=video_context + request={ + "features": features, + "input_uri": input_uri, + "video_context": video_context, + } ) print("\nProcessing video for speech transcription.") @@ -91,8 +93,8 @@ def speech_transcription(input_uri): end_time = word_info.end_time print( "\t{}s - {}s: {}".format( - start_time.seconds + start_time.nanos * 1e-9, - end_time.seconds + end_time.nanos * 1e-9, + start_time.seconds + start_time.microseconds * 1e-6, + end_time.seconds + end_time.microseconds * 1e-6, word, ) ) @@ -105,9 +107,11 @@ def video_detect_text_gcs(input_uri): from google.cloud import videointelligence_v1p2beta1 as videointelligence video_client = videointelligence.VideoIntelligenceServiceClient() - features = [videointelligence.enums.Feature.TEXT_DETECTION] + features = [videointelligence.Feature.TEXT_DETECTION] - operation = video_client.annotate_video(input_uri=input_uri, features=features) + operation = video_client.annotate_video( + request={"features": features, "input_uri": input_uri} + ) print("\nProcessing video for text detection.") result = operation.result(timeout=300) @@ -125,8 +129,8 @@ def video_detect_text_gcs(input_uri): end_time = text_segment.segment.end_time_offset print( "start_time: {}, end_time: {}".format( - start_time.seconds + start_time.nanos * 1e-9, - end_time.seconds + end_time.nanos * 1e-9, + start_time.seconds + start_time.microseconds * 1e-6, + 
end_time.seconds + end_time.microseconds * 1e-6, ) ) @@ -137,7 +141,7 @@ def video_detect_text_gcs(input_uri): time_offset = frame.time_offset print( "Time offset for the first frame: {}".format( - time_offset.seconds + time_offset.nanos * 1e-9 + time_offset.seconds + time_offset.microseconds * 1e-6 ) ) print("Rotated Bounding Box Vertices:") @@ -153,16 +157,18 @@ def video_detect_text(path): from google.cloud import videointelligence_v1p2beta1 as videointelligence video_client = videointelligence.VideoIntelligenceServiceClient() - features = [videointelligence.enums.Feature.TEXT_DETECTION] - video_context = videointelligence.types.VideoContext() + features = [videointelligence.Feature.TEXT_DETECTION] + video_context = videointelligence.VideoContext() with io.open(path, "rb") as file: input_content = file.read() operation = video_client.annotate_video( - input_content=input_content, # the bytes of the video file - features=features, - video_context=video_context, + request={ + "features": features, + "input_content": input_content, + "video_context": video_context, + } ) print("\nProcessing video for text detection.") @@ -181,8 +187,8 @@ def video_detect_text(path): end_time = text_segment.segment.end_time_offset print( "start_time: {}, end_time: {}".format( - start_time.seconds + start_time.nanos * 1e-9, - end_time.seconds + end_time.nanos * 1e-9, + start_time.seconds + start_time.microseconds * 1e-6, + end_time.seconds + end_time.microseconds * 1e-6, ) ) @@ -193,7 +199,7 @@ def video_detect_text(path): time_offset = frame.time_offset print( "Time offset for the first frame: {}".format( - time_offset.seconds + time_offset.nanos * 1e-9 + time_offset.seconds + time_offset.microseconds * 1e-6 ) ) print("Rotated Bounding Box Vertices:") @@ -211,9 +217,13 @@ def track_objects_gcs(gcs_uri): # It is recommended to use location_id as 'us-east1' for the best latency # due to different types of processors used in this region and others. 
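On the new surface, location_id is no longer a separate keyword argument; it is just another field of AnnotateVideoRequest, so it travels inside the same request dict, which is what the next hunks do. A condensed sketch of that shape (illustrative URI, not part of the patch):

    from google.cloud import videointelligence_v1p2beta1 as videointelligence

    def track_objects_minimal(gcs_uri):
        """Object tracking with an explicit processing region (illustrative only)."""
        client = videointelligence.VideoIntelligenceServiceClient()

        operation = client.annotate_video(
            request={
                "features": [videointelligence.Feature.OBJECT_TRACKING],
                "input_uri": gcs_uri,  # placeholder, e.g. "gs://my-bucket/video.mp4"
                # location_id is a plain field on AnnotateVideoRequest, so it
                # rides along in the same request dict.
                "location_id": "us-east1",
            }
        )
        result = operation.result(timeout=300)

        for annotation in result.annotation_results[0].object_annotations:
            print(annotation.entity.description, annotation.confidence)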
video_client = videointelligence.VideoIntelligenceServiceClient() - features = [videointelligence.enums.Feature.OBJECT_TRACKING] + features = [videointelligence.Feature.OBJECT_TRACKING] operation = video_client.annotate_video( - input_uri=gcs_uri, features=features, location_id="us-east1" + request={ + "features": features, + "input_uri": gcs_uri, + "location_id": "us-east1", + } ) print("\nProcessing video for object annotations.") @@ -233,9 +243,9 @@ def track_objects_gcs(gcs_uri): print( "Segment: {}s to {}s".format( object_annotation.segment.start_time_offset.seconds - + object_annotation.segment.start_time_offset.nanos / 1e9, + + object_annotation.segment.start_time_offset.microseconds / 1e6, object_annotation.segment.end_time_offset.seconds - + object_annotation.segment.end_time_offset.nanos / 1e9, + + object_annotation.segment.end_time_offset.microseconds / 1e6, ) ) @@ -246,7 +256,7 @@ def track_objects_gcs(gcs_uri): box = frame.normalized_bounding_box print( "Time offset of the first frame: {}s".format( - frame.time_offset.seconds + frame.time_offset.nanos / 1e9 + frame.time_offset.seconds + frame.time_offset.microseconds / 1e6 ) ) print("Bounding box position:") @@ -265,7 +275,7 @@ def track_objects(path): from google.cloud import videointelligence_v1p2beta1 as videointelligence video_client = videointelligence.VideoIntelligenceServiceClient() - features = [videointelligence.enums.Feature.OBJECT_TRACKING] + features = [videointelligence.Feature.OBJECT_TRACKING] with io.open(path, "rb") as file: input_content = file.read() @@ -273,7 +283,11 @@ def track_objects(path): # It is recommended to use location_id as 'us-east1' for the best latency # due to different types of processors used in this region and others. operation = video_client.annotate_video( - input_content=input_content, features=features, location_id="us-east1" + request={ + "features": features, + "input_content": input_content, + "location_id": "us-east1", + } ) print("\nProcessing video for object annotations.") @@ -293,9 +307,9 @@ def track_objects(path): print( "Segment: {}s to {}s".format( object_annotation.segment.start_time_offset.seconds - + object_annotation.segment.start_time_offset.nanos / 1e9, + + object_annotation.segment.start_time_offset.microseconds / 1e6, object_annotation.segment.end_time_offset.seconds - + object_annotation.segment.end_time_offset.nanos / 1e9, + + object_annotation.segment.end_time_offset.microseconds / 1e6, ) ) @@ -306,7 +320,7 @@ def track_objects(path): box = frame.normalized_bounding_box print( "Time offset of the first frame: {}s".format( - frame.time_offset.seconds + frame.time_offset.nanos / 1e9 + frame.time_offset.seconds + frame.time_offset.microseconds / 1e6 ) ) print("Bounding box position:") @@ -328,12 +342,12 @@ def detect_labels_streaming(path): client = videointelligence.StreamingVideoIntelligenceServiceClient() # Set streaming config. - config = videointelligence.types.StreamingVideoConfig( - feature=(videointelligence.enums.StreamingFeature.STREAMING_LABEL_DETECTION) + config = videointelligence.StreamingVideoConfig( + feature=(videointelligence.StreamingFeature.STREAMING_LABEL_DETECTION) ) # config_request should be the first in the stream of requests. 
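Every streaming sample below repeats the same idiom: the first StreamingAnnotateVideoRequest carries only the video_config, and each later request carries a chunk of bytes. Condensed into one standalone generator (the chunk size and file path are illustrative, not part of the patch):

    import io

    from google.cloud import videointelligence_v1p3beta1 as videointelligence

    def streaming_requests(path, chunk_size=5 * 1024 * 1024):
        """Yield the config request first, then the video bytes in chunks."""
        config = videointelligence.StreamingVideoConfig(
            feature=videointelligence.StreamingFeature.STREAMING_LABEL_DETECTION
        )
        # The first request in the stream must carry the config and nothing else.
        yield videointelligence.StreamingAnnotateVideoRequest(video_config=config)

        with io.open(path, "rb") as video_file:
            while True:
                chunk = video_file.read(chunk_size)
                if not chunk:
                    break
                # Every subsequent request carries only raw bytes.
                yield videointelligence.StreamingAnnotateVideoRequest(
                    input_content=chunk
                )

    # Usage sketch:
    # client = videointelligence.StreamingVideoIntelligenceServiceClient()
    # responses = client.streaming_annotate_video(requests=streaming_requests(path))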
- config_request = videointelligence.types.StreamingAnnotateVideoRequest( + config_request = videointelligence.StreamingAnnotateVideoRequest( video_config=config ) @@ -352,9 +366,7 @@ def detect_labels_streaming(path): def stream_generator(): yield config_request for chunk in stream: - yield videointelligence.types.StreamingAnnotateVideoRequest( - input_content=chunk - ) + yield videointelligence.StreamingAnnotateVideoRequest(input_content=chunk) requests = stream_generator() @@ -380,7 +392,9 @@ def stream_generator(): for annotation in label_annotations: # Each annotation has one frame, which has a timeoffset. frame = annotation.frames[0] - time_offset = frame.time_offset.seconds + frame.time_offset.nanos / 1e9 + time_offset = ( + frame.time_offset.seconds + frame.time_offset.microseconds / 1e6 + ) description = annotation.entity.description confidence = annotation.frames[0].confidence @@ -400,14 +414,12 @@ def detect_shot_change_streaming(path): client = videointelligence.StreamingVideoIntelligenceServiceClient() # Set streaming config. - config = videointelligence.types.StreamingVideoConfig( - feature=( - videointelligence.enums.StreamingFeature.STREAMING_SHOT_CHANGE_DETECTION - ) + config = videointelligence.StreamingVideoConfig( + feature=(videointelligence.StreamingFeature.STREAMING_SHOT_CHANGE_DETECTION) ) # config_request should be the first in the stream of requests. - config_request = videointelligence.types.StreamingAnnotateVideoRequest( + config_request = videointelligence.StreamingAnnotateVideoRequest( video_config=config ) @@ -426,9 +438,7 @@ def detect_shot_change_streaming(path): def stream_generator(): yield config_request for chunk in stream: - yield videointelligence.types.StreamingAnnotateVideoRequest( - input_content=chunk - ) + yield videointelligence.StreamingAnnotateVideoRequest(input_content=chunk) requests = stream_generator() @@ -448,11 +458,11 @@ def stream_generator(): for annotation in response.annotation_results.shot_annotations: start = ( annotation.start_time_offset.seconds - + annotation.start_time_offset.nanos / 1e9 + + annotation.start_time_offset.microseconds / 1e6 ) end = ( annotation.end_time_offset.seconds - + annotation.end_time_offset.nanos / 1e9 + + annotation.end_time_offset.microseconds / 1e6 ) print("Shot: {}s to {}s".format(start, end)) @@ -468,12 +478,12 @@ def track_objects_streaming(path): client = videointelligence.StreamingVideoIntelligenceServiceClient() # Set streaming config. - config = videointelligence.types.StreamingVideoConfig( - feature=(videointelligence.enums.StreamingFeature.STREAMING_OBJECT_TRACKING) + config = videointelligence.StreamingVideoConfig( + feature=(videointelligence.StreamingFeature.STREAMING_OBJECT_TRACKING) ) # config_request should be the first in the stream of requests. - config_request = videointelligence.types.StreamingAnnotateVideoRequest( + config_request = videointelligence.StreamingAnnotateVideoRequest( video_config=config ) @@ -492,9 +502,7 @@ def track_objects_streaming(path): def stream_generator(): yield config_request for chunk in stream: - yield videointelligence.types.StreamingAnnotateVideoRequest( - input_content=chunk - ) + yield videointelligence.StreamingAnnotateVideoRequest(input_content=chunk) requests = stream_generator() @@ -520,7 +528,9 @@ def stream_generator(): for annotation in object_annotations: # Each annotation has one frame, which has a timeoffset. 
frame = annotation.frames[0] - time_offset = frame.time_offset.seconds + frame.time_offset.nanos / 1e9 + time_offset = ( + frame.time_offset.seconds + frame.time_offset.microseconds / 1e6 + ) description = annotation.entity.description confidence = annotation.confidence @@ -557,14 +567,14 @@ def detect_explicit_content_streaming(path): client = videointelligence.StreamingVideoIntelligenceServiceClient() # Set streaming config. - config = videointelligence.types.StreamingVideoConfig( + config = videointelligence.StreamingVideoConfig( feature=( - videointelligence.enums.StreamingFeature.STREAMING_EXPLICIT_CONTENT_DETECTION + videointelligence.StreamingFeature.STREAMING_EXPLICIT_CONTENT_DETECTION ) ) # config_request should be the first in the stream of requests. - config_request = videointelligence.types.StreamingAnnotateVideoRequest( + config_request = videointelligence.StreamingAnnotateVideoRequest( video_config=config ) @@ -583,9 +593,7 @@ def detect_explicit_content_streaming(path): def stream_generator(): yield config_request for chunk in stream: - yield videointelligence.types.StreamingAnnotateVideoRequest( - input_content=chunk - ) + yield videointelligence.StreamingAnnotateVideoRequest(input_content=chunk) requests = stream_generator() @@ -603,8 +611,10 @@ def stream_generator(): break for frame in response.annotation_results.explicit_annotation.frames: - time_offset = frame.time_offset.seconds + frame.time_offset.nanos / 1e9 - pornography_likelihood = videointelligence.enums.Likelihood( + time_offset = ( + frame.time_offset.seconds + frame.time_offset.microseconds / 1e6 + ) + pornography_likelihood = videointelligence.Likelihood( frame.pornography_likelihood ) @@ -624,19 +634,19 @@ def annotation_to_storage_streaming(path, output_uri): # Set streaming config specifying the output_uri. # The output_uri is the prefix of the actual output files. - storage_config = videointelligence.types.StreamingStorageConfig( + storage_config = videointelligence.StreamingStorageConfig( enable_storage_annotation_result=True, annotation_result_storage_directory=output_uri, ) # Here we use label detection as an example. # All features support output to GCS. - config = videointelligence.types.StreamingVideoConfig( - feature=(videointelligence.enums.StreamingFeature.STREAMING_LABEL_DETECTION), + config = videointelligence.StreamingVideoConfig( + feature=(videointelligence.StreamingFeature.STREAMING_LABEL_DETECTION), storage_config=storage_config, ) # config_request should be the first in the stream of requests. - config_request = videointelligence.types.StreamingAnnotateVideoRequest( + config_request = videointelligence.StreamingAnnotateVideoRequest( video_config=config ) @@ -655,9 +665,7 @@ def annotation_to_storage_streaming(path, output_uri): def stream_generator(): yield config_request for chunk in stream: - yield videointelligence.types.StreamingAnnotateVideoRequest( - input_content=chunk - ) + yield videointelligence.StreamingAnnotateVideoRequest(input_content=chunk) requests = stream_generator() @@ -682,7 +690,6 @@ def streaming_automl_classification(path, project_id, model_id): import io from google.cloud import videointelligence_v1p3beta1 as videointelligence - from google.cloud.videointelligence_v1p3beta1 import enums # path = 'path_to_file' # project_id = 'gcp_project_id' @@ -695,17 +702,17 @@ def streaming_automl_classification(path, project_id, model_id): ) # Here we use classification as an example. 
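The following hunk builds the AutoML classification config. Condensed, with hypothetical project and model identifiers and the model path assumed to follow the usual projects/{project}/locations/us-central1/models/{model} form, it amounts to:

    from google.cloud import videointelligence_v1p3beta1 as videointelligence

    # Hypothetical identifiers; substitute your own project and AutoML model.
    project_id = "my-project"
    model_id = "my-model-id"
    model_path = "projects/{}/locations/us-central1/models/{}".format(
        project_id, model_id
    )

    automl_config = videointelligence.StreamingAutomlClassificationConfig(
        model_name=model_path
    )
    video_config = videointelligence.StreamingVideoConfig(
        feature=videointelligence.StreamingFeature.STREAMING_AUTOML_CLASSIFICATION,
        automl_classification_config=automl_config,
    )
    # As with the other streaming features, this config becomes the first
    # StreamingAnnotateVideoRequest in the request stream.
    config_request = videointelligence.StreamingAnnotateVideoRequest(
        video_config=video_config
    )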
- automl_config = videointelligence.types.StreamingAutomlClassificationConfig( + automl_config = videointelligence.StreamingAutomlClassificationConfig( model_name=model_path ) - video_config = videointelligence.types.StreamingVideoConfig( - feature=enums.StreamingFeature.STREAMING_AUTOML_CLASSIFICATION, + video_config = videointelligence.StreamingVideoConfig( + feature=videointelligence.StreamingFeature.STREAMING_AUTOML_CLASSIFICATION, automl_classification_config=automl_config, ) # config_request should be the first in the stream of requests. - config_request = videointelligence.types.StreamingAnnotateVideoRequest( + config_request = videointelligence.StreamingAnnotateVideoRequest( video_config=video_config ) @@ -727,9 +734,7 @@ def streaming_automl_classification(path, project_id, model_id): def stream_generator(): yield config_request for chunk in stream: - yield videointelligence.types.StreamingAnnotateVideoRequest( - input_content=chunk - ) + yield videointelligence.StreamingAnnotateVideoRequest(input_content=chunk) requests = stream_generator() diff --git a/videointelligence/samples/analyze/beta_snippets_test.py b/videointelligence/samples/analyze/beta_snippets_test.py index bcb6c7bc489e..1c9da43acf12 100644 --- a/videointelligence/samples/analyze/beta_snippets_test.py +++ b/videointelligence/samples/analyze/beta_snippets_test.py @@ -15,13 +15,13 @@ # limitations under the License. import os +from urllib.request import urlopen import uuid import backoff from google.api_core.exceptions import Conflict from google.cloud import storage import pytest -from six.moves.urllib.request import urlopen import beta_snippets @@ -55,7 +55,7 @@ def video_path(tmpdir_factory): @pytest.fixture(scope="function") def bucket(): # Create a temporaty bucket to store annotation output. - bucket_name = f'tmp-{uuid.uuid4().hex}' + bucket_name = f"tmp-{uuid.uuid4().hex}" storage_client = storage.Client() bucket = storage_client.create_bucket(bucket_name) @@ -128,7 +128,7 @@ def test_detect_text(capsys): in_file = "./resources/googlework_tiny.mp4" beta_snippets.video_detect_text(in_file) out, _ = capsys.readouterr() - assert 'Text' in out + assert "Text" in out # Flaky timeout @@ -137,7 +137,7 @@ def test_detect_text_gcs(capsys): in_file = "gs://python-docs-samples-tests/video/googlework_tiny.mp4" beta_snippets.video_detect_text_gcs(in_file) out, _ = capsys.readouterr() - assert 'Text' in out + assert "Text" in out # Flaky InvalidArgument diff --git a/videointelligence/samples/analyze/noxfile.py b/videointelligence/samples/analyze/noxfile.py index b90eef00f2d9..ab2c49227c3b 100644 --- a/videointelligence/samples/analyze/noxfile.py +++ b/videointelligence/samples/analyze/noxfile.py @@ -37,28 +37,25 @@ TEST_CONFIG = { # You can opt out from the test for specific Python versions. - 'ignored_versions': ["2.7"], - + "ignored_versions": ["2.7"], # Old samples are opted out of enforcing Python type hints # All new samples should feature them - 'enforce_type_hints': False, - + "enforce_type_hints": False, # An envvar key for determining the project id to use. Change it # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a # build specific Cloud project. You can also use your own string # to use your own Cloud project. - 'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT', + "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', - # A dictionary you want to inject into your test. Don't put any # secrets here. These values will override predefined values. 
- 'envs': {}, + "envs": {}, } try: # Ensure we can import noxfile_config in the project's directory. - sys.path.append('.') + sys.path.append(".") from noxfile_config import TEST_CONFIG_OVERRIDE except ImportError as e: print("No user noxfile_config found: detail: {}".format(e)) @@ -73,12 +70,12 @@ def get_pytest_env_vars(): ret = {} # Override the GCLOUD_PROJECT and the alias. - env_key = TEST_CONFIG['gcloud_project_env'] + env_key = TEST_CONFIG["gcloud_project_env"] # This should error out if not set. - ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key] + ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key] # Apply user supplied envs. - ret.update(TEST_CONFIG['envs']) + ret.update(TEST_CONFIG["envs"]) return ret @@ -87,7 +84,7 @@ def get_pytest_env_vars(): ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"] # Any default versions that should be ignored. -IGNORED_VERSIONS = TEST_CONFIG['ignored_versions'] +IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) @@ -136,7 +133,7 @@ def _determine_local_import_names(start_dir): @nox.session def lint(session): - if not TEST_CONFIG['enforce_type_hints']: + if not TEST_CONFIG["enforce_type_hints"]: session.install("flake8", "flake8-import-order") else: session.install("flake8", "flake8-import-order", "flake8-annotations") @@ -145,9 +142,11 @@ def lint(session): args = FLAKE8_COMMON_ARGS + [ "--application-import-names", ",".join(local_names), - "." + ".", ] session.run("flake8", *args) + + # # Black # @@ -160,6 +159,7 @@ def blacken(session): session.run("black", *python_files) + # # Sample Tests # @@ -199,9 +199,9 @@ def py(session): if session.python in TESTED_VERSIONS: _session_tests(session) else: - session.skip("SKIPPED: {} tests are disabled for this sample.".format( - session.python - )) + session.skip( + "SKIPPED: {} tests are disabled for this sample.".format(session.python) + ) # diff --git a/videointelligence/samples/analyze/video_detect_faces_beta.py b/videointelligence/samples/analyze/video_detect_faces_beta.py index 9d882d488e94..6e6a11f4d428 100644 --- a/videointelligence/samples/analyze/video_detect_faces_beta.py +++ b/videointelligence/samples/analyze/video_detect_faces_beta.py @@ -27,16 +27,18 @@ def detect_faces(local_file_path="path/to/your/video-file.mp4"): input_content = f.read() # Configure the request - config = videointelligence.types.FaceDetectionConfig( + config = videointelligence.FaceDetectionConfig( include_bounding_boxes=True, include_attributes=True ) - context = videointelligence.types.VideoContext(face_detection_config=config) + context = videointelligence.VideoContext(face_detection_config=config) # Start the asynchronous request operation = client.annotate_video( - input_content=input_content, - features=[videointelligence.enums.Feature.FACE_DETECTION], - video_context=context, + request={ + "features": [videointelligence.Feature.FACE_DETECTION], + "input_content": input_content, + "video_context": context, + } ) print("\nProcessing video for face detection annotations.") @@ -53,9 +55,9 @@ def detect_faces(local_file_path="path/to/your/video-file.mp4"): print( "Segment: {}s to {}s".format( track.segment.start_time_offset.seconds - + track.segment.start_time_offset.nanos / 1e9, + + track.segment.start_time_offset.microseconds / 1e6, track.segment.end_time_offset.seconds - + track.segment.end_time_offset.nanos / 1e9, + + track.segment.end_time_offset.microseconds / 1e6, ) ) diff --git 
a/videointelligence/samples/analyze/video_detect_faces_gcs_beta.py b/videointelligence/samples/analyze/video_detect_faces_gcs_beta.py index 262184b673ab..6646d6a54e33 100644 --- a/videointelligence/samples/analyze/video_detect_faces_gcs_beta.py +++ b/videointelligence/samples/analyze/video_detect_faces_gcs_beta.py @@ -22,16 +22,18 @@ def detect_faces(gcs_uri="gs://YOUR_BUCKET_ID/path/to/your/video.mp4"): client = videointelligence.VideoIntelligenceServiceClient() # Configure the request - config = videointelligence.types.FaceDetectionConfig( + config = videointelligence.FaceDetectionConfig( include_bounding_boxes=True, include_attributes=True ) - context = videointelligence.types.VideoContext(face_detection_config=config) + context = videointelligence.VideoContext(face_detection_config=config) # Start the asynchronous request operation = client.annotate_video( - input_uri=gcs_uri, - features=[videointelligence.enums.Feature.FACE_DETECTION], - video_context=context, + request={ + "features": [videointelligence.Feature.FACE_DETECTION], + "input_uri": gcs_uri, + "video_context": context, + } ) print("\nProcessing video for face detection annotations.") @@ -48,9 +50,9 @@ def detect_faces(gcs_uri="gs://YOUR_BUCKET_ID/path/to/your/video.mp4"): print( "Segment: {}s to {}s".format( track.segment.start_time_offset.seconds - + track.segment.start_time_offset.nanos / 1e9, + + track.segment.start_time_offset.microseconds / 1e6, track.segment.end_time_offset.seconds - + track.segment.end_time_offset.nanos / 1e9, + + track.segment.end_time_offset.microseconds / 1e6, ) ) diff --git a/videointelligence/samples/analyze/video_detect_logo.py b/videointelligence/samples/analyze/video_detect_logo.py index 319e1c8745a8..fbd0018f0c0b 100644 --- a/videointelligence/samples/analyze/video_detect_logo.py +++ b/videointelligence/samples/analyze/video_detect_logo.py @@ -26,9 +26,11 @@ def detect_logo(local_file_path="path/to/your/video.mp4"): with io.open(local_file_path, "rb") as f: input_content = f.read() - features = [videointelligence.enums.Feature.LOGO_RECOGNITION] + features = [videointelligence.Feature.LOGO_RECOGNITION] - operation = client.annotate_video(input_content=input_content, features=features) + operation = client.annotate_video( + request={"features": features, "input_content": input_content} + ) print(u"Waiting for operation to complete...") response = operation.result() @@ -53,13 +55,13 @@ def detect_logo(local_file_path="path/to/your/video.mp4"): print( u"\n\tStart Time Offset : {}.{}".format( track.segment.start_time_offset.seconds, - track.segment.start_time_offset.nanos, + track.segment.start_time_offset.microseconds * 1000, ) ) print( u"\tEnd Time Offset : {}.{}".format( track.segment.end_time_offset.seconds, - track.segment.end_time_offset.nanos, + track.segment.end_time_offset.microseconds * 1000, ) ) print(u"\tConfidence : {}".format(track.confidence)) @@ -91,12 +93,14 @@ def detect_logo(local_file_path="path/to/your/video.mp4"): for segment in logo_recognition_annotation.segments: print( u"\n\tStart Time Offset : {}.{}".format( - segment.start_time_offset.seconds, segment.start_time_offset.nanos, + segment.start_time_offset.seconds, + segment.start_time_offset.microseconds * 1000, ) ) print( u"\tEnd Time Offset : {}.{}".format( - segment.end_time_offset.seconds, segment.end_time_offset.nanos, + segment.end_time_offset.seconds, + segment.end_time_offset.microseconds * 1000, ) ) diff --git a/videointelligence/samples/analyze/video_detect_logo_gcs.py 
b/videointelligence/samples/analyze/video_detect_logo_gcs.py index a999888555f3..68db2e8bb634 100644 --- a/videointelligence/samples/analyze/video_detect_logo_gcs.py +++ b/videointelligence/samples/analyze/video_detect_logo_gcs.py @@ -21,9 +21,11 @@ def detect_logo_gcs(input_uri="gs://YOUR_BUCKET_ID/path/to/your/file.mp4"): client = videointelligence.VideoIntelligenceServiceClient() - features = [videointelligence.enums.Feature.LOGO_RECOGNITION] + features = [videointelligence.Feature.LOGO_RECOGNITION] - operation = client.annotate_video(input_uri=input_uri, features=features) + operation = client.annotate_video( + request={"features": features, "input_uri": input_uri} + ) print(u"Waiting for operation to complete...") response = operation.result() @@ -49,13 +51,13 @@ def detect_logo_gcs(input_uri="gs://YOUR_BUCKET_ID/path/to/your/file.mp4"): print( u"\n\tStart Time Offset : {}.{}".format( track.segment.start_time_offset.seconds, - track.segment.start_time_offset.nanos, + track.segment.start_time_offset.microseconds * 1000, ) ) print( u"\tEnd Time Offset : {}.{}".format( track.segment.end_time_offset.seconds, - track.segment.end_time_offset.nanos, + track.segment.end_time_offset.microseconds * 1000, ) ) print(u"\tConfidence : {}".format(track.confidence)) @@ -86,12 +88,14 @@ def detect_logo_gcs(input_uri="gs://YOUR_BUCKET_ID/path/to/your/file.mp4"): for segment in logo_recognition_annotation.segments: print( u"\n\tStart Time Offset : {}.{}".format( - segment.start_time_offset.seconds, segment.start_time_offset.nanos, + segment.start_time_offset.seconds, + segment.start_time_offset.microseconds * 1000, ) ) print( u"\tEnd Time Offset : {}.{}".format( - segment.end_time_offset.seconds, segment.end_time_offset.nanos, + segment.end_time_offset.seconds, + segment.end_time_offset.microseconds * 1000, ) ) diff --git a/videointelligence/samples/analyze/video_detect_person_beta.py b/videointelligence/samples/analyze/video_detect_person_beta.py index a7afd34cab49..143d1b7d1514 100644 --- a/videointelligence/samples/analyze/video_detect_person_beta.py +++ b/videointelligence/samples/analyze/video_detect_person_beta.py @@ -36,9 +36,11 @@ def detect_person(local_file_path="path/to/your/video-file.mp4"): # Start the asynchronous request operation = client.annotate_video( - input_content=input_content, - features=[videointelligence.enums.Feature.PERSON_DETECTION], - video_context=context, + request={ + "features": [videointelligence.Feature.PERSON_DETECTION], + "input_content": input_content, + "video_context": context, + } ) print("\nProcessing video for person detection annotations.") @@ -55,9 +57,9 @@ def detect_person(local_file_path="path/to/your/video-file.mp4"): print( "Segment: {}s to {}s".format( track.segment.start_time_offset.seconds - + track.segment.start_time_offset.nanos / 1e9, + + track.segment.start_time_offset.microseconds / 1e6, track.segment.end_time_offset.seconds - + track.segment.end_time_offset.nanos / 1e9, + + track.segment.end_time_offset.microseconds / 1e6, ) ) diff --git a/videointelligence/samples/analyze/video_detect_person_gcs_beta.py b/videointelligence/samples/analyze/video_detect_person_gcs_beta.py index 9bd2d007b73f..7496fd254ca6 100644 --- a/videointelligence/samples/analyze/video_detect_person_gcs_beta.py +++ b/videointelligence/samples/analyze/video_detect_person_gcs_beta.py @@ -31,9 +31,11 @@ def detect_person(gcs_uri="gs://YOUR_BUCKET_ID/path/to/your/video.mp4"): # Start the asynchronous request operation = client.annotate_video( - input_uri=gcs_uri, - 
features=[videointelligence.enums.Feature.PERSON_DETECTION], - video_context=context, + request={ + "features": [videointelligence.Feature.PERSON_DETECTION], + "input_uri": gcs_uri, + "video_context": context, + } ) print("\nProcessing video for person detection annotations.") @@ -50,9 +52,9 @@ def detect_person(gcs_uri="gs://YOUR_BUCKET_ID/path/to/your/video.mp4"): print( "Segment: {}s to {}s".format( track.segment.start_time_offset.seconds - + track.segment.start_time_offset.nanos / 1e9, + + track.segment.start_time_offset.microseconds / 1e6, track.segment.end_time_offset.seconds - + track.segment.end_time_offset.nanos / 1e9, + + track.segment.end_time_offset.microseconds / 1e6, ) ) diff --git a/videointelligence/samples/labels/labels.py b/videointelligence/samples/labels/labels.py index cfb4ad0c4259..8f8c31ddd0c1 100644 --- a/videointelligence/samples/labels/labels.py +++ b/videointelligence/samples/labels/labels.py @@ -32,6 +32,7 @@ import argparse from google.cloud import videointelligence + # [END video_label_tutorial_imports] @@ -39,44 +40,50 @@ def analyze_labels(path): """ Detects labels given a GCS path. """ # [START video_label_tutorial_construct_request] video_client = videointelligence.VideoIntelligenceServiceClient() - features = [videointelligence.enums.Feature.LABEL_DETECTION] - operation = video_client.annotate_video(input_uri=path, features=features) + features = [videointelligence.Feature.LABEL_DETECTION] + operation = video_client.annotate_video( + request={"features": features, "input_uri": path} + ) # [END video_label_tutorial_construct_request] - print('\nProcessing video for label annotations:') + print("\nProcessing video for label annotations:") # [START video_label_tutorial_check_operation] result = operation.result(timeout=90) - print('\nFinished processing.') + print("\nFinished processing.") # [END video_label_tutorial_check_operation] # [START video_label_tutorial_parse_response] segment_labels = result.annotation_results[0].segment_label_annotations for i, segment_label in enumerate(segment_labels): - print('Video label description: {}'.format( - segment_label.entity.description)) + print("Video label description: {}".format(segment_label.entity.description)) for category_entity in segment_label.category_entities: - print('\tLabel category description: {}'.format( - category_entity.description)) + print( + "\tLabel category description: {}".format(category_entity.description) + ) for i, segment in enumerate(segment_label.segments): - start_time = (segment.segment.start_time_offset.seconds + - segment.segment.start_time_offset.nanos / 1e9) - end_time = (segment.segment.end_time_offset.seconds + - segment.segment.end_time_offset.nanos / 1e9) - positions = '{}s to {}s'.format(start_time, end_time) + start_time = ( + segment.segment.start_time_offset.seconds + + segment.segment.start_time_offset.microseconds / 1e6 + ) + end_time = ( + segment.segment.end_time_offset.seconds + + segment.segment.end_time_offset.microseconds / 1e6 + ) + positions = "{}s to {}s".format(start_time, end_time) confidence = segment.confidence - print('\tSegment {}: {}'.format(i, positions)) - print('\tConfidence: {}'.format(confidence)) - print('\n') + print("\tSegment {}: {}".format(i, positions)) + print("\tConfidence: {}".format(confidence)) + print("\n") # [END video_label_tutorial_parse_response] -if __name__ == '__main__': +if __name__ == "__main__": # [START video_label_tutorial_run_application] parser = argparse.ArgumentParser( - description=__doc__, - 
formatter_class=argparse.RawDescriptionHelpFormatter) - parser.add_argument('path', help='GCS file path for label detection.') + description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter + ) + parser.add_argument("path", help="GCS file path for label detection.") args = parser.parse_args() analyze_labels(args.path) diff --git a/videointelligence/samples/labels/labels_test.py b/videointelligence/samples/labels/labels_test.py index 1249f5b95cbe..c678ddb43adc 100644 --- a/videointelligence/samples/labels/labels_test.py +++ b/videointelligence/samples/labels/labels_test.py @@ -21,6 +21,6 @@ @pytest.mark.slow def test_feline_video_labels(capsys): - labels.analyze_labels('gs://cloud-samples-data/video/cat.mp4') + labels.analyze_labels("gs://cloud-samples-data/video/cat.mp4") out, _ = capsys.readouterr() - assert 'Video label description: cat' in out + assert "Video label description: cat" in out diff --git a/videointelligence/samples/labels/noxfile.py b/videointelligence/samples/labels/noxfile.py index b90eef00f2d9..ab2c49227c3b 100644 --- a/videointelligence/samples/labels/noxfile.py +++ b/videointelligence/samples/labels/noxfile.py @@ -37,28 +37,25 @@ TEST_CONFIG = { # You can opt out from the test for specific Python versions. - 'ignored_versions': ["2.7"], - + "ignored_versions": ["2.7"], # Old samples are opted out of enforcing Python type hints # All new samples should feature them - 'enforce_type_hints': False, - + "enforce_type_hints": False, # An envvar key for determining the project id to use. Change it # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a # build specific Cloud project. You can also use your own string # to use your own Cloud project. - 'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT', + "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', - # A dictionary you want to inject into your test. Don't put any # secrets here. These values will override predefined values. - 'envs': {}, + "envs": {}, } try: # Ensure we can import noxfile_config in the project's directory. - sys.path.append('.') + sys.path.append(".") from noxfile_config import TEST_CONFIG_OVERRIDE except ImportError as e: print("No user noxfile_config found: detail: {}".format(e)) @@ -73,12 +70,12 @@ def get_pytest_env_vars(): ret = {} # Override the GCLOUD_PROJECT and the alias. - env_key = TEST_CONFIG['gcloud_project_env'] + env_key = TEST_CONFIG["gcloud_project_env"] # This should error out if not set. - ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key] + ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key] # Apply user supplied envs. - ret.update(TEST_CONFIG['envs']) + ret.update(TEST_CONFIG["envs"]) return ret @@ -87,7 +84,7 @@ def get_pytest_env_vars(): ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"] # Any default versions that should be ignored. -IGNORED_VERSIONS = TEST_CONFIG['ignored_versions'] +IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) @@ -136,7 +133,7 @@ def _determine_local_import_names(start_dir): @nox.session def lint(session): - if not TEST_CONFIG['enforce_type_hints']: + if not TEST_CONFIG["enforce_type_hints"]: session.install("flake8", "flake8-import-order") else: session.install("flake8", "flake8-import-order", "flake8-annotations") @@ -145,9 +142,11 @@ def lint(session): args = FLAKE8_COMMON_ARGS + [ "--application-import-names", ",".join(local_names), - "." 
+ ".", ] session.run("flake8", *args) + + # # Black # @@ -160,6 +159,7 @@ def blacken(session): session.run("black", *python_files) + # # Sample Tests # @@ -199,9 +199,9 @@ def py(session): if session.python in TESTED_VERSIONS: _session_tests(session) else: - session.skip("SKIPPED: {} tests are disabled for this sample.".format( - session.python - )) + session.skip( + "SKIPPED: {} tests are disabled for this sample.".format(session.python) + ) # diff --git a/videointelligence/samples/quickstart/noxfile.py b/videointelligence/samples/quickstart/noxfile.py index b90eef00f2d9..ab2c49227c3b 100644 --- a/videointelligence/samples/quickstart/noxfile.py +++ b/videointelligence/samples/quickstart/noxfile.py @@ -37,28 +37,25 @@ TEST_CONFIG = { # You can opt out from the test for specific Python versions. - 'ignored_versions': ["2.7"], - + "ignored_versions": ["2.7"], # Old samples are opted out of enforcing Python type hints # All new samples should feature them - 'enforce_type_hints': False, - + "enforce_type_hints": False, # An envvar key for determining the project id to use. Change it # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a # build specific Cloud project. You can also use your own string # to use your own Cloud project. - 'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT', + "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', - # A dictionary you want to inject into your test. Don't put any # secrets here. These values will override predefined values. - 'envs': {}, + "envs": {}, } try: # Ensure we can import noxfile_config in the project's directory. - sys.path.append('.') + sys.path.append(".") from noxfile_config import TEST_CONFIG_OVERRIDE except ImportError as e: print("No user noxfile_config found: detail: {}".format(e)) @@ -73,12 +70,12 @@ def get_pytest_env_vars(): ret = {} # Override the GCLOUD_PROJECT and the alias. - env_key = TEST_CONFIG['gcloud_project_env'] + env_key = TEST_CONFIG["gcloud_project_env"] # This should error out if not set. - ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key] + ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key] # Apply user supplied envs. - ret.update(TEST_CONFIG['envs']) + ret.update(TEST_CONFIG["envs"]) return ret @@ -87,7 +84,7 @@ def get_pytest_env_vars(): ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"] # Any default versions that should be ignored. -IGNORED_VERSIONS = TEST_CONFIG['ignored_versions'] +IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) @@ -136,7 +133,7 @@ def _determine_local_import_names(start_dir): @nox.session def lint(session): - if not TEST_CONFIG['enforce_type_hints']: + if not TEST_CONFIG["enforce_type_hints"]: session.install("flake8", "flake8-import-order") else: session.install("flake8", "flake8-import-order", "flake8-annotations") @@ -145,9 +142,11 @@ def lint(session): args = FLAKE8_COMMON_ARGS + [ "--application-import-names", ",".join(local_names), - "." 
+ ".", ] session.run("flake8", *args) + + # # Black # @@ -160,6 +159,7 @@ def blacken(session): session.run("black", *python_files) + # # Sample Tests # @@ -199,9 +199,9 @@ def py(session): if session.python in TESTED_VERSIONS: _session_tests(session) else: - session.skip("SKIPPED: {} tests are disabled for this sample.".format( - session.python - )) + session.skip( + "SKIPPED: {} tests are disabled for this sample.".format(session.python) + ) # diff --git a/videointelligence/samples/quickstart/quickstart.py b/videointelligence/samples/quickstart/quickstart.py index 19d126f01cb3..89f75d866825 100644 --- a/videointelligence/samples/quickstart/quickstart.py +++ b/videointelligence/samples/quickstart/quickstart.py @@ -28,9 +28,12 @@ def run_quickstart(): from google.cloud import videointelligence video_client = videointelligence.VideoIntelligenceServiceClient() - features = [videointelligence.enums.Feature.LABEL_DETECTION] + features = [videointelligence.Feature.LABEL_DETECTION] operation = video_client.annotate_video( - "gs://cloud-samples-data/video/cat.mp4", features=features + request={ + "features": features, + "input_uri": "gs://cloud-samples-data/video/cat.mp4", + } ) print("\nProcessing video for label annotations:") @@ -49,11 +52,11 @@ def run_quickstart(): for i, segment in enumerate(segment_label.segments): start_time = ( segment.segment.start_time_offset.seconds - + segment.segment.start_time_offset.nanos / 1e9 + + segment.segment.start_time_offset.microseconds / 1e6 ) end_time = ( segment.segment.end_time_offset.seconds - + segment.segment.end_time_offset.nanos / 1e9 + + segment.segment.end_time_offset.microseconds / 1e6 ) positions = "{}s to {}s".format(start_time, end_time) confidence = segment.confidence diff --git a/videointelligence/samples/shotchange/noxfile.py b/videointelligence/samples/shotchange/noxfile.py index b90eef00f2d9..e38c11b7a7e2 100644 --- a/videointelligence/samples/shotchange/noxfile.py +++ b/videointelligence/samples/shotchange/noxfile.py @@ -37,28 +37,25 @@ TEST_CONFIG = { # You can opt out from the test for specific Python versions. - 'ignored_versions': ["2.7"], - + "ignored_versions": ["2.7"], # Old samples are opted out of enforcing Python type hints # All new samples should feature them - 'enforce_type_hints': False, - + "enforce_type_hints": False, # An envvar key for determining the project id to use. Change it # to 'BUILD_SPECIFIC_GCLOUD_PROJECT' if you want to opt in using a # build specific Cloud project. You can also use your own string # to use your own Cloud project. - 'gcloud_project_env': 'GOOGLE_CLOUD_PROJECT', + "gcloud_project_env": "GOOGLE_CLOUD_PROJECT", # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT', - # A dictionary you want to inject into your test. Don't put any # secrets here. These values will override predefined values. - 'envs': {}, + "envs": {}, } try: # Ensure we can import noxfile_config in the project's directory. - sys.path.append('.') + sys.path.append(".") from noxfile_config import TEST_CONFIG_OVERRIDE except ImportError as e: print("No user noxfile_config found: detail: {}".format(e)) @@ -73,12 +70,13 @@ def get_pytest_env_vars(): ret = {} # Override the GCLOUD_PROJECT and the alias. - env_key = TEST_CONFIG['gcloud_project_env'] + env_key = TEST_CONFIG["gcloud_project_env"] # This should error out if not set. - ret['GOOGLE_CLOUD_PROJECT'] = os.environ[env_key] + ret["GOOGLE_CLOUD_PROJECT"] = os.environ[env_key] # Apply user supplied envs. 
- ret.update(TEST_CONFIG['envs']) + ret.update(TEST_CONFIG["envs"]) + return ret @@ -87,7 +85,7 @@ def get_pytest_env_vars(): ALL_VERSIONS = ["2.7", "3.6", "3.7", "3.8"] # Any default versions that should be ignored. -IGNORED_VERSIONS = TEST_CONFIG['ignored_versions'] +IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] TESTED_VERSIONS = sorted([v for v in ALL_VERSIONS if v not in IGNORED_VERSIONS]) @@ -136,7 +134,7 @@ def _determine_local_import_names(start_dir): @nox.session def lint(session): - if not TEST_CONFIG['enforce_type_hints']: + if not TEST_CONFIG["enforce_type_hints"]: session.install("flake8", "flake8-import-order") else: session.install("flake8", "flake8-import-order", "flake8-annotations") @@ -145,9 +143,11 @@ def lint(session): args = FLAKE8_COMMON_ARGS + [ "--application-import-names", ",".join(local_names), - "." + ".", ] session.run("flake8", *args) + + # # Black # @@ -160,6 +160,7 @@ def blacken(session): session.run("black", *python_files) + # # Sample Tests # @@ -199,9 +200,9 @@ def py(session): if session.python in TESTED_VERSIONS: _session_tests(session) else: - session.skip("SKIPPED: {} tests are disabled for this sample.".format( - session.python - )) + session.skip( + "SKIPPED: {} tests are disabled for this sample.".format(session.python) + ) # diff --git a/videointelligence/samples/shotchange/shotchange.py b/videointelligence/samples/shotchange/shotchange.py index 40edf0012cec..241289f96a62 100644 --- a/videointelligence/samples/shotchange/shotchange.py +++ b/videointelligence/samples/shotchange/shotchange.py @@ -31,6 +31,7 @@ import argparse from google.cloud import videointelligence + # [END video_shot_tutorial_imports] @@ -38,32 +39,37 @@ def analyze_shots(path): """ Detects camera shot changes. """ # [START video_shot_tutorial_construct_request] video_client = videointelligence.VideoIntelligenceServiceClient() - features = [videointelligence.enums.Feature.SHOT_CHANGE_DETECTION] - operation = video_client.annotate_video(input_uri=path, features=features) + features = [videointelligence.Feature.SHOT_CHANGE_DETECTION] + operation = video_client.annotate_video( + request={"features": features, "input_uri": path} + ) # [END video_shot_tutorial_construct_request] - print('\nProcessing video for shot change annotations:') + print("\nProcessing video for shot change annotations:") # [START video_shot_tutorial_check_operation] result = operation.result(timeout=120) - print('\nFinished processing.') + print("\nFinished processing.") + # [END video_shot_tutorial_check_operation] # [START video_shot_tutorial_parse_response] for i, shot in enumerate(result.annotation_results[0].shot_annotations): - start_time = (shot.start_time_offset.seconds + - shot.start_time_offset.nanos / 1e9) - end_time = (shot.end_time_offset.seconds + - shot.end_time_offset.nanos / 1e9) - print('\tShot {}: {} to {}'.format(i, start_time, end_time)) + start_time = ( + shot.start_time_offset.seconds + shot.start_time_offset.microseconds / 1e6 + ) + end_time = ( + shot.end_time_offset.seconds + shot.end_time_offset.microseconds / 1e6 + ) + print("\tShot {}: {} to {}".format(i, start_time, end_time)) # [END video_shot_tutorial_parse_response] -if __name__ == '__main__': +if __name__ == "__main__": # [START video_shot_tutorial_run_application] parser = argparse.ArgumentParser( - description=__doc__, - formatter_class=argparse.RawDescriptionHelpFormatter) - parser.add_argument('path', help='GCS path for shot change detection.') + description=__doc__, 
formatter_class=argparse.RawDescriptionHelpFormatter + ) + parser.add_argument("path", help="GCS path for shot change detection.") args = parser.parse_args() analyze_shots(args.path) diff --git a/videointelligence/samples/shotchange/shotchange_test.py b/videointelligence/samples/shotchange/shotchange_test.py index 0722e0c66aaf..bb480351673d 100644 --- a/videointelligence/samples/shotchange/shotchange_test.py +++ b/videointelligence/samples/shotchange/shotchange_test.py @@ -21,7 +21,6 @@ @pytest.mark.slow def test_shots_dino(capsys): - shotchange.analyze_shots( - 'gs://cloud-samples-data/video/gbikes_dinosaur.mp4') + shotchange.analyze_shots("gs://cloud-samples-data/video/gbikes_dinosaur.mp4") out, _ = capsys.readouterr() - assert 'Shot 1:' in out + assert "Shot 1:" in out
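All of the offset arithmetic in this patch follows from the offsets now being datetime.timedelta values. For offsets shorter than a day, seconds + microseconds / 1e6 and total_seconds() are interchangeable; a tiny standalone helper (not part of the patch) makes the equivalence concrete:

    import datetime

    def offset_seconds(offset):
        """Convert a 2.x time offset (a datetime.timedelta) to fractional seconds."""
        # Equivalent to offset.seconds + offset.microseconds / 1e6 for videos
        # shorter than a day, since timedelta.seconds wraps at 24 hours.
        return offset.total_seconds()

    shot_start = datetime.timedelta(seconds=12, microseconds=345000)
    shot_end = datetime.timedelta(seconds=15, microseconds=500000)
    print("Shot: {}s to {}s".format(offset_seconds(shot_start), offset_seconds(shot_end)))
    # Prints: Shot: 12.345s to 15.5s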