Commit 050ea11

gcf-owl-bot[bot] authored and dandhlee committed
chore(python): run blacken session for all directories with a noxfile (#133)
Source-Link: googleapis/synthtool@bc0de6e
Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:39ad8c0570e4f5d2d3124a509de4fe975e799e2b97e0f58aed88f8880d5a8b60
Co-authored-by: Owl Bot <gcf-owl-bot[bot]@users.noreply.github.com>
1 parent 674fbea commit 050ea11
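
The change itself is mechanical: it comes from re-running the Black formatter through the repository's nox "blacken" session. As a rough, hedged sketch only (not this repository's actual noxfile), such a session in a synthtool-managed noxfile typically looks like the following; the BLACK_VERSION pin and the target path are assumptions, not values taken from this commit.

# Hedged sketch of a nox "blacken" session in the style used by
# synthtool-managed noxfiles. BLACK_VERSION and the "." target path
# are assumptions, not values taken from this repository.
import nox

BLACK_VERSION = "black==22.3.0"  # assumed pin


@nox.session
def blacken(session):
    """Format Python sources with Black."""
    session.install(BLACK_VERSION)
    # Rewrites files in place; Black reports which files it changed.
    session.run("black", ".")

Running "nox -s blacken" with such a session in place rewrites files in place, which is what produces the quote normalization and line re-wrapping shown in the two diffs below.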

2 files changed (+27 −25 lines)

media-translation/snippets/translate_from_file_test.py

Lines changed: 3 additions & 4 deletions
@@ -17,12 +17,11 @@
 
 import translate_from_file
 
-RESOURCES = os.path.join(os.path.dirname(__file__), 'resources')
+RESOURCES = os.path.join(os.path.dirname(__file__), "resources")
 
 
 def test_translate_streaming(capsys):
-    translate_from_file.translate_from_file(
-        os.path.join(RESOURCES, 'audio.raw'))
+    translate_from_file.translate_from_file(os.path.join(RESOURCES, "audio.raw"))
     out, err = capsys.readouterr()
 
-    assert re.search(r'Partial translation', out, re.DOTALL | re.I)
+    assert re.search(r"Partial translation", out, re.DOTALL | re.I)
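
Both changes in this file are pure Black output: single quotes become double quotes, and the wrapped call is collapsed onto one line because it fits within Black's default line length. As a hedged illustration (assuming only that the black package is installed), the same normalization can be reproduced with Black's public Python API:

# Hedged sketch: reproduce the quote normalization from the hunk above using
# Black's public API (black.format_str / black.FileMode). The source string is
# the pre-change line from this diff.
import black

src = "RESOURCES = os.path.join(os.path.dirname(__file__), 'resources')\n"
print(black.format_str(src, mode=black.FileMode()), end="")
# Prints: RESOURCES = os.path.join(os.path.dirname(__file__), "resources")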

media-translation/snippets/translate_from_mic.py

Lines changed: 24 additions & 21 deletions
@@ -48,8 +48,10 @@ def __enter__(self):
         self._audio_interface = pyaudio.PyAudio()
         self._audio_stream = self._audio_interface.open(
             format=pyaudio.paInt16,
-            channels=1, rate=self._rate,
-            input=True, frames_per_buffer=self._chunk,
+            channels=1,
+            rate=self._rate,
+            input=True,
+            frames_per_buffer=self._chunk,
             # Run the audio stream asynchronously to fill the buffer object.
             # This is necessary so that the input device's buffer doesn't
             # overflow while the calling thread makes network requests, etc.
@@ -97,7 +99,7 @@ def generator(self):
                 except queue.Empty:
                     break
 
-            yield b''.join(data)
+            yield b"".join(data)
 
 
 def listen_print_loop(responses):
@@ -106,45 +108,46 @@ def listen_print_loop(responses):
     The responses passed is a generator that will block until a response
     is provided by the server.
     """
-    translation = ''
+    translation = ""
     for response in responses:
         # Once the transcription settles, the response contains the
         # END_OF_SINGLE_UTTERANCE event.
-        if (response.speech_event_type ==
-                SpeechEventType.END_OF_SINGLE_UTTERANCE):
+        if response.speech_event_type == SpeechEventType.END_OF_SINGLE_UTTERANCE:
 
-            print(u'\nFinal translation: {0}'.format(translation))
+            print(u"\nFinal translation: {0}".format(translation))
             return 0
 
         result = response.result
         translation = result.text_translation_result.translation
 
-        print(u'\nPartial translation: {0}'.format(translation))
+        print(u"\nPartial translation: {0}".format(translation))
 
 
 def do_translation_loop():
-    print('Begin speaking...')
+    print("Begin speaking...")
 
     client = media.SpeechTranslationServiceClient()
 
     speech_config = media.TranslateSpeechConfig(
-        audio_encoding='linear16',
-        source_language_code='en-US',
-        target_language_code='es-ES')
+        audio_encoding="linear16",
+        source_language_code="en-US",
+        target_language_code="es-ES",
+    )
 
     config = media.StreamingTranslateSpeechConfig(
-        audio_config=speech_config, single_utterance=True)
+        audio_config=speech_config, single_utterance=True
+    )
 
     # The first request contains the configuration.
     # Note that audio_content is explicitly set to None.
-    first_request = media.StreamingTranslateSpeechRequest(
-        streaming_config=config)
+    first_request = media.StreamingTranslateSpeechRequest(streaming_config=config)
 
     with MicrophoneStream(RATE, CHUNK) as stream:
         audio_generator = stream.generator()
-        mic_requests = (media.StreamingTranslateSpeechRequest(
-            audio_content=content)
-            for content in audio_generator)
+        mic_requests = (
+            media.StreamingTranslateSpeechRequest(audio_content=content)
+            for content in audio_generator
+        )
 
         requests = itertools.chain(iter([first_request]), mic_requests)
 
@@ -159,14 +162,14 @@ def do_translation_loop():
 def main():
     while True:
         print()
-        option = input('Press any key to translate or \'q\' to quit: ')
+        option = input("Press any key to translate or 'q' to quit: ")
 
-        if option.lower() == 'q':
+        if option.lower() == "q":
             break
 
         do_translation_loop()
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
 # [END mediatranslation_translate_from_mic]
