diff --git a/speech/betaFeatures.js b/speech/betaFeatures.js
index 8f4ee55eea..72ab47848d 100644
--- a/speech/betaFeatures.js
+++ b/speech/betaFeatures.js
@@ -23,7 +23,7 @@
 'use strict';
 
-function speechTranscribeDiarization(fileName) {
+async function speechTranscribeDiarization(fileName) {
   // [START speech_transcribe_diarization_beta]
   const fs = require('fs');
@@ -56,32 +56,25 @@ function speechTranscribeDiarization(fileName) {
     audio: audio,
   };
 
-  client
-    .recognize(request)
-    .then(data => {
-      const response = data[0];
-      const transcription = response.results
-        .map(result => result.alternatives[0].transcript)
-        .join('\n');
-      console.log(`Transcription: ${transcription}`);
-      console.log(`Speaker Diarization:`);
-      const result = response.results[response.results.length - 1];
-      const wordsInfo = result.alternatives[0].words;
-      // Note: The transcript within each result is separate and sequential per result.
-      // However, the words list within an alternative includes all the words
-      // from all the results thus far. Thus, to get all the words with speaker
-      // tags, you only have to take the words list from the last result:
-      wordsInfo.forEach(a =>
-        console.log(` word: ${a.word}, speakerTag: ${a.speakerTag}`)
-      );
-    })
-    .catch(err => {
-      console.error('ERROR:', err);
-    });
+  const [response] = await client.recognize(request);
+  const transcription = response.results
+    .map(result => result.alternatives[0].transcript)
+    .join('\n');
+  console.log(`Transcription: ${transcription}`);
+  console.log(`Speaker Diarization:`);
+  const result = response.results[response.results.length - 1];
+  const wordsInfo = result.alternatives[0].words;
+  // Note: The transcript within each result is separate and sequential per result.
+  // However, the words list within an alternative includes all the words
+  // from all the results thus far. Thus, to get all the words with speaker
+  // tags, you only have to take the words list from the last result:
+  wordsInfo.forEach(a =>
+    console.log(` word: ${a.word}, speakerTag: ${a.speakerTag}`)
+  );
   // [END speech_transcribe_diarization_beta]
 }
 
-function asyncSpeechTranscribeDiarizationGCS(gcsUri) {
+async function asyncSpeechTranscribeDiarizationGCS(gcsUri) {
   // [START speech_transcribe_diarization_gcs_beta]
   // Imports the Google Cloud client library
   const speech = require('@google-cloud/speech').v1p1beta1;
@@ -112,32 +105,25 @@ function asyncSpeechTranscribeDiarizationGCS(gcsUri) {
     audio: audio,
   };
 
-  client
-    .recognize(request)
-    .then(data => {
-      const response = data[0];
-      const transcription = response.results
-        .map(result => result.alternatives[0].transcript)
-        .join('\n');
-      console.log(`Transcription: ${transcription}`);
-      console.log(`Speaker Diarization:`);
-      const result = response.results[response.results.length - 1];
-      const wordsInfo = result.alternatives[0].words;
-      // Note: The transcript within each result is separate and sequential per result.
-      // However, the words list within an alternative includes all the words
-      // from all the results thus far. Thus, to get all the words with speaker
-      // tags, you only have to take the words list from the last result:
-      wordsInfo.forEach(a =>
-        console.log(` word: ${a.word}, speakerTag: ${a.speakerTag}`)
-      );
-    })
-    .catch(err => {
-      console.error('ERROR:', err);
-    });
+  const [response] = await client.recognize(request);
+  const transcription = response.results
+    .map(result => result.alternatives[0].transcript)
+    .join('\n');
+  console.log(`Transcription: ${transcription}`);
+  console.log(`Speaker Diarization:`);
+  const result = response.results[response.results.length - 1];
+  const wordsInfo = result.alternatives[0].words;
+  // Note: The transcript within each result is separate and sequential per result.
+  // However, the words list within an alternative includes all the words
+  // from all the results thus far. Thus, to get all the words with speaker
+  // tags, you only have to take the words list from the last result:
+  wordsInfo.forEach(a =>
+    console.log(` word: ${a.word}, speakerTag: ${a.speakerTag}`)
+  );
   // [END speech_transcribe_diarization_gcs_beta]
 }
 
-function speechTranscribeMultiChannel(fileName) {
+async function speechTranscribeMultiChannel(fileName) {
   // [START speech_transcribe_multichannel_beta]
   const fs = require('fs');
@@ -168,28 +154,17 @@ function speechTranscribeMultiChannel(fileName) {
     audio: audio,
   };
 
-  client
-    .recognize(request)
-    .then(data => {
-      const response = data[0];
-      const transcription = response.results
-        .map(
-          result =>
-            ` Channel Tag: ` +
-            result.channelTag +
-            ` ` +
-            result.alternatives[0].transcript
-        )
-        .join('\n');
-      console.log(`Transcription: \n${transcription}`);
+  const [response] = await client.recognize(request);
+  const transcription = response.results
+    .map(result => {
+      return ` Channel Tag: ${result.channelTag} ${result.alternatives[0].transcript}`;
     })
-    .catch(err => {
-      console.error('ERROR:', err);
-    });
+    .join('\n');
+  console.log(`Transcription: \n${transcription}`);
   // [END speech_transcribe_multichannel_beta]
 }
 
-function speechTranscribeMultichannelGCS(gcsUri) {
+async function speechTranscribeMultichannelGCS(gcsUri) {
   // [START speech_transcribe_multichannel_gcs_beta]
   const speech = require('@google-cloud/speech').v1p1beta1;
@@ -212,28 +187,17 @@ function speechTranscribeMultichannelGCS(gcsUri) {
     audio: audio,
   };
 
-  client
-    .recognize(request)
-    .then(data => {
-      const response = data[0];
-      const transcription = response.results
-        .map(
-          result =>
-            ` Channel Tag: ` +
-            result.channelTag +
-            ` ` +
-            result.alternatives[0].transcript
-        )
-        .join('\n');
-      console.log(`Transcription: \n${transcription}`);
+  const [response] = await client.recognize(request);
+  const transcription = response.results
+    .map(result => {
+      return ` Channel Tag: ${result.channelTag} ${result.alternatives[0].transcript}`;
     })
-    .catch(err => {
-      console.error('ERROR:', err);
-    });
+    .join('\n');
+  console.log(`Transcription: \n${transcription}`);
   // [END speech_transcribe_multichannel_gcs_beta]
 }
 
-function speechTranscribeMultilang(fileName) {
+async function speechTranscribeMultilang(fileName) {
   // [START speech_transcribe_multilanguage_beta]
   const fs = require('fs');
@@ -264,22 +228,15 @@ function speechTranscribeMultilang(fileName) {
     audio: audio,
   };
 
-  client
-    .recognize(request)
-    .then(data => {
-      const response = data[0];
-      const transcription = response.results
-        .map(result => result.alternatives[0].transcript)
-        .join('\n');
-      console.log(`Transcription: ${transcription}`);
-    })
-    .catch(err => {
-      console.error('ERROR:', err);
-    });
+  const [response] = await client.recognize(request);
+  const transcription = response.results
+    .map(result => result.alternatives[0].transcript)
+    .join('\n');
+  console.log(`Transcription: ${transcription}`);
   // [END speech_transcribe_multilanguage_beta]
 }
 
-function speechTranscribeMultilangGCS(gcsUri) {
+async function speechTranscribeMultilangGCS(gcsUri) {
   // [START speech_transcribe_multilanguage_gcs_beta]
   // Imports the Google Cloud client library
   const speech = require('@google-cloud/speech').v1p1beta1;
@@ -308,26 +265,16 @@ function speechTranscribeMultilangGCS(gcsUri) {
     audio: audio,
   };
 
-  client
-    .longRunningRecognize(request)
-    .then(data => {
-      const operation = data[0];
-      return operation.promise();
-    })
-    .then(data => {
-      const response = data[0];
-      const transcription = response.results
-        .map(result => result.alternatives[0].transcript)
-        .join('\n');
-      console.log(`Transcription: ${transcription}`);
-    })
-    .catch(err => {
-      console.error('ERROR:', err);
-    });
+  const [operation] = await client.longRunningRecognize(request);
+  const [response] = await operation.promise();
+  const transcription = response.results
+    .map(result => result.alternatives[0].transcript)
+    .join('\n');
+  console.log(`Transcription: ${transcription}`);
   // [END speech_transcribe_multilanguage_gcs_beta]
 }
 
-function speechTranscribeWordLevelConfidence(fileName) {
+async function speechTranscribeWordLevelConfidence(fileName) {
   // [START speech_transcribe_word_level_confidence_beta]
   const fs = require('fs');
@@ -358,33 +305,24 @@ function speechTranscribeWordLevelConfidence(fileName) {
     audio: audio,
   };
 
-  client
-    .recognize(request)
-    .then(data => {
-      const response = data[0];
-      const transcription = response.results
-        .map(result => result.alternatives[0].transcript)
-        .join('\n');
-      const confidence = response.results
-        .map(result => result.alternatives[0].confidence)
-        .join(`\n`);
-      console.log(
-        `Transcription: ${transcription} \n Confidence: ${confidence}`
-      );
-
-      console.log(`Word-Level-Confidence:`);
-      const words = response.results.map(result => result.alternatives[0]);
-      words[0].words.forEach(a => {
-        console.log(` word: ${a.word}, confidence: ${a.confidence}`);
-      });
-    })
-    .catch(err => {
-      console.error('ERROR:', err);
-    });
+  const [response] = await client.recognize(request);
+  const transcription = response.results
+    .map(result => result.alternatives[0].transcript)
+    .join('\n');
+  const confidence = response.results
+    .map(result => result.alternatives[0].confidence)
+    .join(`\n`);
+  console.log(`Transcription: ${transcription} \n Confidence: ${confidence}`);
+
+  console.log(`Word-Level-Confidence:`);
+  const words = response.results.map(result => result.alternatives[0]);
+  words[0].words.forEach(a => {
+    console.log(` word: ${a.word}, confidence: ${a.confidence}`);
+  });
   // [END speech_transcribe_word_level_confidence_beta]
 }
 
-function speechTranscribeWordLevelConfidenceGCS(gcsUri) {
+async function speechTranscribeWordLevelConfidenceGCS(gcsUri) {
   // [START speech_transcribe_word_level_confidence_gcs_beta]
   // Imports the Google Cloud client library
   const speech = require('@google-cloud/speech').v1p1beta1;
@@ -413,29 +351,20 @@ function speechTranscribeWordLevelConfidenceGCS(gcsUri) {
     audio: audio,
   };
 
-  client
-    .recognize(request)
-    .then(data => {
-      const response = data[0];
-      const transcription = response.results
-        .map(result => result.alternatives[0].transcript)
-        .join('\n');
-      const confidence = response.results
-        .map(result => result.alternatives[0].confidence)
-        .join(`\n`);
-      console.log(
-        `Transcription: ${transcription} \n Confidence: ${confidence}`
-      );
-
-      console.log(`Word-Level-Confidence:`);
-      const words = response.results.map(result => result.alternatives[0]);
-      words[0].words.forEach(a => {
-        console.log(` word: ${a.word}, confidence: ${a.confidence}`);
-      });
-    })
-    .catch(err => {
-      console.error('ERROR:', err);
-    });
+  const [response] = await client.recognize(request);
+  const transcription = response.results
+    .map(result => result.alternatives[0].transcript)
+    .join('\n');
+  const confidence = response.results
+    .map(result => result.alternatives[0].confidence)
+    .join(`\n`);
+  console.log(`Transcription: ${transcription} \n Confidence: ${confidence}`);
+
+  console.log(`Word-Level-Confidence:`);
+  const words = response.results.map(result => result.alternatives[0]);
+  words[0].words.forEach(a => {
+    console.log(` word: ${a.word}, confidence: ${a.confidence}`);
+  });
   // [END speech_transcribe_word_level_confidence_gcs_beta]
 }
diff --git a/speech/quickstart.js b/speech/quickstart.js
index f95b8aaf22..ef0e40c5b9 100644
--- a/speech/quickstart.js
+++ b/speech/quickstart.js
@@ -16,45 +16,41 @@
 'use strict';
 
 // [START speech_quickstart]
-// Imports the Google Cloud client library
-const speech = require('@google-cloud/speech');
-const fs = require('fs');
+async function main() {
+  // Imports the Google Cloud client library
+  const speech = require('@google-cloud/speech');
+  const fs = require('fs');
 
-// Creates a client
-const client = new speech.SpeechClient();
+  // Creates a client
+  const client = new speech.SpeechClient();
 
-// The name of the audio file to transcribe
-const fileName = './resources/audio.raw';
+  // The name of the audio file to transcribe
+  const fileName = './resources/audio.raw';
 
-// Reads a local audio file and converts it to base64
-const file = fs.readFileSync(fileName);
-const audioBytes = file.toString('base64');
+  // Reads a local audio file and converts it to base64
+  const file = fs.readFileSync(fileName);
+  const audioBytes = file.toString('base64');
 
-// The audio file's encoding, sample rate in hertz, and BCP-47 language code
-const audio = {
-  content: audioBytes,
-};
-const config = {
-  encoding: 'LINEAR16',
-  sampleRateHertz: 16000,
-  languageCode: 'en-US',
-};
-const request = {
-  audio: audio,
-  config: config,
-};
+  // The audio file's encoding, sample rate in hertz, and BCP-47 language code
+  const audio = {
+    content: audioBytes,
+  };
+  const config = {
+    encoding: 'LINEAR16',
+    sampleRateHertz: 16000,
+    languageCode: 'en-US',
+  };
+  const request = {
+    audio: audio,
+    config: config,
+  };
 
-// Detects speech in the audio file
-client
-  .recognize(request)
-  .then(data => {
-    const response = data[0];
-    const transcription = response.results
-      .map(result => result.alternatives[0].transcript)
-      .join('\n');
-    console.log(`Transcription: ${transcription}`);
-  })
-  .catch(err => {
-    console.error('ERROR:', err);
-  });
+  // Detects speech in the audio file
+  const [response] = await client.recognize(request);
+  const transcription = response.results
+    .map(result => result.alternatives[0].transcript)
+    .join('\n');
+  console.log(`Transcription: ${transcription}`);
+}
+main().catch(console.error);
 // [END speech_quickstart]
diff --git a/speech/recognize.js b/speech/recognize.js
index 7f14b43289..6b74c4d95f 100644
--- a/speech/recognize.js
+++ b/speech/recognize.js
@@ -23,7 +23,12 @@
 'use strict';
 
-function syncRecognize(filename, encoding, sampleRateHertz, languageCode) {
+async function syncRecognize(
+  filename,
+  encoding,
+  sampleRateHertz,
+  languageCode
+) {
   // [START speech_transcribe_sync]
   // Imports the Google Cloud client library
   const fs = require('fs');
@@ -55,22 +60,20 @@ function syncRecognize(filename, encoding, sampleRateHertz, languageCode) {
   };
 
   // Detects speech in the audio file
-  client
-    .recognize(request)
-    .then(data => {
-      const response = data[0];
-      const transcription = response.results
-        .map(result => result.alternatives[0].transcript)
-        .join('\n');
-      console.log(`Transcription: `, transcription);
-    })
-    .catch(err => {
-      console.error('ERROR:', err);
-    });
+  const [response] = await client.recognize(request);
+  const transcription = response.results
+    .map(result => result.alternatives[0].transcript)
+    .join('\n');
+  console.log(`Transcription: `, transcription);
   // [END speech_transcribe_sync]
 }
 
-function syncRecognizeGCS(gcsUri, encoding, sampleRateHertz, languageCode) {
+async function syncRecognizeGCS(
+  gcsUri,
+  encoding,
+  sampleRateHertz,
+  languageCode
+) {
   // [START speech_transcribe_sync_gcs]
   // Imports the Google Cloud client library
   const speech = require('@google-cloud/speech');
@@ -101,22 +104,20 @@ function syncRecognizeGCS(gcsUri, encoding, sampleRateHertz, languageCode) {
   };
 
   // Detects speech in the audio file
-  client
-    .recognize(request)
-    .then(data => {
-      const response = data[0];
-      const transcription = response.results
-        .map(result => result.alternatives[0].transcript)
-        .join('\n');
-      console.log(`Transcription: `, transcription);
-    })
-    .catch(err => {
-      console.error('ERROR:', err);
-    });
+  const [response] = await client.recognize(request);
+  const transcription = response.results
+    .map(result => result.alternatives[0].transcript)
+    .join('\n');
+  console.log(`Transcription: `, transcription);
   // [END speech_transcribe_sync_gcs]
 }
 
-function syncRecognizeWords(filename, encoding, sampleRateHertz, languageCode) {
+async function syncRecognizeWords(
+  filename,
+  encoding,
+  sampleRateHertz,
+  languageCode
+) {
   // [START speech_sync_recognize_words]
   // Imports the Google Cloud client library
   const fs = require('fs');
@@ -149,35 +150,33 @@ function syncRecognizeWords(filename, encoding, sampleRateHertz, languageCode) {
   };
 
   // Detects speech in the audio file
-  client
-    .recognize(request)
-    .then(data => {
-      const response = data[0];
-      response.results.forEach(result => {
-        console.log(`Transcription: `, result.alternatives[0].transcript);
-        result.alternatives[0].words.forEach(wordInfo => {
-          // NOTE: If you have a time offset exceeding 2^32 seconds, use the
-          // wordInfo.{x}Time.seconds.high to calculate seconds.
-          const startSecs =
-            `${wordInfo.startTime.seconds}` +
-            `.` +
-            wordInfo.startTime.nanos / 100000000;
-          const endSecs =
-            `${wordInfo.endTime.seconds}` +
-            `.` +
-            wordInfo.endTime.nanos / 100000000;
-          console.log(`Word: ${wordInfo.word}`);
-          console.log(`\t ${startSecs} secs - ${endSecs} secs`);
-        });
-      });
-    })
-    .catch(err => {
-      console.error('ERROR:', err);
+  const [response] = await client.recognize(request);
+  response.results.forEach(result => {
+    console.log(`Transcription: `, result.alternatives[0].transcript);
+    result.alternatives[0].words.forEach(wordInfo => {
+      // NOTE: If you have a time offset exceeding 2^32 seconds, use the
+      // wordInfo.{x}Time.seconds.high to calculate seconds.
+      const startSecs =
+        `${wordInfo.startTime.seconds}` +
+        `.` +
+        wordInfo.startTime.nanos / 100000000;
+      const endSecs =
+        `${wordInfo.endTime.seconds}` +
+        `.` +
+        wordInfo.endTime.nanos / 100000000;
+      console.log(`Word: ${wordInfo.word}`);
+      console.log(`\t ${startSecs} secs - ${endSecs} secs`);
     });
+  });
   // [END speech_sync_recognize_words]
 }
 
-function asyncRecognize(filename, encoding, sampleRateHertz, languageCode) {
+async function asyncRecognize(
+  filename,
+  encoding,
+  sampleRateHertz,
+  languageCode
+) {
   // [START speech_transcribe_async]
   // Imports the Google Cloud client library
   const speech = require('@google-cloud/speech');
@@ -210,28 +209,23 @@ function asyncRecognize(filename, encoding, sampleRateHertz, languageCode) {
   // Detects speech in the audio file. This creates a recognition job that you
   // can wait for now, or get its result later.
-  client
-    .longRunningRecognize(request)
-    .then(data => {
-      const response = data[0];
-      const operation = response;
-      // Get a Promise representation of the final result of the job
-      return operation.promise();
-    })
-    .then(data => {
-      const response = data[0];
-      const transcription = response.results
-        .map(result => result.alternatives[0].transcript)
-        .join('\n');
-      console.log(`Transcription: ${transcription}`);
-    })
-    .catch(err => {
-      console.error('ERROR:', err);
-    });
+  const [operation] = await client.longRunningRecognize(request);
+
+  // Get a Promise representation of the final result of the job
+  const [response] = await operation.promise();
+  const transcription = response.results
+    .map(result => result.alternatives[0].transcript)
+    .join('\n');
+  console.log(`Transcription: ${transcription}`);
   // [END speech_transcribe_async]
 }
 
-function asyncRecognizeGCS(gcsUri, encoding, sampleRateHertz, languageCode) {
+async function asyncRecognizeGCS(
+  gcsUri,
+  encoding,
+  sampleRateHertz,
+  languageCode
+) {
   // [START speech_transcribe_async_gcs]
   // Imports the Google Cloud client library
   const speech = require('@google-cloud/speech');
@@ -264,27 +258,17 @@ function asyncRecognizeGCS(gcsUri, encoding, sampleRateHertz, languageCode) {
   // Detects speech in the audio file. This creates a recognition job that you
   // can wait for now, or get its result later.
-  client
-    .longRunningRecognize(request)
-    .then(data => {
-      const operation = data[0];
-      // Get a Promise representation of the final result of the job
-      return operation.promise();
-    })
-    .then(data => {
-      const response = data[0];
-      const transcription = response.results
-        .map(result => result.alternatives[0].transcript)
-        .join('\n');
-      console.log(`Transcription: ${transcription}`);
-    })
-    .catch(err => {
-      console.error('ERROR:', err);
-    });
+  const [operation] = await client.longRunningRecognize(request);
+  // Get a Promise representation of the final result of the job
+  const [response] = await operation.promise();
+  const transcription = response.results
+    .map(result => result.alternatives[0].transcript)
+    .join('\n');
+  console.log(`Transcription: ${transcription}`);
   // [END speech_transcribe_async_gcs]
 }
 
-function asyncRecognizeGCSWords(
+async function asyncRecognizeGCSWords(
   gcsUri,
   encoding,
   sampleRateHertz,
@@ -323,40 +307,36 @@ function asyncRecognizeGCSWords(
   // Detects speech in the audio file. This creates a recognition job that you
   // can wait for now, or get its result later.
-  client
-    .longRunningRecognize(request)
-    .then(data => {
-      const operation = data[0];
-      // Get a Promise representation of the final result of the job
-      return operation.promise();
-    })
-    .then(data => {
-      const response = data[0];
-      response.results.forEach(result => {
-        console.log(`Transcription: ${result.alternatives[0].transcript}`);
-        result.alternatives[0].words.forEach(wordInfo => {
-          // NOTE: If you have a time offset exceeding 2^32 seconds, use the
-          // wordInfo.{x}Time.seconds.high to calculate seconds.
-          const startSecs =
-            `${wordInfo.startTime.seconds}` +
-            `.` +
-            wordInfo.startTime.nanos / 100000000;
-          const endSecs =
-            `${wordInfo.endTime.seconds}` +
-            `.` +
-            wordInfo.endTime.nanos / 100000000;
-          console.log(`Word: ${wordInfo.word}`);
-          console.log(`\t ${startSecs} secs - ${endSecs} secs`);
-        });
-      });
-    })
-    .catch(err => {
-      console.error('ERROR:', err);
+  const [operation] = await client.longRunningRecognize(request);
+
+  // Get a Promise representation of the final result of the job
+  const [response] = await operation.promise();
+  response.results.forEach(result => {
+    console.log(`Transcription: ${result.alternatives[0].transcript}`);
+    result.alternatives[0].words.forEach(wordInfo => {
+      // NOTE: If you have a time offset exceeding 2^32 seconds, use the
+      // wordInfo.{x}Time.seconds.high to calculate seconds.
+      const startSecs =
+        `${wordInfo.startTime.seconds}` +
+        `.` +
+        wordInfo.startTime.nanos / 100000000;
+      const endSecs =
+        `${wordInfo.endTime.seconds}` +
+        `.` +
+        wordInfo.endTime.nanos / 100000000;
+      console.log(`Word: ${wordInfo.word}`);
+      console.log(`\t ${startSecs} secs - ${endSecs} secs`);
     });
+  });
   // [END speech_transcribe_async_word_time_offsets_gcs]
 }
 
-function streamingRecognize(filename, encoding, sampleRateHertz, languageCode) {
+async function streamingRecognize(
+  filename,
+  encoding,
+  sampleRateHertz,
+  languageCode
+) {
   // [START speech_transcribe_streaming]
   const fs = require('fs');
@@ -453,7 +433,7 @@ function streamingMicRecognize(encoding, sampleRateHertz, languageCode) {
   // [END speech_transcribe_streaming_mic]
 }
 
-function syncRecognizeModelSelection(
+async function syncRecognizeModelSelection(
   filename,
   model,
   encoding,
@@ -497,22 +477,15 @@ function syncRecognizeModelSelection(
   };
 
   // Detects speech in the audio file
-  client
-    .recognize(request)
-    .then(data => {
-      const response = data[0];
-      const transcription = response.results
-        .map(result => result.alternatives[0].transcript)
-        .join('\n');
-      console.log(`Transcription: `, transcription);
-    })
-    .catch(err => {
-      console.error('ERROR:', err);
-    });
+  const [response] = await client.recognize(request);
+  const transcription = response.results
+    .map(result => result.alternatives[0].transcript)
+    .join('\n');
+  console.log(`Transcription: `, transcription);
   // [END speech_transcribe_model_selection]
 }
 
-function syncRecognizeModelSelectionGCS(
+async function syncRecognizeModelSelectionGCS(
   gcsUri,
   model,
   encoding,
@@ -555,22 +528,15 @@ function syncRecognizeModelSelectionGCS(
   };
 
   // Detects speech in the audio file
-  client
-    .recognize(request)
-    .then(data => {
-      const response = data[0];
-      const transcription = response.results
-        .map(result => result.alternatives[0].transcript)
-        .join('\n');
-      console.log(`Transcription: `, transcription);
-    })
-    .catch(err => {
-      console.error('ERROR:', err);
-    });
+  const [response] = await client.recognize(request);
+  const transcription = response.results
+    .map(result => result.alternatives[0].transcript)
+    .join('\n');
+  console.log(`Transcription: `, transcription);
   // [END speech_transcribe_model_selection_gcs]
 }
 
-function syncRecognizeWithAutoPunctuation(
+async function syncRecognizeWithAutoPunctuation(
   filename,
   encoding,
   sampleRateHertz,
@@ -611,22 +577,15 @@ function syncRecognizeWithAutoPunctuation(
   };
 
   // Detects speech in the audio file
-  client
-    .recognize(request)
-    .then(data => {
-      const response = data[0];
-      const transcription = response.results
-        .map(result => result.alternatives[0].transcript)
-        .join('\n');
-      console.log(`Transcription: `, transcription);
-    })
-    .catch(err => {
-      console.error('ERROR:', err);
-    });
+  const [response] = await client.recognize(request);
+  const transcription = response.results
+    .map(result => result.alternatives[0].transcript)
+    .join('\n');
+  console.log(`Transcription: `, transcription);
   // [END speech_transcribe_auto_punctuation]
 }
 
-function syncRecognizeWithEnhancedModel(
+async function syncRecognizeWithEnhancedModel(
   filename,
   encoding,
   sampleRateHertz,
@@ -668,18 +627,11 @@ function syncRecognizeWithEnhancedModel(
   };
 
   // Detects speech in the audio file
-  client
-    .recognize(request)
-    .then(data => {
-      const response = data[0];
-      response.results.forEach(result => {
-        const alternative = result.alternatives[0];
-        console.log(alternative.transcript);
-      });
-    })
-    .catch(err => {
-      console.error('ERROR:', err);
-    });
+  const [response] = await client.recognize(request);
+  response.results.forEach(result => {
+    const alternative = result.alternatives[0];
+    console.log(alternative.transcript);
+  });
   // [END speech_transcribe_enhanced_model]
 }
diff --git a/speech/recognize.v1p1beta1.js b/speech/recognize.v1p1beta1.js
index 1b2e0e032d..cfe724bc0a 100644
--- a/speech/recognize.v1p1beta1.js
+++ b/speech/recognize.v1p1beta1.js
@@ -23,7 +23,7 @@
 'use strict';
 
-function syncRecognizeWithMetaData(
+async function syncRecognizeWithMetaData(
   filename,
   encoding,
   sampleRateHertz,
@@ -73,18 +73,11 @@ function syncRecognizeWithMetaData(
   };
 
   // Detects speech in the audio file
-  client
-    .recognize(request)
-    .then(data => {
-      const response = data[0];
-      response.results.forEach(result => {
-        const alternative = result.alternatives[0];
-        console.log(alternative.transcript);
-      });
-    })
-    .catch(err => {
-      console.error('ERROR:', err);
-    });
+  const [response] = await client.recognize(request);
+  response.results.forEach(result => {
+    const alternative = result.alternatives[0];
+    console.log(alternative.transcript);
+  });
   // [END speech_transcribe_recognition_metadata_beta]
 }
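Note on error handling: with the per-call .catch(err => ...) handlers removed, each converted sample function now returns a promise that rejects on failure, so errors must be handled at the call site (quickstart.js does this with main().catch(console.error)). Below is a minimal sketch of a hypothetical caller; the function name, arguments, and export arrangement are illustrative assumptions, not part of the diff.

    // Hypothetical caller sketch (assumes syncRecognize is reachable from this
    // file, e.g. exported for illustration). Rejections propagate out of the
    // async sample function and are caught here instead of inside it.
    async function run() {
      await syncRecognize('./resources/audio.raw', 'LINEAR16', 16000, 'en-US');
    }
    run().catch(err => {
      console.error('ERROR:', err);
      process.exitCode = 1;
    });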