diff --git a/.jscsrc b/.jscsrc index c9551063232..91d4a292952 100644 --- a/.jscsrc +++ b/.jscsrc @@ -20,8 +20,9 @@ "allowUrlComments": true }, "excludeFiles": [ - "system-test/data/*", - "test/testdata/*", + "**/src/*/**/*", + "**/system-test/*/**/*", + "**/test/*/**/*", "**/node_modules/" ] } diff --git a/.jshintignore b/.jshintignore index 39b3b18d124..626b737fbdc 100644 --- a/.jshintignore +++ b/.jshintignore @@ -1,3 +1,4 @@ -**/system-test/data/* -**/test/testdata/* +**/src/*/**/* +**/system-test/*/**/* +**/test/*/**/* **/node_modules/ diff --git a/README.md b/README.md index bb0acd82203..93696df5c7c 100644 --- a/README.md +++ b/README.md @@ -997,7 +997,7 @@ var speechClient = speech({ // Detect the speech in an audio file. speechClient.recognize('./audio.raw', { encoding: 'LINEAR16', - sampleRate: 16000 + sampleRateHertz: 16000 }, function(err, transcript) { // transcript = 'how old is the Brooklyn Bridge' }); @@ -1008,34 +1008,14 @@ fs.createReadStream('./audio.raw') .pipe(speechClient.createRecognizeStream({ config: { encoding: 'LINEAR16', - sampleRate: 16000 + sampleRateHertz: 16000 }, singleUtterance: false, interimResults: false })) .on('error', console.error) .on('data', function(data) { - // The first "data" event emitted might look like: - // data = { - // endpointerType: Speech.endpointerTypes.START_OF_SPEECH, - // results: "", - // ... - // } - // - // A later "data" event emitted might look like: - // data = { - // endpointerType: Speech.endpointerTypes.END_OF_AUDIO, - // results: "", - // ... - // } - // - // A final "data" event emitted might look like: - // data = { - // endpointerType: - // Speech.endpointerTypes.ENDPOINTER_EVENT_UNSPECIFIED, - // results: "how old is the Brooklyn Bridge", - // ... - // } + // data.results = "how old is the Brooklyn Bridge" }); ``` diff --git a/packages/speech/README.md b/packages/speech/README.md index 55cfd6f5866..cf0fab6e737 100644 --- a/packages/speech/README.md +++ b/packages/speech/README.md @@ -19,7 +19,7 @@ var speech = require('@google-cloud/speech')({ // Detect the speech in an audio file. speech.recognize('./audio.raw', { encoding: 'LINEAR16', - sampleRate: 16000 + sampleRateHertz: 16000 }, function(err, transcript) { // transcript = 'how old is the Brooklyn Bridge' }); @@ -30,37 +30,20 @@ fs.createReadStream('./audio.raw') .pipe(speech.createRecognizeStream({ config: { encoding: 'LINEAR16', - sampleRate: 16000 + sampleRateHertz: 16000 }, singleUtterance: false, interimResults: false })) .on('error', console.error) .on('data', function(data) { - // The first "data" event emitted might look like: - // data = { - // endpointerType: Speech.endpointerTypes.START_OF_SPEECH, - // ... - // } - // - // A later "data" event emitted might look like: - // data = { - // endpointerType: Speech.endpointerTypes.END_OF_AUDIO, - // ... - // } - // - // A final "data" event emitted might look like: - // data = { - // endpointerType: Speech.endpointerTypes.END_OF_AUDIO, - // results: "how old is the Brooklyn Bridge", - // ... - // } + // data.results = "how old is the Brooklyn Bridge" }); // Promises are also supported by omitting callbacks. 
speech.recognize('./audio.raw', { encoding: 'LINEAR16', - sampleRate: 16000 + sampleRateHertz: 16000 }).then(function(data) { var transcript = data[0]; }); diff --git a/packages/speech/package.json b/packages/speech/package.json index bdd98f24926..502deb117a7 100644 --- a/packages/speech/package.json +++ b/packages/speech/package.json @@ -58,8 +58,8 @@ "@google-cloud/common-grpc": "^0.3.0", "events-intercept": "^2.0.0", "extend": "^3.0.0", - "google-gax": "^0.12.0", - "google-proto-files": "^0.10.0", + "google-gax": "^0.12.3", + "google-proto-files": "^0.11.0", "is": "^3.1.0", "propprop": "^0.3.1", "pumpify": "^1.3.5", diff --git a/packages/speech/src/index.js b/packages/speech/src/index.js index 40b4d95c616..feabc3095d5 100644 --- a/packages/speech/src/index.js +++ b/packages/speech/src/index.js @@ -35,7 +35,7 @@ var request = require('request'); var streamEvents = require('stream-events'); var through = require('through2'); var util = require('util'); -var v1beta1 = require('./v1beta1'); +var v1 = require('./v1'); /** * The [Cloud Speech API](https://cloud.google.com/speech/docs) enables easy @@ -67,7 +67,7 @@ function Speech(options) { }); this.api = { - Speech: v1beta1(options).speechClient(options) + Speech: v1(options).speechClient(options) }; var config = { @@ -75,11 +75,6 @@ function Speech(options) { projectIdRequired: false, service: 'speech', protoServices: { - Speech: { - path: googleProtoFiles.speech.v1beta1, - service: 'cloud.speech', - apiVersion: 'v1beta1' - }, Operations: { path: googleProtoFiles('longrunning', 'operations.proto'), service: 'longrunning' @@ -97,20 +92,13 @@ function Speech(options) { util.inherits(Speech, commonGrpc.Service); /** - * The endpointer types that the Speech API will return while processing a + * The event types that the Speech API will return while processing a * {module:speech#createRecognizeStream} request. You can track the progress of - * audio recognition by comparing the `data.endpointerType` property with these + * audio recognition by comparing the `data.speechEventType` property with these * values. * - * - `Speech.endpointerTypes.ENDPOINTER_EVENT_UNSPECIFIED`: No endpointer - * event specified. - * - `Speech.endpointerTypes.START_OF_SPEECH`: Speech has been detected in the - * audio stream. - * - `Speech.endpointerTypes.END_OF_SPEECH`: Speech has ceased to be detected - * in the audio stream. - * - `Speech.endpointerTypes.END_OF_AUDIO`: The end of the audio stream has - * been reached and it is being processed. - * - `Speech.endpointerTypes.END_OF_UTTERANCE`: This event is only sent when + * - `Speech.eventTypes.SPEECH_EVENT_UNSPECIFIED`: No speech event specified. + * - `Speech.eventTypes.END_OF_SINGLE_UTTERANCE`: This event is only sent when * `config.singleUtterance` passed to {module:speech#createRecognizeStream} * is `true`. It indicates that the server has detected the end of the * user's speech utterance and expects no additional speech.
Therefore, the server will not process additional audio. @@ -119,13 +107,10 @@ util.inherits(Speech, commonGrpc.Service); * * @type {object} */ -Speech.endpointerTypes = -Speech.prototype.endpointerTypes = { - END_OF_AUDIO: 'END_OF_AUDIO', - END_OF_SPEECH: 'END_OF_SPEECH', - END_OF_UTTERANCE: 'END_OF_UTTERANCE', - ENDPOINTER_EVENT_UNSPECIFIED: 'ENDPOINTER_EVENT_UNSPECIFIED', - START_OF_SPEECH: 'START_OF_SPEECH' +Speech.eventTypes = +Speech.prototype.eventTypes = { + END_OF_SINGLE_UTTERANCE: 'END_OF_SINGLE_UTTERANCE', + SPEECH_EVENT_UNSPECIFIED: 'SPEECH_EVENT_UNSPECIFIED' }; /** @@ -371,7 +356,7 @@ Speech.formatResults_ = function(resultSets, verboseMode) { * [`StreamingRecognizeResponse`](https://cloud.google.com/speech/reference/rpc/google.cloud.speech.v1beta1#streamingrecognizeresponse) * object, containing these properties: * - * - **`endpointerType`** See {module:speech#endpointerTypes}. + * - **`speechEventType`** See {module:speech#eventTypes}. * - **`results`** By default, a combined string of transcripts. When * `config.verbose` is enabled, this is an object including a `transcript` * property, a `confidence` score from `0` - `100`, and an `alternatives` * array consisting of other transcription possibilities. See the examples * below for more. @@ -405,7 +390,7 @@ Speech.formatResults_ = function(resultSets, verboseMode) { * var request = { * config: { * encoding: 'LINEAR16', - * sampleRate: 16000 + * sampleRateHertz: 16000 * }, * singleUtterance: false, * interimResults: false @@ -416,27 +401,7 @@ Speech.formatResults_ = function(resultSets, verboseMode) { * .pipe(speech.createRecognizeStream(request)) * .on('error', console.error) * .on('data', function(data) { - * // The first "data" event emitted might look like: - * // data = { - * // endpointerType: Speech.endpointerTypes.START_OF_SPEECH, - * // results: "", - * // ... - * // } - * - * // A later "data" event emitted might look like: - * // data = { - * // endpointerType: Speech.endpointerTypes.END_OF_AUDIO, - * // results: "", - * // ... - * // } - * - * // A final "data" event emitted might look like: - * // data = { - * // endpointerType: - * // Speech.endpointerTypes.ENDPOINTER_EVENT_UNSPECIFIED, - * // results: "how old is the Brooklyn Bridge", - * // ... - * // } + * // data.results = "how old is the Brooklyn Bridge" * }); * * //- @@ -445,7 +410,7 @@ Speech.formatResults_ = function(resultSets, verboseMode) { * var request = { * config: { * encoding: 'LINEAR16', - * sampleRate: 16000 + * sampleRateHertz: 16000 * }, * singleUtterance: false, * interimResults: false, @@ -457,32 +422,7 @@ Speech.formatResults_ = function(resultSets, verboseMode) { * .pipe(speech.createRecognizeStream(request)) * .on('error', console.error) * .on('data', function(data) { - * // The first "data" event emitted might look like: - * // data = { - * // endpointerType: Speech.endpointerTypes.START_OF_SPEECH, - * // results: [], - * // ... - * // } - * - * // A later "data" event emitted might look like: - * // data = { - * // endpointerType: Speech.endpointerTypes.END_OF_AUDIO, - * // results: [], - * // ... - * // } - * - * // A final "data" event emitted might look like: - * // data = { - * // endpointerType: - * // Speech.endpointerTypes.ENDPOINTER_EVENT_UNSPECIFIED, - * // results: [ - * // { - * // transcript: "how old is the Brooklyn Bridge", - * // confidence: 88.15 - * // } - * // ], - * // ...
- * // } + * // data.results = "how old is the Brooklyn Bridge" * }); */ Speech.prototype.createRecognizeStream = function(config) { @@ -492,6 +432,12 @@ Speech.prototype.createRecognizeStream = function(config) { throw new Error('A recognize request requires a configuration object.'); } + config = extend(true, { + config: { + languageCode: 'en-US' + } + }, config); + var verboseMode = config.verbose === true; delete config.verbose; @@ -507,6 +453,10 @@ Speech.prototype.createRecognizeStream = function(config) { recognizeStream.once('writing', function() { var requestStream = self.api.Speech.streamingRecognize(gaxOptions); + requestStream.on('error', function(err) { + recognizeStream.destroy(err); + }); + requestStream.on('response', function(response) { recognizeStream.emit('response', response); }); @@ -564,8 +514,8 @@ Speech.prototype.operation = function(name) { * larger files, you will need to use {module:speech#startRecognition} or * {module:speech#createRecognizeStream}. * - * @resource [SyncRecognize API Reference]{@link https://cloud.google.com/speech/reference/rpc/google.cloud.speech.v1beta1#google.cloud.speech.v1beta1.Speech.SyncRecognize} - * @resource [SyncRecognizeRequest API Reference]{@link https://cloud.google.com/speech/reference/rpc/google.cloud.speech.v1beta1#google.cloud.speech.v1beta1.SyncRecognizeRequest} + * @resource [Recognize API Reference]{@link https://cloud.google.com/speech/reference/rpc/google.cloud.speech.v1#google.cloud.speech.v1.Speech.Recognize} + * @resource [RecognizeRequest API Reference]{@link https://cloud.google.com/speech/reference/rpc/google.cloud.speech.v1#google.cloud.speech.v1.RecognizeRequest} * * @param {string|object|module:storage/file} file - The source file to run the * detection on. It can be either a local file path, a remote file URL, a * @param {object} callback.apiResponse - Raw API response. See - * [`SyncRecognizeResponse`](https://cloud.google.com/speech/reference/rpc/google.cloud.speech.v1beta1#syncrecognizeresponse). + * [`RecognizeResponse`](https://cloud.google.com/speech/reference/rpc/google.cloud.speech.v1#recognizeresponse).
* * @example * var config = { * encoding: 'LINEAR16', - * sampleRate: 16000 + * sampleRateHertz: 16000 * }; * * function callback(err, transcript, apiResponse) { * @@ -632,7 +582,7 @@ Speech.prototype.recognize = function(file, config, callback) { * //- * var config = { * encoding: 'LINEAR16', - * sampleRate: 16000, + * sampleRateHertz: 16000, * verbose: true * }; * @@ -670,7 +620,9 @@ Speech.prototype.recognize = function(file, config, callback) { throw new Error('A recognize request requires a configuration object.'); } - config = extend({}, config); + config = extend({ + languageCode: 'en-US' + }, config); if (!config.encoding) { config.encoding = Speech.detectEncoding_(file); @@ -685,7 +637,7 @@ Speech.prototype.recognize = function(file, config, callback) { return; } - self.api.Speech.syncRecognize({ + self.api.Speech.recognize({ config: config, audio: foundFile }, function(err, resp) { @@ -694,8 +646,7 @@ Speech.prototype.recognize = function(file, config, callback) { return; } - var response = new self.protos.Speech.SyncRecognizeResponse(resp); - var results = Speech.formatResults_(response.results, verboseMode); + var results = Speech.formatResults_(resp.results, verboseMode); callback(null, results, resp); }); @@ -710,9 +661,9 @@ Speech.prototype.recognize = function(file, config, callback) { * events to see how the operation finishes. Follow along with the examples * below. * - * @resource [AsyncRecognize API Reference]{@link https://cloud.google.com/speech/reference/rpc/google.cloud.speech.v1beta1#google.cloud.speech.v1beta1.Speech.AsyncRecognize} - * @resource [AsyncRecognizeRequest API Reference]{@link https://cloud.google.com/speech/reference/rpc/google.cloud.speech.v1beta1#google.cloud.speech.v1beta1.AsyncRecognizeRequest} - * @resource [AsyncRecognizeResponse API Reference]{@link https://cloud.google.com/speech/reference/rpc/google.cloud.speech.v1beta1#google.cloud.speech.v1beta1.AsyncRecognizeResponse} + * @resource [LongRunningRecognize API Reference]{@link https://cloud.google.com/speech/reference/rpc/google.cloud.speech.v1#google.cloud.speech.v1.Speech.LongRunningRecognize} + * @resource [LongRunningRecognizeRequest API Reference]{@link https://cloud.google.com/speech/reference/rpc/google.cloud.speech.v1#google.cloud.speech.v1.LongRunningRecognizeRequest} + * @resource [LongRunningRecognizeResponse API Reference]{@link https://cloud.google.com/speech/reference/rpc/google.cloud.speech.v1#google.cloud.speech.v1.LongRunningRecognizeResponse} * * @param {string|object|module:storage/file} file - The source file to run the * detection on.
It can be either a local file path, a remote file URL, a @@ -732,7 +683,7 @@ Speech.prototype.recognize = function(file, config, callback) { * @example * var config = { * encoding: 'LINEAR16', - * sampleRate: 16000 + * sampleRateHertz: 16000 * }; * * function callback(err, operation, apiResponse) { @@ -781,7 +732,7 @@ Speech.prototype.recognize = function(file, config, callback) { * //- * var config = { * encoding: 'LINEAR16', - * sampleRate: 16000, + * sampleRateHertz: 16000, * verbose: true * }; * @@ -813,7 +764,9 @@ Speech.prototype.recognize = function(file, config, callback) { Speech.prototype.startRecognition = function(file, config, callback) { var self = this; - config = extend({}, config); + config = extend({ + languageCode: 'en-US' + }, config); if (!config.encoding) { config.encoding = Speech.detectEncoding_(file); @@ -828,7 +781,7 @@ Speech.prototype.startRecognition = function(file, config, callback) { return; } - self.api.Speech.asyncRecognize({ + self.api.Speech.longRunningRecognize({ config: config, audio: foundFile }, function(err, operation, resp) { @@ -857,4 +810,4 @@ common.util.promisifyAll(Speech, { }); module.exports = Speech; -module.exports.v1beta1 = v1beta1; +module.exports.v1 = v1; diff --git a/packages/speech/src/v1beta1/index.js b/packages/speech/src/v1/index.js similarity index 81% rename from packages/speech/src/v1beta1/index.js rename to packages/speech/src/v1/index.js index 9e78f821b1d..65030f74032 100644 --- a/packages/speech/src/v1beta1/index.js +++ b/packages/speech/src/v1/index.js @@ -19,15 +19,15 @@ var speechClient = require('./speech_client'); var gax = require('google-gax'); var extend = require('extend'); -function v1beta1(options) { +function v1(options) { options = extend({ - scopes: v1beta1.ALL_SCOPES + scopes: v1.ALL_SCOPES }, options); var gaxGrpc = gax.grpc(options); return speechClient(gaxGrpc); } -v1beta1.SERVICE_ADDRESS = speechClient.SERVICE_ADDRESS; -v1beta1.ALL_SCOPES = speechClient.ALL_SCOPES; +v1.SERVICE_ADDRESS = speechClient.SERVICE_ADDRESS; +v1.ALL_SCOPES = speechClient.ALL_SCOPES; -module.exports = v1beta1; +module.exports = v1; diff --git a/packages/speech/src/v1beta1/speech_client.js b/packages/speech/src/v1/speech_client.js similarity index 79% rename from packages/speech/src/v1beta1/speech_client.js rename to packages/speech/src/v1/speech_client.js index 33d3ae6414d..f6975908973 100644 --- a/packages/speech/src/v1beta1/speech_client.js +++ b/packages/speech/src/v1/speech_client.js @@ -1,11 +1,11 @@ /* - * Copyright 2016 Google Inc. All rights reserved. + * Copyright 2017, Google Inc. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -15,7 +15,7 @@ * * EDITING INSTRUCTIONS * This file was generated from the file - * https://github.com/googleapis/googleapis/blob/master/google/cloud/speech/v1beta1/cloud_speech.proto, + * https://github.com/googleapis/googleapis/blob/master/google/cloud/speech/v1/cloud_speech.proto, * and updates to that file get reflected here through a refresh process. * For the short term, the refresh process will only be runnable by Google * engineers. 
@@ -35,7 +35,7 @@ var SERVICE_ADDRESS = 'speech.googleapis.com'; var DEFAULT_SERVICE_PORT = 443; -var CODE_GEN_NAME_VERSION = 'gapic/0.1.0'; +var CODE_GEN_NAME_VERSION = 'gapic/0.7.1'; var STREAM_DESCRIPTORS = { streamingRecognize: new gax.StreamDescriptor(gax.StreamType.BIDI_STREAMING) @@ -50,17 +50,17 @@ var ALL_SCOPES = [ ]; /** - * Service that implements Cloud Speech API. + * Service that implements Google Cloud Speech API. * * This will be created through a builder function which can be obtained by the module. * See the following example of how to initialize the module and how to access to the builder. * @see {@link speechClient} * * @example - * var speechV1beta1 = require('@google-cloud/speech').v1beta1({ + * var speechV1 = require('@google-cloud/speech').v1({ * // optional auth parameters. * }); - * var client = speechV1beta1.speechClient(); + * var client = speechV1.speechClient(); * * @class */ @@ -83,21 +83,20 @@ function SpeechClient(gaxGrpc, grpcClients, opts) { 'grpc/' + gaxGrpc.grpcVersion ); - this.operationsClient = new gax.lro({ auth: gaxGrpc.auth, grpc: gaxGrpc.grpc }).operationsClient(opts); this.longrunningDescriptors = { - asyncRecognize: new gax.LongrunningDescriptor( + longRunningRecognize: new gax.LongrunningDescriptor( this.operationsClient, - grpcClients.google.cloud.speech.v1beta1.AsyncRecognizeResponse.decode, - grpcClients.google.cloud.speech.v1beta1.AsyncRecognizeMetadata.decode) + grpcClients.google.cloud.speech.v1.LongRunningRecognizeResponse.decode, + grpcClients.google.cloud.speech.v1.LongRunningRecognizeMetadata.decode) }; var defaults = gaxGrpc.constructSettings( - 'google.cloud.speech.v1beta1.Speech', + 'google.cloud.speech.v1.Speech', configData, opts.clientConfig, {'x-goog-api-client': googleApiClient.join(' ')}); @@ -106,11 +105,11 @@ function SpeechClient(gaxGrpc, grpcClients, opts) { this.auth = gaxGrpc.auth; var speechStub = gaxGrpc.createStub( - grpcClients.google.cloud.speech.v1beta1.Speech, + grpcClients.google.cloud.speech.v1.Speech, opts); var speechStubMethods = [ - 'syncRecognize', - 'asyncRecognize', + 'recognize', + 'longRunningRecognize', 'streamingRecognize' ]; speechStubMethods.forEach(function(methodName) { @@ -126,9 +125,10 @@ function SpeechClient(gaxGrpc, grpcClients, opts) { }); } + /** * Get the project ID used by this class. - * @aram {function(Error, string)} callback - the callback to be called with + * @param {function(Error, string)} callback - the callback to be called with * the current project Id. */ SpeechClient.prototype.getProjectId = function(callback) { @@ -138,18 +138,18 @@ SpeechClient.prototype.getProjectId = function(callback) { // Service calls /** - * Perform synchronous speech-recognition: receive results after all audio + * Performs synchronous speech recognition: receive results after all audio * has been sent and processed. * * @param {Object} request * The request object that will be sent. * @param {Object} request.config - * [Required] The `config` message provides information to the recognizer - * that specifies how to process the request. + * *Required* Provides information to the recognizer that specifies how to + * process the request. * * This object should have the same structure as [RecognitionConfig]{@link RecognitionConfig} * @param {Object} request.audio - * [Required] The audio data to be recognized. + * *Required* The audio data to be recognized. 
* * This object should have the same structure as [RecognitionAudio]{@link RecognitionAudio} * @param {Object=} options @@ -158,19 +158,21 @@ SpeechClient.prototype.getProjectId = function(callback) { * @param {function(?Error, ?Object)=} callback * The function which will be called with the result of the API call. * - * The second parameter to the callback is an object representing [SyncRecognizeResponse]{@link SyncRecognizeResponse}. + * The second parameter to the callback is an object representing [RecognizeResponse]{@link RecognizeResponse}. * @return {Promise} - The promise which resolves to an array. - * The first element of the array is an object representing [SyncRecognizeResponse]{@link SyncRecognizeResponse}. + * The first element of the array is an object representing [RecognizeResponse]{@link RecognizeResponse}. * The promise has a method named "cancel" which cancels the ongoing API call. * * @example * - * var client = speechV1beta1.speechClient(); - * var encoding = speechV1beta1.RecognitionConfig.AudioEncoding.FLAC; - * var sampleRate = 44100; + * var client = speechV1.speechClient(); + * var encoding = speechV1.RecognitionConfig.AudioEncoding.FLAC; + * var sampleRateHertz = 44100; + * var languageCode = 'en-US'; * var config = { * encoding : encoding, - * sampleRate : sampleRate + * sampleRateHertz : sampleRateHertz, + * languageCode : languageCode * }; * var uri = 'gs://bucket_name/file_name.flac'; * var audio = { @@ -180,14 +182,14 @@ SpeechClient.prototype.getProjectId = function(callback) { * config: config, * audio: audio * }; - * client.syncRecognize(request).then(function(responses) { + * client.recognize(request).then(function(responses) { * var response = responses[0]; * // doThingsWith(response) * }).catch(function(err) { * console.error(err); * }); */ -SpeechClient.prototype.syncRecognize = function(request, options, callback) { +SpeechClient.prototype.recognize = function(request, options, callback) { if (options instanceof Function && callback === undefined) { callback = options; options = {}; @@ -196,24 +198,24 @@ SpeechClient.prototype.syncRecognize = function(request, options, callback) { options = {}; } - return this._syncRecognize(request, options, callback); + return this._recognize(request, options, callback); }; /** - * Perform asynchronous speech-recognition: receive results via the + * Performs asynchronous speech recognition: receive results via the * google.longrunning.Operations interface. Returns either an * `Operation.error` or an `Operation.response` which contains - * an `AsyncRecognizeResponse` message. + * a `LongRunningRecognizeResponse` message. * * @param {Object} request * The request object that will be sent. * @param {Object} request.config - * [Required] The `config` message provides information to the recognizer - * that specifies how to process the request. + * *Required* Provides information to the recognizer that specifies how to + * process the request. * * This object should have the same structure as [RecognitionConfig]{@link RecognitionConfig} * @param {Object} request.audio - * [Required] The audio data to be recognized. + * *Required* The audio data to be recognized. 
* * This object should have the same structure as [RecognitionAudio]{@link RecognitionAudio} * @param {Object=} options @@ -229,12 +231,14 @@ SpeechClient.prototype.syncRecognize = function(request, options, callback) { * * @example * - * var client = speechV1beta1.speechClient(); - * var encoding = speechV1beta1.RecognitionConfig.AudioEncoding.FLAC; - * var sampleRate = 44100; + * var client = speechV1.speechClient(); + * var encoding = speechV1.RecognitionConfig.AudioEncoding.FLAC; + * var sampleRateHertz = 44100; + * var languageCode = 'en-US'; * var config = { * encoding : encoding, - * sampleRate : sampleRate + * sampleRateHertz : sampleRateHertz, + * languageCode : languageCode * }; * var uri = 'gs://bucket_name/file_name.flac'; * var audio = { @@ -246,7 +250,7 @@ SpeechClient.prototype.syncRecognize = function(request, options, callback) { * }; * * // Handle the operation using the promise pattern. - * client.asyncRecognize(request).then(function(responses) { + * client.longRunningRecognize(request).then(function(responses) { * var operation = responses[0]; * var initialApiResponse = responses[1]; * @@ -266,7 +270,7 @@ SpeechClient.prototype.syncRecognize = function(request, options, callback) { * }); * * // Handle the operation using the event emitter pattern. - * client.asyncRecognize(request).then(function(responses) { + * client.longRunningRecognize(request).then(function(responses) { * var operation = responses[0]; * var initialApiResponse = responses[1]; * @@ -290,7 +294,7 @@ SpeechClient.prototype.syncRecognize = function(request, options, callback) { * console.error(err); * }); */ -SpeechClient.prototype.asyncRecognize = function(request, options, callback) { +SpeechClient.prototype.longRunningRecognize = function(request, options, callback) { if (options instanceof Function && callback === undefined) { callback = options; options = {}; @@ -299,11 +303,11 @@ SpeechClient.prototype.asyncRecognize = function(request, options, callback) { options = {}; } - return this._asyncRecognize(request, options, callback); + return this._longRunningRecognize(request, options, callback); }; /** - * Perform bidirectional streaming speech-recognition: receive results while + * Performs bidirectional streaming speech recognition: receive results while * sending audio. This method is only available via the gRPC API (not REST). 
* * @param {Object=} options @@ -316,7 +320,7 @@ SpeechClient.prototype.asyncRecognize = function(request, options, callback) { * * @example * - * var client = speechV1beta1.speechClient(); + * var client = speechV1.speechClient(); * var stream = client.streamingRecognize().on('data', function(response) { * // doThingsWith(response); * }); @@ -339,9 +343,9 @@ function SpeechClientBuilder(gaxGrpc) { var speechClient = gaxGrpc.load([{ root: require('google-proto-files')('..'), - file: 'google/cloud/speech/v1beta1/cloud_speech.proto' + file: 'google/cloud/speech/v1/cloud_speech.proto' }]); - extend(this, speechClient.google.cloud.speech.v1beta1); + extend(this, speechClient.google.cloud.speech.v1); /** diff --git a/packages/speech/src/v1beta1/speech_client_config.json b/packages/speech/src/v1/speech_client_config.json similarity index 77% rename from packages/speech/src/v1beta1/speech_client_config.json rename to packages/speech/src/v1/speech_client_config.json index da873998217..d5b79b33657 100644 --- a/packages/speech/src/v1beta1/speech_client_config.json +++ b/packages/speech/src/v1/speech_client_config.json @@ -1,38 +1,40 @@ { "interfaces": { - "google.cloud.speech.v1beta1.Speech": { + "google.cloud.speech.v1.Speech": { "retry_codes": { "idempotent": [ "DEADLINE_EXCEEDED", "UNAVAILABLE" ], - "non_idempotent": [] + "non_idempotent": [ + "UNAVAILABLE" + ] }, "retry_params": { "default": { "initial_retry_delay_millis": 100, "retry_delay_multiplier": 1.3, "max_retry_delay_millis": 60000, - "initial_rpc_timeout_millis": 60000, + "initial_rpc_timeout_millis": 190000, "rpc_timeout_multiplier": 1.0, - "max_rpc_timeout_millis": 60000, + "max_rpc_timeout_millis": 190000, "total_timeout_millis": 600000 } }, "methods": { - "SyncRecognize": { + "Recognize": { "timeout_millis": 60000, "retry_codes_name": "idempotent", "retry_params_name": "default" }, - "AsyncRecognize": { + "LongRunningRecognize": { "timeout_millis": 60000, - "retry_codes_name": "idempotent", + "retry_codes_name": "non_idempotent", "retry_params_name": "default" }, "StreamingRecognize": { "timeout_millis": 190000, - "retry_codes_name": "non_idempotent", + "retry_codes_name": "idempotent", "retry_params_name": "default" } } diff --git a/packages/speech/system-test/speech.js b/packages/speech/system-test/speech.js index d41f627d768..597cb879156 100644 --- a/packages/speech/system-test/speech.js +++ b/packages/speech/system-test/speech.js @@ -49,13 +49,15 @@ describe('Speech', function() { var OPTIONS = { encoding: 'LINEAR16', - sampleRate: 16000 + sampleRateHertz: 16000 }; var OPTIONS_VERBOSE = extend({}, OPTIONS, { verbose: true }); + var TRANSCRIPTION = 'how old is the Brooklyn Bridge'; + before(function(done) { async.waterfall([ function(next) { @@ -125,7 +127,7 @@ describe('Speech', function() { it('recognizes speech from local file', function(done) { speech.recognize(AUDIO_FILES.bridge.path, { // encoding should be automatically detected - sampleRate: 16000 + sampleRateHertz: 16000 }, assertSimplifiedResponse(done)); }); @@ -178,7 +180,7 @@ describe('Speech', function() { it('recognizes speech from local file', function(done) { var options = { // encoding should be automatically detected - sampleRate: 16000 + sampleRateHertz: 16000 }; var path = AUDIO_FILES.bridge.path; @@ -229,7 +231,7 @@ describe('Speech', function() { describe('createRecognizeStream', function() { it('recognizes speech from raw audio', function(done) { - var correctDetectionsEmitted = 0; + var transcribed = false; var responseEmitted = false; 
fs.createReadStream(AUDIO_FILES.bridge.path) @@ -244,41 +246,23 @@ describe('Speech', function() { responseEmitted = true; }) .on('data', function(data) { - switch (data.endpointerType) { - case Speech.endpointerTypes.START_OF_SPEECH: { - if (data.results.length === 0) { - correctDetectionsEmitted++; - } - return; - } - - case Speech.endpointerTypes.END_OF_AUDIO: { - if (data.results.length === 0) { - correctDetectionsEmitted++; - } - return; - } - - case Speech.endpointerTypes.ENDPOINTER_EVENT_UNSPECIFIED: { - var transcript = data.results; - if (transcript === 'how old is the Brooklyn Bridge') { - correctDetectionsEmitted++; - } - return; + if (data.speechEventType === 'SPEECH_EVENT_UNSPECIFIED') { + if (data.results === TRANSCRIPTION) { + transcribed = true; } } }) .on('end', function() { setTimeout(function() { assert.strictEqual(responseEmitted, true); - assert.strictEqual(correctDetectionsEmitted, 3); + assert.strictEqual(transcribed, true); done(); }, 1500); }); }); it('recognizes speech from raw audio in verbose mode', function(done) { - var correctDetectionsEmitted = 0; + var transcribed = false; var responseEmitted = false; fs.createReadStream(AUDIO_FILES.bridge.path) @@ -294,34 +278,16 @@ describe('Speech', function() { responseEmitted = true; }) .on('data', function(data) { - switch (data.endpointerType) { - case Speech.endpointerTypes.START_OF_SPEECH: { - if (data.results.length === 0) { - correctDetectionsEmitted++; - } - return; - } - - case Speech.endpointerTypes.END_OF_AUDIO: { - if (data.results.length === 0) { - correctDetectionsEmitted++; - } - return; - } - - case Speech.endpointerTypes.ENDPOINTER_EVENT_UNSPECIFIED: { - var transcript = data.results[0].transcript; - if (transcript === 'how old is the Brooklyn Bridge') { - correctDetectionsEmitted++; - } - return; + if (data.speechEventType === 'SPEECH_EVENT_UNSPECIFIED') { + if (data.results[0].transcript === TRANSCRIPTION) { + transcribed = true; } } }) .on('end', function() { setTimeout(function() { assert.strictEqual(responseEmitted, true); - assert.strictEqual(correctDetectionsEmitted, 3); + assert.strictEqual(transcribed, true); done(); }, 1500); }); @@ -331,7 +297,7 @@ describe('Speech', function() { function assertSimplifiedResponse(done) { return function(err, transcript) { assert.ifError(err); - assert.strictEqual(transcript, 'how old is the Brooklyn Bridge'); + assert.strictEqual(transcript, TRANSCRIPTION); done(); }; } @@ -343,7 +309,7 @@ describe('Speech', function() { assert(results.length > 0); var transcript = results[0].transcript; - assert.strictEqual(transcript, 'how old is the Brooklyn Bridge'); + assert.strictEqual(transcript, TRANSCRIPTION); done(); }; diff --git a/packages/speech/test/index.js b/packages/speech/test/index.js index 9058536389c..d7c81c90b90 100644 --- a/packages/speech/test/index.js +++ b/packages/speech/test/index.js @@ -47,10 +47,10 @@ function FakeGrpcService() { this.calledWith_ = arguments; } -var fakeV1Beta1Override; -function fakeV1Beta1() { - if (fakeV1Beta1Override) { - return fakeV1Beta1Override.apply(null, arguments); +var fakeV1Override; +function fakeV1() { + if (fakeV1Override) { + return fakeV1Override.apply(null, arguments); } return { @@ -83,7 +83,7 @@ describe('Speech', function() { Service: FakeGrpcService }, request: fakeRequest, - './v1beta1': fakeV1Beta1 + './v1': fakeV1 }); originalStaticMembers = Object.keys(Speech).reduce(function(statics, key) { @@ -93,7 +93,7 @@ describe('Speech', function() { }); beforeEach(function() { - fakeV1Beta1Override = null; + 
fakeV1Override = null; requestOverride = null; speech = new Speech(OPTIONS); }); @@ -128,7 +128,7 @@ describe('Speech', function() { it('should create a gax api client', function() { var expectedSpeechService = {}; - fakeV1Beta1Override = function(options) { + fakeV1Override = function(options) { var expected = extend({}, OPTIONS, { libName: 'gccl', libVersion: require('../package.json').version @@ -160,11 +160,6 @@ describe('Speech', function() { projectIdRequired: false, service: 'speech', protoServices: { - Speech: { - path: googleProtoFiles.speech.v1beta1, - service: 'cloud.speech', - apiVersion: 'v1beta1' - }, Operations: { path: googleProtoFiles('longrunning', 'operations.proto'), service: 'longrunning' @@ -178,21 +173,18 @@ describe('Speech', function() { }); }); - describe('endpointerTypes', function() { - var ENDPOINTER_TYPES = { - END_OF_AUDIO: 'END_OF_AUDIO', - END_OF_SPEECH: 'END_OF_SPEECH', - END_OF_UTTERANCE: 'END_OF_UTTERANCE', - ENDPOINTER_EVENT_UNSPECIFIED: 'ENDPOINTER_EVENT_UNSPECIFIED', - START_OF_SPEECH: 'START_OF_SPEECH' + describe('eventTypes', function() { + var EVENT_TYPES = { + END_OF_SINGLE_UTTERANCE: 'END_OF_SINGLE_UTTERANCE', + SPEECH_EVENT_UNSPECIFIED: 'SPEECH_EVENT_UNSPECIFIED' }; - it('should export static endpointerTypes', function() { - assert.deepEqual(Speech.endpointerTypes, ENDPOINTER_TYPES); + it('should export static eventTypes', function() { + assert.deepEqual(Speech.eventTypes, EVENT_TYPES); }); - it('should export instance endpointerTypes', function() { - assert.deepEqual(speech.endpointerTypes, ENDPOINTER_TYPES); + it('should export instance eventTypes', function() { + assert.deepEqual(speech.eventTypes, EVENT_TYPES); }); }); @@ -493,6 +485,29 @@ describe('Speech', function() { stream.emit('writing'); }); + it('should destroy user stream if request stream errors', function(done) { + var error = new Error('Error.'); + + stream.on('error', function(err) { + assert.strictEqual(error, err); + done(); + }); + + speech.api.Speech = { + streamingRecognize: function() { + var requestStream = through.obj(); + + setImmediate(function() { + requestStream.destroy(error); + }); + + return requestStream; + } + }; + + stream.emit('writing'); + }); + it('should emit the response event on the user stream', function(done) { var response = {}; @@ -523,7 +538,11 @@ describe('Speech', function() { requestStream.once('data', function(data) { assert.deepEqual(data, { - streamingConfig: CONFIG + streamingConfig: extend(true, { + config: { + languageCode: 'en-US' + } + }, CONFIG) }); done(); }); @@ -669,6 +688,34 @@ describe('Speech', function() { stream.emit('writing'); }); + + it('should allow specifying a languageCode', function(done) { + var languageCode = 'uk'; + + speech.api.Speech = { + streamingRecognize: function() { + var stream = through.obj(); + + stream.on('data', function(data) { + assert.strictEqual( + data.streamingConfig.config.languageCode, + languageCode + ); + done(); + }); + + return stream; + } + }; + + var stream = speech.createRecognizeStream({ + config: { + languageCode: languageCode + } + }); + + stream.emit('writing'); + }); }); describe('operation', function() { @@ -704,7 +751,7 @@ describe('Speech', function() { }; speech.api.Speech = { - syncRecognize: util.noop + recognize: util.noop }; }); @@ -725,10 +772,11 @@ describe('Speech', function() { it('should make the correct request', function(done) { speech.api.Speech = { - syncRecognize: function(reqOpts) { - var expectedConfig = extend({}, CONFIG, { - encoding: DETECTED_ENCODING - }); +
recognize: function(reqOpts) { + var expectedConfig = extend({ + encoding: DETECTED_ENCODING, + languageCode: 'en-US' + }, CONFIG); assert.deepEqual(reqOpts.config, expectedConfig); assert.strictEqual(reqOpts.audio, FOUND_FILE); @@ -740,6 +788,23 @@ describe('Speech', function() { speech.recognize(FILE, CONFIG, assert.ifError); }); + it('should allow setting a languageCode', function(done) { + var languageCode = 'uk'; + + var config = { + languageCode: languageCode + }; + + speech.api.Speech = { + recognize: function(reqOpts) { + assert.strictEqual(reqOpts.config.languageCode, languageCode); + done(); + } + }; + + speech.recognize(FILE, config, assert.ifError); + }); + it('should respect the provided encoding', function(done) { var config = { encoding: 'LINEAR32' @@ -750,7 +815,7 @@ describe('Speech', function() { }; speech.api.Speech = { - syncRecognize: function(reqOpts) { + recognize: function(reqOpts) { assert.strictEqual(reqOpts.config.encoding, config.encoding); done(); } @@ -768,7 +833,7 @@ describe('Speech', function() { }; speech.api.Speech = { - syncRecognize: function(reqOpts) { + recognize: function(reqOpts) { assert.strictEqual(reqOpts.config.encoding, expectedEncoding); done(); } @@ -796,7 +861,7 @@ describe('Speech', function() { beforeEach(function() { speech.api.Speech = { - syncRecognize: function(reqOpts, callback) { + recognize: function(reqOpts, callback) { callback(error, apiResponse); } }; @@ -816,43 +881,23 @@ describe('Speech', function() { var apiResponse = { results: [] }; - var decodedResponse = { - results: [] - }; var formattedResults = []; beforeEach(function() { - speech.protos = { - Speech: { - SyncRecognizeResponse: function() { - return decodedResponse; - } - } - }; - Speech.formatResults_ = function() { return formattedResults; }; speech.api.Speech = { - syncRecognize: function(reqOpts, callback) { + recognize: function(reqOpts, callback) { callback(null, apiResponse); } }; }); it('should return the detections & API response', function(done) { - speech.protos = { - Speech: { - SyncRecognizeResponse: function(response) { - assert.strictEqual(response, apiResponse); - return decodedResponse; - } - } - }; - Speech.formatResults_ = function(results, verboseMode) { - assert.strictEqual(results, decodedResponse.results); + assert.strictEqual(results, apiResponse.results); assert.strictEqual(verboseMode, false); return formattedResults; }; @@ -893,7 +938,7 @@ describe('Speech', function() { it('should delete verbose option from request object', function(done) { speech.api.Speech = { - syncRecognize: function(reqOpts) { + recognize: function(reqOpts) { assert.strictEqual(reqOpts.config.verbose, undefined); done(); } @@ -924,7 +969,7 @@ describe('Speech', function() { }; speech.api.Speech = { - asyncRecognize: util.noop + longRunningRecognize: util.noop }; }); @@ -939,9 +984,10 @@ describe('Speech', function() { it('should make the correct request', function(done) { speech.api.Speech = { - asyncRecognize: function(reqOpts) { + longRunningRecognize: function(reqOpts) { var expectedConfig = extend({}, CONFIG, { - encoding: DETECTED_ENCODING + encoding: DETECTED_ENCODING, + languageCode: 'en-US' }); assert.deepEqual(reqOpts.config, expectedConfig); @@ -954,6 +1000,23 @@ describe('Speech', function() { speech.startRecognition(FILE, CONFIG, assert.ifError); }); + it('should respect the provided language code', function(done) { + var languageCode = 'uk'; + + var config = { + languageCode: languageCode + }; + + speech.api.Speech = { + longRunningRecognize: 
function(reqOpts) { + assert.strictEqual(reqOpts.config.languageCode, languageCode); + done(); + } + }; + + speech.startRecognition(FILE, config, assert.ifError); + }); + it('should respect the provided encoding', function(done) { var config = { encoding: 'LINEAR32' @@ -964,7 +1027,7 @@ describe('Speech', function() { }; speech.api.Speech = { - asyncRecognize: function(reqOpts) { + longRunningRecognize: function(reqOpts) { assert.strictEqual(reqOpts.config.encoding, config.encoding); done(); } @@ -982,7 +1045,7 @@ describe('Speech', function() { }; speech.api.Speech = { - asyncRecognize: function(reqOpts) { + longRunningRecognize: function(reqOpts) { assert.strictEqual(reqOpts.config.encoding, expectedEncoding); done(); } @@ -1010,7 +1073,7 @@ describe('Speech', function() { it('should return the error & API response', function(done) { speech.api.Speech = { - asyncRecognize: function(reqOpts, callback) { + longRunningRecognize: function(reqOpts, callback) { callback(error, null, apiResponse); } }; @@ -1034,7 +1097,7 @@ describe('Speech', function() { it('should format the results', function(done) { speech.api.Speech = { - asyncRecognize: function(reqOpts, callback) { + longRunningRecognize: function(reqOpts, callback) { var operation = through.obj(); callback(null, operation, apiResponse); } @@ -1064,7 +1127,7 @@ describe('Speech', function() { it('should format results in verbose mode', function(done) { speech.api.Speech = { - asyncRecognize: function(reqOpts, callback) { + longRunningRecognize: function(reqOpts, callback) { var operation = through.obj(); callback(null, operation, apiResponse); } @@ -1088,7 +1151,7 @@ describe('Speech', function() { it('should delete verbose option from request object', function(done) { speech.api.Speech = { - asyncRecognize: function(reqOpts) { + longRunningRecognize: function(reqOpts) { assert.strictEqual(reqOpts.config.verbose, undefined); done(); } diff --git a/packages/speech/test/v1/v1.js b/packages/speech/test/v1/v1.js new file mode 100644 index 00000000000..3da63317a9e --- /dev/null +++ b/packages/speech/test/v1/v1.js @@ -0,0 +1,212 @@ +/* + * Copyright 2016 Google Inc. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +var assert = require('assert'); +var speechV1 = require('../../src/v1')(); +var through2 = require('through2'); + +var FAKE_STATUS_CODE = 1; +var error = new Error(); +error.code = FAKE_STATUS_CODE; + +describe('SpeechClient', function() { + describe('recognize', function() { + it('invokes recognize without error', function(done) { + var client = speechV1.speechClient(); + // Mock request + var config = {}; + var audio = {}; + var request = { + config : config, + audio : audio + }; + + // Mock response + var expectedResponse = {}; + + // Mock Grpc layer + client._recognize = mockSimpleGrpcMethod(request, expectedResponse); + + client.recognize(request, function(err, response) { + assert.ifError(err); + assert.deepStrictEqual(response, expectedResponse); + done(); + }); + }); + + it('invokes recognize with error', function(done) { + var client = speechV1.speechClient(); + // Mock request + var config = {}; + var audio = {}; + var request = { + config : config, + audio : audio + }; + + // Mock Grpc layer + client._recognize = mockSimpleGrpcMethod(request, null, error); + + client.recognize(request, function(err, response) { + assert(err instanceof Error); + assert.equal(err.code, FAKE_STATUS_CODE); + done(); + }); + }); + }); + + describe('longRunningRecognize', function() { + it('invokes longRunningRecognize without error', function(done) { + var client = speechV1.speechClient(); + // Mock request + var config = {}; + var audio = {}; + var request = { + config : config, + audio : audio + }; + + // Mock response + var expectedResponse = {}; + + // Mock Grpc layer + client._longRunningRecognize = mockLongRunningGrpcMethod(request, expectedResponse); + + client.longRunningRecognize(request).then(function(responses) { + var operation = responses[0]; + return operation.promise(); + }).then(function(responses) { + assert.deepStrictEqual(responses[0], expectedResponse); + done(); + }).catch(function(err) { + done(err); + }); + }); + + it('invokes longRunningRecognize with error', function(done) { + var client = speechV1.speechClient(); + // Mock request + var config = {}; + var audio = {}; + var request = { + config : config, + audio : audio + }; + + // Mock Grpc layer + client._longRunningRecognize = mockLongRunningGrpcMethod(request, null, error); + + client.longRunningRecognize(request).then(function(responses) { + var operation = responses[0]; + return operation.promise(); + }).then(function(responses) { + assert.fail(); + }).catch(function(err) { + assert(err instanceof Error); + assert.equal(err.code, FAKE_STATUS_CODE); + done(); + }); + }); + }); + + describe('streamingRecognize', function() { + it('invokes streamingRecognize without error', function(done) { + var client = speechV1.speechClient(); + // Mock request + var request = {}; + + // Mock response + var expectedResponse = {}; + + // Mock Grpc layer + client._streamingRecognize = mockBidiStreamingGrpcMethod(request, expectedResponse); + + var stream = client.streamingRecognize().on('data', function(response) { + assert.deepStrictEqual(response, expectedResponse); + done(); + }).on('error', function(err) { + done(err); + }); + + stream.write(request); + }); + + it('invokes streamingRecognize with error', function(done) { + var client = speechV1.speechClient(); + // Mock request + var request = {}; + + // Mock Grpc layer + client._streamingRecognize = mockBidiStreamingGrpcMethod(request, null, error); + + var stream = client.streamingRecognize().on('data', function(response) { + assert.fail(); + }).on('error', function(err) { +
assert(err instanceof Error); + assert.equal(err.code, FAKE_STATUS_CODE); + done(); + }); + + stream.write(request); + }); + }); + +}); + +function mockSimpleGrpcMethod(expectedRequest, response, error) { + return function(actualRequest, options, callback) { + assert.deepStrictEqual(actualRequest, expectedRequest); + if (error) { + callback(error); + } else if (response) { + callback(null, response); + } else { + callback(null); + } + }; +} + +function mockBidiStreamingGrpcMethod(expectedRequest, response, error) { + return function() { + var mockStream = through2.obj(function(chunk, enc, callback) { + assert.deepStrictEqual(chunk, expectedRequest); + if (error) { + callback(error); + } else { + callback(null, response); + } + }); + return mockStream; + }; +} + +function mockLongRunningGrpcMethod(expectedRequest, response, error) { + return function(request) { + assert.deepStrictEqual(request, expectedRequest); + var mockOperation = { + promise: function() { + return new Promise(function(resolve, reject) { + if (error) { + reject(error); + } else { + resolve([response]); + } + }); + } + }; + return Promise.resolve([mockOperation]); + }; +}
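Reviewer note: the user-facing surface of this patch reduces to a handful of renames. The sketch below is illustrative only and not part of the patch; the project ID, key file, and `./audio.raw` (assumed to be a 16 kHz LINEAR16 recording) are placeholders. It shows `sampleRate` becoming `sampleRateHertz`, the `'en-US'` default that `recognize`, `startRecognition`, and `createRecognizeStream` now apply when `languageCode` is omitted, and the renamed GAPIC methods backing these calls (`syncRecognize` → `recognize`, `asyncRecognize` → `longRunningRecognize`).

```js
var speech = require('@google-cloud/speech')({
  projectId: 'your-project-id', // placeholder
  keyFilename: '/path/to/keyfile.json' // placeholder
});

// v1 config: `sampleRateHertz` replaces v1beta1's `sampleRate`, and
// `languageCode` is filled in with 'en-US' by this library if omitted.
var config = {
  encoding: 'LINEAR16',
  sampleRateHertz: 16000,
  languageCode: 'en-US'
};

// Short audio, synchronous recognition (backed by Recognize,
// formerly SyncRecognize).
speech.recognize('./audio.raw', config, function(err, transcript) {
  // transcript = 'how old is the Brooklyn Bridge'
});

// Long audio via a long-running operation (backed by
// LongRunningRecognize, formerly AsyncRecognize).
speech.startRecognition('./audio.raw', config, function(err, operation) {
  if (err) {
    return;
  }

  operation
    .on('error', console.error)
    .on('complete', function(transcript) {
      // transcript = 'how old is the Brooklyn Bridge'
    });
});
```

Streaming callers should also note that `data` events from `createRecognizeStream` now carry a `speechEventType` property (compare against `Speech.eventTypes`) instead of v1beta1's `endpointerType`.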