diff --git a/generative-ai/snippets/test/countTokens.test.js b/generative-ai/snippets/test/countTokens.test.js
index 9cae763dba..027151d3f2 100644
--- a/generative-ai/snippets/test/countTokens.test.js
+++ b/generative-ai/snippets/test/countTokens.test.js
@@ -17,17 +17,24 @@
 const {assert} = require('chai');
 const {describe, it} = require('mocha');
 const cp = require('child_process');
-
 const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});
 
+const projectId = process.env.CAIP_PROJECT_ID;
+const location = process.env.LOCATION;
+const model = 'gemini-1.0-pro';
+
 describe('Count tokens', async () => {
-  const project = 'cloud-llm-preview1';
-  const location = 'us-central1';
-  const model = 'gemini-1.0-pro';
+  /**
+   * TODO(developer): Uncomment these variables before running the sample.
+   * (Not necessary if passing values as arguments)
+   */
+  // const projectId = 'YOUR_PROJECT_ID';
+  // const location = 'YOUR_LOCATION';
+  // const model = 'gemini-1.0-pro';
 
   it('should count tokens', async () => {
     const output = execSync(
-      `node ./countTokens.js ${project} ${location} ${model}`
+      `node ./countTokens.js ${projectId} ${location} ${model}`
     );
 
     // Expect 6 tokens
diff --git a/generative-ai/snippets/test/functionCallingStreamChat.test.js b/generative-ai/snippets/test/functionCallingStreamChat.test.js
index 00439a3d04..385d4efb3b 100644
--- a/generative-ai/snippets/test/functionCallingStreamChat.test.js
+++ b/generative-ai/snippets/test/functionCallingStreamChat.test.js
@@ -17,17 +17,24 @@
 const {assert} = require('chai');
 const {describe, it} = require('mocha');
 const cp = require('child_process');
-
 const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});
 
+const projectId = process.env.CAIP_PROJECT_ID;
+const location = process.env.LOCATION;
+const model = 'gemini-1.0-pro';
+
 describe('Generative AI Function Calling Stream Chat', () => {
-  const project = 'cloud-llm-preview1';
-  const location = 'us-central1';
-  const model = 'gemini-1.0-pro';
+  /**
+   * TODO(developer): Uncomment these variables before running the sample.
+   * (Not necessary if passing values as arguments)
+   */
+  // const projectId = 'YOUR_PROJECT_ID';
+  // const location = 'YOUR_LOCATION';
+  // const model = 'gemini-1.0-pro';
 
   it('should create stream chat and begin the conversation the same in each instance', async () => {
     const output = execSync(
-      `node ./functionCallingStreamChat.js ${project} ${location} ${model}`
+      `node ./functionCallingStreamChat.js ${projectId} ${location} ${model}`
     );
 
     // Assert that the response is what we expect
diff --git a/generative-ai/snippets/test/functionCallingStreamContent.test.js b/generative-ai/snippets/test/functionCallingStreamContent.test.js
index 7524a7493e..5b24f8e609 100644
--- a/generative-ai/snippets/test/functionCallingStreamContent.test.js
+++ b/generative-ai/snippets/test/functionCallingStreamContent.test.js
@@ -17,17 +17,24 @@
 const {assert} = require('chai');
 const {describe, it} = require('mocha');
 const cp = require('child_process');
-
 const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});
 
+const projectId = process.env.CAIP_PROJECT_ID;
+const location = process.env.LOCATION;
+const model = 'gemini-1.0-pro';
+
 describe('Generative AI Function Calling Stream Content', () => {
-  const project = 'cloud-llm-preview1';
-  const location = 'us-central1';
-  const model = 'gemini-1.0-pro';
+  /**
+   * TODO(developer): Uncomment these variables before running the sample.
+   * (Not necessary if passing values as arguments)
+   */
+  // const projectId = 'YOUR_PROJECT_ID';
+  // const location = 'YOUR_LOCATION';
+  // const model = 'gemini-1.0-pro';
 
   it('should create stream chat and begin the conversation the same in each instance', async () => {
     const output = execSync(
-      `node ./functionCallingStreamContent.js ${project} ${location} ${model}`
+      `node ./functionCallingStreamContent.js ${projectId} ${location} ${model}`
     );
 
     // Assert that the response is what we expect
diff --git a/generative-ai/snippets/test/nonStreamingChat.test.js b/generative-ai/snippets/test/nonStreamingChat.test.js
index 9a6ccdc50b..73eea8fae4 100644
--- a/generative-ai/snippets/test/nonStreamingChat.test.js
+++ b/generative-ai/snippets/test/nonStreamingChat.test.js
@@ -17,22 +17,31 @@
 const {assert} = require('chai');
 const {describe, it} = require('mocha');
 const cp = require('child_process');
-
 const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});
 
+const projectId = process.env.CAIP_PROJECT_ID;
+const location = process.env.LOCATION;
+const model = 'gemini-1.0-pro';
+
 describe('Generative AI NonStreaming Chat', async () => {
-  const project = 'cloud-llm-preview1';
-  const location = 'us-central1';
-  const model = 'gemini-1.0-pro';
+  /**
+   * TODO(developer): Uncomment these variables before running the sample.
+   * (Not necessary if passing values as arguments)
+   */
+  // const projectId = 'YOUR_PROJECT_ID';
+  // const location = 'YOUR_LOCATION';
+  // const model = 'gemini-1.0-pro';
 
-  it('should create nonstreaming chat and begin the conversation the same in each instance', async () => {
-    const output = execSync(
-      `node ./nonStreamingChat.js ${project} ${location} ${model}`
-    );
+  describe('Generative AI NonStreaming Chat', async () => {
+    it('should create nonstreaming chat and begin the conversation the same in each instance', async () => {
+      const output = execSync(
+        `node ./nonStreamingChat.js ${projectId} ${location} ${model}`
+      );
 
-    // Ensure that the beginning of the conversation is consistent
-    assert(output.match(/User: Hello/));
-    assert(output.match(/User: Can you tell me a scientific fun fact?/));
-    assert(output.match(/User: How can I learn more about that?/));
+      // Ensure that the beginning of the conversation is consistent
+      assert(output.match(/User: Hello/));
+      assert(output.match(/User: Can you tell me a scientific fun fact?/));
+      assert(output.match(/User: How can I learn more about that?/));
+    });
   });
 });
diff --git a/generative-ai/snippets/test/nonStreamingContent.test.js b/generative-ai/snippets/test/nonStreamingContent.test.js
index f0eab652cf..3c5b795a13 100644
--- a/generative-ai/snippets/test/nonStreamingContent.test.js
+++ b/generative-ai/snippets/test/nonStreamingContent.test.js
@@ -17,17 +17,24 @@
 const {assert} = require('chai');
 const {describe, it} = require('mocha');
 const cp = require('child_process');
-
 const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});
 
+const projectId = process.env.CAIP_PROJECT_ID;
+const location = process.env.LOCATION;
+const model = 'gemini-1.0-pro';
+
 describe('Generative AI NonStreaming Content', () => {
-  const project = 'cloud-llm-preview1';
-  const location = 'us-central1';
-  const model = 'gemini-1.0-pro';
+  /**
+   * TODO(developer): Uncomment these variables before running the sample.
+   * (Not necessary if passing values as arguments)
+   */
+  // const projectId = 'YOUR_PROJECT_ID';
+  // const location = 'YOUR_LOCATION';
+  // const model = 'gemini-1.0-pro';
 
   it('should create nonstreaming content and begin the conversation the same in each instance', async () => {
     const output = execSync(
-      `node ./nonStreamingContent.js ${project} ${location} ${model}`
+      `node ./nonStreamingContent.js ${projectId} ${location} ${model}`
    );
 
     // Ensure that the beginning of the conversation is consistent
diff --git a/generative-ai/snippets/test/nonStreamingMultipartContent.test.js b/generative-ai/snippets/test/nonStreamingMultipartContent.test.js
index ecfeb2eae3..c48e11abe7 100644
--- a/generative-ai/snippets/test/nonStreamingMultipartContent.test.js
+++ b/generative-ai/snippets/test/nonStreamingMultipartContent.test.js
@@ -17,18 +17,26 @@
 const {assert} = require('chai');
 const {describe, it} = require('mocha');
 const cp = require('child_process');
-
 const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});
 
+const projectId = process.env.CAIP_PROJECT_ID;
+const location = process.env.LOCATION;
+const model = 'gemini-1.0-pro-vision';
+
 describe('Generative AI NonStreaming Multipart Content', () => {
-  const project = 'cloud-llm-preview1';
-  const location = 'us-central1';
-  const model = 'gemini-1.0-pro-vision';
+  /**
+   * TODO(developer): Uncomment these variables before running the sample.
+   * (Not necessary if passing values as arguments)
+   */
+  // const projectId = 'YOUR_PROJECT_ID';
+  // const location = 'YOUR_LOCATION';
+  // const model = 'gemini-1.0-pro-vision';
+
   const image = 'gs://generativeai-downloads/images/scones.jpg';
 
   it('should create nonstreaming multipart content and begin the conversation the same in each instance', async () => {
     const output = execSync(
-      `node ./nonStreamingMultipartContent.js ${project} ${location} ${model} ${image}`
+      `node ./nonStreamingMultipartContent.js ${projectId} ${location} ${model} ${image}`
     );
 
     // Ensure that the conversation is what we expect for this scone image
diff --git a/generative-ai/snippets/test/safetySettings.test.js b/generative-ai/snippets/test/safetySettings.test.js
index 454ae29c58..046b0dfca1 100644
--- a/generative-ai/snippets/test/safetySettings.test.js
+++ b/generative-ai/snippets/test/safetySettings.test.js
@@ -17,17 +17,24 @@
 const {assert} = require('chai');
 const {describe, it} = require('mocha');
 const cp = require('child_process');
-
 const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});
 
+const projectId = process.env.CAIP_PROJECT_ID;
+const location = process.env.LOCATION;
+const model = 'gemini-1.0-pro';
+
 describe('Safety settings', async () => {
-  const project = 'cloud-llm-preview1';
-  const location = 'us-central1';
-  const model = 'gemini-1.0-pro';
+  /**
+   * TODO(developer): Uncomment these variables before running the sample.
+   * (Not necessary if passing values as arguments)
+   */
+  // const projectId = 'YOUR_PROJECT_ID';
+  // const location = 'YOUR_LOCATION';
+  // const model = 'gemini-1.0-pro';
 
   it('should reject a dangerous request', async () => {
     const output = execSync(
-      `node ./safetySettings.js ${project} ${location} ${model}`
+      `node ./safetySettings.js ${projectId} ${location} ${model}`
     );
 
     // Expect rejection due to safety concerns
diff --git a/generative-ai/snippets/test/sendMultiModalPromptWithImage.test.js b/generative-ai/snippets/test/sendMultiModalPromptWithImage.test.js
index 91a951552b..37db1d2349 100644
--- a/generative-ai/snippets/test/sendMultiModalPromptWithImage.test.js
+++ b/generative-ai/snippets/test/sendMultiModalPromptWithImage.test.js
@@ -17,17 +17,24 @@
 const {assert} = require('chai');
 const {describe, it} = require('mocha');
 const cp = require('child_process');
-
 const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});
 
+const projectId = process.env.CAIP_PROJECT_ID;
+const location = process.env.LOCATION;
+const model = 'gemini-1.0-pro-vision';
+
 describe('Generative AI Stream MultiModal with Image', () => {
-  const project = 'cloud-llm-preview1';
-  const location = 'us-central1';
-  const model = 'gemini-1.0-pro-vision';
+  /**
+   * TODO(developer): Uncomment these variables before running the sample.
+   * (Not necessary if passing values as arguments)
+   */
+  // const projectId = 'YOUR_PROJECT_ID';
+  // const location = 'YOUR_LOCATION';
+  // const model = 'gemini-1.0-pro-vision';
 
   it('should create stream multimodal content', async () => {
     const output = execSync(
-      `node ./sendMultiModalPromptWithImage.js ${project} ${location} ${model}`
+      `node ./sendMultiModalPromptWithImage.js ${projectId} ${location} ${model}`
     );
     // Ensure that the conversation is what we expect for these images
     assert(output.match(/city: Rio de Janeiro, Landmark: Christ the Redeemer/));
diff --git a/generative-ai/snippets/test/sendMultiModalPromptWithVideo.test.js b/generative-ai/snippets/test/sendMultiModalPromptWithVideo.test.js
index 3b7972890e..fc6f3090ac 100644
--- a/generative-ai/snippets/test/sendMultiModalPromptWithVideo.test.js
+++ b/generative-ai/snippets/test/sendMultiModalPromptWithVideo.test.js
@@ -17,17 +17,24 @@
 const {assert} = require('chai');
 const {describe, it} = require('mocha');
 const cp = require('child_process');
-
 const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});
 
+const projectId = process.env.CAIP_PROJECT_ID;
+const location = process.env.LOCATION;
+const model = 'gemini-1.0-pro-vision';
+
 describe('Generative AI Stream MultiModal with Video', () => {
-  const project = 'cloud-llm-preview1';
-  const location = 'us-central1';
-  const model = 'gemini-1.0-pro-vision';
+  /**
+   * TODO(developer): Uncomment these variables before running the sample.
+   * (Not necessary if passing values as arguments)
+   */
+  // const projectId = 'YOUR_PROJECT_ID';
+  // const location = 'YOUR_LOCATION';
+  // const model = 'gemini-1.0-pro-vision';
 
   it('should create stream multimodal content', async () => {
     const output = execSync(
-      `node ./sendMultiModalPromptWithVideo.js ${project} ${location} ${model}`
+      `node ./sendMultiModalPromptWithVideo.js ${projectId} ${location} ${model}`
     );
     // Ensure that the conversation is what we expect for these images
     assert(output.match(/Zootopia/));
diff --git a/generative-ai/snippets/test/streamChat.test.js b/generative-ai/snippets/test/streamChat.test.js
index 1c9a128d6b..117a6dfe47 100644
--- a/generative-ai/snippets/test/streamChat.test.js
+++ b/generative-ai/snippets/test/streamChat.test.js
@@ -17,17 +17,24 @@
 const {assert} = require('chai');
 const {describe, it} = require('mocha');
 const cp = require('child_process');
-
 const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});
 
+const projectId = process.env.CAIP_PROJECT_ID;
+const location = process.env.LOCATION;
+const model = 'gemini-1.0-pro';
+
 describe('Generative AI Stream Chat', () => {
-  const project = 'cloud-llm-preview1';
-  const location = 'us-central1';
-  const model = 'gemini-1.0-pro';
+  /**
+   * TODO(developer): Uncomment these variables before running the sample.
+   * (Not necessary if passing values as arguments)
+   */
+  // const projectId = 'YOUR_PROJECT_ID';
+  // const location = 'YOUR_LOCATION';
+  // const model = 'gemini-1.0-pro';
 
   it('should create stream chat and begin the conversation the same in each instance', async () => {
     const output = execSync(
-      `node ./streamChat.js ${project} ${location} ${model}`
+      `node ./streamChat.js ${projectId} ${location} ${model}`
    );
 
     // Assert that the advice given for learning is what we expect
diff --git a/generative-ai/snippets/test/streamContent.test.js b/generative-ai/snippets/test/streamContent.test.js
index 03a0f47676..3882905cb3 100644
--- a/generative-ai/snippets/test/streamContent.test.js
+++ b/generative-ai/snippets/test/streamContent.test.js
@@ -17,17 +17,24 @@
 const {assert} = require('chai');
 const {describe, it} = require('mocha');
 const cp = require('child_process');
-
 const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});
 
+const projectId = process.env.CAIP_PROJECT_ID;
+const location = process.env.LOCATION;
+const model = 'gemini-1.0-pro';
+
 describe('Generative AI Stream Content', () => {
-  const project = 'cloud-llm-preview1';
-  const location = 'us-central1';
-  const model = 'gemini-1.0-pro';
+  /**
+   * TODO(developer): Uncomment these variables before running the sample.
+   * (Not necessary if passing values as arguments)
+   */
+  // const projectId = 'YOUR_PROJECT_ID';
+  // const location = 'YOUR_LOCATION';
+  // const model = 'gemini-1.0-pro';
 
   it('should create stream content', async () => {
     const output = execSync(
-      `node ./streamContent.js ${project} ${location} ${model}`
+      `node ./streamContent.js ${projectId} ${location} ${model}`
     );
     // Ensure that the beginning of the conversation is consistent
     assert(output.match(/Prompt:/));
diff --git a/generative-ai/snippets/test/streamMultipartContent.test.js b/generative-ai/snippets/test/streamMultipartContent.test.js
index 06153467d8..14ef562e71 100644
--- a/generative-ai/snippets/test/streamMultipartContent.test.js
+++ b/generative-ai/snippets/test/streamMultipartContent.test.js
@@ -17,18 +17,26 @@
 const {assert} = require('chai');
 const {describe, it} = require('mocha');
 const cp = require('child_process');
-
 const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});
 
+const projectId = process.env.CAIP_PROJECT_ID;
+const location = process.env.LOCATION;
+const model = 'gemini-1.0-pro-vision';
+
 describe('Generative AI Stream Multipart Content', () => {
-  const project = 'cloud-llm-preview1';
-  const location = 'us-central1';
-  const model = 'gemini-1.0-pro-vision';
+  /**
+   * TODO(developer): Uncomment these variables before running the sample.
+   * (Not necessary if passing values as arguments)
+   */
+  // const projectId = 'YOUR_PROJECT_ID';
+  // const location = 'YOUR_LOCATION';
+  // const model = 'gemini-1.0-pro-vision';
+
   const image = 'gs://generativeai-downloads/images/scones.jpg';
 
   it('should create stream multipart content', async () => {
     const output = execSync(
-      `node ./streamMultipartContent.js ${project} ${location} ${model} ${image}`
+      `node ./streamMultipartContent.js ${projectId} ${location} ${model} ${image}`
     );
     // Split up conversation output
     const conversation = output.split('\n');