Skip to content

Commit

Permalink
fix: updating genai tests to run on same gcp project as ai platform s…
Browse files Browse the repository at this point in the history
…amples (GoogleCloudPlatform#3596)

* refactor: updating tests to run on same gcp project as ai platform snippets

* refactor: replace hardcoded location with environment variable

* fix: resolve linting
  • Loading branch information
pattishin authored Feb 21, 2024
1 parent a2248a8 commit 4892dff
Show file tree
Hide file tree
Showing 12 changed files with 155 additions and 67 deletions.
17 changes: 12 additions & 5 deletions generative-ai/snippets/test/countTokens.test.js
Original file line number Diff line number Diff line change
Expand Up @@ -17,17 +17,24 @@
const {assert} = require('chai');
const {describe, it} = require('mocha');
const cp = require('child_process');

const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});

const projectId = process.env.CAIP_PROJECT_ID;
const location = process.env.LOCATION;
const model = 'gemini-1.0-pro';

describe('Count tokens', async () => {
const project = 'cloud-llm-preview1';
const location = 'us-central1';
const model = 'gemini-1.0-pro';
/**
 * TODO(developer): Uncomment these variables before running the sample.
* (Not necessary if passing values as arguments)
*/
// const projectId = 'YOUR_PROJECT_ID';
// const location = 'YOUR_LOCATION';
// const model = 'gemini-1.0-pro';

it('should count tokens', async () => {
const output = execSync(
`node ./countTokens.js ${project} ${location} ${model}`
`node ./countTokens.js ${projectId} ${location} ${model}`
);

// Expect 6 tokens
Expand Down
17 changes: 12 additions & 5 deletions generative-ai/snippets/test/functionCallingStreamChat.test.js
Original file line number Diff line number Diff line change
Expand Up @@ -17,17 +17,24 @@
const {assert} = require('chai');
const {describe, it} = require('mocha');
const cp = require('child_process');

const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});

const projectId = process.env.CAIP_PROJECT_ID;
const location = process.env.LOCATION;
const model = 'gemini-1.0-pro';

describe('Generative AI Function Calling Stream Chat', () => {
const project = 'cloud-llm-preview1';
const location = 'us-central1';
const model = 'gemini-1.0-pro';
/**
 * TODO(developer): Uncomment these variables before running the sample.
* (Not necessary if passing values as arguments)
*/
// const projectId = 'YOUR_PROJECT_ID';
// const location = 'YOUR_LOCATION';
// const model = 'gemini-1.0-pro';

it('should create stream chat and begin the conversation the same in each instance', async () => {
const output = execSync(
`node ./functionCallingStreamChat.js ${project} ${location} ${model}`
`node ./functionCallingStreamChat.js ${projectId} ${location} ${model}`
);

// Assert that the response is what we expect
Expand Down
17 changes: 12 additions & 5 deletions generative-ai/snippets/test/functionCallingStreamContent.test.js
Original file line number Diff line number Diff line change
Expand Up @@ -17,17 +17,24 @@
const {assert} = require('chai');
const {describe, it} = require('mocha');
const cp = require('child_process');

const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});

const projectId = process.env.CAIP_PROJECT_ID;
const location = process.env.LOCATION;
const model = 'gemini-1.0-pro';

describe('Generative AI Function Calling Stream Content', () => {
const project = 'cloud-llm-preview1';
const location = 'us-central1';
const model = 'gemini-1.0-pro';
/**
 * TODO(developer): Uncomment these variables before running the sample.
* (Not necessary if passing values as arguments)
*/
// const projectId = 'YOUR_PROJECT_ID';
// const location = 'YOUR_LOCATION';
// const model = 'gemini-1.0-pro';

it('should create stream chat and begin the conversation the same in each instance', async () => {
const output = execSync(
`node ./functionCallingStreamContent.js ${project} ${location} ${model}`
`node ./functionCallingStreamContent.js ${projectId} ${location} ${model}`
);

// Assert that the response is what we expect
Expand Down
33 changes: 21 additions & 12 deletions generative-ai/snippets/test/nonStreamingChat.test.js
Original file line number Diff line number Diff line change
Expand Up @@ -17,22 +17,31 @@
const {assert} = require('chai');
const {describe, it} = require('mocha');
const cp = require('child_process');

const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});

const projectId = process.env.CAIP_PROJECT_ID;
const location = process.env.LOCATION;
const model = 'gemini-1.0-pro';

describe('Generative AI NonStreaming Chat', async () => {
const project = 'cloud-llm-preview1';
const location = 'us-central1';
const model = 'gemini-1.0-pro';
/**
 * TODO(developer): Uncomment these variables before running the sample.
* (Not necessary if passing values as arguments)
*/
// const projectId = 'YOUR_PROJECT_ID';
// const location = 'YOUR_LOCATION';
// const model = 'gemini-1.0-pro';

it('should create nonstreaming chat and begin the conversation the same in each instance', async () => {
const output = execSync(
`node ./nonStreamingChat.js ${project} ${location} ${model}`
);
describe('Generative AI NonStreaming Chat', async () => {
it('should create nonstreaming chat and begin the conversation the same in each instance', async () => {
const output = execSync(
`node ./nonStreamingChat.js ${projectId} ${location} ${model}`
);

// Ensure that the beginning of the conversation is consistent
assert(output.match(/User: Hello/));
assert(output.match(/User: Can you tell me a scientific fun fact?/));
assert(output.match(/User: How can I learn more about that?/));
// Ensure that the beginning of the conversation is consistent
assert(output.match(/User: Hello/));
assert(output.match(/User: Can you tell me a scientific fun fact?/));
assert(output.match(/User: How can I learn more about that?/));
});
});
});
17 changes: 12 additions & 5 deletions generative-ai/snippets/test/nonStreamingContent.test.js
Original file line number Diff line number Diff line change
Expand Up @@ -17,17 +17,24 @@
const {assert} = require('chai');
const {describe, it} = require('mocha');
const cp = require('child_process');

const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});

const projectId = process.env.CAIP_PROJECT_ID;
const location = process.env.LOCATION;
const model = 'gemini-1.0-pro';

describe('Generative AI NonStreaming Content', () => {
const project = 'cloud-llm-preview1';
const location = 'us-central1';
const model = 'gemini-1.0-pro';
/**
 * TODO(developer): Uncomment these variables before running the sample.
* (Not necessary if passing values as arguments)
*/
// const projectId = 'YOUR_PROJECT_ID';
// const location = 'YOUR_LOCATION';
// const model = 'gemini-1.0-pro';

it('should create nonstreaming content and begin the conversation the same in each instance', async () => {
const output = execSync(
`node ./nonStreamingContent.js ${project} ${location} ${model}`
`node ./nonStreamingContent.js ${projectId} ${location} ${model}`
);

// Ensure that the beginning of the conversation is consistent
Expand Down
18 changes: 13 additions & 5 deletions generative-ai/snippets/test/nonStreamingMultipartContent.test.js
Original file line number Diff line number Diff line change
Expand Up @@ -17,18 +17,26 @@
const {assert} = require('chai');
const {describe, it} = require('mocha');
const cp = require('child_process');

const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});

const projectId = process.env.CAIP_PROJECT_ID;
const location = process.env.LOCATION;
const model = 'gemini-1.0-pro-vision';

describe('Generative AI NonStreaming Multipart Content', () => {
const project = 'cloud-llm-preview1';
const location = 'us-central1';
const model = 'gemini-1.0-pro-vision';
/**
 * TODO(developer): Uncomment these variables before running the sample.
* (Not necessary if passing values as arguments)
*/
// const projectId = 'YOUR_PROJECT_ID';
// const location = 'YOUR_LOCATION';
// const model = 'gemini-1.0-pro-vision';

const image = 'gs://generativeai-downloads/images/scones.jpg';

it('should create nonstreaming multipart content and begin the conversation the same in each instance', async () => {
const output = execSync(
`node ./nonStreamingMultipartContent.js ${project} ${location} ${model} ${image}`
`node ./nonStreamingMultipartContent.js ${projectId} ${location} ${model} ${image}`
);

// Ensure that the conversation is what we expect for this scone image
Expand Down
17 changes: 12 additions & 5 deletions generative-ai/snippets/test/safetySettings.test.js
Original file line number Diff line number Diff line change
Expand Up @@ -17,17 +17,24 @@
const {assert} = require('chai');
const {describe, it} = require('mocha');
const cp = require('child_process');

const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});

const projectId = process.env.CAIP_PROJECT_ID;
const location = process.env.LOCATION;
const model = 'gemini-1.0-pro';

describe('Safety settings', async () => {
const project = 'cloud-llm-preview1';
const location = 'us-central1';
const model = 'gemini-1.0-pro';
/**
 * TODO(developer): Uncomment these variables before running the sample.
* (Not necessary if passing values as arguments)
*/
// const projectId = 'YOUR_PROJECT_ID';
// const location = 'YOUR_LOCATION';
// const model = 'gemini-1.0-pro';

it('should reject a dangerous request', async () => {
const output = execSync(
`node ./safetySettings.js ${project} ${location} ${model}`
`node ./safetySettings.js ${projectId} ${location} ${model}`
);

// Expect rejection due to safety concerns
Expand Down
17 changes: 12 additions & 5 deletions generative-ai/snippets/test/sendMultiModalPromptWithImage.test.js
Original file line number Diff line number Diff line change
Expand Up @@ -17,17 +17,24 @@
const {assert} = require('chai');
const {describe, it} = require('mocha');
const cp = require('child_process');

const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});

const projectId = process.env.CAIP_PROJECT_ID;
const location = process.env.LOCATION;
const model = 'gemini-1.0-pro-vision';

describe('Generative AI Stream MultiModal with Image', () => {
const project = 'cloud-llm-preview1';
const location = 'us-central1';
const model = 'gemini-1.0-pro-vision';
/**
 * TODO(developer): Uncomment these variables before running the sample.
* (Not necessary if passing values as arguments)
*/
// const projectId = 'YOUR_PROJECT_ID';
// const location = 'YOUR_LOCATION';
// const model = 'gemini-1.0-pro-vision';

it('should create stream multimodal content', async () => {
const output = execSync(
`node ./sendMultiModalPromptWithImage.js ${project} ${location} ${model}`
`node ./sendMultiModalPromptWithImage.js ${projectId} ${location} ${model}`
);
// Ensure that the conversation is what we expect for these images
assert(output.match(/city: Rio de Janeiro, Landmark: Christ the Redeemer/));
Expand Down
17 changes: 12 additions & 5 deletions generative-ai/snippets/test/sendMultiModalPromptWithVideo.test.js
Original file line number Diff line number Diff line change
Expand Up @@ -17,17 +17,24 @@
const {assert} = require('chai');
const {describe, it} = require('mocha');
const cp = require('child_process');

const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});

const projectId = process.env.CAIP_PROJECT_ID;
const location = process.env.LOCATION;
const model = 'gemini-1.0-pro-vision';

describe('Generative AI Stream MultiModal with Video', () => {
const project = 'cloud-llm-preview1';
const location = 'us-central1';
const model = 'gemini-1.0-pro-vision';
/**
 * TODO(developer): Uncomment these variables before running the sample.
* (Not necessary if passing values as arguments)
*/
// const projectId = 'YOUR_PROJECT_ID';
// const location = 'YOUR_LOCATION';
// const model = 'gemini-1.0-pro-vision';

it('should create stream multimodal content', async () => {
const output = execSync(
`node ./sendMultiModalPromptWithVideo.js ${project} ${location} ${model}`
`node ./sendMultiModalPromptWithVideo.js ${projectId} ${location} ${model}`
);
// Ensure that the conversation is what we expect for these images
assert(output.match(/Zootopia/));
Expand Down
17 changes: 12 additions & 5 deletions generative-ai/snippets/test/streamChat.test.js
Original file line number Diff line number Diff line change
Expand Up @@ -17,17 +17,24 @@
const {assert} = require('chai');
const {describe, it} = require('mocha');
const cp = require('child_process');

const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});

const projectId = process.env.CAIP_PROJECT_ID;
const location = process.env.LOCATION;
const model = 'gemini-1.0-pro';

describe('Generative AI Stream Chat', () => {
const project = 'cloud-llm-preview1';
const location = 'us-central1';
const model = 'gemini-1.0-pro';
/**
 * TODO(developer): Uncomment these variables before running the sample.
* (Not necessary if passing values as arguments)
*/
// const projectId = 'YOUR_PROJECT_ID';
// const location = 'YOUR_LOCATION';
// const model = 'gemini-1.0-pro';

it('should create stream chat and begin the conversation the same in each instance', async () => {
const output = execSync(
`node ./streamChat.js ${project} ${location} ${model}`
`node ./streamChat.js ${projectId} ${location} ${model}`
);

// Assert that the advice given for learning is what we expect
Expand Down
17 changes: 12 additions & 5 deletions generative-ai/snippets/test/streamContent.test.js
Original file line number Diff line number Diff line change
Expand Up @@ -17,17 +17,24 @@
const {assert} = require('chai');
const {describe, it} = require('mocha');
const cp = require('child_process');

const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});

const projectId = process.env.CAIP_PROJECT_ID;
const location = process.env.LOCATION;
const model = 'gemini-1.0-pro';

describe('Generative AI Stream Content', () => {
const project = 'cloud-llm-preview1';
const location = 'us-central1';
const model = 'gemini-1.0-pro';
/**
 * TODO(developer): Uncomment these variables before running the sample.
* (Not necessary if passing values as arguments)
*/
// const projectId = 'YOUR_PROJECT_ID';
// const location = 'YOUR_LOCATION';
// const model = 'gemini-1.0-pro';

it('should create stream content', async () => {
const output = execSync(
`node ./streamContent.js ${project} ${location} ${model}`
`node ./streamContent.js ${projectId} ${location} ${model}`
);
// Ensure that the beginning of the conversation is consistent
assert(output.match(/Prompt:/));
Expand Down
18 changes: 13 additions & 5 deletions generative-ai/snippets/test/streamMultipartContent.test.js
Original file line number Diff line number Diff line change
Expand Up @@ -17,18 +17,26 @@
const {assert} = require('chai');
const {describe, it} = require('mocha');
const cp = require('child_process');

const execSync = cmd => cp.execSync(cmd, {encoding: 'utf-8'});

const projectId = process.env.CAIP_PROJECT_ID;
const location = process.env.LOCATION;
const model = 'gemini-1.0-pro-vision';

describe('Generative AI Stream Multipart Content', () => {
const project = 'cloud-llm-preview1';
const location = 'us-central1';
const model = 'gemini-1.0-pro-vision';
/**
 * TODO(developer): Uncomment these variables before running the sample.
* (Not necessary if passing values as arguments)
*/
// const projectId = 'YOUR_PROJECT_ID';
// const location = 'YOUR_LOCATION';
// const model = 'gemini-1.0-pro-vision';

const image = 'gs://generativeai-downloads/images/scones.jpg';

it('should create stream multipart content', async () => {
const output = execSync(
`node ./streamMultipartContent.js ${project} ${location} ${model} ${image}`
`node ./streamMultipartContent.js ${projectId} ${location} ${model} ${image}`
);
// Split up conversation output
const conversation = output.split('\n');
Expand Down

0 comments on commit 4892dff

Please sign in to comment.