Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat(generative_ai): Add code completion example #11809

Merged
merged 11 commits into from
Jun 3, 2024
56 changes: 56 additions & 0 deletions generative_ai/chat_completion/non_streaming_image.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,56 @@
# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


def example(project_id: str, location: str) -> str:
    """Non-Streaming Chat Example (image input) with a Large Language Model.

    Args:
        project_id: Google Cloud project id.
        location: Vertex AI region, e.g. "us-central1".

    Returns:
        A string "<model>:<message content>" built from the single
        (non-streaming) chat completion response.
    """
    # [START generativeaionvertexai_gemini_chat_completion_non_streaming_image]
    import vertexai
    import openai

    from google.auth import default, transport

    # TODO(developer): update project_id & location
    vertexai.init(project=project_id, location=location)

    # Programmatically get an access token
    creds, _ = default()
    auth_req = transport.requests.Request()
    creds.refresh(auth_req)

    # OpenAI Client pointed at the Vertex AI OpenAI-compatible endpoint
    client = openai.OpenAI(
        base_url=f"https://{location}-aiplatform.googleapis.com/v1beta1/projects/{project_id}/locations/{location}/endpoints/openapi",
        api_key=creds.token,
    )

    response = client.chat.completions.create(
        model="google/gemini-1.5-flash-001",
        messages=[
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": "Describe the following image:"},
                    {
                        "type": "image_url",
                        "image_url": "gs://cloud-samples-data/generative-ai/image/scones.jpg",
                    },
                ],
            }
        ],
    )

    print(response)
    # [END generativeaionvertexai_gemini_chat_completion_non_streaming_image]
    return f"{response.model}:{response.choices[0].message.content}"
45 changes: 45 additions & 0 deletions generative_ai/chat_completion/non_streaming_text.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,45 @@
# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


def example(project_id: str, location: str) -> str:
    """Non-Streaming Chat Example with a Large Language Model.

    Args:
        project_id: Google Cloud project id.
        location: Vertex AI region, e.g. "us-central1".

    Returns:
        A string "<model>:<message content>" built from the single
        (non-streaming) chat completion response.
    """
    # [START generativeaionvertexai_gemini_chat_completion_non_streaming]
    import vertexai
    import openai

    from google.auth import default, transport

    # TODO(developer): update project_id & location
    vertexai.init(project=project_id, location=location)

    # Programmatically get an access token
    creds, _ = default()
    auth_req = transport.requests.Request()
    creds.refresh(auth_req)

    # OpenAI Client pointed at the Vertex AI OpenAI-compatible endpoint
    client = openai.OpenAI(
        base_url=f"https://{location}-aiplatform.googleapis.com/v1beta1/projects/{project_id}/locations/{location}/endpoints/openapi",
        api_key=creds.token,
    )

    response = client.chat.completions.create(
        model="google/gemini-1.5-flash-001",
        messages=[{"role": "user", "content": "Who are you?"}],
    )

    print(response)
    # [END generativeaionvertexai_gemini_chat_completion_non_streaming]
    return f"{response.model}:{response.choices[0].message.content}"
42 changes: 42 additions & 0 deletions generative_ai/chat_completion/noxfile_config.py
gericdong marked this conversation as resolved.
Show resolved Hide resolved
Original file line number Diff line number Diff line change
@@ -0,0 +1,42 @@
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Default TEST_CONFIG_OVERRIDE for python repos.

# You can copy this file into your directory, then it will be imported from
# the noxfile.py.

# The source of truth:
# https://github.com/GoogleCloudPlatform/python-docs-samples/blob/main/noxfile_config.py

TEST_CONFIG_OVERRIDE = {
    # Skip the tests entirely on these Python versions.
    "ignored_versions": ["2.7", "3.7", "3.9", "3.10", "3.12"],
    # Type hints are mandatory for new samples, so enforce them here
    # (only legacy samples are allowed to opt out).
    "enforce_type_hints": True,
    # Name of the environment variable that supplies the project id.
    # Switch to 'BUILD_SPECIFIC_GCLOUD_PROJECT' to opt in to a
    # build-specific Cloud project, or set any string to pin your own.
    "gcloud_project_env": "GOOGLE_CLOUD_PROJECT",
    # 'gcloud_project_env': 'BUILD_SPECIFIC_GCLOUD_PROJECT',
    # Pin pip to an exact version by setting this to its string
    # representation (e.g. "20.2.4"); None means "use the default".
    "pip_version_override": None,
    # Extra environment variables injected into the test run. These
    # override predefined values — never put secrets here.
    "envs": {},
}
2 changes: 2 additions & 0 deletions generative_ai/chat_completion/requirements.txt
msampathkumar marked this conversation as resolved.
Show resolved Hide resolved
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
google-cloud-aiplatform==1.51.0
openai==1.30.5
57 changes: 57 additions & 0 deletions generative_ai/chat_completion/streaming_image.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,57 @@
# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


def example(project_id: str, location: str) -> str:
    """Streaming Chat Example with a Large Language Model."""
    # [START generativeaionvertexai_gemini_chat_completion_streaming_image]
    import vertexai
    import openai

    from google.auth import default, transport

    # TODO(developer): update project_id & location
    vertexai.init(project=project_id, location=location)

    # Programmatically get an access token
    credentials, _ = default()
    credentials.refresh(transport.requests.Request())

    # OpenAI Client pointed at the Vertex AI OpenAI-compatible endpoint
    client = openai.OpenAI(
        base_url=f"https://{location}-aiplatform.googleapis.com/v1beta1/projects/{project_id}/locations/{location}/endpoints/openapi",
        api_key=credentials.token,
    )

    # Ask the model to describe an image stored in Cloud Storage,
    # streaming the answer back chunk by chunk.
    stream = client.chat.completions.create(
        model="google/gemini-1.5-flash-001",
        messages=[
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": "Describe the following image:"},
                    {
                        "type": "image_url",
                        "image_url": "gs://cloud-samples-data/generative-ai/image/scones.jpg",
                    },
                ],
            }
        ],
        stream=True,
    )
    for chunk in stream:
        print(chunk)
    # [END generativeaionvertexai_gemini_chat_completion_streaming_image]
    return f"{chunk.model}:{chunk.choices[0].delta.content}"
46 changes: 46 additions & 0 deletions generative_ai/chat_completion/streaming_text.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,46 @@
# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


def example(project_id: str, location: str) -> str:
    """Streaming Chat Example with a Large Language Model."""
    # [START generativeaionvertexai_gemini_chat_completion_streaming]
    import vertexai
    import openai

    from google.auth import default, transport

    # TODO(developer): update project_id & location
    vertexai.init(project=project_id, location=location)

    # Programmatically get an access token
    credentials, _ = default()
    credentials.refresh(transport.requests.Request())

    # OpenAI Client pointed at the Vertex AI OpenAI-compatible endpoint
    client = openai.OpenAI(
        base_url=f"https://{location}-aiplatform.googleapis.com/v1beta1/projects/{project_id}/locations/{location}/endpoints/openapi",
        api_key=credentials.token,
    )

    # Stream the answer back chunk by chunk.
    stream = client.chat.completions.create(
        model="google/gemini-1.5-flash-001",
        messages=[{"role": "user", "content": "Who are you?"}],
        stream=True,
    )
    for chunk in stream:
        print(chunk)
    # [END generativeaionvertexai_gemini_chat_completion_streaming]
    return f"{chunk.model}:{chunk.choices[0].delta.content}"
51 changes: 51 additions & 0 deletions generative_ai/chat_completion/test_streaming_examples.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,51 @@
# Copyright 2024 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os

import non_streaming_image

import non_streaming_text

import streaming_image

import streaming_text


PROJECT_ID = os.getenv("GOOGLE_CLOUD_PROJECT")
REGION = "us-central1"


def test_streaming_text() -> None:
    """Verify the streaming text sample returns a non-empty string."""
    response = streaming_text.example(PROJECT_ID, REGION)
    # isinstance is the idiomatic type check (type(x) is str rejects subclasses).
    assert isinstance(response, str), "Expected text response"
    assert len(response) > 0, "Expected non-empty response"


def test_non_streaming_text() -> None:
    """Verify the non-streaming text sample returns a non-empty string."""
    # BUG FIX: this test previously exercised streaming_image.example,
    # leaving non_streaming_text untested (targets were swapped).
    response = non_streaming_text.example(PROJECT_ID, REGION)
    assert isinstance(response, str), "Expected text response"
    assert len(response) > 0, "Expected non-empty response"


def test_streaming_image() -> None:
    """Verify the streaming image sample returns a non-empty string."""
    # BUG FIX: this test previously exercised non_streaming_text.example,
    # leaving streaming_image untested (targets were swapped).
    response = streaming_image.example(PROJECT_ID, REGION)
    assert isinstance(response, str), "Expected text response"
    assert len(response) > 0, "Expected non-empty response"


def test_non_streaming_image() -> None:
    """Verify the non-streaming image sample returns a non-empty string."""
    response = non_streaming_image.example(PROJECT_ID, REGION)
    # isinstance is the idiomatic type check (type(x) is str rejects subclasses).
    assert isinstance(response, str), "Expected text response"
    assert len(response) > 0, "Expected non-empty response"
Loading