Skip to content

Commit

Permalink
feat: update gemini samples imports (#11205)
Browse files Browse the repository at this point in the history
* feat: update VertexAI Gemini samples imports

* feat: update function_calling.py imports

* feat: update model name

* feat: update requirements.txt

* 🦉 Updates from OwlBot post-processor

See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md

* fix: update test cases

Update validation checks for new Gemini-1.0 responses

---------

Co-authored-by: Owl Bot <gcf-owl-bot[bot]@users.noreply.github.com>
  • Loading branch information
msampathkumar and gcf-owl-bot[bot] authored Feb 15, 2024
1 parent 207aaa4 commit 1985da6
Show file tree
Hide file tree
Showing 11 changed files with 27 additions and 26 deletions.
6 changes: 3 additions & 3 deletions generative_ai/function_calling.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@
# limitations under the License.

# [START aiplatform_gemini_function_calling]
from vertexai.preview.generative_models import (
from vertexai.generative_models import (
FunctionDeclaration,
GenerativeModel,
Tool,
Expand All @@ -22,7 +22,7 @@

def generate_function_call(prompt: str) -> str:
# Load the Vertex AI Gemini API to use function calling
model = GenerativeModel("gemini-pro")
model = GenerativeModel("gemini-1.0-pro")

# Specify a function declaration and parameters for an API request
get_current_weather_func = FunctionDeclaration(
Expand Down Expand Up @@ -56,4 +56,4 @@ def generate_function_call(prompt: str) -> str:
# [END aiplatform_gemini_function_calling]

if __name__ == "__main__":
generate_function_call("What is the weather like in Boston?")
print(generate_function_call("What is the weather like in Boston?"))
8 changes: 4 additions & 4 deletions generative_ai/gemini_chat_example.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,14 +16,14 @@
def chat_text_example(project_id: str, location: str) -> str:
# [START aiplatform_gemini_multiturn_chat]
import vertexai
from vertexai.preview.generative_models import GenerativeModel, ChatSession
from vertexai.generative_models import GenerativeModel, ChatSession

# TODO(developer): Update and un-comment below lines
# project_id = "PROJECT_ID"
# location = "us-central1"
vertexai.init(project=project_id, location=location)

model = GenerativeModel("gemini-pro")
model = GenerativeModel("gemini-1.0-pro")
chat = model.start_chat()

def get_chat_response(chat: ChatSession, prompt: str) -> str:
Expand All @@ -45,13 +45,13 @@ def get_chat_response(chat: ChatSession, prompt: str) -> str:
def chat_stream_example(project_id: str, location: str) -> str:
# [START aiplatform_gemini_multiturn_chat_stream]
import vertexai
from vertexai.preview.generative_models import GenerativeModel, ChatSession
from vertexai.generative_models import GenerativeModel, ChatSession

# TODO(developer): Update and un-comment below lines
# project_id = "PROJECT_ID"
# location = "us-central1"
vertexai.init(project=project_id, location=location)
model = GenerativeModel("gemini-pro")
model = GenerativeModel("gemini-1.0-pro")
chat = model.start_chat()

def get_chat_response(chat: ChatSession, prompt: str) -> str:
Expand Down
4 changes: 2 additions & 2 deletions generative_ai/gemini_count_token_example.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,15 +15,15 @@

# [START aiplatform_gemini_token_count]
import vertexai
from vertexai.preview.generative_models import GenerativeModel
from vertexai.generative_models import GenerativeModel


def generate_text(project_id: str, location: str) -> str:
# Initialize Vertex AI
vertexai.init(project=project_id, location=location)

# Load the model
model = GenerativeModel("gemini-pro")
model = GenerativeModel("gemini-1.0-pro")

# prompt tokens count
print(model.count_tokens("why is sky blue?"))
Expand Down
4 changes: 2 additions & 2 deletions generative_ai/gemini_guide_example.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,14 +18,14 @@
# gcloud auth application-default login

import vertexai
from vertexai.preview.generative_models import GenerativeModel, Part
from vertexai.generative_models import GenerativeModel, Part


def generate_text(project_id: str, location: str) -> str:
# Initialize Vertex AI
vertexai.init(project=project_id, location=location)
# Load the model
multimodal_model = GenerativeModel("gemini-pro-vision")
multimodal_model = GenerativeModel("gemini-1.0-pro-vision")
# Query the model
response = multimodal_model.generate_content(
[
Expand Down
4 changes: 2 additions & 2 deletions generative_ai/gemini_multi_image_example.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ def generate_text_multimodal(project_id: str, location: str) -> str:
import http.client
import typing
import urllib.request
from vertexai.preview.generative_models import GenerativeModel, Image
from vertexai.generative_models import GenerativeModel, Image

# create helper function
def load_image_from_url(image_url: str) -> Image:
Expand All @@ -44,7 +44,7 @@ def load_image_from_url(image_url: str) -> Image:
)

# Pass multimodal prompt
model = GenerativeModel("gemini-pro-vision")
model = GenerativeModel("gemini-1.0-pro-vision")
response = model.generate_content(
[
landmark1,
Expand Down
2 changes: 1 addition & 1 deletion generative_ai/gemini_pro_basic_example.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@

# [START aiplatform_gemini_pro_example]
import vertexai
from vertexai.preview.generative_models import GenerativeModel, Part
from vertexai.generative_models import GenerativeModel, Part


def generate_text(project_id: str, location: str) -> None:
Expand Down
4 changes: 2 additions & 2 deletions generative_ai/gemini_pro_config_example.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,15 +16,15 @@
import base64

import vertexai
from vertexai.preview.generative_models import GenerativeModel, Part
from vertexai.generative_models import GenerativeModel, Part


def generate_text(project_id: str, location: str) -> None:
# Initialize Vertex AI
vertexai.init(project=project_id, location=location)

# Load the model
model = GenerativeModel("gemini-pro-vision")
model = GenerativeModel("gemini-1.0-pro-vision")

# Load example image from local storage
encoded_image = base64.b64encode(open("scones.jpg", "rb").read()).decode("utf-8")
Expand Down
4 changes: 2 additions & 2 deletions generative_ai/gemini_safety_config_example.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,15 +15,15 @@
import vertexai

# [START aiplatform_gemini_safety_settings]
from vertexai.preview import generative_models
from vertexai import generative_models


def generate_text(project_id: str, location: str, image: str) -> str:
# Initialize Vertex AI
vertexai.init(project=project_id, location=location)

# Load the model
model = generative_models.GenerativeModel("gemini-pro-vision")
model = generative_models.GenerativeModel("gemini-1.0-pro-vision")

# Generation config
config = {"max_output_tokens": 2048, "temperature": 0.4, "top_p": 1, "top_k": 32}
Expand Down
4 changes: 2 additions & 2 deletions generative_ai/gemini_single_turn_video_example.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,14 +16,14 @@
# [START aiplatform_gemini_single_turn_video]
import vertexai

from vertexai.preview.generative_models import GenerativeModel, Part
from vertexai.generative_models import GenerativeModel, Part


def generate_text(project_id: str, location: str) -> str:
# Initialize Vertex AI
vertexai.init(project=project_id, location=location)
# Load the model
vision_model = GenerativeModel("gemini-pro-vision")
vision_model = GenerativeModel("gemini-1.0-pro-vision")
# Generate text
response = vision_model.generate_content(
[
Expand Down
2 changes: 1 addition & 1 deletion generative_ai/requirements.txt
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
pandas==1.3.5; python_version == '3.7'
pandas==2.0.1; python_version > '3.7'
google-cloud-aiplatform[pipelines]==1.38.0
google-cloud-aiplatform[pipelines]==1.42.0
google-auth==2.17.3
11 changes: 6 additions & 5 deletions generative_ai/test_gemini_examples.py
Original file line number Diff line number Diff line change
Expand Up @@ -107,24 +107,25 @@ def load_image_from_url(image_url: str) -> str:
text = gemini_safety_config_example.generate_text(PROJECT_ID, LOCATION, image)
text = text.lower()
assert len(text) > 0
assert "scones" in text
assert any(
[_ in text for _ in ("scone", "blueberry", "coffee,", "flower", "table")]
)


def test_gemini_single_turn_video_example() -> None:
text = gemini_single_turn_video_example.generate_text(PROJECT_ID, LOCATION)
text = text.lower()
assert len(text) > 0
assert "zoo" in text
assert "tiger" in text
assert any([_ in text for _ in ("zoo", "tiger", "leaf", "water")])


def test_gemini_chat_example() -> None:
text = gemini_chat_example.chat_text_example(PROJECT_ID, LOCATION)
text = text.lower()
assert len(text) > 0
assert ("hi" in text) or ("hello" in text)
assert any([_ in text for _ in ("hi", "hello", "greeting")])

text = gemini_chat_example.chat_stream_example(PROJECT_ID, LOCATION)
text = text.lower()
assert len(text) > 0
assert ("hi" in text) or ("hello" in text)
assert any([_ in text for _ in ("hi", "hello", "greeting")])

0 comments on commit 1985da6

Please sign in to comment.