Skip to content

Commit

Permalink
feat: gemini code samples updates (#11579)
Browse files Browse the repository at this point in the history
* chore: minor code readability improvements

* feat: update region tags and imports

* move region tags & imports to inside a function

* feat: Add readme notes for Generative AI sample developers

* chore: simplify README.md

* 🦉 Updates from OwlBot post-processor

See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md

---------

Co-authored-by: Owl Bot <gcf-owl-bot[bot]@users.noreply.github.com>
  • Loading branch information
msampathkumar and gcf-owl-bot[bot] authored Apr 23, 2024
1 parent 768f82b commit dfafd5f
Show file tree
Hide file tree
Showing 18 changed files with 134 additions and 90 deletions.
49 changes: 49 additions & 0 deletions generative_ai/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,49 @@
# Generative AI on Google Cloud

Product Page: https://cloud.google.com/ai/generative-ai?hl=en
Code samples: https://cloud.google.com/docs/samples?text=Generative%20AI

## Developer Notes

When developing code samples for Generative AI products, a scripting-style format is recommended. It's important to wrap the code samples and region tags within a function definition.

This change is motivated by the desire to provide a code format that can effortlessly integrate with popular data science community tools. These tools include Colab, Jupyter Notebooks, and the IPython shell.

Example:

```python
def create_hello_world_file(filename):
    # <region tag: starts here>
    import os

    # TODO(developer): Update and uncomment below code
    # filename = "/tmp/test.txt"

    if os.path.isfile(filename):
        print(f"Overriding content in file(name: {filename})!")

    with open(filename, "w") as fp:
        fp.write("Hello world!")
    # <region tag: ends here>
```

On the Google Cloud documentation page, the code sample is shown as below:

```python
import os

# TODO(developer): Update and uncomment below code
# filename = "/tmp/test.txt"

if os.path.isfile(filename):
    print(f"Overriding content in file(name: {filename})!")

with open(filename, "w") as fp:
    fp.write("Hello world!")
```






2 changes: 1 addition & 1 deletion generative_ai/function_calling.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ def generate_function_call(prompt: str, project_id: str, location: str) -> tuple
vertexai.init(project=project_id, location=location)

# Initialize Gemini model
model = GenerativeModel("gemini-1.0-pro-001")
model = GenerativeModel(model_name="gemini-1.0-pro-001")

# Specify a function declaration and parameters for an API request
get_current_weather_func = FunctionDeclaration(
Expand Down
2 changes: 1 addition & 1 deletion generative_ai/function_calling_chat.py
Original file line number Diff line number Diff line change
Expand Up @@ -64,7 +64,7 @@ def generate_function_call_chat(project_id: str, location: str) -> tuple:

# Initialize Gemini model
model = GenerativeModel(
"gemini-1.0-pro-001",
model_name="gemini-1.0-pro-001",
generation_config=GenerationConfig(temperature=0),
tools=[retail_tool],
)
Expand Down
2 changes: 1 addition & 1 deletion generative_ai/gemini_all_modalities.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ def analyze_all_modalities(project_id: str) -> str:

vertexai.init(project=project_id, location="us-central1")

model = GenerativeModel("gemini-1.5-pro-preview-0409")
model = GenerativeModel(model_name="gemini-1.5-pro-preview-0409")

video_file_uri = (
"gs://cloud-samples-data/generative-ai/video/behind_the_scenes_pixel.mp4"
Expand Down
4 changes: 2 additions & 2 deletions generative_ai/gemini_audio.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ def summarize_audio(project_id: str) -> str:

vertexai.init(project=project_id, location="us-central1")

model = GenerativeModel("gemini-1.5-pro-preview-0409")
model = GenerativeModel(model_name="gemini-1.5-pro-preview-0409")

prompt = """
Please provide a summary for the audio.
Expand Down Expand Up @@ -55,7 +55,7 @@ def transcript_audio(project_id: str) -> str:

vertexai.init(project=project_id, location="us-central1")

model = GenerativeModel("gemini-1.5-pro-preview-0409")
model = GenerativeModel(model_name="gemini-1.5-pro-preview-0409")

prompt = """
Can you transcribe this interview, in the format of timecode, speaker, caption.
Expand Down
6 changes: 4 additions & 2 deletions generative_ai/gemini_chat_example.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,14 +16,15 @@
def chat_text_example(project_id: str, location: str) -> str:
# [START generativeaionvertexai_gemini_multiturn_chat]
import vertexai

from vertexai.generative_models import GenerativeModel, ChatSession

# TODO(developer): Update and un-comment below lines
# project_id = "PROJECT_ID"
# location = "us-central1"
vertexai.init(project=project_id, location=location)

model = GenerativeModel("gemini-1.0-pro")
model = GenerativeModel(model_name="gemini-1.0-pro-002")
chat = model.start_chat()

def get_chat_response(chat: ChatSession, prompt: str) -> str:
Expand All @@ -45,13 +46,14 @@ def get_chat_response(chat: ChatSession, prompt: str) -> str:
def chat_stream_example(project_id: str, location: str) -> str:
# [START generativeaionvertexai_gemini_multiturn_chat_stream]
import vertexai

from vertexai.generative_models import GenerativeModel, ChatSession

# TODO(developer): Update and un-comment below lines
# project_id = "PROJECT_ID"
# location = "us-central1"
vertexai.init(project=project_id, location=location)
model = GenerativeModel("gemini-1.0-pro")
model = GenerativeModel(model_name="gemini-1.0-pro-002")
chat = model.start_chat()

def get_chat_response(chat: ChatSession, prompt: str) -> str:
Expand Down
14 changes: 6 additions & 8 deletions generative_ai/gemini_count_token_example.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,17 +13,17 @@
# limitations under the License.


# [START generativeaionvertexai_gemini_token_count]
import vertexai
from vertexai.generative_models import GenerativeModel
def generate_text(project_id: str, location: str) -> str:
# [START generativeaionvertexai_gemini_token_count]
import vertexai

from vertexai.generative_models import GenerativeModel

def generate_text(project_id: str, location: str) -> str:
# Initialize Vertex AI
vertexai.init(project=project_id, location=location)

# Load the model
model = GenerativeModel("gemini-1.0-pro")
model = GenerativeModel(model_name="gemini-1.0-pro-002")

# prompt tokens count
print(model.count_tokens("why is sky blue?"))
Expand All @@ -33,7 +33,5 @@ def generate_text(project_id: str, location: str) -> str:

# response tokens count
print(response._raw_response.usage_metadata)
# [END generativeaionvertexai_gemini_token_count]
return response.text


# [END generativeaionvertexai_gemini_token_count]
31 changes: 14 additions & 17 deletions generative_ai/gemini_grounding_example.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,29 +12,23 @@
# See the License for the specific language governing permissions and
# limitations under the License.

# [START generativeaionvertexai_gemini_grounding_with_web]
# [START generativeaionvertexai_gemini_grounding_with_vais]

import vertexai
from vertexai.preview.generative_models import (
GenerationConfig,
GenerationResponse,
GenerativeModel,
grounding,
Tool,
)

# [END generativeaionvertexai_gemini_grounding_with_vais]
from vertexai.preview.generative_models import GenerationResponse


def generate_text_with_grounding_web(
project_id: str, location: str
) -> GenerationResponse:
# [START generativeaionvertexai_gemini_grounding_with_web]
import vertexai
from vertexai.preview.generative_models import grounding
from vertexai.generative_models import GenerationConfig, GenerativeModel, Tool

# Initialize Vertex AI
vertexai.init(project=project_id, location=location)

# Load the model
model = GenerativeModel(model_name="gemini-1.0-pro")
model = GenerativeModel(model_name="gemini-1.0-pro-002")

# Use Google Search for grounding
tool = Tool.from_google_search_retrieval(grounding.GoogleSearchRetrieval())
Expand All @@ -54,17 +48,20 @@ def generate_text_with_grounding_web(
return response


# [START generativeaionvertexai_gemini_grounding_with_vais]


def generate_text_with_grounding_vertex_ai_search(
project_id: str, location: str, data_store_path: str
) -> GenerationResponse:
# [START generativeaionvertexai_gemini_grounding_with_vais]
import vertexai

from vertexai.preview.generative_models import grounding
from vertexai.generative_models import GenerationConfig, GenerativeModel, Tool

# Initialize Vertex AI
vertexai.init(project=project_id, location=location)

# Load the model
model = GenerativeModel(model_name="gemini-1.0-pro")
model = GenerativeModel(model_name="gemini-1.0-pro-002")

# Use Vertex AI Search data store
# Format: projects/{project_id}/locations/{location}/collections/default_collection/dataStores/{data_store_id}
Expand Down
16 changes: 8 additions & 8 deletions generative_ai/gemini_guide_example.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,20 +12,20 @@
# See the License for the specific language governing permissions and
# limitations under the License.

# [START generativeaionvertexai_gemini_get_started]
# TODO(developer): Vertex AI SDK - uncomment below & run
# pip3 install --upgrade --user google-cloud-aiplatform
# gcloud auth application-default login

import vertexai
from vertexai.generative_models import GenerativeModel, Part
def generate_text(project_id: str, location: str) -> str:
# [START generativeaionvertexai_gemini_get_started]
# TODO(developer): Vertex AI SDK - uncomment below & run
# pip3 install --upgrade --user google-cloud-aiplatform
# gcloud auth application-default login

import vertexai
from vertexai.generative_models import GenerativeModel, Part

def generate_text(project_id: str, location: str) -> str:
# Initialize Vertex AI
vertexai.init(project=project_id, location=location)
# Load the model
multimodal_model = GenerativeModel("gemini-1.0-pro-vision")
multimodal_model = GenerativeModel(model_name="gemini-1.0-pro-vision-001")
# Query the model
response = multimodal_model.generate_content(
[
Expand Down
12 changes: 6 additions & 6 deletions generative_ai/gemini_multi_image_example.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,19 +12,19 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import vertexai


def generate_text_multimodal(project_id: str, location: str) -> str:
# Initialize Vertex AI
vertexai.init(project=project_id, location=location)

# [START generativeaionvertexai_gemini_single_turn_multi_image]
import http.client
import typing
import urllib.request
import vertexai

from vertexai.generative_models import GenerativeModel, Image

# Initialize Vertex AI
vertexai.init(project=project_id, location=location)

# create helper function
def load_image_from_url(image_url: str) -> Image:
with urllib.request.urlopen(image_url) as response:
Expand All @@ -44,7 +44,7 @@ def load_image_from_url(image_url: str) -> Image:
)

# Pass multimodal prompt
model = GenerativeModel("gemini-1.0-pro-vision")
model = GenerativeModel(model_name="gemini-1.0-pro-vision-001")
response = model.generate_content(
[
landmark1,
Expand Down
6 changes: 3 additions & 3 deletions generative_ai/gemini_pdf_example.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,16 +15,16 @@

def analyze_pdf(project_id: str) -> str:
# [START generativeaionvertexai_gemini_pdf]

import vertexai

from vertexai.generative_models import GenerativeModel, Part

# TODO(developer): Update and un-comment below lines
# project_id = "PROJECT_ID"

vertexai.init(project=project_id, location="us-central1")

model = GenerativeModel("gemini-1.5-pro-preview-0409")
model = GenerativeModel(model_name="gemini-1.5-pro-preview-0409")

prompt = """
Your are a very professional document summarization specialist.
Expand All @@ -37,6 +37,6 @@ def analyze_pdf(project_id: str) -> str:

response = model.generate_content(contents)
print(response.text)

# [END generativeaionvertexai_gemini_pdf]

return response.text
15 changes: 7 additions & 8 deletions generative_ai/gemini_pro_basic_example.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,17 +12,18 @@
# See the License for the specific language governing permissions and
# limitations under the License.

# [START generativeaionvertexai_gemini_pro_example]
import vertexai
from vertexai.generative_models import GenerativeModel, Part


def generate_text(project_id: str, location: str) -> None:
# [START generativeaionvertexai_gemini_pro_example]
import vertexai

from vertexai.generative_models import GenerativeModel, Part

# Initialize Vertex AI
vertexai.init(project=project_id, location=location)

# Load the model
model = GenerativeModel(model_name="gemini-pro-vision")
model = GenerativeModel(model_name="gemini-1.0-pro-vision-001")

# Load example image
image_url = "gs://generativeai-downloads/images/scones.jpg"
Expand All @@ -32,7 +33,5 @@ def generate_text(project_id: str, location: str) -> None:
response = model.generate_content([image_content, "what is this image?"])
print(response)

# [END generativeaionvertexai_gemini_pro_example]
return response.text


# [END generativeaionvertexai_gemini_pro_example]
17 changes: 8 additions & 9 deletions generative_ai/gemini_pro_config_example.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,19 +12,19 @@
# See the License for the specific language governing permissions and
# limitations under the License.

# [START generativeaionvertexai_gemini_pro_config_example]
import base64

import vertexai
from vertexai.generative_models import GenerationConfig, GenerativeModel, Part
def generate_text(project_id: str, location: str) -> None:
# [START generativeaionvertexai_gemini_pro_config_example]
import base64
import vertexai

from vertexai.generative_models import GenerationConfig, GenerativeModel, Part

def generate_text(project_id: str, location: str) -> None:
# Initialize Vertex AI
vertexai.init(project=project_id, location=location)

# Load the model
model = GenerativeModel("gemini-1.0-pro-vision")
model = GenerativeModel(model_name="gemini-1.0-pro-vision-001")

# Load example image from local storage
encoded_image = base64.b64encode(open("scones.jpg", "rb").read()).decode("utf-8")
Expand All @@ -42,7 +42,6 @@ def generate_text(project_id: str, location: str) -> None:
[image_content, "what is this image?"], generation_config=config
)
print(response.text)
return response.text
# [END generativeaionvertexai_gemini_pro_config_example]


# [END generativeaionvertexai_gemini_pro_config_example]
return response.text
Loading

0 comments on commit dfafd5f

Please sign in to comment.