Skip to content

Commit f659fa3

Browse files
ricmestre and simonw authored
Add support for grounding using google_search (#29)
Co-authored-by: Simon Willison <swillison@gmail.com>
1 parent 19783a4 commit f659fa3

File tree

2 files changed

+42
-2
lines changed

2 files changed

+42
-2
lines changed

README.md

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -96,6 +96,20 @@ To enable this feature, use `-o code_execution 1`:
9696
llm -m gemini-1.5-pro-latest -o code_execution 1 \
9797
'use python to calculate (factorial of 13) * 3'
9898
```
99+
### Google search
100+
101+
Some Gemini models support [Grounding with Google Search](https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/ground-gemini#web-ground-gemini), where the model can run a Google search and use the results as part of answering a prompt.
102+
103+
Using this feature may incur additional requirements in terms of how you use the results. Consult [Google's documentation](https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/ground-gemini#web-ground-gemini) for more details.
104+
105+
To run a prompt with Google search enabled, use `-o google_search 1`:
106+
107+
```bash
108+
llm -m gemini-1.5-pro-latest -o google_search 1 \
109+
'What happened in Ireland today?'
110+
```
111+
112+
Use `llm logs -c --json` after running a prompt to see the full JSON response, which includes [additional information](https://github.com/simonw/llm-gemini/pull/29#issuecomment-2606201877) about grounded results.
99113

100114
### Chat
101115

llm_gemini.py

Lines changed: 28 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,17 @@
2323
},
2424
]
2525

26+
# Model IDs that support Grounding with Google Search; see
# https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/ground-gemini#supported_models_2
GOOGLE_SEARCH_MODELS = {
    "gemini-1.5-pro-latest",
    "gemini-1.5-flash-latest",
    "gemini-1.5-pro-001",
    "gemini-1.5-flash-001",
    "gemini-1.5-pro-002",
    "gemini-1.5-flash-002",
    "gemini-2.0-flash-exp",
}
36+
2637

2738
@llm.hookimpl
2839
def register_models(register):
@@ -45,7 +56,11 @@ def register_models(register):
4556
"gemini-2.0-flash-thinking-exp-1219",
4657
"gemini-2.0-flash-thinking-exp-01-21",
4758
]:
48-
register(GeminiPro(model_id), AsyncGeminiPro(model_id))
59+
can_google_search = model_id in GOOGLE_SEARCH_MODELS
60+
register(
61+
GeminiPro(model_id, can_google_search=can_google_search),
62+
AsyncGeminiPro(model_id, can_google_search=can_google_search),
63+
)
4964

5065

5166
def resolve_type(attachment):
@@ -139,8 +154,17 @@ class Options(llm.Options):
139154
default=None,
140155
)
141156

142-
def __init__(self, model_id):
157+
class OptionsWithGoogleSearch(Options):
    # Extends the base Options with a flag that toggles Grounding with
    # Google Search; only installed on models listed as supporting it.
    google_search: Optional[bool] = Field(
        default=None,
        description="Enables the model to use Google Search to improve the accuracy and recency of responses from the model",
    )
162+
163+
def __init__(self, model_id, can_google_search=False):
    """Initialize the model wrapper.

    Args:
        model_id: the Gemini model identifier this instance is registered as.
        can_google_search: when True, expose the ``google_search`` option by
            swapping in ``OptionsWithGoogleSearch`` as this instance's
            options class.
    """
    self.model_id = model_id
    self.can_google_search = can_google_search
    if not can_google_search:
        return
    # Grounding-capable models accept -o google_search 1; others keep
    # the plain Options class so the option is rejected for them.
    self.Options = self.OptionsWithGoogleSearch
144168

145169
def build_messages(self, prompt, conversation):
146170
messages = []
@@ -188,6 +212,8 @@ def build_request_body(self, prompt, conversation):
188212
}
189213
if prompt.options and prompt.options.code_execution:
190214
body["tools"] = [{"codeExecution": {}}]
215+
if prompt.options and self.can_google_search and prompt.options.google_search:
216+
body["tools"] = [{"google_search_retrieval": {}}]
191217
if prompt.system:
192218
body["systemInstruction"] = {"parts": [{"text": prompt.system}]}
193219

0 commit comments

Comments
 (0)