Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
14 changes: 14 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -93,6 +93,20 @@ To enable this feature, use `-o code_execution 1`:
llm -m gemini-1.5-pro-latest -o code_execution 1 \
'use python to calculate (factorial of 13) * 3'
```
### Google search

Some Gemini models support [Grounding with Google Search](https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/ground-gemini#web-ground-gemini), where the model can run a Google search and use the results as part of answering a prompt.

Using this feature may impose additional requirements on how you use the results. Consult [Google's documentation](https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/ground-gemini#web-ground-gemini) for more details.

To run a prompt with Google search enabled, use `-o google_search 1`:

```bash
llm -m gemini-1.5-pro-latest -o google_search 1 \
'What happened in Ireland today?'
```

Use `llm logs -c --json` after running a prompt to see the full JSON response, which includes [additional information](https://github.com/simonw/llm-gemini/pull/29#issuecomment-2606201877) about grounded results.

### Chat

Expand Down
30 changes: 28 additions & 2 deletions llm_gemini.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,17 @@
},
]

# Models that support Grounding with Google Search:
# https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/ground-gemini#supported_models_2
# The 1.5 models follow a family/version naming grid; 2.0 flash (experimental)
# is the single off-pattern entry.
GOOGLE_SEARCH_MODELS = {
    f"gemini-1.5-{family}-{version}"
    for family in ("pro", "flash")
    for version in ("latest", "001", "002")
} | {"gemini-2.0-flash-exp"}


@llm.hookimpl
def register_models(register):
Expand All @@ -42,7 +53,11 @@ def register_models(register):
"gemini-exp-1206",
"gemini-2.0-flash-exp",
]:
register(GeminiPro(model_id), AsyncGeminiPro(model_id))
can_google_search = model_id in GOOGLE_SEARCH_MODELS
register(
GeminiPro(model_id, can_google_search=can_google_search),
AsyncGeminiPro(model_id, can_google_search=can_google_search),
)


def resolve_type(attachment):
Expand Down Expand Up @@ -133,8 +148,17 @@ class Options(llm.Options):
default=None,
)

def __init__(self, model_id):
class OptionsWithGoogleSearch(Options):
    """Options subclass that adds the ``google_search`` flag.

    Installed as ``self.Options`` in ``__init__`` only when the model was
    registered with ``can_google_search=True`` (i.e. its id appears in
    GOOGLE_SEARCH_MODELS), so the option is exposed solely for models that
    support Grounding with Google Search.
    """

    google_search: Optional[bool] = Field(
        description="Enables the model to use Google Search to improve the accuracy and recency of responses from the model",
        default=None,
    )

def __init__(self, model_id, can_google_search=False):
    """Record the model id and capability flags.

    For models that support Grounding with Google Search, the options
    class is swapped for the subclass that exposes the ``google_search``
    field, so the extra option only appears on capable models.
    """
    self.model_id = model_id
    self.can_google_search = can_google_search
    if self.can_google_search:
        # Expose -o google_search only where the API supports it.
        self.Options = self.OptionsWithGoogleSearch

def build_messages(self, prompt, conversation):
messages = []
Expand Down Expand Up @@ -180,6 +204,8 @@ def build_request_body(self, prompt, conversation):
}
if prompt.options and prompt.options.code_execution:
body["tools"] = [{"codeExecution": {}}]
if prompt.options and self.can_google_search and prompt.options.google_search:
body["tools"] = [{"google_search_retrieval": {}}]
if prompt.system:
body["systemInstruction"] = {"parts": [{"text": prompt.system}]}

Expand Down