Skip to content

Commit 17372e3

Browse files
Add 'cached_content' to GenerativeModel's repr
Change-Id: I06676fad23895e3e1a6393baa938fc1f2df57d80
1 parent d1fd749 commit 17372e3

File tree

2 files changed

+12
-0
lines changed

2 files changed

+12
-0
lines changed

google/generativeai/generative_models.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -123,6 +123,7 @@ def maybe_text(content):
123123
safety_settings={self._safety_settings},
124124
tools={self._tools},
125125
system_instruction={maybe_text(self._system_instruction)},
126+
cached_content={getattr(self, "cached_content", None)}
126127
)"""
127128
)
128129

tests/test_generative_models.py

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1213,6 +1213,7 @@ def test_repr_for_multi_turn_chat(self):
12131213
safety_settings={},
12141214
tools=None,
12151215
system_instruction=None,
1216+
cached_content=None
12161217
),
12171218
history=[glm.Content({'parts': [{'text': 'I really like fantasy books.'}], 'role': 'user'}), glm.Content({'parts': [{'text': 'first'}], 'role': 'model'}), glm.Content({'parts': [{'text': 'I also like this image.'}, {'inline_data': {'data': 'iVBORw0KGgoA...AAElFTkSuQmCC', 'mime_type': 'image/png'}}], 'role': 'user'}), glm.Content({'parts': [{'text': 'second'}], 'role': 'model'}), glm.Content({'parts': [{'text': 'What things do I like?.'}], 'role': 'user'}), glm.Content({'parts': [{'text': 'third'}], 'role': 'model'})]
12181219
)"""
@@ -1241,6 +1242,7 @@ def test_repr_for_incomplete_streaming_chat(self):
12411242
safety_settings={},
12421243
tools=None,
12431244
system_instruction=None,
1245+
cached_content=None
12441246
),
12451247
history=[glm.Content({'parts': [{'text': 'I really like fantasy books.'}], 'role': 'user'}), <STREAMING IN PROGRESS>]
12461248
)"""
@@ -1285,6 +1287,7 @@ def test_repr_for_broken_streaming_chat(self):
12851287
safety_settings={},
12861288
tools=None,
12871289
system_instruction=None,
1290+
cached_content=None
12881291
),
12891292
history=[glm.Content({'parts': [{'text': 'I really like fantasy books.'}], 'role': 'user'}), <STREAMING ERROR>]
12901293
)"""
@@ -1296,6 +1299,14 @@ def test_repr_for_system_instruction(self):
12961299
result = repr(model)
12971300
self.assertIn("system_instruction='Be excellent.'", result)
12981301

1302+
def test_repr_for_model_created_from_cached_content(self):
1303+
model = generative_models.GenerativeModel.from_cached_content(
1304+
cached_content="test-cached-content"
1305+
)
1306+
result = repr(model)
1307+
self.assertIn("cached_content=cachedContent/test-cached-content", result)
1308+
self.assertIn("model_name='models/gemini-1.0-pro-001'", result)
1309+
12991310
def test_count_tokens_called_with_request_options(self):
13001311
self.responses["count_tokens"].append(glm.CountTokensResponse())
13011312
request_options = {"timeout": 120}

0 commit comments

Comments
 (0)