Skip to content

Commit f08c789

Browse files
MarkDaoust and markmcd
authored
Add genai.protos (google-gemini#354)
* Add genai.protos Change-Id: I21cfada033c6ffbed7a20e117e61582fde925f61 * Add genai.protos Change-Id: I9c8473d4ca1a0e92489f145a18ef1abd29af22b3 * test_protos.py Change-Id: I576080fb80cf9dc9345d8bb2178eb4b9ac59ce97 * fix docs + format Change-Id: I5f9aa3f8e3ae780e5cec2078d3eb153157b195fe * fix merge Change-Id: I17014791d966d797b481bca17df69558b23a9a1a * format Change-Id: I51d30f6568640456bcf28db2bd338a58a82346de * Fix client references Change-Id: I4899231706c9624a0f189b22b6f70aeeb4cbea29 * Fix tests Change-Id: I8a636fb634fd079a892cb99170a12c0613887ccf * add import Change-Id: I517171389801ef249cd478f98798181da83bef69 * fix import Change-Id: I8921c0caaa9b902ebde682ead31a2444298c2c9c * Update docstring Change-Id: I1f6b3b9b9521baa8812a908431bf58c623860733 * spelling Change-Id: I0421a35687ed14b1a5ca3b496cafd91514c4de92 * remove unused imports Change-Id: Ifc791796e36668eb473fd0fffea4833b1a062188 * Resolve review coments. Change-Id: Ieb900190f42e883337028ae25da3be819507db4a * Update docstring. Change-Id: I805473f9aaeb04e922a9f66bb5f40716d42fb738 * Fix typo --------- Co-authored-by: Mark McDonald <macd@google.com>
1 parent 2e62fae commit f08c789

Some content is hidden

Large commits have some content hidden by default. Use the search box below for content that may be hidden.

47 files changed

+1499
-1424
lines changed

docs/build_docs.py

Lines changed: 5 additions & 126 deletions
Original file line numberDiff line numberDiff line change
@@ -44,77 +44,13 @@
4444
# For showing the conditional imports and types in `content_types.py`
4545
# grpc must be imported first.
4646
typing.TYPE_CHECKING = True
47-
from google import generativeai as palm
48-
47+
from google import generativeai as genai
4948

5049
from tensorflow_docs.api_generator import generate_lib
5150
from tensorflow_docs.api_generator import public_api
5251

5352
import yaml
5453

55-
glm.__doc__ = """\
56-
This package, `google.ai.generativelanguage`, is a low-level auto-generated client library for the PaLM API.
57-
58-
```posix-terminal
59-
pip install google.ai.generativelanguage
60-
```
61-
62-
It is built using the same tooling as Google Cloud client libraries, and will be quite familiar if you've used
63-
those before.
64-
65-
While we encourage Python users to access the PaLM API using the `google.generativeai` package (aka `palm`),
66-
this lower level package is also available.
67-
68-
Each method in the PaLM API is connected to one of the client classes. Pass your API-key to the class' `client_options`
69-
when initializing a client:
70-
71-
```
72-
from google.ai import generativelanguage as glm
73-
74-
client = glm.DiscussServiceClient(
75-
client_options={'api_key':'YOUR_API_KEY'})
76-
```
77-
78-
To call the api, pass an appropriate request-proto-object. For the `DiscussServiceClient.generate_message` pass
79-
a `generativelanguage.GenerateMessageRequest` instance:
80-
81-
```
82-
request = glm.GenerateMessageRequest(
83-
model='models/chat-bison-001',
84-
prompt=glm.MessagePrompt(
85-
messages=[glm.Message(content='Hello!')]))
86-
87-
client.generate_message(request)
88-
```
89-
```
90-
candidates {
91-
author: "1"
92-
content: "Hello! How can I help you today?"
93-
}
94-
...
95-
```
96-
97-
For simplicity:
98-
99-
* The API methods also accept key-word arguments.
100-
* Anywhere you might pass a proto-object, the library will also accept simple python structures.
101-
102-
So the following is equivalent to the previous example:
103-
104-
```
105-
client.generate_message(
106-
model='models/chat-bison-001',
107-
prompt={'messages':[{'content':'Hello!'}]})
108-
```
109-
```
110-
candidates {
111-
author: "1"
112-
content: "Hello! How can I help you today?"
113-
}
114-
...
115-
```
116-
"""
117-
11854
HERE = pathlib.Path(__file__).parent
11955

12056
PROJECT_SHORT_NAME = "genai"
@@ -139,43 +75,6 @@
13975
)
14076

14177

142-
class MyFilter:
143-
def __init__(self, base_dirs):
144-
self.filter_base_dirs = public_api.FilterBaseDirs(base_dirs)
145-
146-
def drop_staticmethods(self, parent, children):
147-
parent = dict(parent.__dict__)
148-
for name, value in children:
149-
if not isinstance(parent.get(name, None), staticmethod):
150-
yield name, value
151-
152-
def __call__(self, path, parent, children):
153-
if any("generativelanguage" in part for part in path) or "generativeai" in path:
154-
children = self.filter_base_dirs(path, parent, children)
155-
children = public_api.explicit_package_contents_filter(path, parent, children)
156-
157-
if any("generativelanguage" in part for part in path):
158-
if "ServiceClient" in path[-1] or "ServiceAsyncClient" in path[-1]:
159-
children = list(self.drop_staticmethods(parent, children))
160-
161-
return children
162-
163-
164-
class MyDocGenerator(generate_lib.DocGenerator):
165-
def make_default_filters(self):
166-
return [
167-
# filter the api.
168-
public_api.FailIfNestedTooDeep(10),
169-
public_api.filter_module_all,
170-
public_api.add_proto_fields,
171-
public_api.filter_private_symbols,
172-
MyFilter(self._base_dir), # Replaces: public_api.FilterBaseDirs(self._base_dir),
173-
public_api.FilterPrivateMap(self._private_map),
174-
public_api.filter_doc_controls_skip,
175-
public_api.ignore_typing,
176-
]
177-
178-
17978
def gen_api_docs():
18079
"""Generates api docs for the generative-ai package."""
18180
for name in dir(google):
@@ -188,11 +87,11 @@ def gen_api_docs():
18887
"""
18988
)
19089

191-
doc_generator = MyDocGenerator(
90+
doc_generator = generate_lib.DocGenerator(
19291
root_title=PROJECT_FULL_NAME,
193-
py_modules=[("google", google)],
92+
py_modules=[("google.generativeai", genai)],
19493
base_dir=(
195-
pathlib.Path(palm.__file__).parent,
94+
pathlib.Path(genai.__file__).parent,
19695
pathlib.Path(glm.__file__).parent.parent,
19796
),
19897
code_url_prefix=(
@@ -201,32 +100,12 @@ def gen_api_docs():
201100
),
202101
search_hints=_SEARCH_HINTS.value,
203102
site_path=_SITE_PATH.value,
204-
callbacks=[],
103+
callbacks=[public_api.explicit_package_contents_filter],
205104
)
206105

207106
out_path = pathlib.Path(_OUTPUT_DIR.value)
208107
doc_generator.build(out_path)
209108

210-
# Fixup the toc file.
211-
toc_path = out_path / "google/_toc.yaml"
212-
toc = yaml.safe_load(toc_path.read_text())
213-
assert toc["toc"][0]["title"] == "google"
214-
toc["toc"] = toc["toc"][1:]
215-
toc["toc"][0]["title"] = "google.ai.generativelanguage"
216-
toc["toc"][0]["section"] = toc["toc"][0]["section"][1]["section"]
217-
toc["toc"][0], toc["toc"][1] = toc["toc"][1], toc["toc"][0]
218-
toc_path.write_text(yaml.dump(toc))
219-
220-
# remove some dummy files and redirect them to `api/`
221-
(out_path / "google.md").unlink()
222-
(out_path / "google/ai.md").unlink()
223-
redirects_path = out_path / "_redirects.yaml"
224-
redirects = {"redirects": []}
225-
redirects["redirects"].insert(0, {"from": "/api/python/google/ai", "to": "/api/"})
226-
redirects["redirects"].insert(0, {"from": "/api/python/google", "to": "/api/"})
227-
redirects["redirects"].insert(0, {"from": "/api/python", "to": "/api/"})
228-
redirects_path.write_text(yaml.dump(redirects))
229-
230109
# clear `oneof` junk from proto pages
231110
for fpath in out_path.rglob("*.md"):
232111
old_content = fpath.read_text()

google/generativeai/__init__.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -42,6 +42,7 @@
4242

4343
from google.generativeai import version
4444

45+
from google.generativeai import protos
4546
from google.generativeai import types
4647
from google.generativeai.types import GenerationConfig
4748

google/generativeai/answer.py

Lines changed: 31 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,7 @@
2121
from typing_extensions import TypedDict
2222

2323
import google.ai.generativelanguage as glm
24+
from google.generativeai import protos
2425

2526
from google.generativeai.client import (
2627
get_default_generative_client,
@@ -35,7 +36,7 @@
3536

3637
DEFAULT_ANSWER_MODEL = "models/aqa"
3738

38-
AnswerStyle = glm.GenerateAnswerRequest.AnswerStyle
39+
AnswerStyle = protos.GenerateAnswerRequest.AnswerStyle
3940

4041
AnswerStyleOptions = Union[int, str, AnswerStyle]
4142

@@ -66,28 +67,30 @@ def to_answer_style(x: AnswerStyleOptions) -> AnswerStyle:
6667

6768

6869
GroundingPassageOptions = (
69-
Union[glm.GroundingPassage, tuple[str, content_types.ContentType], content_types.ContentType],
70+
Union[
71+
protos.GroundingPassage, tuple[str, content_types.ContentType], content_types.ContentType
72+
],
7073
)
7174

7275
GroundingPassagesOptions = Union[
73-
glm.GroundingPassages,
76+
protos.GroundingPassages,
7477
Iterable[GroundingPassageOptions],
7578
Mapping[str, content_types.ContentType],
7679
]
7780

7881

79-
def _make_grounding_passages(source: GroundingPassagesOptions) -> glm.GroundingPassages:
82+
def _make_grounding_passages(source: GroundingPassagesOptions) -> protos.GroundingPassages:
8083
"""
81-
Converts the `source` into a `glm.GroundingPassage`. A `GroundingPassages` contains a list of
82-
`glm.GroundingPassage` objects, which each contain a `glm.Contant` and a string `id`.
84+
Converts the `source` into a `protos.GroundingPassage`. A `GroundingPassages` contains a list of
85+
`protos.GroundingPassage` objects, which each contain a `protos.Contant` and a string `id`.
8386
8487
Args:
85-
source: `Content` or a `GroundingPassagesOptions` that will be converted to glm.GroundingPassages.
88+
source: `Content` or a `GroundingPassagesOptions` that will be converted to protos.GroundingPassages.
8689
8790
Return:
88-
`glm.GroundingPassages` to be passed into `glm.GenerateAnswer`.
91+
`protos.GroundingPassages` to be passed into `protos.GenerateAnswer`.
8992
"""
90-
if isinstance(source, glm.GroundingPassages):
93+
if isinstance(source, protos.GroundingPassages):
9194
return source
9295

9396
if not isinstance(source, Iterable):
@@ -100,19 +103,19 @@ def _make_grounding_passages(source: GroundingPassagesOptions) -> glm.GroundingP
100103
source = source.items()
101104

102105
for n, data in enumerate(source):
103-
if isinstance(data, glm.GroundingPassage):
106+
if isinstance(data, protos.GroundingPassage):
104107
passages.append(data)
105108
elif isinstance(data, tuple):
106109
id, content = data # tuple must have exactly 2 items.
107110
passages.append({"id": id, "content": content_types.to_content(content)})
108111
else:
109112
passages.append({"id": str(n), "content": content_types.to_content(data)})
110113

111-
return glm.GroundingPassages(passages=passages)
114+
return protos.GroundingPassages(passages=passages)
112115

113116

114117
SourceNameType = Union[
115-
str, retriever_types.Corpus, glm.Corpus, retriever_types.Document, glm.Document
118+
str, retriever_types.Corpus, protos.Corpus, retriever_types.Document, protos.Document
116119
]
117120

118121

@@ -127,15 +130,15 @@ class SemanticRetrieverConfigDict(TypedDict):
127130
SemanticRetrieverConfigOptions = Union[
128131
SourceNameType,
129132
SemanticRetrieverConfigDict,
130-
glm.SemanticRetrieverConfig,
133+
protos.SemanticRetrieverConfig,
131134
]
132135

133136

134137
def _maybe_get_source_name(source) -> str | None:
135138
if isinstance(source, str):
136139
return source
137140
elif isinstance(
138-
source, (retriever_types.Corpus, glm.Corpus, retriever_types.Document, glm.Document)
141+
source, (retriever_types.Corpus, protos.Corpus, retriever_types.Document, protos.Document)
139142
):
140143
return source.name
141144
else:
@@ -145,8 +148,8 @@ def _maybe_get_source_name(source) -> str | None:
145148
def _make_semantic_retriever_config(
146149
source: SemanticRetrieverConfigOptions,
147150
query: content_types.ContentsType,
148-
) -> glm.SemanticRetrieverConfig:
149-
if isinstance(source, glm.SemanticRetrieverConfig):
151+
) -> protos.SemanticRetrieverConfig:
152+
if isinstance(source, protos.SemanticRetrieverConfig):
150153
return source
151154

152155
name = _maybe_get_source_name(source)
@@ -156,7 +159,7 @@ def _make_semantic_retriever_config(
156159
source["source"] = _maybe_get_source_name(source["source"])
157160
else:
158161
raise TypeError(
159-
f"Invalid input: Failed to create a 'glm.SemanticRetrieverConfig' from the provided source. "
162+
f"Invalid input: Failed to create a 'protos.SemanticRetrieverConfig' from the provided source. "
160163
f"Received type: {type(source).__name__}, "
161164
f"Received value: {source}"
162165
)
@@ -166,7 +169,7 @@ def _make_semantic_retriever_config(
166169
elif isinstance(source["query"], str):
167170
source["query"] = content_types.to_content(source["query"])
168171

169-
return glm.SemanticRetrieverConfig(source)
172+
return protos.SemanticRetrieverConfig(source)
170173

171174

172175
def _make_generate_answer_request(
@@ -178,26 +181,26 @@ def _make_generate_answer_request(
178181
answer_style: AnswerStyle | None = None,
179182
safety_settings: safety_types.SafetySettingOptions | None = None,
180183
temperature: float | None = None,
181-
) -> glm.GenerateAnswerRequest:
184+
) -> protos.GenerateAnswerRequest:
182185
"""
183-
constructs a glm.GenerateAnswerRequest object by organizing the input parameters for the API call to generate a grounded answer from the model.
186+
constructs a protos.GenerateAnswerRequest object by organizing the input parameters for the API call to generate a grounded answer from the model.
184187
185188
Args:
186189
model: Name of the model used to generate the grounded response.
187190
contents: Content of the current conversation with the model. For single-turn query, this is a
188191
single question to answer. For multi-turn queries, this is a repeated field that contains
189192
conversation history and the last `Content` in the list containing the question.
190193
inline_passages: Grounding passages (a list of `Content`-like objects or `(id, content)` pairs,
191-
or a `glm.GroundingPassages`) to send inline with the request. Exclusive with `semantic_retreiver`,
194+
or a `protos.GroundingPassages`) to send inline with the request. Exclusive with `semantic_retreiver`,
192195
one must be set, but not both.
193-
semantic_retriever: A Corpus, Document, or `glm.SemanticRetrieverConfig` to use for grounding. Exclusive with
196+
semantic_retriever: A Corpus, Document, or `protos.SemanticRetrieverConfig` to use for grounding. Exclusive with
194197
`inline_passages`, one must be set, but not both.
195198
answer_style: Style for grounded answers.
196199
safety_settings: Safety settings for generated output.
197200
temperature: The temperature for randomness in the output.
198201
199202
Returns:
200-
Call for glm.GenerateAnswerRequest().
203+
Call for protos.GenerateAnswerRequest().
201204
"""
202205
model = model_types.make_model_name(model)
203206

@@ -224,7 +227,7 @@ def _make_generate_answer_request(
224227
if answer_style:
225228
answer_style = to_answer_style(answer_style)
226229

227-
return glm.GenerateAnswerRequest(
230+
return protos.GenerateAnswerRequest(
228231
model=model,
229232
contents=contents,
230233
inline_passages=inline_passages,
@@ -273,9 +276,9 @@ def generate_answer(
273276
contents: The question to be answered by the model, grounded in the
274277
provided source.
275278
inline_passages: Grounding passages (a list of `Content`-like objects or (id, content) pairs,
276-
or a `glm.GroundingPassages`) to send inline with the request. Exclusive with `semantic_retreiver`,
279+
or a `protos.GroundingPassages`) to send inline with the request. Exclusive with `semantic_retreiver`,
277280
one must be set, but not both.
278-
semantic_retriever: A Corpus, Document, or `glm.SemanticRetrieverConfig` to use for grounding. Exclusive with
281+
semantic_retriever: A Corpus, Document, or `protos.SemanticRetrieverConfig` to use for grounding. Exclusive with
279282
`inline_passages`, one must be set, but not both.
280283
answer_style: Style in which the grounded answer should be returned.
281284
safety_settings: Safety settings for generated output. Defaults to None.
@@ -327,9 +330,9 @@ async def generate_answer_async(
327330
contents: The question to be answered by the model, grounded in the
328331
provided source.
329332
inline_passages: Grounding passages (a list of `Content`-like objects or (id, content) pairs,
330-
or a `glm.GroundingPassages`) to send inline with the request. Exclusive with `semantic_retreiver`,
333+
or a `protos.GroundingPassages`) to send inline with the request. Exclusive with `semantic_retreiver`,
331334
one must be set, but not both.
332-
semantic_retriever: A Corpus, Document, or `glm.SemanticRetrieverConfig` to use for grounding. Exclusive with
335+
semantic_retriever: A Corpus, Document, or `protos.SemanticRetrieverConfig` to use for grounding. Exclusive with
333336
`inline_passages`, one must be set, but not both.
334337
answer_style: Style in which the grounded answer should be returned.
335338
safety_settings: Safety settings for generated output. Defaults to None.

google/generativeai/client.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,7 @@
1010
import httplib2
1111

1212
import google.ai.generativelanguage as glm
13+
import google.generativeai.protos as protos
1314

1415
from google.auth import credentials as ga_credentials
1516
from google.auth import exceptions as ga_exceptions
@@ -76,7 +77,7 @@ def create_file(
7677
name: str | None = None,
7778
display_name: str | None = None,
7879
resumable: bool = True,
79-
) -> glm.File:
80+
) -> protos.File:
8081
if self._discovery_api is None:
8182
self._setup_discovery_api()
8283

0 commit comments

Comments
 (0)