Skip to content

Commit 721ffce

Browse files
committed
first commit, seems to be working
1 parent 419a7ce commit 721ffce

Some content is hidden

Large commits have some content hidden by default. Use the searchbox below for content that may be hidden.

48 files changed

+219
-208
lines changed

google/generativeai/answer.py

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -15,9 +15,10 @@
1515
from __future__ import annotations
1616

1717
import dataclasses
18-
from collections.abc import Iterable
18+
from typing import Iterable
1919
import itertools
2020
from typing import Any, Iterable, Union, Mapping, Optional
21+
from typing import Dict, List, Tuple
2122
from typing_extensions import TypedDict
2223

2324
import google.ai.generativelanguage as glm
@@ -40,7 +41,7 @@
4041

4142
AnswerStyleOptions = Union[int, str, AnswerStyle]
4243

43-
_ANSWER_STYLES: dict[AnswerStyleOptions, AnswerStyle] = {
44+
_ANSWER_STYLES: Dict[AnswerStyleOptions, AnswerStyle] = {
4445
AnswerStyle.ANSWER_STYLE_UNSPECIFIED: AnswerStyle.ANSWER_STYLE_UNSPECIFIED,
4546
0: AnswerStyle.ANSWER_STYLE_UNSPECIFIED,
4647
"answer_style_unspecified": AnswerStyle.ANSWER_STYLE_UNSPECIFIED,
@@ -68,7 +69,7 @@ def to_answer_style(x: AnswerStyleOptions) -> AnswerStyle:
6869

6970
GroundingPassageOptions = (
7071
Union[
71-
protos.GroundingPassage, tuple[str, content_types.ContentType], content_types.ContentType
72+
protos.GroundingPassage, Tuple[str, content_types.ContentType], content_types.ContentType
7273
],
7374
)
7475

google/generativeai/client.py

Lines changed: 8 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,8 @@
66
import pathlib
77
import types
88
from typing import Any, cast
9-
from collections.abc import Sequence
9+
from typing import Dict, List, Tuple
10+
from typing import Sequence
1011
import httplib2
1112

1213
import google.ai.generativelanguage as glm
@@ -105,12 +106,12 @@ async def create_file(self, *args, **kwargs):
105106

106107
@dataclasses.dataclass
107108
class _ClientManager:
108-
client_config: dict[str, Any] = dataclasses.field(default_factory=dict)
109-
default_metadata: Sequence[tuple[str, str]] = ()
109+
client_config: Dict[str, Any] = dataclasses.field(default_factory=dict)
110+
default_metadata: Sequence[Tuple[str, str]] = ()
110111

111112
discuss_client: glm.DiscussServiceClient | None = None
112113
discuss_async_client: glm.DiscussServiceAsyncClient | None = None
113-
clients: dict[str, Any] = dataclasses.field(default_factory=dict)
114+
clients: Dict[str, Any] = dataclasses.field(default_factory=dict)
114115

115116
def configure(
116117
self,
@@ -124,9 +125,9 @@ def configure(
124125
# We could accept a dict since all the `Transport` classes take the same args,
125126
# but that seems rare. Users that need it can just switch to the low level API.
126127
transport: str | None = None,
127-
client_options: client_options_lib.ClientOptions | dict[str, Any] | None = None,
128+
client_options: client_options_lib.ClientOptions | Dict[str, Any] | None = None,
128129
client_info: gapic_v1.client_info.ClientInfo | None = None,
129-
default_metadata: Sequence[tuple[str, str]] = (),
130+
default_metadata: Sequence[Tuple[str, str]] = (),
130131
) -> None:
131132
"""Initializes default client configurations using specified parameters or environment variables.
132133
@@ -282,7 +283,7 @@ def configure(
282283
transport: str | None = None,
283284
client_options: client_options_lib.ClientOptions | dict | None = None,
284285
client_info: gapic_v1.client_info.ClientInfo | None = None,
285-
default_metadata: Sequence[tuple[str, str]] = (),
286+
default_metadata: Sequence[Tuple[str, str]] = (),
286287
):
287288
"""Captures default client configuration.
288289

google/generativeai/embedding.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@
1616

1717
import itertools
1818
from typing import Any, Iterable, overload, TypeVar, Union, Mapping
19-
19+
from typing import Dict, List, Tuple
2020
import google.ai.generativelanguage as glm
2121
from google.generativeai import protos
2222

@@ -35,7 +35,7 @@
3535

3636
EmbeddingTaskTypeOptions = Union[int, str, EmbeddingTaskType]
3737

38-
_EMBEDDING_TASK_TYPE: dict[EmbeddingTaskTypeOptions, EmbeddingTaskType] = {
38+
_EMBEDDING_TASK_TYPE: Dict[EmbeddingTaskTypeOptions, EmbeddingTaskType] = {
3939
EmbeddingTaskType.TASK_TYPE_UNSPECIFIED: EmbeddingTaskType.TASK_TYPE_UNSPECIFIED,
4040
0: EmbeddingTaskType.TASK_TYPE_UNSPECIFIED,
4141
"task_type_unspecified": EmbeddingTaskType.TASK_TYPE_UNSPECIFIED,
@@ -81,7 +81,7 @@ def to_task_type(x: EmbeddingTaskTypeOptions) -> EmbeddingTaskType:
8181
except AttributeError:
8282
T = TypeVar("T")
8383

84-
def _batched(iterable: Iterable[T], n: int) -> Iterable[list[T]]:
84+
def _batched(iterable: Iterable[T], n: int) -> Iterable[List[T]]:
8585
if n < 1:
8686
raise ValueError(
8787
f"Invalid input: The batch size 'n' must be a positive integer. You entered: {n}. Please enter a number greater than 0."

google/generativeai/generative_models.py

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@
22

33
from __future__ import annotations
44

5-
from collections.abc import Iterable
5+
from typing import Iterable
66
import textwrap
77
from typing import Any, Union, overload
88
import reprlib
@@ -278,7 +278,7 @@ def generate_content(
278278
279279
### Input type flexibility
280280
281-
While the underlying API strictly expects a `list[protos.Content]` objects, this method
281+
While the underlying API strictly expects a `List[protos.Content]` objects, this method
282282
will convert the user input into the correct type. The hierarchy of types that can be
283283
converted is below. Any of these objects can be passed as an equivalent `dict`.
284284
@@ -504,7 +504,7 @@ def __init__(
504504
enable_automatic_function_calling: bool = False,
505505
):
506506
self.model: GenerativeModel = model
507-
self._history: list[protos.Content] = content_types.to_contents(history)
507+
self._history: List[protos.Content] = content_types.to_contents(history)
508508
self._last_sent: protos.Content | None = None
509509
self._last_received: generation_types.BaseGenerateContentResponse | None = None
510510
self.enable_automatic_function_calling = enable_automatic_function_calling
@@ -615,7 +615,7 @@ def _check_response(self, *, response, stream):
615615
):
616616
raise generation_types.StopCandidateException(response.candidates[0])
617617

618-
def _get_function_calls(self, response) -> list[protos.FunctionCall]:
618+
def _get_function_calls(self, response) -> List[protos.FunctionCall]:
619619
candidates = response.candidates
620620
if len(candidates) != 1:
621621
raise ValueError(
@@ -635,14 +635,14 @@ def _handle_afc(
635635
stream,
636636
tools_lib,
637637
request_options,
638-
) -> tuple[list[protos.Content], protos.Content, generation_types.BaseGenerateContentResponse]:
638+
) -> Tuple[List[protos.Content], protos.Content, generation_types.BaseGenerateContentResponse]:
639639

640640
while function_calls := self._get_function_calls(response):
641641
if not all(callable(tools_lib[fc]) for fc in function_calls):
642642
break
643643
history.append(response.candidates[0].content)
644644

645-
function_response_parts: list[protos.Part] = []
645+
function_response_parts: List[protos.Part] = []
646646
for fc in function_calls:
647647
fr = tools_lib(fc)
648648
assert fr is not None, (
@@ -742,14 +742,14 @@ async def _handle_afc_async(
742742
stream,
743743
tools_lib,
744744
request_options,
745-
) -> tuple[list[protos.Content], protos.Content, generation_types.BaseGenerateContentResponse]:
745+
) -> Tuple[List[protos.Content], protos.Content, generation_types.BaseGenerateContentResponse]:
746746

747747
while function_calls := self._get_function_calls(response):
748748
if not all(callable(tools_lib[fc]) for fc in function_calls):
749749
break
750750
history.append(response.candidates[0].content)
751751

752-
function_response_parts: list[protos.Part] = []
752+
function_response_parts: List[protos.Part] = []
753753
for fc in function_calls:
754754
fr = tools_lib(fc)
755755
assert fr is not None, (
@@ -782,7 +782,7 @@ def __copy__(self):
782782
history=list(self.history),
783783
)
784784

785-
def rewind(self) -> tuple[protos.Content, protos.Content]:
785+
def rewind(self) -> Tuple[protos.Content, protos.Content]:
786786
"""Removes the last request/response pair from the chat history."""
787787
if self._last_received is None:
788788
result = self._history.pop(-2), self._history.pop()
@@ -799,7 +799,7 @@ def last(self) -> generation_types.BaseGenerateContentResponse | None:
799799
return self._last_received
800800

801801
@property
802-
def history(self) -> list[protos.Content]:
802+
def history(self) -> List[protos.Content]:
803803
"""The chat history."""
804804
last = self._last_received
805805
if last is None:

google/generativeai/models.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@
1616

1717
import typing
1818
from typing import Any, Literal
19-
19+
from typing import Dict, List, Tuple
2020
import google.ai.generativelanguage as glm
2121

2222
from google.generativeai import protos
@@ -382,7 +382,7 @@ def update_tuned_model(
382382
@typing.overload
383383
def update_tuned_model(
384384
tuned_model: str,
385-
updates: dict[str, Any],
385+
updates: Dict[str, Any],
386386
*,
387387
client: glm.ModelServiceClient | None = None,
388388
request_options: helper_types.RequestOptionsType | None = None,
@@ -392,7 +392,7 @@ def update_tuned_model(
392392

393393
def update_tuned_model(
394394
tuned_model: str | protos.TunedModel,
395-
updates: dict[str, Any] | None = None,
395+
updates: Dict[str, Any] | None = None,
396396
*,
397397
client: glm.ModelServiceClient | None = None,
398398
request_options: helper_types.RequestOptionsType | None = None,

google/generativeai/notebook/argument_parser.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -92,7 +92,7 @@ class ArgumentParser(argparse.ArgumentParser):
9292

9393
def __init__(self, *args, **kwargs):
9494
super().__init__(*args, **kwargs)
95-
self._messages: list[str] = []
95+
self._messages: List[str] = []
9696

9797
def _print_message(self, message, file=None):
9898
"""Override ArgumentParser's _print_message() method."""

google/generativeai/notebook/cmd_line_parser.py

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -63,7 +63,7 @@ def _fn(var_name: str) -> llmfn_inputs_source.LLMFnInputsSource:
6363

6464
def _resolve_compare_fn_var(
6565
name: str,
66-
) -> tuple[str, parsed_args_lib.TextResultCompareFn]:
66+
) -> Tuple[str, parsed_args_lib.TextResultCompareFn]:
6767
"""Resolves a value passed into --compare_fn."""
6868
fn = py_utils.get_py_var(name)
6969
if not isinstance(fn, Callable):
@@ -320,7 +320,7 @@ def _create_compare_parser(
320320
# Add positional arguments.
321321
def _resolve_llm_function_fn(
322322
var_name: str,
323-
) -> tuple[str, llm_function.LLMFunction]:
323+
) -> Tuple[str, llm_function.LLMFunction]:
324324
try:
325325
py_utils.validate_var_name(var_name)
326326
except ValueError as e:
@@ -426,7 +426,7 @@ class CmdLineParser:
426426
def _split_post_processing_tokens(
427427
cls,
428428
tokens: Sequence[str],
429-
) -> tuple[Sequence[str], parsed_args_lib.PostProcessingTokens]:
429+
) -> Tuple[Sequence[str], parsed_args_lib.PostProcessingTokens]:
430430
"""Splits inputs into the command and post processing tokens.
431431
432432
The command is represented as a sequence of tokens.
@@ -459,7 +459,7 @@ def _split_post_processing_tokens(
459459
@classmethod
460460
def _tokenize_line(
461461
cls, line: str
462-
) -> tuple[Sequence[str], parsed_args_lib.PostProcessingTokens]:
462+
) -> Tuple[Sequence[str], parsed_args_lib.PostProcessingTokens]:
463463
"""Parses `line` and returns command line and post processing tokens."""
464464
# Check to make sure there is a command at the start. If not, add the
465465
# default command to the list of tokens.
@@ -476,7 +476,7 @@ def _tokenize_line(
476476
@classmethod
477477
def _get_model_args(
478478
cls, parsed_results: MutableMapping[str, Any]
479-
) -> tuple[MutableMapping[str, Any], model_lib.ModelArguments]:
479+
) -> Tuple[MutableMapping[str, Any], model_lib.ModelArguments]:
480480
"""Extracts fields for model args from `parsed_results`.
481481
482482
Keys specific to model arguments will be removed from `parsed_results`.
@@ -503,7 +503,7 @@ def parse_line(
503503
self,
504504
line: str,
505505
placeholders: AbstractSet[str] | None = None,
506-
) -> tuple[parsed_args_lib.ParsedArgs, parsed_args_lib.PostProcessingTokens]:
506+
) -> Tuple[parsed_args_lib.ParsedArgs, parsed_args_lib.PostProcessingTokens]:
507507
"""Parses the commandline and returns ParsedArgs and post-processing tokens.
508508
509509
Args:

google/generativeai/notebook/command_utils.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -53,7 +53,7 @@ def _call_impl(
5353
)
5454
)
5555

56-
outputs: list[llmfn_outputs.LLMFnOutputEntry] = []
56+
outputs: List[llmfn_outputs.LLMFnOutputEntry] = []
5757
for idx, (value, prompt_vars) in enumerate(zip(self._data, normalized_inputs)):
5858
output_row = llmfn_output_row.LLMFnOutputRow(
5959
data={
@@ -87,7 +87,7 @@ def create_llm_function(
8787
post_processing_fns: Sequence[post_process_utils.ParsedPostProcessExpr],
8888
) -> llm_function.LLMFunction:
8989
"""Creates an LLMFunction from Command.execute() arguments."""
90-
prompts: list[str] = [cell_content]
90+
prompts: List[str] = [cell_content]
9191

9292
llmfn_outputs_display_fn = _get_ipython_display_fn(env) if env else None
9393

@@ -106,8 +106,8 @@ def create_llm_function(
106106

107107

108108
def _convert_simple_compare_fn(
109-
name_and_simple_fn: tuple[str, Callable[[str, str], Any]]
110-
) -> tuple[str, llm_function.CompareFn]:
109+
name_and_simple_fn: Tuple[str, Callable[[str, str], Any]]
110+
) -> Tuple[str, llm_function.CompareFn]:
111111
simple_fn = name_and_simple_fn[1]
112112
new_fn = lambda x, y: simple_fn(x.result_value(), y.result_value())
113113
return name_and_simple_fn[0], new_fn

google/generativeai/notebook/flag_def.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -107,7 +107,7 @@ class FlagDef(abc.ABC):
107107
dest_type: type[_DESTTYPES] | None = None
108108
parse_to_dest_type_fn: _PARSEFN | None = None
109109

110-
choices: list[_PARSETYPES] | None = None
110+
choices: List[_PARSETYPES] | None = None
111111
help_msg: str | None = None
112112

113113
@abc.abstractmethod

google/generativeai/notebook/gspread_client.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -68,7 +68,7 @@ def get_all_records(
6868
self,
6969
sid: sheets_id.SheetsIdentifier,
7070
worksheet_id: int,
71-
) -> tuple[Sequence[Mapping[str, str]], Callable[[], None]]:
71+
) -> Tuple[Sequence[Mapping[str, str]], Callable[[], None]]:
7272
"""Returns all records for a Google Sheets worksheet."""
7373

7474
@abc.abstractmethod
@@ -126,7 +126,7 @@ def get_all_records(
126126
self,
127127
sid: sheets_id.SheetsIdentifier,
128128
worksheet_id: int,
129-
) -> tuple[Sequence[Mapping[str, str]], Callable[[], None]]:
129+
) -> Tuple[Sequence[Mapping[str, str]], Callable[[], None]]:
130130
sheet = self._open(sid)
131131
worksheet = sheet.get_worksheet(worksheet_id)
132132

@@ -195,7 +195,7 @@ def get_all_records(
195195
self,
196196
sid: sheets_id.SheetsIdentifier,
197197
worksheet_id: int,
198-
) -> tuple[Sequence[Mapping[str, str]], Callable[[], None]]:
198+
) -> Tuple[Sequence[Mapping[str, str]], Callable[[], None]]:
199199
raise _get_import_error()
200200

201201
def write_records(

0 commit comments

Comments (0)