Skip to content

Rename .stream()/.stream_async()'s content parameter to stream #102

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Draft
wants to merge 3 commits into
base: main
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
* `ChatSnowflake()` now supports tool calling. (#98)
* `Chat` instances can now be deep copied, which is useful for forking the chat session. (#96)

### Breaking changes

* The `.stream()`/`.stream_async()` methods' `content` parameter was renamed to `stream`. Set `stream` to `"content"` to gain access to tool request/result content objects. (#102)

### Changes

* `ChatDatabricks()`'s `model` now defaults to `databricks-claude-3-7-sonnet` instead of `databricks-dbrx-instruct`. (#95)
Expand Down
108 changes: 68 additions & 40 deletions chatlas/_chat.py
Original file line number Diff line number Diff line change
Expand Up @@ -418,9 +418,12 @@ def app(
Whether to run the app in a background thread. If `None`, the app will
run in a background thread if the current environment is a notebook.
echo
Whether to echo text content, all content (i.e., tool calls), or no
content. Defaults to `"none"` when `stream=True` and `"text"` when
`stream=False`.
One of the following (defaults to `"none"` when `stream=True` and `"text"` when
`stream=False`):
- `"text"`: Echo just the text content of the response.
- `"output"`: Echo text and tool call content.
- `"all"`: Echo both the assistant and user turns.
- `"none"`: Do not echo any content.
content
Whether to display text content or all content (i.e., tool calls).
kwargs
Expand Down Expand Up @@ -458,7 +461,7 @@ async def _(user_input: str):
user_input,
kwargs=kwargs,
echo=echo or "none",
content=content,
stream="content" if content == "all" else "text",
)
)
else:
Expand Down Expand Up @@ -508,8 +511,11 @@ def console(
Parameters
----------
echo
Whether to echo text content, all content (i.e., tool calls), or no
content.
One of the following (default is "output"):
- `"text"`: Echo just the text content of the response.
- `"output"`: Echo text and tool call content.
- `"all"`: Echo both the assistant and user turns.
- `"none"`: Do not echo any content.
stream
Whether to stream the response (i.e., have the response appear in chunks).
kwargs
Expand Down Expand Up @@ -546,8 +552,11 @@ def chat(
args
The user input(s) to generate a response from.
echo
Whether to echo text content, all content (i.e., tool calls), or no
content.
One of the following (default is "output"):
- `"text"`: Echo just the text content of the response.
- `"output"`: Echo text and tool call content.
- `"all"`: Echo both the assistant and user turns.
- `"none"`: Do not echo any content.
stream
Whether to stream the response (i.e., have the response appear in
chunks).
Expand All @@ -569,7 +578,7 @@ def chat(
self._chat_impl(
turn,
echo=echo,
content="text",
yield_content=False,
stream=stream,
kwargs=kwargs,
)
Expand All @@ -596,8 +605,11 @@ async def chat_async(
args
The user input(s) to generate a response from.
echo
Whether to echo text content, all content (i.e., tool calls, images,
etc), or no content.
One of the following (default is "output"):
- `"text"`: Echo just the text content of the response.
- `"output"`: Echo text and tool call content.
- `"all"`: Echo both the assistant and user turns.
- `"none"`: Do not echo any content.
stream
Whether to stream the response (i.e., have the response appear in
chunks).
Expand All @@ -619,7 +631,7 @@ async def chat_async(
self._chat_impl_async(
turn,
echo=echo,
content="text",
yield_content=False,
stream=stream,
kwargs=kwargs,
),
Expand All @@ -635,7 +647,7 @@ async def chat_async(
def stream(
self,
*args: Content | str,
content: Literal["text"],
stream: Literal["text"],
echo: EchoOptions = "none",
kwargs: Optional[SubmitInputArgsT] = None,
) -> Generator[str, None, None]: ...
Expand All @@ -644,7 +656,7 @@ def stream(
def stream(
self,
*args: Content | str,
content: Literal["all"],
stream: Literal["content"],
echo: EchoOptions = "none",
kwargs: Optional[SubmitInputArgsT] = None,
) -> Generator[str | ContentToolRequest | ContentToolResult, None, None]: ...
Expand All @@ -653,7 +665,7 @@ def stream(
self,
*args: Content | str,
echo: EchoOptions = "none",
content: Literal["text", "all"] = "text",
stream: Literal["text", "content"] = "text",
kwargs: Optional[SubmitInputArgsT] = None,
) -> Generator[str | ContentToolRequest | ContentToolResult, None, None]:
"""
Expand All @@ -664,10 +676,14 @@ def stream(
args
The user input(s) to generate a response from.
echo
Whether to echo text content, all content (i.e., tool calls), or no
content.
content
Whether to yield just text content, or all content (i.e., tool calls).
One of the following (default is "none"):
- `"text"`: Echo just the text content of the response.
- `"output"`: Echo text and tool call content.
- `"all"`: Echo both the assistant and user turns.
- `"none"`: Do not echo any content.
stream
Whether to yield just text content or include rich content objects
(e.g., tool calls) when relevant.
kwargs
Additional keyword arguments to pass to the method used for requesting
the response.
Expand All @@ -686,7 +702,7 @@ def stream(
turn,
stream=True,
echo=echo,
content=content,
yield_content=stream == "content",
kwargs=kwargs,
)

Expand All @@ -703,7 +719,7 @@ def wrapper() -> Generator[
async def stream_async(
self,
*args: Content | str,
content: Literal["text"],
stream: Literal["text"],
echo: EchoOptions = "none",
kwargs: Optional[SubmitInputArgsT] = None,
) -> AsyncGenerator[str, None]: ...
Expand All @@ -712,7 +728,7 @@ async def stream_async(
async def stream_async(
self,
*args: Content | str,
content: Literal["all"],
stream: Literal["content"],
echo: EchoOptions = "none",
kwargs: Optional[SubmitInputArgsT] = None,
) -> AsyncGenerator[str | ContentToolRequest | ContentToolResult, None]: ...
Expand All @@ -721,7 +737,7 @@ async def stream_async(
self,
*args: Content | str,
echo: EchoOptions = "none",
content: Literal["text", "all"] = "text",
stream: Literal["text", "content"] = "text",
kwargs: Optional[SubmitInputArgsT] = None,
) -> AsyncGenerator[str | ContentToolRequest | ContentToolResult, None]:
"""
Expand All @@ -732,10 +748,14 @@ async def stream_async(
args
The user input(s) to generate a response from.
echo
Whether to echo text content, all content (i.e., tool calls), or no
content.
content
Whether to yield just text content, or all content (i.e., tool calls).
One of the following (default is "none"):
- `"text"`: Echo just the text content of the response.
- `"output"`: Echo text and tool call content.
- `"all"`: Echo both the assistant and user turns.
- `"none"`: Do not echo any content.
stream
Whether to yield just text content or include rich content objects
(e.g., tool calls) when relevant.
kwargs
Additional keyword arguments to pass to the method used for requesting
the response.
Expand All @@ -758,7 +778,7 @@ async def wrapper() -> AsyncGenerator[
turn,
stream=True,
echo=echo,
content=content,
yield_content=stream == "content",
kwargs=kwargs,
):
yield chunk
Expand All @@ -782,7 +802,11 @@ def extract_data(
data_model
A Pydantic model describing the structure of the data to extract.
echo
Whether to echo text content, all content (i.e., tool calls), or no content.
One of the following (default is "none"):
- `"text"`: Echo just the text content of the response.
- `"output"`: Echo text and tool call content.
- `"all"`: Echo both the assistant and user turns.
- `"none"`: Do not echo any content.
stream
Whether to stream the response (i.e., have the response appear in chunks).

Expand Down Expand Up @@ -840,7 +864,11 @@ async def extract_data_async(
data_model
A Pydantic model describing the structure of the data to extract.
echo
Whether to echo text content, all content (i.e., tool calls), or no content
One of the following (default is "none"):
- `"text"`: Echo just the text content of the response.
- `"output"`: Echo text and tool call content.
- `"all"`: Echo both the assistant and user turns.
- `"none"`: Do not echo any content.
stream
Whether to stream the response (i.e., have the response appear in chunks).
Defaults to `True` if `echo` is not "none".
Expand Down Expand Up @@ -1192,7 +1220,7 @@ def _chat_impl(
self,
user_turn: Turn,
echo: EchoOptions,
content: Literal["text"],
yield_content: Literal[False],
stream: bool,
kwargs: Optional[SubmitInputArgsT] = None,
) -> Generator[str, None, None]: ...
Expand All @@ -1202,7 +1230,7 @@ def _chat_impl(
self,
user_turn: Turn,
echo: EchoOptions,
content: Literal["all"],
yield_content: Literal[True],
stream: bool,
kwargs: Optional[SubmitInputArgsT] = None,
) -> Generator[str | ContentToolRequest | ContentToolResult, None, None]: ...
Expand All @@ -1211,7 +1239,7 @@ def _chat_impl(
self,
user_turn: Turn,
echo: EchoOptions,
content: Literal["text", "all"],
yield_content: bool,
stream: bool,
kwargs: Optional[SubmitInputArgsT] = None,
) -> Generator[str | ContentToolRequest | ContentToolResult, None, None]:
Expand All @@ -1234,12 +1262,12 @@ def _chat_impl(
if isinstance(x, ContentToolRequest):
if echo == "output":
self._echo_content(f"\n\n{x}\n\n")
if content == "all":
if yield_content:
yield x
res = self._invoke_tool(x)
if echo == "output":
self._echo_content(f"\n\n{res}\n\n")
if content == "all":
if yield_content:
yield res
results.append(res)

Expand All @@ -1251,7 +1279,7 @@ def _chat_impl_async(
self,
user_turn: Turn,
echo: EchoOptions,
content: Literal["text"],
yield_content: Literal[False],
stream: bool,
kwargs: Optional[SubmitInputArgsT] = None,
) -> AsyncGenerator[str, None]: ...
Expand All @@ -1261,7 +1289,7 @@ def _chat_impl_async(
self,
user_turn: Turn,
echo: EchoOptions,
content: Literal["all"],
yield_content: Literal[True],
stream: bool,
kwargs: Optional[SubmitInputArgsT] = None,
) -> AsyncGenerator[str | ContentToolRequest | ContentToolResult, None]: ...
Expand All @@ -1270,7 +1298,7 @@ async def _chat_impl_async(
self,
user_turn: Turn,
echo: EchoOptions,
content: Literal["text", "all"],
yield_content: bool,
stream: bool,
kwargs: Optional[SubmitInputArgsT] = None,
) -> AsyncGenerator[str | ContentToolRequest | ContentToolResult, None]:
Expand All @@ -1293,12 +1321,12 @@ async def _chat_impl_async(
if isinstance(x, ContentToolRequest):
if echo == "output":
self._echo_content(f"\n\n{x}\n\n")
if content == "all":
if yield_content:
yield x
res = await self._invoke_tool_async(x)
if echo == "output":
self._echo_content(f"\n\n{res}\n\n")
if content == "all":
if yield_content:
yield res
else:
yield "\n\n"
Expand Down
2 changes: 1 addition & 1 deletion tests/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -93,7 +93,7 @@ def get_date():

chat.register_tool(get_date)

response = chat.stream("What's the current date in Y-M-D format?", content="all")
response = chat.stream("What's the current date in Y-M-D format?", stream="content")
chunks = [chunk for chunk in response]
request = [x for x in chunks if isinstance(x, ContentToolRequest)]
assert len(request) == 1
Expand Down