Skip to content

Commit

Permalink
Stream by default, added --no-stream option, closes #25
Browse files Browse the repository at this point in the history
Also finished the work needed to remove --code, refs #24
  • Loading branch information
simonw committed Jun 15, 2023
1 parent 37a2aba commit 1e4a855
Show file tree
Hide file tree
Showing 4 changed files with 23 additions and 34 deletions.
10 changes: 5 additions & 5 deletions docs/usage.md
Original file line number Diff line number Diff line change
Expand Up @@ -4,13 +4,13 @@ The default command for this is `llm prompt` - you can use `llm` instead if you

## Executing a prompt

To run a prompt:
To run a prompt, streaming tokens as they come in:

llm 'Ten names for cheesecakes'

To stream the results a token at a time:
To disable streaming and only return the response once it has completed:

llm 'Ten names for cheesecakes' -s
llm 'Ten names for cheesecakes' --no-stream

To switch from ChatGPT 3.5 (the default) to GPT-4 if you have access:

Expand Down Expand Up @@ -52,10 +52,10 @@ This pattern of using `$(command)` inside a double quoted string is a useful way

You can use `--system '...'` to set a system prompt.

llm 'SQL to calculate total sales by month' -s \
llm 'SQL to calculate total sales by month' \
--system 'You are an exaggerated sentient cheesecake that knows SQL and talks about cheesecake a lot'

This is useful for piping content to standard input, for example:

curl -s 'https://simonwillison.net/2023/May/15/per-interpreter-gils/' | \
llm --system 'Suggest topics for this post as a JSON array' --stream
llm --system 'Suggest topics for this post as a JSON array'
35 changes: 11 additions & 24 deletions llm/cli.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,6 @@
import os
import pathlib
from platformdirs import user_data_dir
import requests
import sqlite_utils
import sys
import warnings
Expand All @@ -31,7 +30,7 @@ def cli():
@click.option("--system", help="System prompt to use")
@click.option("-4", "--gpt4", is_flag=True, help="Use GPT-4")
@click.option("-m", "--model", help="Model to use")
@click.option("-s", "--stream", is_flag=True, help="Stream output")
@click.option("--no-stream", is_flag=True, help="Do not stream output")
@click.option("-n", "--no-log", is_flag=True, help="Don't log to database")
@click.option(
"_continue",
Expand All @@ -48,7 +47,7 @@ def cli():
type=int,
)
@click.option("--key", help="API key to use")
def prompt(prompt, system, gpt4, model, stream, no_log, code, _continue, chat_id, key):
def prompt(prompt, system, gpt4, model, no_stream, no_log, _continue, chat_id, key):
"Execute a prompt against on OpenAI model"
if prompt is None:
# Read from stdin instead
Expand Down Expand Up @@ -78,7 +77,15 @@ def prompt(prompt, system, gpt4, model, stream, no_log, code, _continue, chat_id
if model is None:
model = history_model or DEFAULT_MODEL
try:
if stream:
if no_stream:
response = openai.ChatCompletion.create(
model=model,
messages=messages,
)
content = response.choices[0].message.content
log(no_log, "openai", system, prompt, content, model, chat_id)
print(content)
else:
response = []
for chunk in openai.ChatCompletion.create(
model=model,
Expand All @@ -92,16 +99,6 @@ def prompt(prompt, system, gpt4, model, stream, no_log, code, _continue, chat_id
sys.stdout.flush()
print("")
log(no_log, "openai", system, prompt, "".join(response), model, chat_id)
else:
response = openai.ChatCompletion.create(
model=model,
messages=messages,
)
content = response.choices[0].message.content
log(no_log, "openai", system, prompt, content, model, chat_id)
if code:
content = unwrap_markdown(content)
print(content)
except openai.error.AuthenticationError as ex:
raise click.ClickException("{}: {}".format(ex.error.type, ex.error.code))
except openai.error.OpenAIError as ex:
Expand Down Expand Up @@ -279,13 +276,3 @@ def get_history(chat_id):
"rowid = ? or chat_id = ?", [chat_id, chat_id], order_by="rowid"
)
return chat_id, rows


def unwrap_markdown(content):
    """Strip a single leading/trailing triple-backtick fence line from *content*.

    Removes the first line if it starts with ``` (covers language-tagged
    fences like ```python) and the last line if it is exactly ```.
    Everything in between is returned unchanged.
    """
    lines = content.split("\n")
    # Guard against empty lists: a content of just "```" would otherwise
    # leave lines empty and make lines[-1] raise IndexError.
    if lines and lines[0].strip().startswith("```"):
        lines = lines[1:]
    if lines and lines[-1].strip() == "```":
        lines = lines[:-1]
    return "\n".join(lines)
10 changes: 6 additions & 4 deletions tests/test_keys.py
Original file line number Diff line number Diff line change
Expand Up @@ -59,21 +59,23 @@ def assert_key(key):

runner = CliRunner()
# Called without --key uses environment variable
result = runner.invoke(cli, ["hello"], catch_exceptions=False)
result = runner.invoke(cli, ["hello", "--no-stream"], catch_exceptions=False)
assert result.exit_code == 0
assert_key("from-env")
# Called without --key and with no environment variable uses keys.json
monkeypatch.setenv("OPENAI_API_KEY", "")
result2 = runner.invoke(cli, ["hello"], catch_exceptions=False)
result2 = runner.invoke(cli, ["hello", "--no-stream"], catch_exceptions=False)
assert result2.exit_code == 0
assert_key("from-keys-file")
# Called with --key name-in-keys.json uses that value
result3 = runner.invoke(cli, ["hello", "--key", "other"], catch_exceptions=False)
result3 = runner.invoke(
cli, ["hello", "--key", "other", "--no-stream"], catch_exceptions=False
)
assert result3.exit_code == 0
assert_key("other-key")
# Called with --key something-else uses exactly that
result4 = runner.invoke(
cli, ["hello", "--key", "custom-key"], catch_exceptions=False
cli, ["hello", "--key", "custom-key", "--no-stream"], catch_exceptions=False
)
assert result4.exit_code == 0
assert_key("custom-key")
2 changes: 1 addition & 1 deletion tests/test_llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -61,7 +61,7 @@ def test_llm_default_prompt(requests_mock, use_stdin):
runner = CliRunner()
prompt = "three names for a pet pelican"
input = None
args = []
args = ["--no-stream"]
if use_stdin:
input = prompt
else:
Expand Down

0 comments on commit 1e4a855

Please sign in to comment.