Skip to content

Commit

Permalink
Update readme_client to be more sensible, fix visible_models docs/use
Browse files Browse the repository at this point in the history
  • Loading branch information
pseudotensor committed Oct 31, 2023
1 parent 588928d commit 810aa5a
Show file tree
Hide file tree
Showing 3 changed files with 72 additions and 20 deletions.
47 changes: 32 additions & 15 deletions client/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -28,25 +28,42 @@ pip install client/dist/h2ogpt_client-*-py3-none-any.whl
```

## Usage

Based upon the [test code](tests/test_client.py), specifically its `test_readme_example` function:
```python
from h2ogpt_client import Client
def test_readme_example(local_server):
import os
import asyncio
from h2ogpt_client import Client

if local_server:
client = Client("http://0.0.0.0:7860")
else:
h2ogpt_key = os.getenv('H2OGPT_H2OGPT_KEY')
if h2ogpt_key is None:
return
# if you have API key for public instance:
client = Client("https://gpt.h2o.ai", h2ogpt_key=h2ogpt_key)

client = Client("http://0.0.0.0:7860")
# Text completion
text_completion = client.text_completion.create()
response = asyncio.run(text_completion.complete("Hello world"))
print("asyncio text completion response: %s" % response)
# Text completion: synchronous
response = text_completion.complete_sync("Hello world")
print("sync text completion response: %s" % response)

# Text completion
text_completion = client.text_completion.create()
response = await text_completion.complete("Hello world")
# Text completion: synchronous
response = text_completion.complete_sync("Hello world")
# Chat completion
chat_completion = client.chat_completion.create()
reply = asyncio.run(chat_completion.chat("Hey!"))
    print("asyncio chat completion user: %s gpt: %s" % (reply["user"], reply["gpt"]))
chat_history = chat_completion.chat_history()
print("chat_history: %s" % chat_history)
# Chat completion: synchronous
reply = chat_completion.chat_sync("Hey!")
print("sync chat completion gpt: %s" % reply["gpt"])

# Chat completion
chat_completion = client.chat_completion.create()
reply = await chat_completion.chat("Hey!")
print(reply["user"]) # prints user prompt, i.e. "Hey!"
print(reply["gpt"]) # prints reply from h2oGPT
chat_history = chat_completion.chat_history()
# Chat completion: synchronous
reply = chat_completion.chat_sync("Hey!")
test_readme_example(local_server=True)
```
:warning: **Note**: Client APIs are still evolving. Hence, APIs can be changed without prior warnings.

Expand Down
10 changes: 5 additions & 5 deletions client/h2ogpt_client/_core.py
Original file line number Diff line number Diff line change
Expand Up @@ -99,7 +99,7 @@ def list_models(self) -> List[str]:
pdf_loaders=[],
url_loaders=[],
jq_schema=".[]",
visible_models=[],
visible_models=0,
h2ogpt_key=None,
add_search_to_context=False,
chat_conversation=None,
Expand Down Expand Up @@ -139,7 +139,7 @@ def create(
system_pre_context: str = "",
langchain_mode: LangChainMode = LangChainMode.DISABLED,
system_prompt: str = "",
visible_models: Union[str, List[str]] = [],
visible_models: Union[str, int, List[str]] = 0,
add_search_to_context: bool = False,
text_context_list: List[str] = [],
docs_ordering_type: str = "reverse_ucurve_sort",
Expand Down Expand Up @@ -177,7 +177,7 @@ def create(
:param system_prompt: Universal system prompt to override prompt_type's system
prompt
If pass 'None' or 'auto' or None, then automatic per-model value used
:param visible_models: Single string of base model name, single integer of position of model, to get resopnse from
:param visible_models: Single string of base model name, single integer of position of model, to get response from
:param add_search_to_context: Whether to add web search of query to context
:param text_context_list: list of strings to use as context (up to allowed max_seq_len of model)
:param docs_ordering_type: By default uses 'reverse_ucurve_sort' for optimal retrieval
Expand Down Expand Up @@ -275,7 +275,7 @@ def create(
system_pre_context: str = "",
langchain_mode: LangChainMode = LangChainMode.DISABLED,
system_prompt: str = "",
visible_models: Union[str, List[str]] = [],
visible_models: Union[str, int, List[str]] = 0,
add_search_to_context: bool = False,
text_context_list: List[str] = [],
docs_ordering_type: str = "reverse_ucurve_sort",
Expand Down Expand Up @@ -312,7 +312,7 @@ def create(
:param langchain_mode: LangChain mode
:param system_prompt: Universal system prompt to override prompt_type's system
prompt
:param visible_models: Single string of base model name, single integer of position of model, to get resopnse from
:param visible_models: Single string of base model name, single integer of position of model, to get response from
:param add_search_to_context: Whether to add web search of query to context
:param text_context_list: list of strings to use as context (up to allowed max_seq_len of model)
:param docs_ordering_type: By default uses 'reverse_ucurve_sort' for optimal retrieval
Expand Down
35 changes: 35 additions & 0 deletions client/tests/test_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -77,3 +77,38 @@ def test_available_models(client):
def test_parameters_order(client, eval_func_param_names):
    # Guard against server/client drift: the client builds positional payloads,
    # so its internal parameter ordering must exactly match the server's
    # eval_func_param_names (both fixtures come from conftest).
    text_completion = client.text_completion.create()
    # NOTE(review): reaches into the private _parameters dict — assumes it is an
    # ordered mapping (insertion-ordered dict); verify if the client internals change.
    assert eval_func_param_names == list(text_completion._parameters.keys())


@pytest.mark.parametrize("local_server", [True, False])
def test_readme_example(local_server):
    """Self-contained usage example, mirrored in client/README.md.

    Exercises both async (via asyncio.run) and synchronous text/chat
    completion APIs. With local_server=True it targets a locally running
    h2oGPT server; with local_server=False it targets the public instance
    and silently skips when no H2OGPT_H2OGPT_KEY is set in the environment.

    If this code changes, copy it back to client/README.md (with
    local_server handling simplified for readers).
    """
    # imports are local so the README snippet is copy-paste runnable as-is
    import os
    import asyncio
    from h2ogpt_client import Client

    if local_server:
        client = Client("http://0.0.0.0:7860")
    else:
        h2ogpt_key = os.getenv('H2OGPT_H2OGPT_KEY')
        if h2ogpt_key is None:
            # no credentials for the public instance -> nothing to exercise
            return
        # if you have API key for public instance:
        client = Client("https://gpt.h2o.ai", h2ogpt_key=h2ogpt_key)

    # Text completion (async API driven from sync code via asyncio.run)
    text_completion = client.text_completion.create()
    response = asyncio.run(text_completion.complete("Hello world"))
    print("asyncio text completion response: %s" % response)
    # Text completion: synchronous
    response = text_completion.complete_sync("Hello world")
    print("sync text completion response: %s" % response)

    # Chat completion
    chat_completion = client.chat_completion.create()
    reply = asyncio.run(chat_completion.chat("Hey!"))
    # fix: message previously said "text completion" for a chat completion call
    print("asyncio chat completion user: %s gpt: %s" % (reply["user"], reply["gpt"]))
    chat_history = chat_completion.chat_history()
    print("chat_history: %s" % chat_history)
    # Chat completion: synchronous
    reply = chat_completion.chat_sync("Hey!")
    print("sync chat completion gpt: %s" % reply["gpt"])

0 comments on commit 810aa5a

Please sign in to comment.