4 changes: 2 additions & 2 deletions .stats.yml
@@ -1,2 +1,2 @@
configured_endpoints: 33
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/contextual-ai%2Fsunrise-4ed32c3243ce7a772e55bb1ba204736fc3fb1d712d8ca0eb91bac0c7ac626938.yml
configured_endpoints: 35
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/contextual-ai%2Fsunrise-c0b9cfd71efb976777313fb342d2e31ebeb44b1b3f9bb7ddea971e6b2bc5fd19.yml
24 changes: 24 additions & 0 deletions api.md
@@ -186,3 +186,27 @@ from contextual.types import LMUnitCreateResponse
Methods:

- <code title="post /lmunit">client.lmunit.<a href="./src/contextual/resources/lmunit.py">create</a>(\*\*<a href="src/contextual/types/lmunit_create_params.py">params</a>) -> <a href="./src/contextual/types/lmunit_create_response.py">LMUnitCreateResponse</a></code>

# Rerank

Types:

```python
from contextual.types import RerankCreateResponse
```

Methods:

- <code title="post /rerank">client.rerank.<a href="./src/contextual/resources/rerank.py">create</a>(\*\*<a href="src/contextual/types/rerank_create_params.py">params</a>) -> <a href="./src/contextual/types/rerank_create_response.py">RerankCreateResponse</a></code>

# Generate

Types:

```python
from contextual.types import GenerateCreateResponse
```

Methods:

- <code title="post /generate">client.generate.<a href="./src/contextual/resources/generate.py">create</a>(\*\*<a href="src/contextual/types/generate_create_params.py">params</a>) -> <a href="./src/contextual/types/generate_create_response.py">GenerateCreateResponse</a></code>
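The two new resources follow the same pattern as `lmunit` above. A minimal usage sketch; the exact request fields live in `rerank_create_params.py` and `generate_create_params.py` and are not shown in this diff, so the parameter names below are assumptions:

```python
# Hypothetical usage of the new endpoints. Field names such as `model`,
# `query`, `documents`, `messages`, and `knowledge` are assumptions, not
# confirmed by this diff; consult the *_create_params.py modules for the
# actual request schemas.
from contextual import ContextualAI

client = ContextualAI(api_key="...")  # or rely on the client's env-var default

reranked = client.rerank.create(
    model="example-reranker-model",                                    # assumed field
    query="What was last quarter's revenue?",                          # assumed field
    documents=["Q3 revenue was $1.2M.", "The office moved in May."],   # assumed field
)

generated = client.generate.create(
    model="example-grounded-lm",                                       # assumed field
    messages=[{"role": "user", "content": "Summarize Q3 results."}],   # assumed field
    knowledge=["Q3 revenue was $1.2M, up $0.1M from Q2."],             # assumed field
)
```

Both calls return the typed `RerankCreateResponse` and `GenerateCreateResponse` models listed above.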
18 changes: 17 additions & 1 deletion src/contextual/_client.py
@@ -24,7 +24,7 @@
get_async_library,
)
from ._version import __version__
from .resources import lmunit
from .resources import lmunit, rerank, generate
from ._streaming import Stream as Stream, AsyncStream as AsyncStream
from ._exceptions import APIStatusError, ContextualAIError
from ._base_client import (
@@ -51,6 +51,8 @@ class ContextualAI(SyncAPIClient):
datastores: datastores.DatastoresResource
agents: agents.AgentsResource
lmunit: lmunit.LMUnitResource
rerank: rerank.RerankResource
generate: generate.GenerateResource
with_raw_response: ContextualAIWithRawResponse
with_streaming_response: ContextualAIWithStreamedResponse

@@ -111,6 +113,8 @@ def __init__(
self.datastores = datastores.DatastoresResource(self)
self.agents = agents.AgentsResource(self)
self.lmunit = lmunit.LMUnitResource(self)
self.rerank = rerank.RerankResource(self)
self.generate = generate.GenerateResource(self)
self.with_raw_response = ContextualAIWithRawResponse(self)
self.with_streaming_response = ContextualAIWithStreamedResponse(self)

@@ -223,6 +227,8 @@ class AsyncContextualAI(AsyncAPIClient):
datastores: datastores.AsyncDatastoresResource
agents: agents.AsyncAgentsResource
lmunit: lmunit.AsyncLMUnitResource
rerank: rerank.AsyncRerankResource
generate: generate.AsyncGenerateResource
with_raw_response: AsyncContextualAIWithRawResponse
with_streaming_response: AsyncContextualAIWithStreamedResponse

@@ -283,6 +289,8 @@ def __init__(
self.datastores = datastores.AsyncDatastoresResource(self)
self.agents = agents.AsyncAgentsResource(self)
self.lmunit = lmunit.AsyncLMUnitResource(self)
self.rerank = rerank.AsyncRerankResource(self)
self.generate = generate.AsyncGenerateResource(self)
self.with_raw_response = AsyncContextualAIWithRawResponse(self)
self.with_streaming_response = AsyncContextualAIWithStreamedResponse(self)

@@ -396,27 +404,35 @@ def __init__(self, client: ContextualAI) -> None:
self.datastores = datastores.DatastoresResourceWithRawResponse(client.datastores)
self.agents = agents.AgentsResourceWithRawResponse(client.agents)
self.lmunit = lmunit.LMUnitResourceWithRawResponse(client.lmunit)
self.rerank = rerank.RerankResourceWithRawResponse(client.rerank)
self.generate = generate.GenerateResourceWithRawResponse(client.generate)


class AsyncContextualAIWithRawResponse:
def __init__(self, client: AsyncContextualAI) -> None:
self.datastores = datastores.AsyncDatastoresResourceWithRawResponse(client.datastores)
self.agents = agents.AsyncAgentsResourceWithRawResponse(client.agents)
self.lmunit = lmunit.AsyncLMUnitResourceWithRawResponse(client.lmunit)
self.rerank = rerank.AsyncRerankResourceWithRawResponse(client.rerank)
self.generate = generate.AsyncGenerateResourceWithRawResponse(client.generate)


class ContextualAIWithStreamedResponse:
def __init__(self, client: ContextualAI) -> None:
self.datastores = datastores.DatastoresResourceWithStreamingResponse(client.datastores)
self.agents = agents.AgentsResourceWithStreamingResponse(client.agents)
self.lmunit = lmunit.LMUnitResourceWithStreamingResponse(client.lmunit)
self.rerank = rerank.RerankResourceWithStreamingResponse(client.rerank)
self.generate = generate.GenerateResourceWithStreamingResponse(client.generate)


class AsyncContextualAIWithStreamedResponse:
def __init__(self, client: AsyncContextualAI) -> None:
self.datastores = datastores.AsyncDatastoresResourceWithStreamingResponse(client.datastores)
self.agents = agents.AsyncAgentsResourceWithStreamingResponse(client.agents)
self.lmunit = lmunit.AsyncLMUnitResourceWithStreamingResponse(client.lmunit)
self.rerank = rerank.AsyncRerankResourceWithStreamingResponse(client.rerank)
self.generate = generate.AsyncGenerateResourceWithStreamingResponse(client.generate)


Client = ContextualAI
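The async client and the raw/streaming wrappers get the same wiring, so the new resources are reachable from every client variant. A sketch under the same assumed parameter names as above:

```python
# Sketch of the async client, mirroring the sync wiring added in _client.py.
# The rerank fields shown are assumed placeholders, not confirmed by this diff.
import asyncio

from contextual import AsyncContextualAI


async def main() -> None:
    client = AsyncContextualAI(api_key="...")

    # Plain attribute access, as wired in AsyncContextualAI.__init__ above.
    result = await client.rerank.create(
        model="example-reranker-model",            # assumed field
        query="What was last quarter's revenue?",  # assumed field
        documents=["Q3 revenue was $1.2M."],       # assumed field
    )
    print(result)

    # The wrappers expose the same resources, e.g.:
    # raw = await client.with_raw_response.generate.create(...)


asyncio.run(main())
```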
28 changes: 28 additions & 0 deletions src/contextual/resources/__init__.py
@@ -16,6 +16,22 @@
LMUnitResourceWithStreamingResponse,
AsyncLMUnitResourceWithStreamingResponse,
)
from .rerank import (
RerankResource,
AsyncRerankResource,
RerankResourceWithRawResponse,
AsyncRerankResourceWithRawResponse,
RerankResourceWithStreamingResponse,
AsyncRerankResourceWithStreamingResponse,
)
from .generate import (
GenerateResource,
AsyncGenerateResource,
GenerateResourceWithRawResponse,
AsyncGenerateResourceWithRawResponse,
GenerateResourceWithStreamingResponse,
AsyncGenerateResourceWithStreamingResponse,
)
from .datastores import (
DatastoresResource,
AsyncDatastoresResource,
@@ -44,4 +60,16 @@
"AsyncLMUnitResourceWithRawResponse",
"LMUnitResourceWithStreamingResponse",
"AsyncLMUnitResourceWithStreamingResponse",
"RerankResource",
"AsyncRerankResource",
"RerankResourceWithRawResponse",
"AsyncRerankResourceWithRawResponse",
"RerankResourceWithStreamingResponse",
"AsyncRerankResourceWithStreamingResponse",
"GenerateResource",
"AsyncGenerateResource",
"GenerateResourceWithRawResponse",
"AsyncGenerateResourceWithRawResponse",
"GenerateResourceWithStreamingResponse",
"AsyncGenerateResourceWithStreamingResponse",
]
48 changes: 24 additions & 24 deletions src/contextual/resources/agents/datasets/evaluate.py
@@ -81,20 +81,20 @@ def create(
) -> CreateDatasetResponse:
"""
Create a new evaluation `Dataset` for the specified `Agent` using the provided
JSONL file. A `Dataset` is a versioned collection of samples conforming to a
particular schema, and can be used to store `Evaluation` test-sets and retrieve
`Evaluation` results.
JSONL or CSV file. A `Dataset` is a versioned collection of samples conforming
to a particular schema, and can be used to store `Evaluation` test-sets and
retrieve `Evaluation` results.

Each `Dataset` is versioned and validated against its schema during creation and
subsequent updates. The provided `Dataset` file must conform to the schema
defined for the `dataset_type`.

File schema for `dataset_type` `evaluation_set` is a JSONL or CSV file where
each line is one JSON object with the following required keys:
File schema for `dataset_type` `evaluation_set` is a CSV file or a JSONL file
where each line is one JSON object. The following keys are required:

- `prompt` (required, `string`): Prompt or question
- `prompt` (`string`): Prompt or question

- `reference` (required, `string`): Required reference or ground truth response
- `reference` (`string`): Reference or ground truth response

Args:
agent_id: Agent ID to associate with the evaluation dataset
@@ -103,7 +103,7 @@ def create(

dataset_type: Type of evaluation dataset which determines its schema and validation rules.

file: JSONL file containing the evaluation dataset
file: JSONL or CSV file containing the evaluation dataset

extra_headers: Send extra headers

@@ -228,12 +228,12 @@ def update(
Create a new version of the dataset by appending content to the `Dataset` and
validating against its schema.

File schema for `dataset_type` `evaluation_set` is a JSONL file where each line
is one JSON object with the following required keys:
File schema for `dataset_type` `evaluation_set` is a CSV file or a JSONL file
where each line is one JSON object. The following keys are required:

- `prompt` (`string`): Prompt or question

- `reference` (`string`): Required reference or ground truth response
- `reference` (`string`): Reference or ground truth response

Args:
agent_id: Agent ID associated with the evaluation dataset
@@ -243,7 +243,7 @@
dataset_type: Type of evaluation dataset which determines its schema and validation rules.
Must match the `dataset_type` used at dataset creation time.

file: JSONL file containing the entries to append to the evaluation dataset
file: JSONL or CSV file containing the entries to append to the evaluation dataset

extra_headers: Send extra headers

@@ -459,20 +459,20 @@ async def create(
) -> CreateDatasetResponse:
"""
Create a new evaluation `Dataset` for the specified `Agent` using the provided
JSONL file. A `Dataset` is a versioned collection of samples conforming to a
particular schema, and can be used to store `Evaluation` test-sets and retrieve
`Evaluation` results.
JSONL or CSV file. A `Dataset` is a versioned collection of samples conforming
to a particular schema, and can be used to store `Evaluation` test-sets and
retrieve `Evaluation` results.

Each `Dataset` is versioned and validated against its schema during creation and
subsequent updates. The provided `Dataset` file must conform to the schema
defined for the `dataset_type`.

File schema for `dataset_type` `evaluation_set` is a JSONL or CSV file where
each line is one JSON object with the following required keys:
File schema for `dataset_type` `evaluation_set` is a CSV file or a JSONL file
where each line is one JSON object. The following keys are required:

- `prompt` (required, `string`): Prompt or question
- `prompt` (`string`): Prompt or question

- `reference` (required, `string`): Required reference or ground truth response
- `reference` (`string`): Reference or ground truth response

Args:
agent_id: Agent ID to associate with the evaluation dataset
@@ -481,7 +481,7 @@

dataset_type: Type of evaluation dataset which determines its schema and validation rules.

file: JSONL file containing the evaluation dataset
file: JSONL or CSV file containing the evaluation dataset

extra_headers: Send extra headers

@@ -606,12 +606,12 @@ async def update(
Create a new version of the dataset by appending content to the `Dataset` and
validating against its schema.

File schema for `dataset_type` `evaluation_set` is a JSONL file where each line
is one JSON object with the following required keys:
File schema for `dataset_type` `evaluation_set` is a CSV file or a JSONL file
where each line is one JSON object. The following keys are required:

- `prompt` (`string`): Prompt or question

- `reference` (`string`): Required reference or ground truth response
- `reference` (`string`): Reference or ground truth response

Args:
agent_id: Agent ID associated with the evaluation dataset
@@ -621,7 +621,7 @@
dataset_type: Type of evaluation dataset which determines its schema and validation rules.
Must match the `dataset_type` used at dataset creation time.

file: JSONL file containing the entries to append to the evaluation dataset
file: JSONL or CSV file containing the entries to append to the evaluation dataset

extra_headers: Send extra headers

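A minimal sketch of the documented `evaluation_set` schema (one JSON object per line with the required `prompt` and `reference` keys) and an upload call. The accessor path (`client.agents.datasets.evaluate.create`) and the `dataset_name` keyword are assumptions inferred from this file's location in the package, not shown in the diff:

```python
# Hypothetical evaluation_set upload. Only `prompt` and `reference` are
# documented as required keys; the accessor path and keyword names below
# are assumptions based on the file layout.
import json

from contextual import ContextualAI

rows = [
    {
        "prompt": "What was last quarter's revenue?",
        "reference": "Q3 revenue was $1.2 million, up $0.1 million from Q2.",
    },
    {
        "prompt": "When did the office move?",
        "reference": "The office moved in May.",
    },
]

# Write one JSON object per line (JSONL), matching the documented schema.
with open("evalset.jsonl", "w") as f:
    for row in rows:
        f.write(json.dumps(row) + "\n")

client = ContextualAI(api_key="...")
dataset = client.agents.datasets.evaluate.create(
    agent_id="your-agent-id",        # placeholder
    dataset_name="revenue-evalset",  # assumed keyword
    dataset_type="evaluation_set",
    file=open("evalset.jsonl", "rb"),
)
```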
6 changes: 3 additions & 3 deletions src/contextual/resources/agents/evaluate/evaluate.py
@@ -2,7 +2,7 @@

from __future__ import annotations

from typing import List, Mapping, Optional, cast
from typing import List, Mapping, cast
from typing_extensions import Literal

import httpx
@@ -68,7 +68,7 @@ def create(
metrics: List[Literal["equivalence", "groundedness"]],
evalset_file: FileTypes | NotGiven = NOT_GIVEN,
evalset_name: str | NotGiven = NOT_GIVEN,
llm_model_id: Optional[str] | NotGiven = NOT_GIVEN,
llm_model_id: str | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -175,7 +175,7 @@ async def create(
metrics: List[Literal["equivalence", "groundedness"]],
evalset_file: FileTypes | NotGiven = NOT_GIVEN,
evalset_name: str | NotGiven = NOT_GIVEN,
llm_model_id: Optional[str] | NotGiven = NOT_GIVEN,
llm_model_id: str | NotGiven = NOT_GIVEN,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
24 changes: 12 additions & 12 deletions src/contextual/resources/agents/tune/tune.py
@@ -111,21 +111,21 @@ def create(
JSON object represents a single training example. The four required fields are
`guideline`, `prompt`, `reference`, and `knowledge`.

- `knowledge` (`list[str]`): Knowledge or retrievals used to generate the
reference response, as a list of string text chunks
- `knowledge` (`list[str]`): Retrieved knowledge used to generate the reference
answer. `knowledge` is a list of retrieved text chunks.

- `reference` field should be the model's response to the prompt.
- `reference` (`str`): The gold-standard answer to the prompt.

- `guideline` (`str): Guidelines or criteria for model output
- `guideline` (`str`): Guidelines for model output.

- `prompt` (required, `string`): Prompt or question model should respond to.
- `prompt` (`str`): Question for the model to respond to.

Example:

```json
[
{
"guideline": "The response should be accurate.",
"guideline": "The answer should be accurate.",
"prompt": "What was last quarter's revenue?",
"reference": "According to recent reports, the Q3 revenue was $1.2 million, a 0.1 million increase from Q2.",
"knowledge": [
@@ -244,21 +244,21 @@ async def create(
JSON object represents a single training example. The four required fields are
`guideline`, `prompt`, `reference`, and `knowledge`.

- `knowledge` (`list[str]`): Knowledge or retrievals used to generate the
reference response, as a list of string text chunks
- `knowledge` (`list[str]`): Retrieved knowledge used to generate the reference
answer. `knowledge` is a list of retrieved text chunks.

- `reference` field should be the model's response to the prompt.
- `reference` (`str`): The gold-standard answer to the prompt.

- `guideline` (`str): Guidelines or criteria for model output
- `guideline` (`str`): Guidelines for model output.

- `prompt` (required, `string`): Prompt or question model should respond to.
- `prompt` (`str`): Question for the model to respond to.

Example:

```json
[
{
"guideline": "The response should be accurate.",
"guideline": "The answer should be accurate.",
"prompt": "What was last quarter's revenue?",
"reference": "According to recent reports, the Q3 revenue was $1.2 million, a 0.1 million increase from Q2.",
"knowledge": [