
Commit 78de720

feat(api): update via SDK Studio (#31)
1 parent a3ed45e · commit 78de720

22 files changed: +928, -54 lines

.stats.yml

Lines changed: 2 additions & 2 deletions

```diff
@@ -1,2 +1,2 @@
-configured_endpoints: 33
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/contextual-ai%2Fsunrise-4ed32c3243ce7a772e55bb1ba204736fc3fb1d712d8ca0eb91bac0c7ac626938.yml
+configured_endpoints: 35
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/contextual-ai%2Fsunrise-c0b9cfd71efb976777313fb342d2e31ebeb44b1b3f9bb7ddea971e6b2bc5fd19.yml
```

api.md

Lines changed: 24 additions & 0 deletions

````diff
@@ -186,3 +186,27 @@ from contextual.types import LMUnitCreateResponse
 Methods:
 
 - <code title="post /lmunit">client.lmunit.<a href="./src/contextual/resources/lmunit.py">create</a>(\*\*<a href="src/contextual/types/lmunit_create_params.py">params</a>) -> <a href="./src/contextual/types/lmunit_create_response.py">LMUnitCreateResponse</a></code>
+
+# Rerank
+
+Types:
+
+```python
+from contextual.types import RerankCreateResponse
+```
+
+Methods:
+
+- <code title="post /rerank">client.rerank.<a href="./src/contextual/resources/rerank.py">create</a>(\*\*<a href="src/contextual/types/rerank_create_params.py">params</a>) -> <a href="./src/contextual/types/rerank_create_response.py">RerankCreateResponse</a></code>
+
+# Generate
+
+Types:
+
+```python
+from contextual.types import GenerateCreateResponse
+```
+
+Methods:
+
+- <code title="post /generate">client.generate.<a href="./src/contextual/resources/generate.py">create</a>(\*\*<a href="src/contextual/types/generate_create_params.py">params</a>) -> <a href="./src/contextual/types/generate_create_response.py">GenerateCreateResponse</a></code>
````

src/contextual/_client.py

Lines changed: 17 additions & 1 deletion

```diff
@@ -24,7 +24,7 @@
     get_async_library,
 )
 from ._version import __version__
-from .resources import lmunit
+from .resources import lmunit, rerank, generate
 from ._streaming import Stream as Stream, AsyncStream as AsyncStream
 from ._exceptions import APIStatusError, ContextualAIError
 from ._base_client import (
@@ -51,6 +51,8 @@ class ContextualAI(SyncAPIClient):
     datastores: datastores.DatastoresResource
     agents: agents.AgentsResource
     lmunit: lmunit.LMUnitResource
+    rerank: rerank.RerankResource
+    generate: generate.GenerateResource
     with_raw_response: ContextualAIWithRawResponse
     with_streaming_response: ContextualAIWithStreamedResponse
 
@@ -111,6 +113,8 @@ def __init__(
         self.datastores = datastores.DatastoresResource(self)
         self.agents = agents.AgentsResource(self)
         self.lmunit = lmunit.LMUnitResource(self)
+        self.rerank = rerank.RerankResource(self)
+        self.generate = generate.GenerateResource(self)
         self.with_raw_response = ContextualAIWithRawResponse(self)
         self.with_streaming_response = ContextualAIWithStreamedResponse(self)
 
@@ -223,6 +227,8 @@ class AsyncContextualAI(AsyncAPIClient):
     datastores: datastores.AsyncDatastoresResource
     agents: agents.AsyncAgentsResource
     lmunit: lmunit.AsyncLMUnitResource
+    rerank: rerank.AsyncRerankResource
+    generate: generate.AsyncGenerateResource
     with_raw_response: AsyncContextualAIWithRawResponse
     with_streaming_response: AsyncContextualAIWithStreamedResponse
 
@@ -283,6 +289,8 @@ def __init__(
         self.datastores = datastores.AsyncDatastoresResource(self)
         self.agents = agents.AsyncAgentsResource(self)
         self.lmunit = lmunit.AsyncLMUnitResource(self)
+        self.rerank = rerank.AsyncRerankResource(self)
+        self.generate = generate.AsyncGenerateResource(self)
         self.with_raw_response = AsyncContextualAIWithRawResponse(self)
         self.with_streaming_response = AsyncContextualAIWithStreamedResponse(self)
 
@@ -396,27 +404,35 @@ def __init__(self, client: ContextualAI) -> None:
         self.datastores = datastores.DatastoresResourceWithRawResponse(client.datastores)
         self.agents = agents.AgentsResourceWithRawResponse(client.agents)
         self.lmunit = lmunit.LMUnitResourceWithRawResponse(client.lmunit)
+        self.rerank = rerank.RerankResourceWithRawResponse(client.rerank)
+        self.generate = generate.GenerateResourceWithRawResponse(client.generate)
 
 
 class AsyncContextualAIWithRawResponse:
     def __init__(self, client: AsyncContextualAI) -> None:
         self.datastores = datastores.AsyncDatastoresResourceWithRawResponse(client.datastores)
         self.agents = agents.AsyncAgentsResourceWithRawResponse(client.agents)
         self.lmunit = lmunit.AsyncLMUnitResourceWithRawResponse(client.lmunit)
+        self.rerank = rerank.AsyncRerankResourceWithRawResponse(client.rerank)
+        self.generate = generate.AsyncGenerateResourceWithRawResponse(client.generate)
 
 
 class ContextualAIWithStreamedResponse:
     def __init__(self, client: ContextualAI) -> None:
         self.datastores = datastores.DatastoresResourceWithStreamingResponse(client.datastores)
         self.agents = agents.AgentsResourceWithStreamingResponse(client.agents)
         self.lmunit = lmunit.LMUnitResourceWithStreamingResponse(client.lmunit)
+        self.rerank = rerank.RerankResourceWithStreamingResponse(client.rerank)
+        self.generate = generate.GenerateResourceWithStreamingResponse(client.generate)
 
 
 class AsyncContextualAIWithStreamedResponse:
     def __init__(self, client: AsyncContextualAI) -> None:
         self.datastores = datastores.AsyncDatastoresResourceWithStreamingResponse(client.datastores)
         self.agents = agents.AsyncAgentsResourceWithStreamingResponse(client.agents)
         self.lmunit = lmunit.AsyncLMUnitResourceWithStreamingResponse(client.lmunit)
+        self.rerank = rerank.AsyncRerankResourceWithStreamingResponse(client.rerank)
+        self.generate = generate.AsyncGenerateResourceWithStreamingResponse(client.generate)
 
 
 Client = ContextualAI
```
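
The wrapper classes above follow a common generated-SDK pattern: every resource is re-exposed behind `.with_raw_response` and `.with_streaming_response` accessors. A sketch of what that wiring enables, reusing the assumed `rerank` parameters from the earlier example:

```python
# Sketch only: the attribute path follows the wiring in _client.py above,
# while the request parameters remain illustrative assumptions.
from contextual import ContextualAI

client = ContextualAI()

# Same endpoint, but via the raw-response wrapper so HTTP headers and the
# status code can be inspected before the body is parsed.
raw = client.with_raw_response.rerank.create(
    query="What was last quarter's revenue?",  # assumed field
    documents=["chunk one", "chunk two"],      # assumed field
)
reranked = raw.parse()  # .parse() is the usual Stainless raw-response API; verify against this SDK
```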

src/contextual/resources/__init__.py

Lines changed: 28 additions & 0 deletions

```diff
@@ -16,6 +16,22 @@
     LMUnitResourceWithStreamingResponse,
     AsyncLMUnitResourceWithStreamingResponse,
 )
+from .rerank import (
+    RerankResource,
+    AsyncRerankResource,
+    RerankResourceWithRawResponse,
+    AsyncRerankResourceWithRawResponse,
+    RerankResourceWithStreamingResponse,
+    AsyncRerankResourceWithStreamingResponse,
+)
+from .generate import (
+    GenerateResource,
+    AsyncGenerateResource,
+    GenerateResourceWithRawResponse,
+    AsyncGenerateResourceWithRawResponse,
+    GenerateResourceWithStreamingResponse,
+    AsyncGenerateResourceWithStreamingResponse,
+)
 from .datastores import (
     DatastoresResource,
     AsyncDatastoresResource,
@@ -44,4 +60,16 @@
     "AsyncLMUnitResourceWithRawResponse",
     "LMUnitResourceWithStreamingResponse",
     "AsyncLMUnitResourceWithStreamingResponse",
+    "RerankResource",
+    "AsyncRerankResource",
+    "RerankResourceWithRawResponse",
+    "AsyncRerankResourceWithRawResponse",
+    "RerankResourceWithStreamingResponse",
+    "AsyncRerankResourceWithStreamingResponse",
+    "GenerateResource",
+    "AsyncGenerateResource",
+    "GenerateResourceWithRawResponse",
+    "AsyncGenerateResourceWithRawResponse",
+    "GenerateResourceWithStreamingResponse",
+    "AsyncGenerateResourceWithStreamingResponse",
 ]
```

src/contextual/resources/agents/datasets/evaluate.py

Lines changed: 24 additions & 24 deletions

```diff
@@ -81,20 +81,20 @@ def create(
     ) -> CreateDatasetResponse:
         """
         Create a new evaluation `Dataset` for the specified `Agent` using the provided
-        JSONL file. A `Dataset` is a versioned collection of samples conforming to a
-        particular schema, and can be used to store `Evaluation` test-sets and retrieve
-        `Evaluation` results.
+        JSONL or CSV file. A `Dataset` is a versioned collection of samples conforming
+        to a particular schema, and can be used to store `Evaluation` test-sets and
+        retrieve `Evaluation` results.
 
         Each `Dataset` is versioned and validated against its schema during creation and
         subsequent updates. The provided `Dataset` file must conform to the schema
         defined for the `dataset_type`.
 
-        File schema for `dataset_type` `evaluation_set` is a JSONL or CSV file where
-        each line is one JSON object with the following required keys:
+        File schema for `dataset_type` `evaluation_set` is a CSV file or a JSONL file
+        where each line is one JSON object. The following keys are required:
 
-        - `prompt` (required, `string`): Prompt or question
+        - `prompt` (`string`): Prompt or question
 
-        - `reference` (required, `string`): Required reference or ground truth response
+        - `reference` (`string`): Reference or ground truth response
 
         Args:
           agent_id: Agent ID to associate with the evaluation dataset
@@ -103,7 +103,7 @@ def create(
 
           dataset_type: Type of evaluation dataset which determines its schema and validation rules.
 
-          file: JSONL file containing the evaluation dataset
+          file: JSONL or CSV file containing the evaluation dataset
 
           extra_headers: Send extra headers
 
@@ -228,12 +228,12 @@ def update(
         Create a new version of the dataset by appending content to the `Dataset` and
         validating against its schema.
 
-        File schema for `dataset_type` `evaluation_set` is a JSONL file where each line
-        is one JSON object with the following required keys:
+        File schema for `dataset_type` `evaluation_set` is a CSV file or a JSONL file
+        where each line is one JSON object. The following keys are required:
 
         - `prompt` (`string`): Prompt or question
 
-        - `reference` (`string`): Required reference or ground truth response
+        - `reference` (`string`): Reference or ground truth response
 
         Args:
           agent_id: Agent ID associated with the evaluation dataset
@@ -243,7 +243,7 @@
           dataset_type: Type of evaluation dataset which determines its schema and validation rules.
               Must match the `dataset_type` used at dataset creation time.
 
-          file: JSONL file containing the entries to append to the evaluation dataset
+          file: JSONL or CSV file containing the entries to append to the evaluation dataset
 
           extra_headers: Send extra headers
 
@@ -459,20 +459,20 @@ async def create(
     ) -> CreateDatasetResponse:
         """
         Create a new evaluation `Dataset` for the specified `Agent` using the provided
-        JSONL file. A `Dataset` is a versioned collection of samples conforming to a
-        particular schema, and can be used to store `Evaluation` test-sets and retrieve
-        `Evaluation` results.
+        JSONL or CSV file. A `Dataset` is a versioned collection of samples conforming
+        to a particular schema, and can be used to store `Evaluation` test-sets and
+        retrieve `Evaluation` results.
 
         Each `Dataset` is versioned and validated against its schema during creation and
         subsequent updates. The provided `Dataset` file must conform to the schema
         defined for the `dataset_type`.
 
-        File schema for `dataset_type` `evaluation_set` is a JSONL or CSV file where
-        each line is one JSON object with the following required keys:
+        File schema for `dataset_type` `evaluation_set` is a CSV file or a JSONL file
+        where each line is one JSON object. The following keys are required:
 
-        - `prompt` (required, `string`): Prompt or question
+        - `prompt` (`string`): Prompt or question
 
-        - `reference` (required, `string`): Required reference or ground truth response
+        - `reference` (`string`): Reference or ground truth response
 
         Args:
           agent_id: Agent ID to associate with the evaluation dataset
@@ -481,7 +481,7 @@ async def create(
 
           dataset_type: Type of evaluation dataset which determines its schema and validation rules.
 
-          file: JSONL file containing the evaluation dataset
+          file: JSONL or CSV file containing the evaluation dataset
 
           extra_headers: Send extra headers
 
@@ -606,12 +606,12 @@ async def update(
         Create a new version of the dataset by appending content to the `Dataset` and
         validating against its schema.
 
-        File schema for `dataset_type` `evaluation_set` is a JSONL file where each line
-        is one JSON object with the following required keys:
+        File schema for `dataset_type` `evaluation_set` is a CSV file or a JSONL file
+        where each line is one JSON object. The following keys are required:
 
         - `prompt` (`string`): Prompt or question
 
-        - `reference` (`string`): Required reference or ground truth response
+        - `reference` (`string`): Reference or ground truth response
 
         Args:
           agent_id: Agent ID associated with the evaluation dataset
@@ -621,7 +621,7 @@
           dataset_type: Type of evaluation dataset which determines its schema and validation rules.
               Must match the `dataset_type` used at dataset creation time.
 
-          file: JSONL file containing the entries to append to the evaluation dataset
+          file: JSONL or CSV file containing the entries to append to the evaluation dataset
 
           extra_headers: Send extra headers
 
```
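
Since the docstrings now accept both formats, here is a sketch of producing a conforming JSONL evaluation set and uploading it. Only `agent_id`, `dataset_type`, and `file` appear in the docstring excerpts above, so any other required parameters (for example, a dataset name) are omitted here; check the full method signature before relying on this.

```python
# Sketch: write an `evaluation_set` file with the two required keys and
# upload it. "agent_123" is a placeholder ID; the attribute path follows
# the resource layout in agents/datasets/evaluate.py.
import json

from contextual import ContextualAI

client = ContextualAI()

rows = [
    {"prompt": "What was last quarter's revenue?",
     "reference": "Q3 revenue was $1.2 million."},
]
with open("evalset.jsonl", "w") as f:
    for row in rows:
        f.write(json.dumps(row) + "\n")  # one JSON object per line

with open("evalset.jsonl", "rb") as f:
    dataset = client.agents.datasets.evaluate.create(
        agent_id="agent_123",           # placeholder
        dataset_type="evaluation_set",
        file=f,                         # JSONL or CSV, per the updated docstring
    )
```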

src/contextual/resources/agents/evaluate/evaluate.py

Lines changed: 3 additions & 3 deletions

```diff
@@ -2,7 +2,7 @@
 
 from __future__ import annotations
 
-from typing import List, Mapping, Optional, cast
+from typing import List, Mapping, cast
 from typing_extensions import Literal
 
 import httpx
@@ -68,7 +68,7 @@ def create(
         metrics: List[Literal["equivalence", "groundedness"]],
         evalset_file: FileTypes | NotGiven = NOT_GIVEN,
         evalset_name: str | NotGiven = NOT_GIVEN,
-        llm_model_id: Optional[str] | NotGiven = NOT_GIVEN,
+        llm_model_id: str | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -175,7 +175,7 @@ async def create(
         metrics: List[Literal["equivalence", "groundedness"]],
         evalset_file: FileTypes | NotGiven = NOT_GIVEN,
         evalset_name: str | NotGiven = NOT_GIVEN,
-        llm_model_id: Optional[str] | NotGiven = NOT_GIVEN,
+        llm_model_id: str | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
```
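
The `Optional[str]` to `str` change narrows what callers may pass: `None` is no longer a valid value for `llm_model_id`, while omission is still expressed through the `NOT_GIVEN` sentinel. A sketch of the distinction, under the assumption that `agent_id` is the method's first parameter (it is not visible in this hunk):

```python
# Sketch of the NOT_GIVEN sentinel semantics the type change relies on.
# An omitted llm_model_id stays NOT_GIVEN, which generated clients
# typically drop from the serialized request entirely; that is distinct
# from an explicit JSON null, which `None` would have expressed.
evaluation = client.agents.evaluate.create(
    agent_id="agent_123",                      # placeholder ID; position assumed
    metrics=["equivalence", "groundedness"],
    # llm_model_id omitted -> NOT_GIVEN -> field not sent on the wire
)

evaluation = client.agents.evaluate.create(
    agent_id="agent_123",
    metrics=["equivalence", "groundedness"],
    llm_model_id="my-tuned-model",             # after this change: str only, never None
)
```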

src/contextual/resources/agents/tune/tune.py

Lines changed: 12 additions & 12 deletions

````diff
@@ -111,21 +111,21 @@ def create(
         JSON object represents a single training example. The four required fields are
         `guideline`, `prompt`, `reference`, and `knowledge`.
 
-        - `knowledge` (`list[str]`): Knowledge or retrievals used to generate the
-          reference response, as a list of string text chunks
+        - `knowledge` (`list[str]`): Retrieved knowledge used to generate the reference
+          answer. `knowledge` is a list of retrieved text chunks.
 
-        - `reference` field should be the model's response to the prompt.
+        - `reference` (`str`): The gold-standard answer to the prompt.
 
-        - `guideline` (`str): Guidelines or criteria for model output
+        - `guideline` (`str`): Guidelines for model output.
 
-        - `prompt` (required, `string`): Prompt or question model should respond to.
+        - `prompt` (`str`): Question for the model to respond to.
 
         Example:
 
         ```json
         [
           {
-            "guideline": "The response should be accurate.",
+            "guideline": "The answer should be accurate.",
             "prompt": "What was last quarter's revenue?",
             "reference": "According to recent reports, the Q3 revenue was $1.2 million, a 0.1 million increase from Q2.",
             "knowledge": [
@@ -244,21 +244,21 @@ async def create(
         JSON object represents a single training example. The four required fields are
         `guideline`, `prompt`, `reference`, and `knowledge`.
 
-        - `knowledge` (`list[str]`): Knowledge or retrievals used to generate the
-          reference response, as a list of string text chunks
+        - `knowledge` (`list[str]`): Retrieved knowledge used to generate the reference
+          answer. `knowledge` is a list of retrieved text chunks.
 
-        - `reference` field should be the model's response to the prompt.
+        - `reference` (`str`): The gold-standard answer to the prompt.
 
-        - `guideline` (`str): Guidelines or criteria for model output
+        - `guideline` (`str`): Guidelines for model output.
 
-        - `prompt` (required, `string`): Prompt or question model should respond to.
+        - `prompt` (`str`): Question for the model to respond to.
 
         Example:
 
         ```json
         [
           {
-            "guideline": "The response should be accurate.",
+            "guideline": "The answer should be accurate.",
             "prompt": "What was last quarter's revenue?",
             "reference": "According to recent reports, the Q3 revenue was $1.2 million, a 0.1 million increase from Q2.",
             "knowledge": [
````
