Skip to content

Commit b8bcee3

Browse files
stainless-app[bot] authored and stainless-bot committed
feat(api): OpenAPI spec update via Stainless API (#261)
1 parent 006edb5 commit b8bcee3

File tree

19 files changed

+116
-109
lines changed

19 files changed

+116
-109
lines changed

.github/workflows/ci.yml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@ on:
66
pull_request:
77
branches:
88
- main
9+
- next
910

1011
jobs:
1112
lint:

README.md

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,7 @@ It is generated with [Stainless](https://www.stainlessapi.com/).
1010

1111
## Documentation
1212

13-
The REST API documentation can be found [on openlayer.com](https://openlayer.com/docs/api-reference/rest/overview). The full API of this library can be found in [api.md](api.md).
13+
The REST API documentation can be found on [openlayer.com](https://openlayer.com/docs/api-reference/rest/overview). The full API of this library can be found in [api.md](api.md).
1414

1515
## Installation
1616

@@ -33,7 +33,7 @@ client = Openlayer(
3333
)
3434

3535
data_stream_response = client.inference_pipelines.data.stream(
36-
"182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
36+
inference_pipeline_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
3737
config={
3838
"input_variable_names": ["user_query"],
3939
"output_column_name": "output",
@@ -76,7 +76,7 @@ client = AsyncOpenlayer(
7676

7777
async def main() -> None:
7878
data_stream_response = await client.inference_pipelines.data.stream(
79-
"182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
79+
inference_pipeline_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
8080
config={
8181
"input_variable_names": ["user_query"],
8282
"output_column_name": "output",
@@ -128,7 +128,7 @@ client = Openlayer()
128128

129129
try:
130130
client.inference_pipelines.data.stream(
131-
"182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
131+
inference_pipeline_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
132132
config={
133133
"input_variable_names": ["user_query"],
134134
"output_column_name": "output",
@@ -189,7 +189,7 @@ client = Openlayer(
189189

190190
# Or, configure per-request:
191191
client.with_options(max_retries=5).inference_pipelines.data.stream(
192-
"182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
192+
inference_pipeline_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
193193
config={
194194
"input_variable_names": ["user_query"],
195195
"output_column_name": "output",
@@ -230,7 +230,7 @@ client = Openlayer(
230230

231231
# Override per-request:
232232
client.with_options(timeout=5.0).inference_pipelines.data.stream(
233-
"182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
233+
inference_pipeline_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
234234
config={
235235
"input_variable_names": ["user_query"],
236236
"output_column_name": "output",
@@ -287,7 +287,7 @@ from openlayer import Openlayer
287287

288288
client = Openlayer()
289289
response = client.inference_pipelines.data.with_raw_response.stream(
290-
"182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
290+
inference_pipeline_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
291291
config={
292292
"input_variable_names": ["user_query"],
293293
"output_column_name": "output",
@@ -321,7 +321,7 @@ To stream the response body, use `.with_streaming_response` instead, which requi
321321

322322
```python
323323
with client.inference_pipelines.data.with_streaming_response.stream(
324-
"182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
324+
inference_pipeline_id="182bd5e5-6e1a-4fe4-a799-aa6d9a6ab26e",
325325
config={
326326
"input_variable_names": ["user_query"],
327327
"output_column_name": "output",

requirements-dev.lock

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -49,7 +49,7 @@ markdown-it-py==3.0.0
4949
# via rich
5050
mdurl==0.1.2
5151
# via markdown-it-py
52-
mypy==1.7.1
52+
mypy==1.10.1
5353
mypy-extensions==1.0.0
5454
# via mypy
5555
nodeenv==1.8.0

src/openlayer/_base_client.py

Lines changed: 22 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -879,9 +879,9 @@ def __exit__(
879879
def _prepare_options(
880880
self,
881881
options: FinalRequestOptions, # noqa: ARG002
882-
) -> None:
882+
) -> FinalRequestOptions:
883883
"""Hook for mutating the given options"""
884-
return None
884+
return options
885885

886886
def _prepare_request(
887887
self,
@@ -955,8 +955,13 @@ def _request(
955955
stream: bool,
956956
stream_cls: type[_StreamT] | None,
957957
) -> ResponseT | _StreamT:
958+
# create a copy of the options we were given so that if the
959+
# options are mutated later & we then retry, the retries are
960+
# given the original options
961+
input_options = model_copy(options)
962+
958963
cast_to = self._maybe_override_cast_to(cast_to, options)
959-
self._prepare_options(options)
964+
options = self._prepare_options(options)
960965

961966
retries = self._remaining_retries(remaining_retries, options)
962967
request = self._build_request(options)
@@ -979,7 +984,7 @@ def _request(
979984

980985
if retries > 0:
981986
return self._retry_request(
982-
options,
987+
input_options,
983988
cast_to,
984989
retries,
985990
stream=stream,
@@ -994,7 +999,7 @@ def _request(
994999

9951000
if retries > 0:
9961001
return self._retry_request(
997-
options,
1002+
input_options,
9981003
cast_to,
9991004
retries,
10001005
stream=stream,
@@ -1022,7 +1027,7 @@ def _request(
10221027
if retries > 0 and self._should_retry(err.response):
10231028
err.response.close()
10241029
return self._retry_request(
1025-
options,
1030+
input_options,
10261031
cast_to,
10271032
retries,
10281033
err.response.headers,
@@ -1437,9 +1442,9 @@ async def __aexit__(
14371442
async def _prepare_options(
14381443
self,
14391444
options: FinalRequestOptions, # noqa: ARG002
1440-
) -> None:
1445+
) -> FinalRequestOptions:
14411446
"""Hook for mutating the given options"""
1442-
return None
1447+
return options
14431448

14441449
async def _prepare_request(
14451450
self,
@@ -1518,8 +1523,13 @@ async def _request(
15181523
# execute it earlier while we are in an async context
15191524
self._platform = await asyncify(get_platform)()
15201525

1526+
# create a copy of the options we were given so that if the
1527+
# options are mutated later & we then retry, the retries are
1528+
# given the original options
1529+
input_options = model_copy(options)
1530+
15211531
cast_to = self._maybe_override_cast_to(cast_to, options)
1522-
await self._prepare_options(options)
1532+
options = await self._prepare_options(options)
15231533

15241534
retries = self._remaining_retries(remaining_retries, options)
15251535
request = self._build_request(options)
@@ -1540,7 +1550,7 @@ async def _request(
15401550

15411551
if retries > 0:
15421552
return await self._retry_request(
1543-
options,
1553+
input_options,
15441554
cast_to,
15451555
retries,
15461556
stream=stream,
@@ -1555,7 +1565,7 @@ async def _request(
15551565

15561566
if retries > 0:
15571567
return await self._retry_request(
1558-
options,
1568+
input_options,
15591569
cast_to,
15601570
retries,
15611571
stream=stream,
@@ -1578,7 +1588,7 @@ async def _request(
15781588
if retries > 0 and self._should_retry(err.response):
15791589
await err.response.aclose()
15801590
return await self._retry_request(
1581-
options,
1591+
input_options,
15821592
cast_to,
15831593
retries,
15841594
err.response.headers,

src/openlayer/_compat.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -118,10 +118,10 @@ def get_model_fields(model: type[pydantic.BaseModel]) -> dict[str, FieldInfo]:
118118
return model.__fields__ # type: ignore
119119

120120

121-
def model_copy(model: _ModelT) -> _ModelT:
121+
def model_copy(model: _ModelT, *, deep: bool = False) -> _ModelT:
122122
if PYDANTIC_V2:
123-
return model.model_copy()
124-
return model.copy() # type: ignore
123+
return model.model_copy(deep=deep)
124+
return model.copy(deep=deep) # type: ignore
125125

126126

127127
def model_json(model: pydantic.BaseModel, *, indent: int | None = None) -> str:

src/openlayer/_models.py

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -643,6 +643,14 @@ def validate_type(*, type_: type[_T], value: object) -> _T:
643643
return cast(_T, _validate_non_model_type(type_=type_, value=value))
644644

645645

646+
def set_pydantic_config(typ: Any, config: pydantic.ConfigDict) -> None:
647+
"""Add a pydantic config for the given type.
648+
649+
Note: this is a no-op on Pydantic v1.
650+
"""
651+
setattr(typ, "__pydantic_config__", config) # noqa: B010
652+
653+
646654
# our use of subclasssing here causes weirdness for type checkers,
647655
# so we just pretend that we don't subclass
648656
if TYPE_CHECKING:

src/openlayer/resources/commits/test_results.py

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -19,9 +19,7 @@
1919
async_to_raw_response_wrapper,
2020
async_to_streamed_response_wrapper,
2121
)
22-
from ..._base_client import (
23-
make_request_options,
24-
)
22+
from ..._base_client import make_request_options
2523
from ...types.commits import test_result_list_params
2624
from ...types.commits.test_result_list_response import TestResultListResponse
2725

src/openlayer/resources/inference_pipelines/data.py

Lines changed: 5 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -19,9 +19,7 @@
1919
async_to_raw_response_wrapper,
2020
async_to_streamed_response_wrapper,
2121
)
22-
from ..._base_client import (
23-
make_request_options,
24-
)
22+
from ..._base_client import make_request_options
2523
from ...types.inference_pipelines import data_stream_params
2624
from ...types.inference_pipelines.data_stream_response import DataStreamResponse
2725

@@ -51,13 +49,13 @@ def stream(
5149
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
5250
) -> DataStreamResponse:
5351
"""
54-
Stream production data to an inference pipeline.
52+
Create an inference data point in an inference pipeline.
5553
5654
Args:
5755
config: Configuration for the data stream. Depends on your **Openlayer project task
5856
type**.
5957
60-
rows: A list of entries that represent rows of a csv file
58+
rows: A list of inference data points with inputs and outputs
6159
6260
extra_headers: Send extra headers
6361
@@ -110,13 +108,13 @@ async def stream(
110108
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
111109
) -> DataStreamResponse:
112110
"""
113-
Stream production data to an inference pipeline.
111+
Create an inference data point in an inference pipeline.
114112
115113
Args:
116114
config: Configuration for the data stream. Depends on your **Openlayer project task
117115
type**.
118116
119-
rows: A list of entries that represent rows of a csv file
117+
rows: A list of inference data points with inputs and outputs
120118
121119
extra_headers: Send extra headers
122120

src/openlayer/resources/inference_pipelines/test_results.py

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -19,9 +19,7 @@
1919
async_to_raw_response_wrapper,
2020
async_to_streamed_response_wrapper,
2121
)
22-
from ..._base_client import (
23-
make_request_options,
24-
)
22+
from ..._base_client import make_request_options
2523
from ...types.inference_pipelines import test_result_list_params
2624
from ...types.inference_pipelines.test_result_list_response import TestResultListResponse
2725

src/openlayer/resources/projects/commits.py

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -17,9 +17,7 @@
1717
async_to_raw_response_wrapper,
1818
async_to_streamed_response_wrapper,
1919
)
20-
from ..._base_client import (
21-
make_request_options,
22-
)
20+
from ..._base_client import make_request_options
2321
from ...types.projects import commit_list_params
2422
from ...types.projects.commit_list_response import CommitListResponse
2523

src/openlayer/resources/projects/inference_pipelines.py

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -19,9 +19,7 @@
1919
async_to_raw_response_wrapper,
2020
async_to_streamed_response_wrapper,
2121
)
22-
from ..._base_client import (
23-
make_request_options,
24-
)
22+
from ..._base_client import make_request_options
2523
from ...types.projects import inference_pipeline_list_params, inference_pipeline_create_params
2624
from ...types.projects.inference_pipeline_list_response import InferencePipelineListResponse
2725
from ...types.projects.inference_pipeline_create_response import InferencePipelineCreateResponse

src/openlayer/resources/projects/projects.py

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -29,9 +29,7 @@
2929
async_to_raw_response_wrapper,
3030
async_to_streamed_response_wrapper,
3131
)
32-
from ..._base_client import (
33-
make_request_options,
34-
)
32+
from ..._base_client import make_request_options
3533
from .inference_pipelines import (
3634
InferencePipelinesResource,
3735
AsyncInferencePipelinesResource,

src/openlayer/types/inference_pipelines/data_stream_params.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -26,7 +26,7 @@ class DataStreamParams(TypedDict, total=False):
2626
"""
2727

2828
rows: Required[Iterable[Dict[str, object]]]
29-
"""A list of entries that represent rows of a csv file"""
29+
"""A list of inference data points with inputs and outputs"""
3030

3131

3232
class ConfigLlmDataPrompt(TypedDict, total=False):

0 commit comments

Comments (0)