Skip to content

Commit b205e14

Browse files
stainless-app[bot]stainless-bot
authored and committed
feat(api): update via SDK Studio (#252)
1 parent 89330f7 commit b205e14

File tree

12 files changed

+738
-7
lines changed

12 files changed

+738
-7
lines changed

.stats.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1 +1 @@
1-
configured_endpoints: 6
1+
configured_endpoints: 8

api.md

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -3,11 +3,12 @@
33
Types:
44

55
```python
6-
from openlayer.types import ProjectListResponse
6+
from openlayer.types import ProjectCreateResponse, ProjectListResponse
77
```
88

99
Methods:
1010

11+
- <code title="post /projects">client.projects.<a href="./src/openlayer/resources/projects/projects.py">create</a>(\*\*<a href="src/openlayer/types/project_create_params.py">params</a>) -> <a href="./src/openlayer/types/project_create_response.py">ProjectCreateResponse</a></code>
1112
- <code title="get /projects">client.projects.<a href="./src/openlayer/resources/projects/projects.py">list</a>(\*\*<a href="src/openlayer/types/project_list_params.py">params</a>) -> <a href="./src/openlayer/types/project_list_response.py">ProjectListResponse</a></code>
1213

1314
## Commits
@@ -27,11 +28,12 @@ Methods:
2728
Types:
2829

2930
```python
30-
from openlayer.types.projects import InferencePipelineListResponse
31+
from openlayer.types.projects import InferencePipelineCreateResponse, InferencePipelineListResponse
3132
```
3233

3334
Methods:
3435

36+
- <code title="post /projects/{id}/inference-pipelines">client.projects.inference_pipelines.<a href="./src/openlayer/resources/projects/inference_pipelines.py">create</a>(id, \*\*<a href="src/openlayer/types/projects/inference_pipeline_create_params.py">params</a>) -> <a href="./src/openlayer/types/projects/inference_pipeline_create_response.py">InferencePipelineCreateResponse</a></code>
3537
- <code title="get /projects/{id}/inference-pipelines">client.projects.inference_pipelines.<a href="./src/openlayer/resources/projects/inference_pipelines.py">list</a>(id, \*\*<a href="src/openlayer/types/projects/inference_pipeline_list_params.py">params</a>) -> <a href="./src/openlayer/types/projects/inference_pipeline_list_response.py">InferencePipelineListResponse</a></code>
3638

3739
# Commits

src/openlayer/resources/projects/inference_pipelines.py

Lines changed: 125 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,9 @@
22

33
from __future__ import annotations
44

5+
from typing import Optional
6+
from typing_extensions import Literal
7+
58
import httpx
69

710
from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
@@ -20,8 +23,9 @@
2023
from ..._base_client import (
2124
make_request_options,
2225
)
23-
from ...types.projects import inference_pipeline_list_params
26+
from ...types.projects import inference_pipeline_list_params, inference_pipeline_create_params
2427
from ...types.projects.inference_pipeline_list_response import InferencePipelineListResponse
28+
from ...types.projects.inference_pipeline_create_response import InferencePipelineCreateResponse
2529

2630
__all__ = ["InferencePipelinesResource", "AsyncInferencePipelinesResource"]
2731

@@ -35,6 +39,60 @@ def with_raw_response(self) -> InferencePipelinesResourceWithRawResponse:
3539
def with_streaming_response(self) -> InferencePipelinesResourceWithStreamingResponse:
3640
return InferencePipelinesResourceWithStreamingResponse(self)
3741

42+
def create(
    self,
    id: str,
    *,
    description: Optional[str],
    name: str,
    reference_dataset_uri: Optional[str] | NotGiven = NOT_GIVEN,
    storage_type: Literal["local", "s3", "gcs", "azure"] | NotGiven = NOT_GIVEN,
    # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
    # The extra values given here take precedence over values defined on the client or passed to this method.
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> InferencePipelineCreateResponse:
    """Create an inference pipeline under a project.

    Args:
      description: The inference pipeline description.

      name: The inference pipeline name.

      reference_dataset_uri: The reference dataset URI.

      storage_type: The storage type.

      extra_headers: Send extra headers

      extra_query: Add additional query parameters to the request

      extra_body: Add additional JSON properties to the request

      timeout: Override the client-level default timeout for this request, in seconds
    """
    # A blank project id would produce a malformed URL; fail fast instead.
    if not id:
        raise ValueError(f"Expected a non-empty value for `id` but received {id!r}")
    # Serialize the typed parameters into the wire-format request body.
    request_body = maybe_transform(
        {
            "description": description,
            "name": name,
            "reference_dataset_uri": reference_dataset_uri,
            "storage_type": storage_type,
        },
        inference_pipeline_create_params.InferencePipelineCreateParams,
    )
    request_options = make_request_options(
        extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
    )
    return self._post(
        f"/projects/{id}/inference-pipelines",
        body=request_body,
        options=request_options,
        cast_to=InferencePipelineCreateResponse,
    )
95+
3896
def list(
3997
self,
4098
id: str,
@@ -98,6 +156,60 @@ def with_raw_response(self) -> AsyncInferencePipelinesResourceWithRawResponse:
98156
def with_streaming_response(self) -> AsyncInferencePipelinesResourceWithStreamingResponse:
99157
return AsyncInferencePipelinesResourceWithStreamingResponse(self)
100158

159+
async def create(
    self,
    id: str,
    *,
    description: Optional[str],
    name: str,
    reference_dataset_uri: Optional[str] | NotGiven = NOT_GIVEN,
    storage_type: Literal["local", "s3", "gcs", "azure"] | NotGiven = NOT_GIVEN,
    # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
    # The extra values given here take precedence over values defined on the client or passed to this method.
    extra_headers: Headers | None = None,
    extra_query: Query | None = None,
    extra_body: Body | None = None,
    timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
) -> InferencePipelineCreateResponse:
    """Create an inference pipeline under a project.

    Args:
      description: The inference pipeline description.

      name: The inference pipeline name.

      reference_dataset_uri: The reference dataset URI.

      storage_type: The storage type.

      extra_headers: Send extra headers

      extra_query: Add additional query parameters to the request

      extra_body: Add additional JSON properties to the request

      timeout: Override the client-level default timeout for this request, in seconds
    """
    # A blank project id would produce a malformed URL; fail fast instead.
    if not id:
        raise ValueError(f"Expected a non-empty value for `id` but received {id!r}")
    # Serialize the typed parameters into the wire-format request body.
    request_body = await async_maybe_transform(
        {
            "description": description,
            "name": name,
            "reference_dataset_uri": reference_dataset_uri,
            "storage_type": storage_type,
        },
        inference_pipeline_create_params.InferencePipelineCreateParams,
    )
    request_options = make_request_options(
        extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
    )
    return await self._post(
        f"/projects/{id}/inference-pipelines",
        body=request_body,
        options=request_options,
        cast_to=InferencePipelineCreateResponse,
    )
212+
101213
async def list(
102214
self,
103215
id: str,
@@ -156,6 +268,9 @@ class InferencePipelinesResourceWithRawResponse:
156268
def __init__(self, inference_pipelines: InferencePipelinesResource) -> None:
157269
self._inference_pipelines = inference_pipelines
158270

271+
self.create = to_raw_response_wrapper(
272+
inference_pipelines.create,
273+
)
159274
self.list = to_raw_response_wrapper(
160275
inference_pipelines.list,
161276
)
@@ -165,6 +280,9 @@ class AsyncInferencePipelinesResourceWithRawResponse:
165280
def __init__(self, inference_pipelines: AsyncInferencePipelinesResource) -> None:
166281
self._inference_pipelines = inference_pipelines
167282

283+
self.create = async_to_raw_response_wrapper(
284+
inference_pipelines.create,
285+
)
168286
self.list = async_to_raw_response_wrapper(
169287
inference_pipelines.list,
170288
)
@@ -174,6 +292,9 @@ class InferencePipelinesResourceWithStreamingResponse:
174292
def __init__(self, inference_pipelines: InferencePipelinesResource) -> None:
175293
self._inference_pipelines = inference_pipelines
176294

295+
self.create = to_streamed_response_wrapper(
296+
inference_pipelines.create,
297+
)
177298
self.list = to_streamed_response_wrapper(
178299
inference_pipelines.list,
179300
)
@@ -183,6 +304,9 @@ class AsyncInferencePipelinesResourceWithStreamingResponse:
183304
def __init__(self, inference_pipelines: AsyncInferencePipelinesResource) -> None:
184305
self._inference_pipelines = inference_pipelines
185306

307+
self.create = async_to_streamed_response_wrapper(
308+
inference_pipelines.create,
309+
)
186310
self.list = async_to_streamed_response_wrapper(
187311
inference_pipelines.list,
188312
)

0 commit comments

Comments
 (0)