Skip to content

Commit 0ddf352

Browse files
feat(api): api update
1 parent d736c1f commit 0ddf352

File tree

4 files changed

+18
-18
lines changed

4 files changed

+18
-18
lines changed

.stats.yml

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
 configured_endpoints: 34
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/lemma%2Flemma-3e76e00e6aca1ff47c9f6b1bcfbb5eb7cc3e927a74e35b0b84149a401c6089f9.yml
-openapi_spec_hash: d83427b2f8c9912c0b8321948e69a867
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/lemma%2Flemma-7aff8515414cb67a674d48d77f386c8f551361da6858e0850ba477c48c76e486.yml
+openapi_spec_hash: c9e7de7f84e068c05a89ffced9066974
 config_hash: 3ccc0e50f28be581a8cc4501a5758970

src/lemma/resources/playground.py

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -46,7 +46,7 @@ def run(
4646
self,
4747
*,
4848
model: str,
49-
prompt_id: str,
49+
prompt_content: str,
5050
evaluator_id: Optional[str] | Omit = omit,
5151
input_variables: Dict[str, object] | Omit = omit,
5252
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -62,7 +62,7 @@ def run(
6262
Args:
6363
model: The OpenAI model to use for completion
6464
65-
prompt_id: The ID of the prompt to run
65+
prompt_content: The prompt content to run
6666
6767
evaluator_id: Optional ID of the evaluator to use
6868
@@ -81,7 +81,7 @@ def run(
8181
body=maybe_transform(
8282
{
8383
"model": model,
84-
"prompt_id": prompt_id,
84+
"prompt_content": prompt_content,
8585
"evaluator_id": evaluator_id,
8686
"input_variables": input_variables,
8787
},
@@ -118,7 +118,7 @@ async def run(
118118
self,
119119
*,
120120
model: str,
121-
prompt_id: str,
121+
prompt_content: str,
122122
evaluator_id: Optional[str] | Omit = omit,
123123
input_variables: Dict[str, object] | Omit = omit,
124124
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
@@ -134,7 +134,7 @@ async def run(
134134
Args:
135135
model: The OpenAI model to use for completion
136136
137-
prompt_id: The ID of the prompt to run
137+
prompt_content: The prompt content to run
138138
139139
evaluator_id: Optional ID of the evaluator to use
140140
@@ -153,7 +153,7 @@ async def run(
153153
body=await async_maybe_transform(
154154
{
155155
"model": model,
156-
"prompt_id": prompt_id,
156+
"prompt_content": prompt_content,
157157
"evaluator_id": evaluator_id,
158158
"input_variables": input_variables,
159159
},

src/lemma/types/playground_run_params.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -12,8 +12,8 @@ class PlaygroundRunParams(TypedDict, total=False):
     model: Required[str]
     """The OpenAI model to use for completion"""

-    prompt_id: Required[str]
-    """The ID of the prompt to run"""
+    prompt_content: Required[str]
+    """The prompt content to run"""

     evaluator_id: Optional[str]
     """Optional ID of the evaluator to use"""

tests/api_resources/test_playground.py

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@ class TestPlayground:
2121
def test_method_run(self, client: Lemma) -> None:
2222
playground = client.playground.run(
2323
model="model",
24-
prompt_id="prompt_id",
24+
prompt_content="prompt_content",
2525
)
2626
assert_matches_type(object, playground, path=["response"])
2727

@@ -30,7 +30,7 @@ def test_method_run(self, client: Lemma) -> None:
3030
def test_method_run_with_all_params(self, client: Lemma) -> None:
3131
playground = client.playground.run(
3232
model="model",
33-
prompt_id="prompt_id",
33+
prompt_content="prompt_content",
3434
evaluator_id="evaluator_id",
3535
input_variables={"foo": "bar"},
3636
)
@@ -41,7 +41,7 @@ def test_method_run_with_all_params(self, client: Lemma) -> None:
4141
def test_raw_response_run(self, client: Lemma) -> None:
4242
response = client.playground.with_raw_response.run(
4343
model="model",
44-
prompt_id="prompt_id",
44+
prompt_content="prompt_content",
4545
)
4646

4747
assert response.is_closed is True
@@ -54,7 +54,7 @@ def test_raw_response_run(self, client: Lemma) -> None:
5454
def test_streaming_response_run(self, client: Lemma) -> None:
5555
with client.playground.with_streaming_response.run(
5656
model="model",
57-
prompt_id="prompt_id",
57+
prompt_content="prompt_content",
5858
) as response:
5959
assert not response.is_closed
6060
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
@@ -75,7 +75,7 @@ class TestAsyncPlayground:
7575
async def test_method_run(self, async_client: AsyncLemma) -> None:
7676
playground = await async_client.playground.run(
7777
model="model",
78-
prompt_id="prompt_id",
78+
prompt_content="prompt_content",
7979
)
8080
assert_matches_type(object, playground, path=["response"])
8181

@@ -84,7 +84,7 @@ async def test_method_run(self, async_client: AsyncLemma) -> None:
8484
async def test_method_run_with_all_params(self, async_client: AsyncLemma) -> None:
8585
playground = await async_client.playground.run(
8686
model="model",
87-
prompt_id="prompt_id",
87+
prompt_content="prompt_content",
8888
evaluator_id="evaluator_id",
8989
input_variables={"foo": "bar"},
9090
)
@@ -95,7 +95,7 @@ async def test_method_run_with_all_params(self, async_client: AsyncLemma) -> Non
9595
async def test_raw_response_run(self, async_client: AsyncLemma) -> None:
9696
response = await async_client.playground.with_raw_response.run(
9797
model="model",
98-
prompt_id="prompt_id",
98+
prompt_content="prompt_content",
9999
)
100100

101101
assert response.is_closed is True
@@ -108,7 +108,7 @@ async def test_raw_response_run(self, async_client: AsyncLemma) -> None:
108108
async def test_streaming_response_run(self, async_client: AsyncLemma) -> None:
109109
async with async_client.playground.with_streaming_response.run(
110110
model="model",
111-
prompt_id="prompt_id",
111+
prompt_content="prompt_content",
112112
) as response:
113113
assert not response.is_closed
114114
assert response.http_request.headers.get("X-Stainless-Lang") == "python"

0 commit comments

Comments (0)