Commit 3e76e8b

refactor: Removed old model names (#307)
* refactor: Removed old model names
* ci: Fix action permission
1 parent 9e5fe3d commit 3e76e8b
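
A minimal before/after sketch of what the rename means at a call site, modelled on the README diff below; the `ChatMessage` import path, the prompt text, and the environment-based API key are assumptions, not part of this commit:

```python
from ai21 import AI21Client
from ai21.models.chat import ChatMessage  # import path assumed

client = AI21Client()  # assumes AI21_API_KEY is set in the environment

messages = [ChatMessage(role="user", content="What is the capital of France?")]

# Before this commit the examples used the dated alias:
#   model="jamba-mini-1.6-2025-03"
# After this commit they use the short model name:
response = client.chat.completions.create(
    messages=messages,
    model="jamba-mini",
)
print(response.choices[0].message.content)
```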

14 files changed: +32 -32 lines changed

.github/workflows/pr-approval.yml

Lines changed: 1 addition & 1 deletion
@@ -13,7 +13,7 @@ jobs:
     if: github.event.review.state == 'APPROVED'
     runs-on: ubuntu-latest
     permissions:
-      contents: read
+      contents: write
       pull-requests: write
       statuses: write

README.md

Lines changed: 4 additions & 4 deletions
@@ -81,7 +81,7 @@ messages = [

 chat_completions = client.chat.completions.create(
     messages=messages,
-    model="jamba-mini-1.6-2025-03",
+    model="jamba-mini",
 )
 ```

@@ -111,7 +111,7 @@ client = AsyncAI21Client(
 async def main():
     response = await client.chat.completions.create(
         messages=messages,
-        model="jamba-mini-1.6-2025-03",
+        model="jamba-mini",
     )

     print(response)
@@ -235,7 +235,7 @@ client = AsyncAI21Client()
 async def main():
     response = await client.chat.completions.create(
         messages=messages,
-        model="jamba-mini-1.6-2025-03",
+        model="jamba-mini",
         stream=True,
     )
     async for chunk in response:
@@ -590,7 +590,7 @@ messages = [
 ]

 response = client.chat.completions.create(
-    model="jamba-mini-1.6-2025-03",
+    model="jamba-mini",
     messages=messages,
 )
 ```

ai21/clients/studio/resources/chat/base_chat_completions.py

Lines changed: 3 additions & 3 deletions
@@ -15,9 +15,9 @@


 _MODEL_DEPRECATION_WARNING = """
-The 'jamba-1.5-mini' and 'jamba-1.5-large' models are deprecated and will
+The 'jamba-mini-1.6' and 'jamba-large-1.6' models are deprecated and will
 be removed in a future version.
-Please use jamba-mini-1.6-2025-03 or jamba-large-1.6-2025-03 instead.
+Please use jamba-mini or jamba-large instead.
 """


@@ -28,7 +28,7 @@ def _check_model(self, model: Optional[str]) -> str:
         if not model:
             raise ValueError("model should be provided 'create' method call")

-        if model in ["jamba-1.5-mini", "jamba-1.5-large"]:
+        if model in ["jamba-mini-1.6", "jamba-large-1.6"]:
             warnings.warn(
                 _MODEL_DEPRECATION_WARNING,
                 DeprecationWarning,
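
The practical effect of the updated `_check_model` guard is that the 1.6 aliases still work but now emit a `DeprecationWarning`. A minimal sketch of observing that warning, assuming the client surface used throughout these examples and an API key in the environment; the `ChatMessage` import path is also an assumption:

```python
import warnings

from ai21 import AI21Client
from ai21.models.chat import ChatMessage  # import path assumed

client = AI21Client()  # assumes AI21_API_KEY is set in the environment
messages = [ChatMessage(role="user", content="Hello")]

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    # "jamba-mini-1.6" is now on the deprecated list, so _check_model emits
    # _MODEL_DEPRECATION_WARNING before the request is sent.
    client.chat.completions.create(messages=messages, model="jamba-mini-1.6")

assert any(issubclass(w.category, DeprecationWarning) for w in caught)
```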

examples/studio/batches/batches.py

Lines changed: 3 additions & 3 deletions
@@ -13,7 +13,7 @@
         "method": "POST",
         "url": "/v1/chat/completions",
         "body": {
-            "model": "jamba-1.5",
+            "model": "jamba-mini",
             "messages": [{"role": "user", "content": "What is your favorite color?"}],
         },
     },
@@ -22,7 +22,7 @@
         "method": "POST",
         "url": "/v1/chat/completions",
         "body": {
-            "model": "jamba-1.5",
+            "model": "jamba-mini",
             "messages": [{"role": "user", "content": "Tell me about your hobbies."}],
         },
     },
@@ -31,7 +31,7 @@
         "method": "POST",
         "url": "/v1/chat/completions",
         "body": {
-            "model": "jamba-1.5",
+            "model": "jamba-mini",
             "messages": [{"role": "user", "content": "Tell me about your favorite food."}],
         },
     },

examples/studio/chat/async_chat_completions.py

Lines changed: 1 addition & 1 deletion
@@ -18,7 +18,7 @@
 async def main():
     response = await client.chat.completions.create(
         messages=messages,
-        model="jamba-mini-1.6-2025-03",
+        model="jamba-mini",
         max_tokens=100,
         temperature=0.7,
         top_p=1.0,

examples/studio/chat/async_stream_chat_completions.py

Lines changed: 1 addition & 1 deletion
@@ -18,7 +18,7 @@
 async def main():
     response = await client.chat.completions.create(
         messages=messages,
-        model="jamba-mini-1.6-2025-03",
+        model="jamba-mini",
         max_tokens=100,
         stream=True,
     )
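
For completeness, a sketch of how the streamed response in this example might be consumed; the chunk attribute path (`choices[0].delta.content`) is an assumption about the streamed chunk model, not something this diff shows:

```python
import asyncio

from ai21 import AsyncAI21Client
from ai21.models.chat import ChatMessage  # import path assumed

client = AsyncAI21Client()  # assumes AI21_API_KEY is set in the environment
messages = [ChatMessage(role="user", content="Write a haiku about the sea.")]


async def main():
    response = await client.chat.completions.create(
        messages=messages,
        model="jamba-mini",
        max_tokens=100,
        stream=True,
    )
    async for chunk in response:
        # Attribute path assumed; each chunk carries an incremental text delta.
        print(chunk.choices[0].delta.content or "", end="")


asyncio.run(main())
```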

examples/studio/chat/chat_completions.py

Lines changed: 1 addition & 1 deletion
@@ -14,7 +14,7 @@

 response = client.chat.completions.create(
     messages=messages,
-    model="jamba-mini-1.6-2025-03",
+    model="jamba-mini",
     max_tokens=100,
     temperature=0.7,
     top_p=1.0,

examples/studio/chat/chat_documents.py

Lines changed: 1 addition & 1 deletion
@@ -42,7 +42,7 @@

 response = client.chat.completions.create(
     messages=messages,
-    model="jamba-mini-1.6-2025-03",
+    model="jamba-mini",
     documents=documents,
 )

examples/studio/chat/chat_function_calling.py

Lines changed: 2 additions & 2 deletions
@@ -46,7 +46,7 @@ def get_order_delivery_date(order_id: str) -> str:

 client = AI21Client()

-response = client.chat.completions.create(messages=messages, model="jamba-large-1.6-2025-03", tools=tools)
+response = client.chat.completions.create(messages=messages, model="jamba-large", tools=tools)

 """ AI models can be error-prone, it's crucial to ensure that the tool calls align with the expectations.
 The below code snippet demonstrates how to handle tool calls in the response and invoke the tool function
@@ -79,5 +79,5 @@ def get_order_delivery_date(order_id: str) -> str:
 tool_message = ToolMessage(role="tool", tool_call_id=tool_calls[0].id, content=delivery_date)
 messages.append(tool_message)

-response = client.chat.completions.create(messages=messages, model="jamba-large-1.6-2025-03", tools=tools)
+response = client.chat.completions.create(messages=messages, model="jamba-large", tools=tools)
 print(response.choices[0].message.content)
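
Between the two `create` calls above, the example turns the model's tool call into a `ToolMessage`. A hypothetical helper sketching that dispatch step (not part of this diff); the `function.name` / `function.arguments` attributes, the JSON argument format, and the `ToolMessage` import path are assumptions about the SDK's tool-call objects:

```python
import json

from ai21.models.chat import ToolMessage  # import path assumed


def dispatch_first_tool_call(response, messages, get_order_delivery_date):
    """Append a ToolMessage answering the first tool call, if the model made one."""
    tool_calls = response.choices[0].message.tool_calls
    if not tool_calls:
        return
    call = tool_calls[0]
    if call.function.name == "get_order_delivery_date":
        # Tool-call arguments are assumed to arrive as a JSON string.
        args = json.loads(call.function.arguments)
        delivery_date = get_order_delivery_date(args["order_id"])
        messages.append(
            ToolMessage(role="tool", tool_call_id=call.id, content=delivery_date)
        )
```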

examples/studio/chat/chat_function_calling_multiple_tools.py

Lines changed: 2 additions & 2 deletions
@@ -75,7 +75,7 @@ def get_sunset_hour(place: str, date: str) -> str:

 client = AI21Client()

-response = client.chat.completions.create(messages=messages, model="jamba-large-1.6-2025-03", tools=tools)
+response = client.chat.completions.create(messages=messages, model="jamba-large", tools=tools)

 """ AI models can be error-prone, it's crucial to ensure that the tool calls align with the expectations.
 The below code snippet demonstrates how to handle tool calls in the response and invoke the tool function
@@ -123,5 +123,5 @@ def get_sunset_hour(place: str, date: str) -> str:
 tool_message = ToolMessage(role="tool", tool_call_id=tool_id_called, content=str(result))
 messages.append(tool_message)

-response = client.chat.completions.create(messages=messages, model="jamba-large-1.6-2025-03", tools=tools)
+response = client.chat.completions.create(messages=messages, model="jamba-large", tools=tools)
 print(response.choices[0].message.content)
