Skip to content

Commit de393f8

Browse files
committed
fix(spec, sdk): remove references to Claude 1 and Basic models (#7201)
GitOrigin-RevId: ad669bfbc9b07a31d10cde02894e2b65010e4403
1 parent 8860c92 commit de393f8

File tree

4 files changed

+8
-125
lines changed

4 files changed

+8
-125
lines changed

.github/workflows/test.yml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,7 @@ jobs:
1919
- "3.11"
2020
- "3.10"
2121
- "3.9"
22+
- "3.8"
2223
os:
2324
- ubuntu-22.04
2425
steps:

assemblyai/__version__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1 +1 @@
1-
__version__ = "0.40.2"
1+
__version__ = "0.36.0"

assemblyai/types.py

Lines changed: 6 additions & 94 deletions
Original file line numberDiff line numberDiff line change
@@ -1,16 +1,6 @@
11
from datetime import datetime
22
from enum import Enum, EnumMeta
3-
from typing import (
4-
TYPE_CHECKING,
5-
Annotated,
6-
Any,
7-
Dict,
8-
List,
9-
Optional,
10-
Sequence,
11-
Tuple,
12-
Union,
13-
)
3+
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Tuple, Union
144
from urllib.parse import parse_qs, urlparse
155
from warnings import warn
166

@@ -19,13 +9,13 @@
199

2010
try:
2111
# pydantic v2 import
22-
from pydantic import UUID4, BaseModel, ConfigDict, Field, field_validator
12+
from pydantic import UUID4, BaseModel, ConfigDict, Field
2313
from pydantic_settings import BaseSettings, SettingsConfigDict
2414

2515
pydantic_v2 = True
2616
except ImportError:
2717
# pydantic v1 import
28-
from pydantic.v1 import UUID4, BaseModel, BaseSettings, ConfigDict, Field, validator
18+
from pydantic.v1 import UUID4, BaseModel, BaseSettings, ConfigDict, Field
2919

3020
pydantic_v2 = False
3121

@@ -482,12 +472,6 @@ class SpeechModel(str, Enum):
482472
nano = "nano"
483473
"A lightweight, lower cost model for a wide range of languages."
484474

485-
slam_1 = "slam-1"
486-
"A Speech Language Model optimized explicitly for speech-to-text tasks"
487-
488-
universal = "universal"
489-
"The model optimized for accuracy, low latency, ease of use, and multi-language support"
490-
491475

492476
class RawTranscriptionConfig(BaseModel):
493477
language_code: Optional[Union[str, LanguageCode]] = None
@@ -600,13 +584,6 @@ class RawTranscriptionConfig(BaseModel):
600584
"""
601585
The speech model to use for the transcription.
602586
"""
603-
604-
prompt: Optional[str] = None
605-
"The prompt used to generate the transcript with the Slam-1 speech model. Can't be used together with `keyterms_prompt`."
606-
607-
keyterms_prompt: Optional[List[str]] = None
608-
"The list of key terms used to generate the transcript with the Slam-1 speech model. Can't be used together with `prompt`."
609-
610587
model_config = ConfigDict(extra="allow")
611588

612589

@@ -650,8 +627,6 @@ def __init__(
650627
speech_threshold: Optional[float] = None,
651628
raw_transcription_config: Optional[RawTranscriptionConfig] = None,
652629
speech_model: Optional[SpeechModel] = None,
653-
prompt: Optional[str] = None,
654-
keyterms_prompt: Optional[List[str]] = None,
655630
) -> None:
656631
"""
657632
Args:
@@ -740,8 +715,6 @@ def __init__(
740715
self.language_confidence_threshold = language_confidence_threshold
741716
self.speech_threshold = speech_threshold
742717
self.speech_model = speech_model
743-
self.prompt = prompt
744-
self.keyterms_prompt = keyterms_prompt
745718

746719
@property
747720
def raw(self) -> RawTranscriptionConfig:
@@ -770,26 +743,6 @@ def speech_model(self, speech_model: Optional[SpeechModel]) -> None:
770743
"Sets the speech model to use for the transcription."
771744
self._raw_transcription_config.speech_model = speech_model
772745

773-
@property
774-
def prompt(self) -> Optional[str]:
775-
"The prompt to use for the transcription."
776-
return self._raw_transcription_config.prompt
777-
778-
@prompt.setter
779-
def prompt(self, prompt: Optional[str]) -> None:
780-
"Sets the prompt to use for the transcription."
781-
self._raw_transcription_config.prompt = prompt
782-
783-
@property
784-
def keyterms_prompt(self) -> Optional[List[str]]:
785-
"The keyterms_prompt to use for the transcription."
786-
return self._raw_transcription_config.keyterms_prompt
787-
788-
@keyterms_prompt.setter
789-
def keyterms_prompt(self, keyterms_prompt: Optional[List[str]]) -> None:
790-
"Sets the prompt to use for the transcription."
791-
self._raw_transcription_config.keyterms_prompt = keyterms_prompt
792-
793746
@property
794747
def punctuate(self) -> Optional[bool]:
795748
"Returns the status of the Automatic Punctuation feature."
@@ -1471,19 +1424,6 @@ class Word(BaseModel):
14711424
speaker: Optional[str] = None
14721425
channel: Optional[str] = None
14731426

1474-
# This is a workaround to address an issue where sentiment_analysis_results
1475-
# may return contains sentiments where `start` is null.
1476-
if pydantic_v2:
1477-
1478-
@field_validator("start", mode="before")
1479-
def set_start_default(cls, v):
1480-
return 0 if v is None else v
1481-
else:
1482-
1483-
@validator("start", pre=True)
1484-
def set_start_default(cls, v):
1485-
return 0 if v is None else v
1486-
14871427

14881428
class UtteranceWord(Word):
14891429
channel: Optional[str] = None
@@ -1765,12 +1705,6 @@ class BaseTranscript(BaseModel):
17651705
speech_model: Optional[SpeechModel] = None
17661706
"The speech model to use for the transcription."
17671707

1768-
prompt: Optional[str] = None
1769-
"The prompt used to generate the transcript with the Slam-1 speech model. Can't be used together with `keyterms_prompt`."
1770-
1771-
keyterms_prompt: Optional[List[str]] = None
1772-
"The list of key terms used to generate the transcript with the Slam-1 speech model. Can't be used together with `prompt`."
1773-
17741708

17751709
class TranscriptRequest(BaseTranscript):
17761710
"""
@@ -1836,12 +1770,6 @@ class TranscriptResponse(BaseTranscript):
18361770
speech_model: Optional[SpeechModel] = None
18371771
"The speech model used for the transcription"
18381772

1839-
prompt: Optional[str] = None
1840-
"When Slam-1 is enabled, the prompt used to generate the transcript"
1841-
1842-
keyterms_prompt: Optional[List[str]] = None
1843-
"When Slam-1 is enabled, the list of key terms used to generate the transcript"
1844-
18451773
def __init__(self, **data: Any):
18461774
# cleanup the response before creating the object
18471775
if not data.get("iab_categories_result") or (
@@ -1879,14 +1807,8 @@ class ListTranscriptParameters(BaseModel):
18791807
status: Optional[TranscriptStatus] = None
18801808
"Filter by transcript status"
18811809

1882-
throttled_only: Annotated[
1883-
Optional[bool],
1884-
Field(
1885-
deprecated="`throttled_only` is deprecated and will be removed in a future release.",
1886-
),
1887-
] = None
1810+
throttled_only: Optional[bool] = None
18881811
"Get only throttled transcripts, overrides the status filter"
1889-
18901812
model_config = ConfigDict(use_enum_values=True)
18911813

18921814

@@ -2047,29 +1969,19 @@ class LemurModel(str, Enum):
20471969
LeMUR features different model modes that allow you to configure your request to suit your needs.
20481970
"""
20491971

2050-
claude3_7_sonnet_20250219 = "anthropic/claude-3-7-sonnet-20250219"
2051-
"""
2052-
Claude 3.7 Sonnet is the most intelligent model to date, providing the highest level of intelligence and capability with toggleable extended thinking.
2053-
"""
2054-
20551972
claude3_5_sonnet = "anthropic/claude-3-5-sonnet"
20561973
"""
2057-
Claude 3.5 Sonnet is the previous most intelligent model to date, providing high level of intelligence and capability.
1974+
Claude 3.5 Sonnet is the most intelligent model to date, outperforming Claude 3 Opus on a wide range of evaluations, with the speed and cost of Claude 3 Sonnet.
20581975
"""
20591976

20601977
claude3_opus = "anthropic/claude-3-opus"
20611978
"""
20621979
Claude 3 Opus is good at handling complex analysis, longer tasks with many steps, and higher-order math and coding tasks.
20631980
"""
20641981

2065-
claude3_5_haiku_20241022 = "anthropic/claude-3-5-haiku-20241022"
2066-
"""
2067-
Claude 3.5 Haiku is the fastest model, providing intelligence at blazing speeds.
2068-
"""
2069-
20701982
claude3_haiku = "anthropic/claude-3-haiku"
20711983
"""
2072-
Claude 3 Haiku is the fastest and most compact model for near-instant responsiveness.
1984+
Claude 3 Haiku is the fastest model that can execute lightweight actions.
20731985
"""
20741986

20751987
claude3_sonnet = "anthropic/claude-3-sonnet"

tests/unit/test_sentiment_analysis.py

Lines changed: 0 additions & 30 deletions
Original file line numberDiff line numberDiff line change
@@ -76,33 +76,3 @@ def test_sentiment_analysis_enabled(httpx_mock: HTTPXMock):
7676
assert (
7777
transcript_sentiment_result.speaker == response_sentiment_result["speaker"]
7878
)
79-
80-
81-
def test_sentiment_analysis_null_start(httpx_mock: HTTPXMock):
82-
"""
83-
Tests that `start` converts null values to 0.
84-
"""
85-
mock_response = {
86-
"audio_url": "https://example/audio.mp3",
87-
"status": "completed",
88-
"sentiment_analysis_results": [
89-
{
90-
"text": "hi",
91-
"start": None,
92-
"end": 100,
93-
"confidence": 0.99,
94-
"sentiment": "POSITIVE",
95-
}
96-
],
97-
}
98-
request_body, transcript = unit_test_utils.submit_mock_transcription_request(
99-
httpx_mock,
100-
mock_response=mock_response,
101-
config=aai.TranscriptionConfig(sentiment_analysis=True),
102-
)
103-
104-
for response_sentiment_result, transcript_sentiment_result in zip(
105-
mock_response["sentiment_analysis_results"],
106-
transcript.sentiment_analysis,
107-
):
108-
assert transcript_sentiment_result.start == 0

0 commit comments

Comments (0)