|
1 | 1 | from datetime import datetime
|
2 | 2 | from enum import Enum, EnumMeta
|
3 |
| -from typing import ( |
4 |
| - TYPE_CHECKING, |
5 |
| - Annotated, |
6 |
| - Any, |
7 |
| - Dict, |
8 |
| - List, |
9 |
| - Optional, |
10 |
| - Sequence, |
11 |
| - Tuple, |
12 |
| - Union, |
13 |
| -) |
| 3 | +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Tuple, Union |
14 | 4 | from urllib.parse import parse_qs, urlparse
|
15 | 5 | from warnings import warn
|
16 | 6 |
|
|
19 | 9 |
|
20 | 10 | try:
|
21 | 11 | # pydantic v2 import
|
22 |
| - from pydantic import UUID4, BaseModel, ConfigDict, Field, field_validator |
| 12 | + from pydantic import UUID4, BaseModel, ConfigDict, Field |
23 | 13 | from pydantic_settings import BaseSettings, SettingsConfigDict
|
24 | 14 |
|
25 | 15 | pydantic_v2 = True
|
26 | 16 | except ImportError:
|
27 | 17 | # pydantic v1 import
|
28 |
| - from pydantic.v1 import UUID4, BaseModel, BaseSettings, ConfigDict, Field, validator |
| 18 | + from pydantic.v1 import UUID4, BaseModel, BaseSettings, ConfigDict, Field |
29 | 19 |
|
30 | 20 | pydantic_v2 = False
|
31 | 21 |
|
@@ -482,12 +472,6 @@ class SpeechModel(str, Enum):
|
482 | 472 | nano = "nano"
|
483 | 473 | "A lightweight, lower cost model for a wide range of languages."
|
484 | 474 |
|
485 |
| - slam_1 = "slam-1" |
486 |
| - "A Speech Language Model optimized explicitly for speech-to-text tasks" |
487 |
| - |
488 |
| - universal = "universal" |
489 |
| - "The model optimized for accuracy, low latency, ease of use, and multi-language support" |
490 |
| - |
491 | 475 |
|
492 | 476 | class RawTranscriptionConfig(BaseModel):
|
493 | 477 | language_code: Optional[Union[str, LanguageCode]] = None
|
@@ -600,13 +584,6 @@ class RawTranscriptionConfig(BaseModel):
|
600 | 584 | """
|
601 | 585 | The speech model to use for the transcription.
|
602 | 586 | """
|
603 |
| - |
604 |
| - prompt: Optional[str] = None |
605 |
| - "The prompt used to generate the transcript with the Slam-1 speech model. Can't be used together with `keyterms_prompt`." |
606 |
| - |
607 |
| - keyterms_prompt: Optional[List[str]] = None |
608 |
| - "The list of key terms used to generate the transcript with the Slam-1 speech model. Can't be used together with `prompt`." |
609 |
| - |
610 | 587 | model_config = ConfigDict(extra="allow")
|
611 | 588 |
|
612 | 589 |
|
@@ -650,8 +627,6 @@ def __init__(
|
650 | 627 | speech_threshold: Optional[float] = None,
|
651 | 628 | raw_transcription_config: Optional[RawTranscriptionConfig] = None,
|
652 | 629 | speech_model: Optional[SpeechModel] = None,
|
653 |
| - prompt: Optional[str] = None, |
654 |
| - keyterms_prompt: Optional[List[str]] = None, |
655 | 630 | ) -> None:
|
656 | 631 | """
|
657 | 632 | Args:
|
@@ -740,8 +715,6 @@ def __init__(
|
740 | 715 | self.language_confidence_threshold = language_confidence_threshold
|
741 | 716 | self.speech_threshold = speech_threshold
|
742 | 717 | self.speech_model = speech_model
|
743 |
| - self.prompt = prompt |
744 |
| - self.keyterms_prompt = keyterms_prompt |
745 | 718 |
|
746 | 719 | @property
|
747 | 720 | def raw(self) -> RawTranscriptionConfig:
|
@@ -770,26 +743,6 @@ def speech_model(self, speech_model: Optional[SpeechModel]) -> None:
|
770 | 743 | "Sets the speech model to use for the transcription."
|
771 | 744 | self._raw_transcription_config.speech_model = speech_model
|
772 | 745 |
|
773 |
| - @property |
774 |
| - def prompt(self) -> Optional[str]: |
775 |
| - "The prompt to use for the transcription." |
776 |
| - return self._raw_transcription_config.prompt |
777 |
| - |
778 |
| - @prompt.setter |
779 |
| - def prompt(self, prompt: Optional[str]) -> None: |
780 |
| - "Sets the prompt to use for the transcription." |
781 |
| - self._raw_transcription_config.prompt = prompt |
782 |
| - |
783 |
| - @property |
784 |
| - def keyterms_prompt(self) -> Optional[List[str]]: |
785 |
| - "The keyterms_prompt to use for the transcription." |
786 |
| - return self._raw_transcription_config.keyterms_prompt |
787 |
| - |
788 |
| - @keyterms_prompt.setter |
789 |
| - def keyterms_prompt(self, keyterms_prompt: Optional[List[str]]) -> None: |
790 |
| - "Sets the keyterms_prompt to use for the transcription." |
791 |
| - self._raw_transcription_config.keyterms_prompt = keyterms_prompt |
792 |
| - |
793 | 746 | @property
|
794 | 747 | def punctuate(self) -> Optional[bool]:
|
795 | 748 | "Returns the status of the Automatic Punctuation feature."
|
@@ -1471,19 +1424,6 @@ class Word(BaseModel):
|
1471 | 1424 | speaker: Optional[str] = None
|
1472 | 1425 | channel: Optional[str] = None
|
1473 | 1426 |
|
1474 |
| - # This is a workaround to address an issue where sentiment_analysis_results |
1475 |
| - # may contain sentiments where `start` is null. |
1476 |
| - if pydantic_v2: |
1477 |
| - |
1478 |
| - @field_validator("start", mode="before") |
1479 |
| - def set_start_default(cls, v): |
1480 |
| - return 0 if v is None else v |
1481 |
| - else: |
1482 |
| - |
1483 |
| - @validator("start", pre=True) |
1484 |
| - def set_start_default(cls, v): |
1485 |
| - return 0 if v is None else v |
1486 |
| - |
1487 | 1427 |
|
1488 | 1428 | class UtteranceWord(Word):
|
1489 | 1429 | channel: Optional[str] = None
|
@@ -1765,12 +1705,6 @@ class BaseTranscript(BaseModel):
|
1765 | 1705 | speech_model: Optional[SpeechModel] = None
|
1766 | 1706 | "The speech model to use for the transcription."
|
1767 | 1707 |
|
1768 |
| - prompt: Optional[str] = None |
1769 |
| - "The prompt used to generate the transcript with the Slam-1 speech model. Can't be used together with `keyterms_prompt`." |
1770 |
| - |
1771 |
| - keyterms_prompt: Optional[List[str]] = None |
1772 |
| - "The list of key terms used to generate the transcript with the Slam-1 speech model. Can't be used together with `prompt`." |
1773 |
| - |
1774 | 1708 |
|
1775 | 1709 | class TranscriptRequest(BaseTranscript):
|
1776 | 1710 | """
|
@@ -1836,12 +1770,6 @@ class TranscriptResponse(BaseTranscript):
|
1836 | 1770 | speech_model: Optional[SpeechModel] = None
|
1837 | 1771 | "The speech model used for the transcription"
|
1838 | 1772 |
|
1839 |
| - prompt: Optional[str] = None |
1840 |
| - "When Slam-1 is enabled, the prompt used to generate the transcript" |
1841 |
| - |
1842 |
| - keyterms_prompt: Optional[List[str]] = None |
1843 |
| - "When Slam-1 is enabled, the list of key terms used to generate the transcript" |
1844 |
| - |
1845 | 1773 | def __init__(self, **data: Any):
|
1846 | 1774 | # cleanup the response before creating the object
|
1847 | 1775 | if not data.get("iab_categories_result") or (
|
@@ -1879,14 +1807,8 @@ class ListTranscriptParameters(BaseModel):
|
1879 | 1807 | status: Optional[TranscriptStatus] = None
|
1880 | 1808 | "Filter by transcript status"
|
1881 | 1809 |
|
1882 |
| - throttled_only: Annotated[ |
1883 |
| - Optional[bool], |
1884 |
| - Field( |
1885 |
| - deprecated="`throttled_only` is deprecated and will be removed in a future release.", |
1886 |
| - ), |
1887 |
| - ] = None |
| 1810 | + throttled_only: Optional[bool] = None |
1888 | 1811 | "Get only throttled transcripts, overrides the status filter"
|
1889 |
| - |
1890 | 1812 | model_config = ConfigDict(use_enum_values=True)
|
1891 | 1813 |
|
1892 | 1814 |
|
@@ -2047,29 +1969,19 @@ class LemurModel(str, Enum):
|
2047 | 1969 | LeMUR features different model modes that allow you to configure your request to suit your needs.
|
2048 | 1970 | """
|
2049 | 1971 |
|
2050 |
| - claude3_7_sonnet_20250219 = "anthropic/claude-3-7-sonnet-20250219" |
2051 |
| - """ |
2052 |
| - Claude 3.7 Sonnet is the most intelligent model to date, providing the highest level of intelligence and capability with toggleable extended thinking. |
2053 |
| - """ |
2054 |
| - |
2055 | 1972 | claude3_5_sonnet = "anthropic/claude-3-5-sonnet"
|
2056 | 1973 | """
|
2057 |
| - Claude 3.5 Sonnet is the previous most intelligent model to date, providing high level of intelligence and capability. |
| 1974 | + Claude 3.5 Sonnet is the most intelligent model to date, outperforming Claude 3 Opus on a wide range of evaluations, with the speed and cost of Claude 3 Sonnet. |
2058 | 1975 | """
|
2059 | 1976 |
|
2060 | 1977 | claude3_opus = "anthropic/claude-3-opus"
|
2061 | 1978 | """
|
2062 | 1979 | Claude 3 Opus is good at handling complex analysis, longer tasks with many steps, and higher-order math and coding tasks.
|
2063 | 1980 | """
|
2064 | 1981 |
|
2065 |
| - claude3_5_haiku_20241022 = "anthropic/claude-3-5-haiku-20241022" |
2066 |
| - """ |
2067 |
| - Claude 3.5 Haiku is the fastest model, providing intelligence at blazing speeds. |
2068 |
| - """ |
2069 |
| - |
2070 | 1982 | claude3_haiku = "anthropic/claude-3-haiku"
|
2071 | 1983 | """
|
2072 |
| - Claude 3 Haiku is the fastest and most compact model for near-instant responsiveness. |
| 1984 | + Claude 3 Haiku is the fastest model that can execute lightweight actions. |
2073 | 1985 | """
|
2074 | 1986 |
|
2075 | 1987 | claude3_sonnet = "anthropic/claude-3-sonnet"
|
|
0 commit comments