Merge pull request #2562 from kqlio67/main
Update providers, documentation improvements and bug fixes
Showing 25 changed files with 906 additions and 285 deletions.
@@ -0,0 +1,90 @@
from __future__ import annotations

from aiohttp import ClientSession
import base64
import json

from ..typing import AsyncResult, Messages
from ..requests.raise_for_status import raise_for_status
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt

class AutonomousAI(AsyncGeneratorProvider, ProviderModelMixin):
    url = "https://www.autonomous.ai/anon/"
    api_endpoints = {
        "llama": "https://chatgpt.autonomous.ai/api/v1/ai/chat",
        "qwen_coder": "https://chatgpt.autonomous.ai/api/v1/ai/chat",
        "hermes": "https://chatgpt.autonomous.ai/api/v1/ai/chat-hermes",
        "vision": "https://chatgpt.autonomous.ai/api/v1/ai/chat-vision",
        "summary": "https://chatgpt.autonomous.ai/api/v1/ai/summary"
    }

    working = True
    supports_stream = True
    supports_system_message = True
    supports_message_history = True

    default_model = "llama"
    models = [default_model, "qwen_coder", "hermes", "vision", "summary"]

    model_aliases = {
        "llama-3.3-70b": default_model,
        "qwen-2.5-coder-32b": "qwen_coder",
        "hermes-3": "hermes",
        "llama-3.2-90b": "vision"
        # "summary" is selected by its own name in `models`; mapping
        # "llama-3.3-70b" to it as well would shadow the entry above.
    }

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        stream: bool = False,
        **kwargs
    ) -> AsyncResult:
        model = cls.get_model(model)  # resolve aliases before the endpoint lookup
        api_endpoint = cls.api_endpoints[model]
        headers = {
            'accept': '*/*',
            'accept-language': 'en-US,en;q=0.9',
            'content-type': 'application/json',
            'country-code': 'US',
            'origin': 'https://www.autonomous.ai',
            'referer': 'https://www.autonomous.ai/',
            'time-zone': 'America/New_York',
            'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36'
        }

        async with ClientSession(headers=headers) as session:
            prompt = format_prompt(messages)

            # The API expects the message list as base64-encoded JSON
            message = [{"role": "user", "content": prompt}]
            message_json = json.dumps(message)
            encoded_message = base64.b64encode(message_json.encode('utf-8')).decode('utf-8')

            data = {
                "messages": encoded_message,
                "threadId": model,
                "stream": stream,
                "aiAgent": model
            }

            async with session.post(api_endpoint, json=data, proxy=proxy) as response:
                await raise_for_status(response)
                async for chunk in response.content:
                    if chunk:
                        # Strip the trailing newline so the [DONE] check can match
                        chunk_str = chunk.decode().strip()
                        if not chunk_str or chunk_str == "data: [DONE]":
                            continue

                        try:
                            # Remove the "data: " prefix (first occurrence only)
                            # and parse the JSON delta
                            chunk_data = json.loads(chunk_str.replace("data: ", "", 1))
                            if "choices" in chunk_data and chunk_data["choices"]:
                                delta = chunk_data["choices"][0].get("delta", {})
                                if "content" in delta:
                                    yield delta["content"]
                        except json.JSONDecodeError:
                            continue
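For orientation, a minimal usage sketch for the provider above, assuming it is exported from g4f.Provider like the repository's other providers (the import path is an assumption; it is not part of this diff). Since create_async_generator is an async generator function, it can be iterated directly:

import asyncio
from g4f.Provider import AutonomousAI  # assumed export path

async def main():
    # Stream tokens from the default "llama" agent
    async for token in AutonomousAI.create_async_generator(
        model="llama",
        messages=[{"role": "user", "content": "Hello!"}],
        stream=True,
    ):
        print(token, end="", flush=True)

asyncio.run(main())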
@@ -0,0 +1,71 @@
from __future__ import annotations

from aiohttp import ClientSession
import json

from ..typing import AsyncResult, Messages
from ..requests.raise_for_status import raise_for_status
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin


class CablyAI(AsyncGeneratorProvider, ProviderModelMixin):
    url = "https://cablyai.com"
    api_endpoint = "https://cablyai.com/v1/chat/completions"

    working = True
    supports_stream = True
    supports_system_message = True
    supports_message_history = True

    default_model = "Cably-80B"
    models = [default_model]

    model_aliases = {"cably-80b": default_model}

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        stream: bool = False,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        model = cls.get_model(model)
        headers = {
            'Accept': '*/*',
            'Accept-Language': 'en-US,en;q=0.9',
            'Content-Type': 'application/json',
            'Origin': 'https://cablyai.com',
            'Referer': 'https://cablyai.com/chat',
            'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36'
        }

        async with ClientSession(headers=headers) as session:
            data = {
                "model": model,
                "messages": messages,
                "stream": stream
            }

            async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
                await raise_for_status(response)
                buffer = ""
                async for chunk in response.content:
                    if chunk:
                        buffer += chunk.decode()
                        # SSE events are separated by a blank line; buffer
                        # until a complete event is available
                        while "\n\n" in buffer:
                            chunk_data, buffer = buffer.split("\n\n", 1)
                            # Check the terminator first: "data: [DONE]" also
                            # starts with "data: ", so the JSON branch below
                            # would otherwise swallow it as a decode error
                            if chunk_data.strip() == "data: [DONE]":
                                return
                            if chunk_data.startswith("data: "):
                                try:
                                    json_data = json.loads(chunk_data[6:])
                                    if "choices" in json_data and json_data["choices"]:
                                        content = json_data["choices"][0]["delta"].get("content", "")
                                        if content:
                                            yield content
                                except json.JSONDecodeError:
                                    # Skip keep-alive lines and invalid JSON
                                    pass
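The buffering loop above exists because a single SSE event may arrive split across several TCP chunks: events are delimited by a blank line, and each data line carries one JSON delta. A self-contained sketch of the same framing logic against canned input (the sample payload is invented for illustration):

import json

# Two complete SSE events: one content delta, then the terminator
raw = 'data: {"choices": [{"delta": {"content": "Hi"}}]}\n\ndata: [DONE]\n\n'
buffer = raw
while "\n\n" in buffer:
    event, buffer = buffer.split("\n\n", 1)
    if event.strip() == "data: [DONE]":
        break  # the terminator must be checked before the JSON branch
    if event.startswith("data: "):
        print(json.loads(event[6:])["choices"][0]["delta"].get("content", ""))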
@@ -0,0 +1,92 @@
from __future__ import annotations

import uuid
import json

from aiohttp import ClientSession

from ..typing import AsyncResult, Messages
from ..requests.raise_for_status import raise_for_status
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt

class ChatGLM(AsyncGeneratorProvider, ProviderModelMixin):
    url = "https://chatglm.cn"
    api_endpoint = "https://chatglm.cn/chatglm/mainchat-api/guest/stream"

    working = True
    supports_stream = True
    supports_system_message = False
    supports_message_history = True

    default_model = "all-tools-230b"
    models = [default_model]
    model_aliases = {"glm-4": default_model}

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        # A fresh device id per request; the guest endpoint requires one
        device_id = str(uuid.uuid4()).replace('-', '')

        headers = {
            'Accept-Language': 'en-US,en;q=0.9',
            'App-Name': 'chatglm',
            'Authorization': 'undefined',
            'Content-Type': 'application/json',
            'Origin': 'https://chatglm.cn',
            'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36',
            'X-App-Platform': 'pc',
            'X-App-Version': '0.0.1',
            'X-Device-Id': device_id,
            'Accept': 'text/event-stream'
        }

        async with ClientSession(headers=headers) as session:
            prompt = format_prompt(messages)
            data = {
                "assistant_id": "65940acff94777010aa6b796",
                "conversation_id": "",
                "meta_data": {
                    "if_plus_model": False,
                    "is_test": False,
                    "input_question_type": "xxxx",
                    "channel": "",
                    "draft_id": "",
                    "quote_log_id": "",
                    "platform": "pc"
                },
                "messages": [
                    {
                        "role": "user",
                        "content": [
                            {
                                "type": "text",
                                "text": prompt
                            }
                        ]
                    }
                ]
            }

            async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
                await raise_for_status(response)
                async for chunk in response.content:
                    if chunk:
                        decoded_chunk = chunk.decode('utf-8')
                        if decoded_chunk.startswith('data: '):
                            try:
                                json_data = json.loads(decoded_chunk[6:])
                                # Responses nest text under parts -> content -> text
                                parts = json_data.get('parts', [])
                                if parts:
                                    content = parts[0].get('content', [])
                                    if content:
                                        text = content[0].get('text', '')
                                        if text:
                                            yield text
                            except json.JSONDecodeError:
                                pass
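A similarly hedged usage sketch for ChatGLM (again assuming the g4f.Provider export path). The parser above yields whatever text each event carries; whether those fragments are incremental deltas or cumulative snapshots depends on the upstream API, so this sketch simply prints each fragment as it arrives:

import asyncio
from g4f.Provider import ChatGLM  # assumed export path

async def main():
    async for text in ChatGLM.create_async_generator(
        model="glm-4",  # alias for the default "all-tools-230b"
        messages=[{"role": "user", "content": "What is ChatGLM?"}],
    ):
        print(text, flush=True)

asyncio.run(main())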