Commit 214567f
Merge pull request #2562 from kqlio67/main
Update providers, documentation improvements and bug fixes
hlohaus authored Jan 12, 2025
2 parents f1bede1 + bd0add7 commit 214567f
Showing 25 changed files with 906 additions and 285 deletions.
84 changes: 53 additions & 31 deletions docs/providers-and-models.md

Large diffs are not rendered by default.

37 changes: 3 additions & 34 deletions g4f/Provider/Airforce.py
@@ -31,7 +31,7 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
     api_endpoint_completions = "https://api.airforce/chat/completions"
     api_endpoint_imagine2 = "https://api.airforce/imagine2"
 
-    working = True
+    working = False
     supports_stream = True
     supports_system_message = True
     supports_message_history = True
@@ -115,29 +115,6 @@ def get_model(cls, model: str) -> str:
         """Get the actual model name from alias"""
         return cls.model_aliases.get(model, model or cls.default_model)
 
-    @classmethod
-    async def check_api_key(cls, api_key: str) -> bool:
-        """
-        Always returns True to allow all models.
-        """
-        if not api_key or api_key == "null":
-            return True  # No restrictions if no key.
-
-        headers = {
-            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36",
-            "Accept": "*/*",
-        }
-        try:
-            async with ClientSession(headers=headers) as session:
-                async with session.get(f"https://api.airforce/check?key={api_key}") as response:
-                    if response.status == 200:
-                        data = await response.json()
-                        return data.get('info') in ['Sponsor key', 'Premium key']
-                    return False
-        except Exception as e:
-            print(f"Error checking API key: {str(e)}")
-            return False
-
     @classmethod
     def _filter_content(cls, part_response: str) -> str:
         """
@@ -177,7 +154,6 @@ async def generate_image(
         cls,
         model: str,
         prompt: str,
-        api_key: str,
         size: str,
         seed: int,
         proxy: str = None
@@ -188,7 +164,6 @@
             "Accept-Language": "en-US,en;q=0.5",
             "Accept-Encoding": "gzip, deflate, br",
             "Content-Type": "application/json",
-            "Authorization": f"Bearer {api_key}",
         }
         params = {"model": model, "prompt": prompt, "size": size, "seed": seed}
 
@@ -210,7 +185,6 @@ async def generate_text(
         temperature: float,
         top_p: float,
         stream: bool,
-        api_key: str,
         proxy: str = None
     ) -> AsyncResult:
         """
@@ -222,7 +196,6 @@
             "Accept-Language": "en-US,en;q=0.5",
             "Accept-Encoding": "gzip, deflate, br",
             "Content-Type": "application/json",
-            "Authorization": f"Bearer {api_key}",
         }
 
         final_messages = []
@@ -286,22 +259,18 @@ async def create_async_generator(
         temperature: float = 1,
         top_p: float = 1,
         stream: bool = True,
-        api_key: str = None,
         size: str = "1:1",
         seed: int = None,
         **kwargs
     ) -> AsyncResult:
-        if not await cls.check_api_key(api_key):
-            pass
-
         model = cls.get_model(model)
         if model in cls.image_models:
             if prompt is None:
                 prompt = messages[-1]['content']
             if seed is None:
                 seed = random.randint(0, 10000)
-            async for result in cls.generate_image(model, prompt, api_key, size, seed, proxy):
+            async for result in cls.generate_image(model, prompt, size, seed, proxy):
                 yield result
         else:
-            async for result in cls.generate_text(model, messages, max_tokens, temperature, top_p, stream, api_key, proxy):
+            async for result in cls.generate_text(model, messages, max_tokens, temperature, top_p, stream, proxy):
                 yield result
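
With this change the provider is flagged non-working and the dead api_key plumbing is stripped: the key check whose result was discarded, the Bearer header, and the extra parameter threaded through both generation paths. For reference, a minimal sketch of driving the slimmed-down generator directly; the import path follows the package layout shown in the diff, the "flux" model name is a hypothetical stand-in for an entry in image_models (not shown here), and with working = False the call is illustrative only:

import asyncio
from g4f.Provider.Airforce import Airforce

async def main():
    messages = [{"role": "user", "content": "a watercolor fox"}]
    # Note: no api_key argument anymore; size and seed flow into generate_image
    async for chunk in Airforce.create_async_generator(
        model="flux",  # hypothetical image model name
        messages=messages,
        size="1:1",
        seed=42,
    ):
        print(chunk)

asyncio.run(main())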
90 changes: 90 additions & 0 deletions g4f/Provider/AutonomousAI.py
@@ -0,0 +1,90 @@
from __future__ import annotations

from aiohttp import ClientSession
import base64
import json

from ..typing import AsyncResult, Messages
from ..requests.raise_for_status import raise_for_status
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt

class AutonomousAI(AsyncGeneratorProvider, ProviderModelMixin):
    url = "https://www.autonomous.ai/anon/"
    api_endpoints = {
        "llama": "https://chatgpt.autonomous.ai/api/v1/ai/chat",
        "qwen_coder": "https://chatgpt.autonomous.ai/api/v1/ai/chat",
        "hermes": "https://chatgpt.autonomous.ai/api/v1/ai/chat-hermes",
        "vision": "https://chatgpt.autonomous.ai/api/v1/ai/chat-vision",
        "summary": "https://chatgpt.autonomous.ai/api/v1/ai/summary"
    }

    working = True
    supports_stream = True
    supports_system_message = True
    supports_message_history = True

    default_model = "llama"
    models = [default_model, "qwen_coder", "hermes", "vision", "summary"]

    model_aliases = {
        "llama-3.3-70b": default_model,
        "qwen-2.5-coder-32b": "qwen_coder",
        "hermes-3": "hermes",
        "llama-3.2-90b": "vision",
        "llama-3.3-70b": "summary"  # duplicate key: this later entry silently overrides the "llama" mapping above
    }

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        stream: bool = False,
        **kwargs
    ) -> AsyncResult:
        api_endpoint = cls.api_endpoints[model]
        headers = {
            'accept': '*/*',
            'accept-language': 'en-US,en;q=0.9',
            'content-type': 'application/json',
            'country-code': 'US',
            'origin': 'https://www.autonomous.ai',
            'referer': 'https://www.autonomous.ai/',
            'time-zone': 'America/New_York',
            'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36'
        }

        async with ClientSession(headers=headers) as session:
            prompt = format_prompt(messages)

            # Encode message
            message = [{"role": "user", "content": prompt}]
            message_json = json.dumps(message)
            encoded_message = base64.b64encode(message_json.encode('utf-8')).decode('utf-8')

            data = {
                "messages": encoded_message,
                "threadId": model,
                "stream": stream,
                "aiAgent": model
            }

            async with session.post(api_endpoint, json=data, proxy=proxy) as response:
                await raise_for_status(response)
                async for chunk in response.content:
                    if chunk:
                        chunk_str = chunk.decode()
                        # note: chunks keep their trailing newline, so this equality
                        # check never fires; the JSONDecodeError fallback handles [DONE]
                        if chunk_str == "data: [DONE]":
                            continue

                        try:
                            # Remove "data: " prefix and parse JSON
                            chunk_data = json.loads(chunk_str.replace("data: ", ""))
                            if "choices" in chunk_data and chunk_data["choices"]:
                                delta = chunk_data["choices"][0].get("delta", {})
                                if "content" in delta:
                                    yield delta["content"]
                        except json.JSONDecodeError:
                            continue
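
The one unusual wire-format detail in this provider is that the messages list is serialized to JSON and then base64-encoded before being placed in the "messages" field. A standalone round-trip of that encoding step, standard library only:

import base64
import json

message = [{"role": "user", "content": "Hello"}]
message_json = json.dumps(message)
encoded = base64.b64encode(message_json.encode("utf-8")).decode("utf-8")
print(encoded)  # W3sicm9sZSI6... (base64 of the JSON list)

# Presumably the server reverses the same two steps:
decoded = json.loads(base64.b64decode(encoded).decode("utf-8"))
assert decoded == message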
2 changes: 1 addition & 1 deletion g4f/Provider/BlackboxCreateAgent.py
@@ -72,7 +72,7 @@ async def fetch_validated(cls) -> Optional[str]:
             return cached_value
 
         js_file_pattern = r'static/chunks/\d{4}-[a-fA-F0-9]+\.js'
-        v_pattern = r'j\s*=\s*[\'"]([0-9a-fA-F-]{36})[\'"]'
+        v_pattern = r'L\s*=\s*[\'"]([0-9a-fA-F-]{36})[\'"]'
 
         def is_valid_context(text: str) -> bool:
             """Checks if the context is valid."""
71 changes: 71 additions & 0 deletions g4f/Provider/CablyAI.py
@@ -0,0 +1,71 @@
from __future__ import annotations

from aiohttp import ClientSession
import json

from ..typing import AsyncResult, Messages
from ..requests.raise_for_status import raise_for_status
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt


class CablyAI(AsyncGeneratorProvider, ProviderModelMixin):
    url = "https://cablyai.com"
    api_endpoint = "https://cablyai.com/v1/chat/completions"

    working = True
    supports_stream = True
    supports_system_message = True
    supports_message_history = True

    default_model = "Cably-80B"
    models = [default_model]

    model_aliases = {"cably-80b": default_model}

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        stream: bool = False,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        model = cls.get_model(model)
        headers = {
            'Accept': '*/*',
            'Accept-Language': 'en-US,en;q=0.9',
            'Content-Type': 'application/json',
            'Origin': 'https://cablyai.com',
            'Referer': 'https://cablyai.com/chat',
            'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36'
        }

        async with ClientSession(headers=headers) as session:
            data = {
                "model": model,
                "messages": messages,
                "stream": stream
            }

            async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
                await raise_for_status(response)
                buffer = ""
                async for chunk in response.content:
                    if chunk:
                        buffer += chunk.decode()
                        while "\n\n" in buffer:
                            chunk_data, buffer = buffer.split("\n\n", 1)
                            if chunk_data.startswith("data: "):
                                try:
                                    json_data = json.loads(chunk_data[6:])
                                    if "choices" in json_data and json_data["choices"]:
                                        content = json_data["choices"][0]["delta"].get("content", "")
                                        if content:
                                            yield content
                                except json.JSONDecodeError:
                                    # Skip invalid JSON
                                    pass
                            # note: unreachable, since "data: [DONE]" also starts with
                            # "data: " and is swallowed by the JSONDecodeError above
                            elif chunk_data.strip() == "data: [DONE]":
                                return
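
Unlike AutonomousAI's line-at-a-time parsing, this loop accumulates decoded bytes in a buffer and splits on the blank line ("\n\n") that terminates each server-sent event, so a JSON payload fragmented across network chunks is reassembled before json.loads sees it. A minimal sketch of consuming the provider, assuming the module path introduced in this diff:

import asyncio
from g4f.Provider.CablyAI import CablyAI

async def main():
    messages = [{"role": "user", "content": "Say hello in one word."}]
    # "cably-80b" resolves to "Cably-80B" via model_aliases
    async for token in CablyAI.create_async_generator(
        model="cably-80b", messages=messages, stream=True
    ):
        print(token, end="", flush=True)

asyncio.run(main())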
92 changes: 92 additions & 0 deletions g4f/Provider/ChatGLM.py
@@ -0,0 +1,92 @@
from __future__ import annotations

import uuid
import json

from aiohttp import ClientSession

from ..typing import AsyncResult, Messages
from ..requests.raise_for_status import raise_for_status
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt

class ChatGLM(AsyncGeneratorProvider, ProviderModelMixin):
    url = "https://chatglm.cn"
    api_endpoint = "https://chatglm.cn/chatglm/mainchat-api/guest/stream"

    working = True
    supports_stream = True
    supports_system_message = False
    supports_message_history = True

    default_model = "all-tools-230b"
    models = [default_model]
    model_aliases = {"glm-4": default_model}

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        **kwargs
    ) -> AsyncResult:
        device_id = str(uuid.uuid4()).replace('-', '')

        headers = {
            'Accept-Language': 'en-US,en;q=0.9',
            'App-Name': 'chatglm',
            'Authorization': 'undefined',
            'Content-Type': 'application/json',
            'Origin': 'https://chatglm.cn',
            'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36',
            'X-App-Platform': 'pc',
            'X-App-Version': '0.0.1',
            'X-Device-Id': device_id,
            'Accept': 'text/event-stream'
        }

        async with ClientSession(headers=headers) as session:
            prompt = format_prompt(messages)
            data = {
                "assistant_id": "65940acff94777010aa6b796",
                "conversation_id": "",
                "meta_data": {
                    "if_plus_model": False,
                    "is_test": False,
                    "input_question_type": "xxxx",
                    "channel": "",
                    "draft_id": "",
                    "quote_log_id": "",
                    "platform": "pc"
                },
                "messages": [
                    {
                        "role": "user",
                        "content": [
                            {
                                "type": "text",
                                "text": prompt
                            }
                        ]
                    }
                ]
            }

            async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
                await raise_for_status(response)
                async for chunk in response.content:
                    if chunk:
                        decoded_chunk = chunk.decode('utf-8')
                        if decoded_chunk.startswith('data: '):
                            try:
                                json_data = json.loads(decoded_chunk[6:])
                                parts = json_data.get('parts', [])
                                if parts:
                                    content = parts[0].get('content', [])
                                    if content:
                                        text = content[0].get('text', '')
                                        if text:
                                            yield text
                            except json.JSONDecodeError:
                                pass
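
The guest endpoint needs no account: Authorization is literally the string "undefined" and the device id is a fresh UUID minted per request; the model argument never reaches the payload, which pins a fixed assistant_id. A minimal sketch of exercising the provider through its "glm-4" alias, again assuming the module path introduced in this diff:

import asyncio
from g4f.Provider.ChatGLM import ChatGLM

async def main():
    messages = [{"role": "user", "content": "What is the capital of France?"}]
    async for text in ChatGLM.create_async_generator(model="glm-4", messages=messages):
        print(text)

asyncio.run(main())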
