Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Provider Updates and Fixes #2570

Merged
merged 11 commits into from
Jan 15, 2025
4 changes: 2 additions & 2 deletions docs/client.md
Original file line number Diff line number Diff line change
Expand Up @@ -148,7 +148,7 @@ response = client.chat.completions.create(
"content": "Say this is a test"
}
],
# Add any other necessary parameters
web_search = False
)

print(response.choices[0].message.content)
Expand Down Expand Up @@ -416,4 +416,4 @@ This guide provides a comprehensive overview of the G4F Client API, demonstratin


---
[Return to Home](/)
[Return to Home](/)
47 changes: 23 additions & 24 deletions docs/providers-and-models.md

Large diffs are not rendered by default.

Original file line number Diff line number Diff line change
Expand Up @@ -5,19 +5,21 @@

from aiohttp import BaseConnector, ClientSession

from ...errors import RateLimitError
from ...requests import raise_for_status
from ...requests.aiohttp import get_connector
from ...typing import AsyncResult, Messages
from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..errors import RateLimitError
from ..requests import raise_for_status
from ..requests.aiohttp import get_connector
from ..typing import AsyncResult, Messages
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin


class AIChatFree(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://aichatfree.info/"
working = False
url = "https://aichatfree.info"

working = True
supports_stream = True
supports_message_history = True
default_model = 'gemini-pro'

default_model = 'gemini-1.5-pro'

@classmethod
async def create_async_generator(
Expand All @@ -36,11 +38,6 @@ async def create_async_generator(
"Content-Type": "text/plain;charset=UTF-8",
"Referer": f"{cls.url}/",
"Origin": cls.url,
"Sec-Fetch-Dest": "empty",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Site": "same-origin",
"Connection": "keep-alive",
"TE": "trailers",
}
async with ClientSession(
connector=get_connector(connector, proxy), headers=headers
Expand Down
116 changes: 116 additions & 0 deletions g4f/Provider/AIUncensored.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,116 @@
from __future__ import annotations

from aiohttp import ClientSession
import time
import hmac
import hashlib
import json
import random

from ..typing import AsyncResult, Messages
from ..requests.raise_for_status import raise_for_status
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from .helper import format_prompt
from ..providers.response import FinishReason

class AIUncensored(AsyncGeneratorProvider, ProviderModelMixin):
    """Provider for aiuncensored.info.

    Each request is authenticated with a static API key plus an HMAC-SHA256
    signature over the timestamp and the JSON payload. Streaming responses
    arrive as Server-Sent-Events-style ``data: ...`` lines.
    """
    url = "https://www.aiuncensored.info/ai_uncensored"
    # NOTE(review): public key shipped with the client; the server pairs it
    # with the HMAC signature below.
    api_key = "62852b00cb9e44bca86f0ec7e7455dc6"

    working = True
    supports_stream = True
    supports_system_message = True
    supports_message_history = True

    default_model = "hermes3-70b"
    models = [default_model]

    model_aliases = {"hermes-3": "hermes3-70b"}

    @staticmethod
    def calculate_signature(timestamp: str, json_dict: dict) -> str:
        """Return the hex HMAC-SHA256 of ``timestamp`` + the serialized body.

        The secret is a placeholder string baked into the backend; the server
        recomputes the same construction to verify the request.
        """
        message = f"{timestamp}{json.dumps(json_dict)}"
        secret_key = b'your-super-secret-key-replace-in-production'
        signature = hmac.new(
            secret_key,
            message.encode('utf-8'),
            hashlib.sha256
        ).hexdigest()
        return signature

    @staticmethod
    def get_server_url() -> str:
        """Pick one of the known backend hosts at random (naive load balancing)."""
        servers = [
            "https://llm-server-nov24-ibak.onrender.com",
            "https://llm-server-nov24-qv2w.onrender.com",
            "https://llm-server-nov24.onrender.com"
        ]
        return random.choice(servers)

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        stream: bool = False,
        proxy: str = None,
        api_key: str = None,
        **kwargs
    ) -> AsyncResult:
        """Yield completion text chunks (and a trailing ``FinishReason``).

        Args:
            model: Model name or alias; resolved via ``cls.get_model``.
            messages: Conversation history, flattened with ``format_prompt``.
            stream: When True, parse the SSE stream line by line.
            proxy: Optional proxy URL passed through to aiohttp.
            api_key: Accepted for interface compatibility; the class-level
                key is what is actually sent.
        """
        model = cls.get_model(model)

        timestamp = str(int(time.time()))

        json_dict = {
            "messages": [{"role": "user", "content": format_prompt(messages)}],
            "model": model,
            "stream": stream
        }

        signature = cls.calculate_signature(timestamp, json_dict)

        headers = {
            'accept': '*/*',
            'accept-language': 'en-US,en;q=0.9',
            'content-type': 'application/json',
            'origin': 'https://www.aiuncensored.info',
            'referer': 'https://www.aiuncensored.info/',
            'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36',
            'x-api-key': cls.api_key,
            'x-timestamp': timestamp,
            'x-signature': signature
        }

        url = f"{cls.get_server_url()}/api/chat"

        async with ClientSession(headers=headers) as session:
            async with session.post(url, json=json_dict, proxy=proxy) as response:
                await raise_for_status(response)

                if stream:
                    full_response = ""
                    stopped = False
                    async for line in response.content:
                        if not line:
                            continue
                        try:
                            line_text = line.decode('utf-8')
                        except UnicodeDecodeError:
                            continue
                        # BUG FIX: original tested startswith('') (always
                        # true) and then blindly sliced 6 chars off every
                        # line; only real SSE "data: " frames carry payload.
                        if not line_text.startswith('data: '):
                            continue
                        data = line_text[6:]
                        # BUG FIX: lines keep their trailing newline, so the
                        # original exact comparison to '[DONE]' never matched.
                        if data.strip() == '[DONE]':
                            yield FinishReason("stop")
                            stopped = True
                            break
                        try:
                            json_data = json.loads(data)
                        except json.JSONDecodeError:
                            continue
                        if 'data' in json_data:
                            chunk = json_data['data']
                            yield chunk
                            full_response += chunk
                    # BUG FIX: only report "length" when the server never sent
                    # [DONE]; the original yielded a second FinishReason
                    # ("length") even after already yielding "stop".
                    if full_response and not stopped:
                        yield FinishReason("length")
                else:
                    response_json = await response.json()
                    if 'content' in response_json:
                        yield response_json['content']
                        yield FinishReason("length")
2 changes: 1 addition & 1 deletion g4f/Provider/Airforce.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
api_endpoint_completions = "https://api.airforce/chat/completions"
api_endpoint_imagine2 = "https://api.airforce/imagine2"

working = False
working = True
supports_stream = True
supports_system_message = True
supports_message_history = True
Expand Down
Loading
Loading