

Merge pull request #2686 from hlohaus/space
Add new G4F provider
hlohaus authored Feb 5, 2025
2 parents 292ebe7 + be8c3f7 commit 9ba2831
Showing 15 changed files with 260 additions and 202 deletions.
2 changes: 1 addition & 1 deletion g4f/Provider/Blackbox.py
@@ -15,7 +15,7 @@
from ..image import to_data_uri
from ..cookies import get_cookies_dir
from .helper import format_prompt, format_image_prompt
-from ..providers.response import JsonConversation, ImageResponse
+from ..providers.response import JsonConversation, ImageResponse, Reasoning

class Conversation(JsonConversation):
    validated_value: str = None
153 changes: 17 additions & 136 deletions g4f/Provider/CablyAI.py
@@ -1,27 +1,17 @@
from __future__ import annotations

-import json
-from typing import AsyncGenerator
-from aiohttp import ClientSession
+from ..errors import ModelNotSupportedError
+from .template import OpenaiTemplate

-from ..typing import AsyncResult, Messages
-from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
-from ..requests.raise_for_status import raise_for_status
-from ..providers.response import FinishReason, Reasoning


-class CablyAI(AsyncGeneratorProvider, ProviderModelMixin):
+class CablyAI(OpenaiTemplate):
    label = "CablyAI"
    url = "https://cablyai.com"
    login_url = url
-    api_endpoint = "https://cablyai.com/v1/chat/completions"
+    api_base = "https://cablyai.com/v1"
+    api_key = "sk-your-openai-api-key"

    working = True
-    needs_auth = False
-    supports_stream = True
-    supports_system_message = True
-    supports_message_history = True

    default_model = 'gpt-4o-mini'
    reasoning_models = ['deepseek-r1-uncensored']
@@ -36,132 +26,23 @@ class CablyAI(AsyncGeneratorProvider, ProviderModelMixin):
    ] + reasoning_models

    model_aliases = {
-        "searchgpt": "searchgpt (free)",
+        "gpt-4o-mini": "searchgpt",
        "llama-3.1-8b": "llama-3.1-8b-instruct",
        "deepseek-r1": "deepseek-r1-uncensored",
    }

    @classmethod
-    async def create_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        api_key: str = None,
-        stream: bool = True,
-        proxy: str = None,
-        **kwargs
-    ) -> AsyncResult:
-        model = cls.get_model(model)
-        api_key = api_key or cls.api_key

-        headers = {
-            "Accept": "*/*",
-            "Accept-Language": "en-US,en;q=0.9",
-            "Authorization": f"Bearer {api_key}",
-            "Content-Type": "application/json",
-            "Origin": cls.url,
-            "Referer": f"{cls.url}/chat",
-            "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36"
-        }
+    def get_models(cls, api_key: str = None, api_base: str = None) -> list[str]:
+        models = super().get_models(api_key, api_base)
+        return [f"{m} (free)" for m in models if m in cls.fallback_models] + models

-        async with ClientSession(headers=headers) as session:
-            data = {
-                "model": model,
-                "messages": messages,
-                "stream": stream
-            }

-            async with session.post(
-                cls.api_endpoint,
-                json=data,
-                proxy=proxy
-            ) as response:
-                await raise_for_status(response)

-                if stream:
-                    reasoning_buffer = []
-                    in_reasoning = False

-                    async for line in response.content:
-                        if not line:
-                            continue

-                        line = line.decode('utf-8').strip()
-                        print(line)

-                        if not line.startswith("data: "):
-                            continue

-                        if line == "data: [DONE]":
-                            if in_reasoning and reasoning_buffer:
-                                yield Reasoning(status="".join(reasoning_buffer).strip())
-                            yield FinishReason("stop")
-                            return

-                        try:
-                            json_data = json.loads(line[6:])
-                            delta = json_data["choices"][0].get("delta", {})
-                            content = delta.get("content", "")
-                            finish_reason = json_data["choices"][0].get("finish_reason")

-                            if finish_reason:
-                                if in_reasoning and reasoning_buffer:
-                                    yield Reasoning(status="".join(reasoning_buffer).strip())
-                                yield FinishReason(finish_reason)
-                                return

-                            if model in cls.reasoning_models:
-                                # Handle the opening <think> tag
-                                if "<think>" in content:
-                                    pre, _, post = content.partition("<think>")
-                                    if pre:
-                                        yield pre
-                                    in_reasoning = True
-                                    content = post

-                                # Handle the closing </think> tag
-                                if "</think>" in content:
-                                    in_reasoning = False
-                                    thought, _, post = content.partition("</think>")
-                                    if thought:
-                                        reasoning_buffer.append(thought)
-                                    if reasoning_buffer:
-                                        yield Reasoning(status="".join(reasoning_buffer).strip())
-                                        reasoning_buffer.clear()
-                                    if post:
-                                        yield post
-                                    continue

-                                # Buffer content while inside the tags
-                                if in_reasoning:
-                                    reasoning_buffer.append(content)
-                                else:
-                                    if content:
-                                        yield content
-                            else:
-                                if content:
-                                    yield content

-                        except json.JSONDecodeError:
-                            continue
-                        except Exception:
-                            yield FinishReason("error")
-                            return
-                else:
-                    try:
-                        response_data = await response.json()
-                        message = response_data["choices"][0]["message"]
-                        content = message["content"]

-                        if model in cls.reasoning_models and "<think>" in content:
-                            think_start = content.find("<think>") + 7
-                            think_end = content.find("</think>")
-                            if think_start > 6 and think_end > 0:
-                                reasoning = content[think_start:think_end].strip()
-                                yield Reasoning(status=reasoning)
-                                content = content[think_end + 8:].strip()

-                        yield content
-                        yield FinishReason("stop")
-                    except Exception:
-                        yield FinishReason("error")
+    @classmethod
+    def get_model(cls, model: str, **kwargs) -> str:
+        try:
+            model = super().get_model(model, **kwargs)
+            return model.split(" (free)")[0]
+        except ModelNotSupportedError:
+            if f"{model} (free)" in cls.models:
+                return model
+            raise
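Taken together, the two overrides implement a simple round-trip: get_models lists every fallback model a second time with a " (free)" display suffix, and get_model strips that suffix off again before the name reaches the API. A minimal, self-contained sketch of that behavior (TemplateStub and its model lists are illustrative stand-ins, not CablyAI's real catalog):

class ModelNotSupportedError(Exception):
    """Stand-in for g4f.errors.ModelNotSupportedError."""

class TemplateStub:
    # Hypothetical catalog; the real lists come from the provider's /v1/models endpoint.
    models = ["gpt-4o-mini", "searchgpt", "deepseek-r1-uncensored"]
    fallback_models = ["gpt-4o-mini", "searchgpt"]

    @classmethod
    def get_models(cls) -> list[str]:
        # Advertise free-tier models twice: suffixed for display, plain for the API.
        return [f"{m} (free)" for m in cls.models if m in cls.fallback_models] + cls.models

    @classmethod
    def get_model(cls, model: str) -> str:
        # Accept either form, but always return the plain name the API expects.
        if model in cls.get_models():
            return model.split(" (free)")[0]
        raise ModelNotSupportedError(model)

print(TemplateStub.get_models())                   # ['gpt-4o-mini (free)', 'searchgpt (free)', ...]
print(TemplateStub.get_model("searchgpt (free)"))  # 'searchgpt'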
3 changes: 2 additions & 1 deletion g4f/Provider/PollinationsAI.py
@@ -75,7 +75,8 @@ class PollinationsAI(AsyncGeneratorProvider, ProviderModelMixin):

        ### Image Models ###
        "sdxl-turbo": "turbo",
-        "flux-schnell": "flux",
+        "flux-schnell": "flux",
+        "flux-dev": "flux",
    }
    text_models = []

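Both flux aliases now resolve to the same backend model, so alias resolution stays a plain dict lookup with the raw name as fallback. A toy illustration (resolve is a hypothetical helper, not the provider's actual method):

model_aliases = {"sdxl-turbo": "turbo", "flux-schnell": "flux", "flux-dev": "flux"}

def resolve(name: str) -> str:
    # Fall back to the raw name so unknown models pass through untouched.
    return model_aliases.get(name, name)

assert resolve("flux-dev") == resolve("flux-schnell") == "flux"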
2 changes: 1 addition & 1 deletion g4f/Provider/hf/HuggingFaceInference.py
@@ -148,7 +148,7 @@ async def create_async_generator(
if response.headers["content-type"].startswith("image/"):
    base64_data = base64.b64encode(b"".join([chunk async for chunk in response.iter_content()]))
    url = f"data:{response.headers['content-type']};base64,{base64_data.decode()}"
-    yield ImageResponse(url, prompt)
+    yield ImageResponse(url, inputs)
else:
    yield (await response.json())[0]["generated_text"].strip()

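The image branch above inlines the generated image as a base64 data: URI built from the raw response bytes. A freestanding sketch of the same construction (this to_data_uri is a local stand-in, not the helper from g4f.image):

import base64

def to_data_uri(data: bytes, content_type: str = "image/png") -> str:
    # Embed raw image bytes directly in a URL, as the provider does with its response body.
    return f"data:{content_type};base64,{base64.b64encode(data).decode()}"

print(to_data_uri(b"fake-image-bytes"))  # data:image/png;base64,ZmFrZS1pbWFnZS1ieXRlcw==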
88 changes: 88 additions & 0 deletions g4f/Provider/hf_space/G4F.py
@@ -0,0 +1,88 @@
from __future__ import annotations

from aiohttp import ClientSession
import time
import asyncio

from ...typing import AsyncResult, Messages
from ...providers.response import ImageResponse, Reasoning
from ...requests.raise_for_status import raise_for_status
from ..helper import format_image_prompt, get_random_string
from .Janus_Pro_7B import Janus_Pro_7B, JsonConversation, get_zerogpu_token

class G4F(Janus_Pro_7B):
    label = "G4F framework"
    space = "roxky/Janus-Pro-7B"
    url = f"https://huggingface.co/spaces/roxky/g4f-space"
    api_url = "https://roxky-janus-pro-7b.hf.space"
    url_flux = "https://roxky-g4f-flux.hf.space/run/predict"
    referer = f"{api_url}?__theme=light"

    default_model = "flux"
    model_aliases = {"flux-schnell": default_model, "flux-dev": default_model}
    image_models = [Janus_Pro_7B.default_image_model, default_model, *model_aliases.keys()]
    models = [Janus_Pro_7B.default_model, *image_models]

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        proxy: str = None,
        prompt: str = None,
        width: int = 1024,
        height: int = 1024,
        seed: int = None,
        cookies: dict = None,
        **kwargs
    ) -> AsyncResult:
        if cls.default_model not in model:
            async for chunk in super().create_async_generator(model, messages, prompt=prompt, seed=seed, cookies=cookies, **kwargs):
                yield chunk
            return

        model = cls.get_model(model)
        width = max(32, width - (width % 8))
        height = max(32, height - (height % 8))
        if prompt is None:
            prompt = format_image_prompt(messages)
        if seed is None:
            seed = int(time.time())

        payload = {
            "data": [
                prompt,
                seed,
                width,
                height,
                True,
                1
            ],
            "event_data": None,
            "fn_index": 3,
            "session_hash": get_random_string(),
            "trigger_id": 10
        }
        async with ClientSession() as session:
            yield Reasoning(status="Acquiring GPU Token")
            zerogpu_uuid, zerogpu_token = await get_zerogpu_token(cls.space, session, JsonConversation(), cookies)
            headers = {
                "x-zerogpu-token": zerogpu_token,
                "x-zerogpu-uuid": zerogpu_uuid,
            }
            async def generate():
                async with session.post(cls.url_flux, json=payload, proxy=proxy, headers=headers) as response:
                    await raise_for_status(response)
                    response_data = await response.json()
                    image_url = response_data["data"][0]['url']
                    return ImageResponse(images=[image_url], alt=prompt)
            background_tasks = set()
            started = time.time()
            task = asyncio.create_task(generate())
            background_tasks.add(task)
            task.add_done_callback(background_tasks.discard)
            while background_tasks:
                yield Reasoning(status=f"Generating {time.time() - started:.2f}s")
                await asyncio.sleep(0.2)
            yield await task
            yield Reasoning(status=f"Finished {time.time() - started:.2f}s")
