Skip to content

Gpt4freenoice #2938

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 2 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 11 additions & 0 deletions .vscode/settings.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
{
"python.testing.unittestArgs": [
"-v",
"-s",
".",
"-p",
"*test.py"
],
"python.testing.pytestEnabled": false,
"python.testing.unittestEnabled": true
}
1 change: 1 addition & 0 deletions g4f/Provider/needs_auth/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,3 +28,4 @@
from .ThebApi import ThebApi
from .WhiteRabbitNeo import WhiteRabbitNeo
from .xAI import xAI
from .zai import ZAI
157 changes: 157 additions & 0 deletions g4f/Provider/needs_auth/zai.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,157 @@
from __future__ import annotations
from aiohttp import ClientSession
from g4f.typing import AsyncResult, Messages
from g4f.requests.raise_for_status import raise_for_status
from g4f.Provider.base_provider import AsyncGeneratorProvider, ProviderModelMixin
from g4f.Provider.helper import format_prompt
from g4f.providers.response import FinishReason, JsonConversation

class Conversation(JsonConversation):
    # Placeholder: ZAI does not yet track any server-side conversation
    # state, so no fields beyond the JsonConversation base are needed.
    pass

class ZAI(AsyncGeneratorProvider, ProviderModelMixin):
    """Provider for https://chat.z.ai (GLM-4 / Z1 model family).

    Requires an authenticated bearer token. The upstream endpoint speaks an
    OpenAI-style streaming chat-completions protocol; raw response lines are
    yielded as decoded text for the caller to parse.
    """
    url = "https://chat.z.ai"
    api_endpoint = "https://chat.z.ai/api/chat/completions"
    working = True
    needs_auth = True
    supports_stream = True
    supports_message_history = True
    supports_system_message = True

    # Models currently exposed by the chat.z.ai web UI.
    default_model = "main_chat"
    models = ["main_chat", "zero", "deep-research"]
    # Internal model id -> human-readable display name.
    model_display_names = {
        "main_chat": "GLM-4-32B",
        "zero": "Z1-32B",
        "deep-research": "Z1-Rumination"
    }
    # Short descriptions shown alongside each model in the UI.
    model_descriptions = {
        "main_chat": "Great for everyday tasks",
        "zero": "Proficient in reasoning",
        "deep-research": "Deep Research, expert in synthesizing insights from the web"
    }
    # Display-name aliases accepted in place of internal ids.
    model_aliases = {
        "GLM-4-32B": "main_chat",
        "Z1-32B": "zero",
        "Z1-Rumination": "deep-research"
    }

    @classmethod
    def get_model(cls, model: str) -> str:
        """Resolve *model* (internal id or display alias) to an internal id.

        Unknown names fall back to ``default_model``.
        """
        if model in cls.models:
            return model
        return cls.model_aliases.get(model, cls.default_model)

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: Messages,
        stream: bool = True,
        proxy: str = None,
        conversation: Conversation = None,
        return_conversation: bool = False,
        token: str = None,
        user_id: str = "7080a6c5-5fcc-4ea4-a85f-3b3fac905cf2",
        chat_id: str = "local",
        request_id: str = "633dcec1-adb4-4a2e-bfc4-c4599cc564be",
        **kwargs
    ) -> AsyncResult:
        """Stream a chat completion from chat.z.ai.

        Args:
            model: Internal model id or display alias (see ``model_aliases``).
            messages: Full conversation history (OpenAI message format).
            stream: Whether to request a streamed response from the API.
            proxy: Optional HTTP proxy URL for the request.
            conversation: Accepted for interface compatibility; not used yet.
            return_conversation: Accepted for interface compatibility; not used yet.
            token: Bearer token used for the Authorization header and cookie.
            user_id / chat_id / request_id: Identifiers forwarded to the API.

        Yields:
            Decoded UTF-8 lines of the (SSE-style) response body.

        Raises:
            Whatever ``raise_for_status`` raises on a non-success response.
        """
        from datetime import datetime  # local: only needed for request metadata

        model = cls.get_model(model)
        display_name = cls.model_display_names.get(model, model)
        description = cls.model_descriptions.get(model, "")
        headers = {
            "accept": "*/*",
            "accept-language": "en-US,en;q=0.9,mr;q=0.8",
            **({"authorization": f"Bearer {token}"} if token else {}),
            "content-type": "application/json",
            "dnt": "1",
            "origin": "https://chat.z.ai",
            "referer": "https://chat.z.ai/",
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36",
        }
        # Only send a token cookie when one is available: aiohttp rejects
        # cookies whose value is None.
        cookies = kwargs.get("cookies") or ({"token": token} if token else {})
        now = datetime.now()
        payload = {
            # Honour the caller's streaming preference instead of hard-coding True.
            "stream": stream,
            "model": model,
            # Send the complete history: the class advertises
            # supports_message_history, so dropping everything but the last
            # message would be a bug.
            "messages": messages,
            "params": {},
            "tool_servers": [],
            "features": {
                "image_generation": False,
                "code_interpreter": False,
                "web_search": False,
                "auto_web_search": False,
                "preview_mode": False
            },
            "variables": {
                "{{USER_NAME}}": "Guest-1745003417617",
                "{{USER_LOCATION}}": "Unknown",
                # Use the actual current time rather than frozen placeholder
                # values from the original capture.
                "{{CURRENT_DATETIME}}": now.strftime("%Y-%m-%d %H:%M:%S"),
                "{{CURRENT_DATE}}": now.strftime("%Y-%m-%d"),
                "{{CURRENT_TIME}}": now.strftime("%H:%M:%S"),
                "{{CURRENT_WEEKDAY}}": now.strftime("%A"),
                # NOTE(review): timezone was captured as a fixed literal; the
                # API presumably tolerates any value — confirm before changing.
                "{{CURRENT_TIMEZONE}}": "Asia/Calcutta",
                "{{USER_LANGUAGE}}": "en-US"
            },
            "model_item": {
                "id": model,
                "name": display_name,
                "owned_by": "openai",
                "openai": {
                    "id": model,
                    "name": model,
                    "owned_by": "openai",
                    "openai": {"id": model},
                    "urlIdx": 0
                },
                "urlIdx": 0,
                "info": {
                    "id": model,
                    "user_id": user_id,
                    "base_model_id": None,
                    "name": display_name,
                    "params": {"max_tokens": 4096, "top_p": 0.95, "temperature": 0.6, "top_k": 40},
                    "meta": {
                        "profile_image_url": "/static/favicon.png",
                        "description": description,
                        "capabilities": {
                            "vision": False,
                            "citations": True,
                            "preview_mode": False,
                            "web_search": True,
                            "language_detection": True,
                            "restore_n_source": True
                        },
                        "suggestion_prompts": None,
                        "tags": []
                    },
                    "access_control": None,
                    "is_active": True,
                    "updated_at": 1744522361,
                    "created_at": 1744522361
                },
                "actions": [],
                "tags": []
            },
            "chat_id": chat_id,
            "id": request_id
        }

        async with ClientSession() as session:
            async with session.post(
                cls.api_endpoint,
                headers=headers,
                cookies=cookies,
                json=payload,
                proxy=proxy
            ) as resp:
                await raise_for_status(resp)
                async for line in resp.content:
                    yield line.decode("utf-8")
8 changes: 6 additions & 2 deletions g4f/api/run.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,8 @@
import g4f.api
from g4f.api import run_api

if __name__ == "__main__":
g4f.api.run_api(debug=True)
run_api(
host='0.0.0.0', # Listen on all network interfaces
port=1337, # Default port
debug=True # Enable debug mode
)
26 changes: 26 additions & 0 deletions g4f/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,8 @@

from dataclasses import dataclass

from g4f.Provider.needs_auth.zai import ZAI

from .Provider import IterListProvider, ProviderType
from .Provider import (
### No Auth Required ###
Expand Down Expand Up @@ -51,6 +53,7 @@
OpenaiAccount,
OpenaiChat,
Reka,
zai,
)

@dataclass(unsafe_hash=True)
Expand Down Expand Up @@ -105,6 +108,7 @@ class VisionModel(Model):
OpenaiChat,
Jmuz,
Cloudflare,
zai,
])
)

Expand Down Expand Up @@ -707,6 +711,23 @@ class VisionModel(Model):
best_provider = IterListProvider([PollinationsAI, TypeGPT])
)

### ZAI MODELS ###
# GLM-4-32B: general-purpose everyday-task model.
glm_4_32b = Model(
    name = 'GLM-4-32B',
    base_provider = 'zai',
    best_provider = ZAI
)

# Z1-32B: reasoning-focused model.
z1_32b = Model(
    name = 'Z1-32B',
    base_provider = 'zai',
    best_provider = ZAI
)

# Z1-Rumination: deep-research model.
z1_rumination = Model(
    name = 'Z1-Rumination',
    base_provider = 'zai',
    best_provider = ZAI
)

#############
### Image ###
Expand Down Expand Up @@ -957,6 +978,11 @@ class ModelUtils:

### Uncensored AI ###
evil.name: evil,

### ZAI MODELS ###
glm_4_32b.name: glm_4_32b,
z1_32b.name: z1_32b,
z1_rumination.name: z1_rumination,

#############
### Image ###
Expand Down
6 changes: 6 additions & 0 deletions package-lock.json

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

44 changes: 44 additions & 0 deletions test_zai_provider.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,44 @@
import asyncio
import itertools
import os
import sys

from g4f.models import glm_4_32b, z1_32b, z1_rumination

async def spinner(msg, event):
    """Render a rotating spinner next to *msg* until *event* is set."""
    frames = "|/-\\"
    tick = 0
    while not event.is_set():
        sys.stdout.write(f"\r{msg} {frames[tick % len(frames)]}")
        sys.stdout.flush()
        tick += 1
        await asyncio.sleep(0.1)
    # Wipe the spinner line once asked to stop.
    sys.stdout.write('\r' + ' ' * (len(msg) + 2) + '\r')

async def test_model(model, token, cookies):
    """Smoke-test *model*: confirm its provider yields at least one chunk."""
    status_msg = f"Checking {model.name}..."
    stop = asyncio.Event()
    indicator = asyncio.create_task(spinner(status_msg, stop))
    try:
        prompt = [{"role": "user", "content": f"Hello, {model.name}!"}]
        response = model.best_provider.create_async_generator(
            model=model.name, messages=prompt, token=token, cookies=cookies
        )
        async for _ in response:
            break  # a single chunk proves the endpoint responds
        stop.set()
        await indicator
        print(f"\033[92m✔ {model.name} checked\033[0m")  # Green tick
    except Exception as exc:
        stop.set()
        await indicator
        print(f"\033[91m✖ {model.name} failed: {exc}\033[0m")  # Red cross

async def main():
    """Run the smoke test against every ZAI model.

    Credentials are read from the environment instead of being hard-coded in
    source (committing a live bearer token is a security defect):
      * ZAI_TOKEN          — required bearer token.
      * ZAI_SERVERID       — optional sticky-session cookie.
      * ZAI_SERVERCORSID   — optional sticky-session cookie.
    """
    token = os.environ.get("ZAI_TOKEN", "")
    if not token:
        print("Set the ZAI_TOKEN environment variable to run this test.")
        return
    cookies = {"token": token}
    server_id = os.environ.get("ZAI_SERVERID")
    if server_id:
        cookies["SERVERID"] = server_id
    cors_id = os.environ.get("ZAI_SERVERCORSID")
    if cors_id:
        cookies["SERVERCORSID"] = cors_id
    for model in (glm_4_32b, z1_32b, z1_rumination):
        await test_model(model, token, cookies)

if __name__ == "__main__":
    # Entry point: run the async smoke-test suite.
    asyncio.run(main())