Skip to content

feat: update qwen_model to support qwen3 #2301

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 2 commits into from
May 1, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 0 additions & 7 deletions camel/configs/qwen_config.py
Original file line number Diff line number Diff line change
Expand Up @@ -80,12 +80,5 @@ class QwenConfig(BaseConfig):
stop: Optional[Union[str, List]] = None
extra_body: Optional[Dict[str, Any]] = None

def __init__(self, include_usage: bool = True, **kwargs):
    r"""Initialize the config and conditionally set ``stream_options``.

    Args:
        include_usage (bool): Whether the final streamed chunk should
            carry token-usage statistics. (default: :obj:`True`)
        **kwargs: Forwarded to :obj:`BaseConfig`.
    """
    super().__init__(**kwargs)
    # Only set stream_options when stream is True
    # Otherwise, it will raise error when calling the API
    if self.stream:
        self.stream_options = {"include_usage": include_usage}
Comment on lines -83 to -88
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

why we remove this?



QWEN_API_PARAMS = {param for param in QwenConfig.model_fields.keys()}
177 changes: 175 additions & 2 deletions camel/models/qwen_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,11 +13,19 @@
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========

import os
from typing import Any, Dict, Optional, Union
import time
from typing import Any, Dict, List, Optional, Union

from openai import AsyncStream, Stream

from camel.configs import QWEN_API_PARAMS, QwenConfig
from camel.messages import OpenAIMessage
from camel.models.openai_compatible_model import OpenAICompatibleModel
from camel.types import ModelType
from camel.types import (
ChatCompletion,
ChatCompletionChunk,
ModelType,
)
from camel.utils import (
BaseTokenCounter,
api_keys_required,
Expand Down Expand Up @@ -79,6 +87,171 @@ def __init__(
timeout=timeout,
)

def _post_handle_response(
    self, response: Union[ChatCompletion, Stream[ChatCompletionChunk]]
) -> ChatCompletion:
    r"""Normalize a Qwen response into a single :obj:`ChatCompletion`.

    For a non-streaming response with ``enable_thinking`` set in
    ``extra_body``, the model's ``reasoning_content`` is prepended to the
    message content wrapped in ``<think>`` tags. A streaming response is
    fully consumed here and re-assembled into one :obj:`ChatCompletion`,
    with accumulated reasoning deltas likewise wrapped in ``<think>`` tags.

    Args:
        response (Union[ChatCompletion, Stream[ChatCompletionChunk]]):
            A complete chat completion or a stream of completion chunks.

    Returns:
        ChatCompletion: The (possibly re-assembled) completion.
    """
    if not isinstance(response, Stream):
        # Non-streaming: only rewrite the message when thinking mode is on.
        if not self.model_config_dict.get("extra_body", {}).get(
            "enable_thinking", False
        ):
            return response  # Return original if no thinking enabled

        message = response.choices[0].message
        # `reasoning_content` is a DashScope extension absent from the
        # OpenAI response types — read it defensively instead of risking
        # an AttributeError when the server omits it.
        reasoning_content = getattr(message, "reasoning_content", None)
        combined_content = (
            f"<think>\n{reasoning_content}\n</think>\n"
            if reasoning_content
            else ""
        ) + (message.content or "")

        # Construct a new ChatCompletion with the combined content.
        return ChatCompletion.construct(
            id=response.id,
            choices=[
                dict(
                    finish_reason=response.choices[0].finish_reason,
                    index=response.choices[0].index,
                    logprobs=response.choices[0].logprobs,
                    message=dict(
                        role=message.role,
                        content=combined_content,
                    ),
                )
            ],
            created=response.created,
            model=response.model,
            object="chat.completion",
            system_fingerprint=response.system_fingerprint,
            usage=response.usage,
        )

    # Streaming: consume the whole stream, accumulating text as we go.
    accumulated_reasoning = ""
    accumulated_content = ""
    final_chunk = None
    usage_data = None  # Filled in if the stream reports usage
    role = "assistant"  # Default role if no chunk carries one

    for chunk in response:
        final_chunk = chunk  # Last chunk supplies response metadata
        if chunk.choices:
            delta = chunk.choices[0].delta
            if delta.role:
                role = delta.role  # Update role if provided
            reasoning_delta = getattr(delta, "reasoning_content", None)
            if reasoning_delta:
                accumulated_reasoning += reasoning_delta
            if delta.content:
                accumulated_content += delta.content

        # Usage typically arrives only on the final chunk (when
        # stream_options.include_usage is enabled).
        if getattr(chunk, "usage", None):
            usage_data = chunk.usage

    combined_content = (
        f"<think>\n{accumulated_reasoning}\n</think>\n"
        if accumulated_reasoning
        else ""
    ) + accumulated_content

    if final_chunk is None:
        # The stream yielded no chunks: return a placeholder completion.
        return ChatCompletion.construct(
            id="empty-stream",
            choices=[
                dict(
                    finish_reason="error",
                    index=0,
                    message=dict(role="assistant", content=""),
                )
            ],
            created=int(time.time()),
            model=self.model_type,
            object="chat.completion",
            usage=usage_data,
        )

    # Construct the final ChatCompletion from the accumulated stream data.
    finish_reason = "stop"  # Default finish reason
    logprobs = None
    if final_chunk.choices:
        finish_reason = (
            final_chunk.choices[0].finish_reason or finish_reason
        )
        logprobs = getattr(final_chunk.choices[0], "logprobs", None)

    return ChatCompletion.construct(
        # Use data from the final chunk, falling back to safe defaults.
        id=getattr(final_chunk, "id", "streamed-completion"),
        choices=[
            dict(
                finish_reason=finish_reason,
                index=0,
                logprobs=logprobs,
                message=dict(
                    role=role,
                    content=combined_content,
                ),
            )
        ],
        created=getattr(final_chunk, "created", int(time.time())),
        model=getattr(final_chunk, "model", self.model_type),
        object="chat.completion",
        system_fingerprint=getattr(
            final_chunk, "system_fingerprint", None
        ),
        usage=usage_data,
    )

def _request_chat_completion(
    self,
    messages: List[OpenAIMessage],
    tools: Optional[List[Dict[str, Any]]] = None,
) -> ChatCompletion:
    r"""Send a chat-completion request and post-process the result.

    Even when ``stream=True`` is configured, the stream is consumed by
    ``_post_handle_response``, so callers always receive a single
    ``ChatCompletion`` (hence the narrowed return annotation).

    Args:
        messages (List[OpenAIMessage]): The conversation so far.
        tools (Optional[List[Dict[str, Any]]]): Optional tool schemas to
            forward to the API. (default: :obj:`None`)

    Returns:
        ChatCompletion: The post-processed completion.
    """
    request_config = self.model_config_dict.copy()

    if tools:
        request_config["tools"] = tools

    return self._post_handle_response(
        self._client.chat.completions.create(
            messages=messages,
            model=self.model_type,
            **request_config,
        )
    )

async def _arequest_chat_completion(
    self,
    messages: List[OpenAIMessage],
    tools: Optional[List[Dict[str, Any]]] = None,
) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
    r"""Async variant of ``_request_chat_completion``.

    Args:
        messages (List[OpenAIMessage]): The conversation so far.
        tools (Optional[List[Dict[str, Any]]]): Optional tool schemas to
            forward to the API. (default: :obj:`None`)

    Returns:
        Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]: The
            post-processed completion.
    """
    request_config = self.model_config_dict.copy()

    if tools:
        request_config["tools"] = tools

    response = await self._async_client.chat.completions.create(
        messages=messages,
        model=self.model_type,
        **request_config,
    )
    # NOTE(review): with stream=True the async client returns an
    # AsyncStream, which fails the `isinstance(response, Stream)` check in
    # _post_handle_response and falls into the non-streaming branch —
    # confirm whether async streaming needs its own accumulation path.
    return self._post_handle_response(response)

def check_model_config(self):
r"""Check whether the model configuration contains any
unexpected arguments to Qwen API.
Expand Down
12 changes: 12 additions & 0 deletions camel/types/enums.py
Original file line number Diff line number Diff line change
Expand Up @@ -212,6 +212,10 @@ class ModelType(UnifiedModelType, Enum):
QWEN_MAX = "qwen-max"
QWEN_PLUS = "qwen-plus"
QWEN_TURBO = "qwen-turbo"
QWEN_PLUS_LATEST = "qwen-plus-latest"
QWEN_PLUS_2025_04_28 = "qwen-plus-2025-04-28"
QWEN_TURBO_LATEST = "qwen-turbo-latest"
QWEN_TURBO_2025_04_28 = "qwen-turbo-2025-04-28"
QWEN_LONG = "qwen-long"
QWEN_VL_MAX = "qwen-vl-max"
QWEN_VL_PLUS = "qwen-vl-plus"
Expand Down Expand Up @@ -685,6 +689,10 @@ def is_qwen(self) -> bool:
ModelType.QWEN_QWQ_32B,
ModelType.QWEN_QVQ_72B,
ModelType.QWEN_QWQ_PLUS,
ModelType.QWEN_PLUS_LATEST,
ModelType.QWEN_PLUS_2025_04_28,
ModelType.QWEN_TURBO_LATEST,
ModelType.QWEN_TURBO_2025_04_28,
}

@property
Expand Down Expand Up @@ -1094,6 +1102,10 @@ def token_limit(self) -> int:
ModelType.QWEN_PLUS,
ModelType.QWEN_TURBO,
ModelType.QWEN_CODER_TURBO,
ModelType.QWEN_PLUS_LATEST,
ModelType.QWEN_PLUS_2025_04_28,
ModelType.QWEN_TURBO_LATEST,
ModelType.QWEN_TURBO_2025_04_28,
ModelType.TOGETHER_LLAMA_3_1_8B,
ModelType.TOGETHER_LLAMA_3_1_70B,
ModelType.TOGETHER_LLAMA_3_1_405B,
Expand Down
150 changes: 150 additions & 0 deletions examples/models/qwen_model_example.py
Original file line number Diff line number Diff line change
Expand Up @@ -129,3 +129,153 @@ def main():
```
===============================================================================
'''

# Example: Qwen3 (qwen-plus-latest) with thinking mode disabled.
model = ModelFactory.create(
    model_platform=ModelPlatformType.QWEN,
    model_type=ModelType.QWEN_PLUS_LATEST,
    model_config_dict=QwenConfig(
        temperature=0.2,
        stream=False,
        # You can set enable_thinking and stream to True to use thinking
        extra_body={"enable_thinking": False, "thinking_budget": 10000},
    ).as_dict(),
)

# Define system message
sys_msg = "You are a helpful assistant."

# Set agent
camel_agent = ChatAgent(system_message=sys_msg, model=model)

user_msg = """give me python code to develop a trading bot"""

# Get response information
response = camel_agent.step(user_msg)
print(response.msgs[0].content)

# ruff: noqa: E501
'''
===============================================================================
Creating a **trading bot in Python** involves several components:

1. **Choosing a Broker/Exchange API**
2. **Fetching Market Data**
3. **Implementing a Trading Strategy**
4. **Placing Trades Automatically**
5. **Risk Management**

Below is a simple example of a **basic trading bot** using the **Binance** exchange and the `python-binance` library.

---

### ✅ Prerequisites

Install required packages:
```bash
pip install python-binance pandas
```

You'll also need:
- A Binance account
- API Key and Secret (from your Binance account)

---

### 📦 Basic Trading Bot Example (Moving Average Crossover Strategy)

```python
import time
import pandas as pd
from binance.client import Client
from binance.enums import *

# Replace with your own API keys
API_KEY = 'your_api_key'
API_SECRET = 'your_api_secret'

client = Client(API_KEY, API_SECRET)

# --- Helper Functions ---
def get_moving_average(symbol, interval, period=20):
"""Get the moving average for a given symbol"""
klines = client.get_klines(symbol=symbol, interval=interval)
closes = [float(entry[4]) for entry in klines]
return sum(closes[-period:]) / period

def place_order(symbol, quantity, side):
try:
order = client.create_order(
symbol=symbol,
side=side,
type=ORDER_TYPE_MARKET,
quantity=quantity
)
print(f"Order placed: {order}")
return order
except Exception as e:
print(f"Error placing order: {e}")
return None

# --- Trading Logic ---
def trading_bot():
symbol = "BTCUSDT"
qty = 0.001 # Adjust based on your balance and risk
fast_ma_period = 10
slow_ma_period = 20

print("Starting trading bot...")

while True:
try:
fast_ma = get_moving_average(symbol, KLINE_INTERVAL_1MINUTE, fast_ma_period)
slow_ma = get_moving_average(symbol, KLINE_INTERVAL_1MINUTE, slow_ma_period)

print(f"Fast MA: {fast_ma}, Slow MA: {slow_ma}")

# Simple crossover strategy
if fast_ma > slow_ma:
print("Going Long")
place_order(symbol, qty, SIDE_BUY)
elif fast_ma < slow_ma:
print("Going Short")
place_order(symbol, qty, SIDE_SELL)

# Wait before next check
time.sleep(60) # Check every minute

except KeyboardInterrupt:
print("Stopping bot...")
break
except Exception as e:
print(f"Error: {e}")
time.sleep(60)

# Run the bot
if __name__ == "__main__":
trading_bot()
```

---

### 🔐 Important Notes

- **Paper Trading First**: Test your bot with a demo or paper trading account before using real money.
- **Risk Management**: Add stop-loss, take-profit, and position sizing logic.
- **Rate Limits**: Be aware of exchange rate limits to avoid being banned.
- **Logging & Monitoring**: Add logging and alerts (email, Telegram, etc.)
- **Backtesting**: Always backtest your strategy before deploying it live.

---

### 🧠 Want More?

Let me know if you want:
- A **backtester** for strategies
- Integration with **Telegram alerts**
- Use of **TA-Lib** for technical indicators
- **Multi-exchange support**
- **GUI interface**

Would you like help customizing this bot further?
===============================================================================
'''
Loading