diff --git a/modelscope_agent/agents/role_play.py b/modelscope_agent/agents/role_play.py
index 31f302a9..f9a36998 100644
--- a/modelscope_agent/agents/role_play.py
+++ b/modelscope_agent/agents/role_play.py
@@ -6,6 +6,7 @@
 from modelscope_agent.agent_env_util import AgentEnvMixin
 from modelscope_agent.llm.base import BaseChatModel
 from modelscope_agent.tools.base import BaseTool
+from modelscope_agent.utils.logger import agent_logger as logger
 from modelscope_agent.utils.tokenization_utils import count_tokens
 from modelscope_agent.utils.utils import check_and_limit_input_length
 
@@ -260,11 +261,13 @@ def _run(self,
         planning_prompt = self.llm.build_raw_prompt(messages)
 
         max_turn = 10
+        call_llm_count = 0
         while True and max_turn > 0:
             # print('=====one input planning_prompt======')
             # print(planning_prompt)
             # print('=============Answer=================')
             max_turn -= 1
+            call_llm_count += 1
             if self.llm.support_function_calling():
                 output = self.llm.chat_with_functions(
                     messages=messages,
@@ -282,6 +285,7 @@
                     **kwargs)
 
             llm_result = ''
+            logger.info(f'call llm {call_llm_count} times output: {output}')
             for s in output:
                 if isinstance(s, dict):
                     llm_result = s
diff --git a/modelscope_agent/llm/__init__.py b/modelscope_agent/llm/__init__.py
index 2a1f6169..d63714aa 100644
--- a/modelscope_agent/llm/__init__.py
+++ b/modelscope_agent/llm/__init__.py
@@ -5,7 +5,7 @@
 from .modelscope import ModelScopeChatGLM, ModelScopeLLM
 from .ollama import OllamaLLM
 from .openai import OpenAi
-from .zhipu import GLM4, ZhipuLLM
+from .zhipu import ZhipuLLM
 
 
 def get_chat_model(model: str, model_server: str, **kwargs) -> BaseChatModel:
@@ -26,5 +26,5 @@
 
 __all__ = [
     'LLM_REGISTRY', 'BaseChatModel', 'OpenAi', 'DashScopeLLM', 'QwenChatAtDS',
-    'ModelScopeLLM', 'ModelScopeChatGLM', 'ZhipuLLM', 'GLM4', 'OllamaLLM'
+    'ModelScopeLLM', 'ModelScopeChatGLM', 'ZhipuLLM', 'OllamaLLM'
 ]
diff --git a/modelscope_agent/llm/base.py b/modelscope_agent/llm/base.py
index 06df1383..05c14da9 100644
--- a/modelscope_agent/llm/base.py
+++ b/modelscope_agent/llm/base.py
@@ -211,6 +211,9 @@ def support_function_calling(self) -> bool:
             if response.get('function_call', None):
                 # logger.info('Support of function calling is detected.')
                 self._support_fn_call = True
+            if response.get('tool_calls', None):
+                # logger.info('Support of function calling is detected.')
+                self._support_fn_call = True
         except FnCallNotImplError:
             pass
         except AttributeError:
diff --git a/modelscope_agent/llm/zhipu.py b/modelscope_agent/llm/zhipu.py
index 643ab30b..aed5a608 100644
--- a/modelscope_agent/llm/zhipu.py
+++ b/modelscope_agent/llm/zhipu.py
@@ -1,6 +1,7 @@
 import os
 from typing import Dict, Iterator, List, Optional
 
+from modelscope_agent.utils.logger import agent_logger as logger
 from zhipuai import ZhipuAI
 
 from .base import BaseChatModel, register_llm
@@ -31,9 +32,12 @@ class ZhipuLLM(BaseChatModel):
     """
     Universal LLM model interface on zhipu
     """
-    def __init__(self, model: str, model_server: str, **kwargs):
-        super().__init__(model, model_server)
-        self._support_fn_call = True
+    def __init__(self,
+                 model: str,
+                 model_server: str,
+                 support_fn_call: bool = True,
+                 **kwargs):
+        super().__init__(model, model_server, support_fn_call=support_fn_call)
         api_key = kwargs.get('api_key', os.getenv('ZHIPU_API_KEY', '')).strip()
         assert api_key, 'ZHIPU_API_KEY is required.'
         self.client = ZhipuAI(api_key=api_key)
@@ -45,7 +49,8 @@ def _chat_stream(self,
                      **kwargs) -> Iterator[str]:
         if not functions or not len(functions):
             tool_choice = 'none'
-        print(f'====> stream messages: {messages}')
+        logger.info(
+            f'====> stream messages: {messages}, functions: {functions}')
         response = self.client.chat.completions.create(
             model=self.model,
             messages=messages,
@@ -62,18 +67,15 @@ def _chat_no_stream(self,
                         **kwargs) -> str:
         if not functions or not len(functions):
             tool_choice = 'none'
-        print(f'====> no stream messages: {messages}')
+        logger.info(
+            f'====> no stream messages: {messages}, functions: {functions}')
         response = self.client.chat.completions.create(
             model=self.model,
             messages=messages,
             tools=functions,
             tool_choice=tool_choice,
         )
-        return response.choices[0].message
+        message = response.choices[0].message
+        output = message.content if not functions else [message.model_dump()]
 
-
-@register_llm('glm-4')
-class GLM4(ZhipuLLM):
-    """
-    glm-4 from zhipu
-    """
+        return output
diff --git a/modelscope_agent/utils/retry.py b/modelscope_agent/utils/retry.py
index 314b7148..ea098d61 100644
--- a/modelscope_agent/utils/retry.py
+++ b/modelscope_agent/utils/retry.py
@@ -1,5 +1,6 @@
 import time
 from functools import wraps
+from traceback import format_exc
 
 from modelscope_agent.utils.logger import agent_logger as logger
 
@@ -26,9 +27,9 @@ def wrapper(*args, **kwargs):
                 return func(*args, **kwargs)
             except AssertionError as e:
                 raise AssertionError(e)
-            except Exception as e:
+            except Exception:
                 logger.warning(
-                    f'Attempt to run {func.__name__} {attempts + 1} failed: {e}'
+                    f'Attempt to run {func.__name__} {attempts + 1} failed: {format_exc()}'
                 )
                 attempts += 1
                 time.sleep(delay_seconds)
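---

Not part of the patch: a minimal usage sketch of the retry change above. The
decorator now logs the full traceback via traceback.format_exc() instead of
only str(e), so nested failures keep their stack context in the agent log.
Only delay_seconds is visible in the hunk; the decorator name retry and the
max_retries parameter below are assumptions about the rest of retry.py. Note
also the new return contract of ZhipuLLM._chat_no_stream introduced above:
plain text when no functions are passed, otherwise a one-element list holding
message.model_dump().

    # Usage sketch only; `max_retries` is an assumed parameter name,
    # `delay_seconds` appears in the hunk above.
    from modelscope_agent.utils.retry import retry

    @retry(max_retries=2, delay_seconds=1)
    def flaky_call() -> None:
        # Anything other than AssertionError is caught, logged with a full
        # traceback (format_exc()), and retried after the delay.
        raise RuntimeError('transient failure')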