Skip to content

Commit f319e23

Browse files
Merge branch 'main' of github.com:modelscope/ms-agent into release/1.0
2 parents 9bd942a + 8203b4b commit f319e23

File tree

6 files changed

+73
-23
lines changed

6 files changed

+73
-23
lines changed

README.md

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -48,6 +48,12 @@ MS-Agent is a lightweight framework designed to empower agents with autonomous e
4848

4949
## 🎉 News
5050

51+
* 🔥 July 16, 2025: Release MS-Agent v1.0.0rc0, which includes the following updates:
52+
- Support for Agent chat with MCP (Model Context Protocol)
53+
- Support for Deep Research (Agentic Insight), refer to: [Report_Demo](projects/deep_research/examples/task_20250617a/report.md), [Script_Demo](projects/deep_research/run.py)
54+
- Support for [MCP-Playground](https://modelscope.cn/mcp/playground)
55+
- Add callback mechanism for Agent chat
56+
5157

5258
<details><summary>Archive</summary>
5359

@@ -79,6 +85,13 @@ MS-Agent is a lightweight framework designed to empower agents with autonomous e
7985

8086
## Installation
8187

88+
### Install from PyPI
89+
90+
```shell
91+
pip install ms-agent
92+
```
93+
94+
8295
### Install from source
8396

8497
```shell

ms_agent/agent/base.py

Lines changed: 9 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
# Copyright (c) Alibaba, Inc. and its affiliates.
22
import importlib
33
import inspect
4+
import os
45
import sys
56
from abc import abstractmethod
67
from typing import Dict, List, Optional, Union
@@ -10,6 +11,9 @@
1011
from ms_agent.llm import Message
1112
from omegaconf import DictConfig
1213

14+
DEFAULT_YAML = os.path.join(
15+
os.path.dirname(os.path.abspath(__file__)), 'agent.yaml')
16+
1317

1418
class Agent:
1519
"""
@@ -37,10 +41,13 @@ def __init__(self,
3741
env: Optional[Dict[str, str]] = None,
3842
tag: Optional[str] = None,
3943
trust_remote_code: bool = False):
40-
if config_dir_or_id is None:
44+
if config_dir_or_id is not None:
45+
self.config: DictConfig = Config.from_task(config_dir_or_id, env)
46+
elif config is not None:
4147
self.config: DictConfig = config
4248
else:
43-
self.config: DictConfig = Config.from_task(config_dir_or_id, env)
49+
self.config: DictConfig = Config.from_task(DEFAULT_YAML)
50+
4451
if tag is None:
4552
self.tag = getattr(config, 'tag', None) or self.DEFAULT_TAG
4653
else:

ms_agent/agent/llm_agent.py

Lines changed: 38 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,7 @@
1515
from ms_agent.tools import ToolManager
1616
from ms_agent.utils import async_retry
1717
from ms_agent.utils.logger import logger
18-
from omegaconf import DictConfig
18+
from omegaconf import DictConfig, OmegaConf
1919

2020
from ..utils.utils import read_history, save_history
2121
from .base import Agent
@@ -24,9 +24,6 @@
2424
from .plan.utils import planer_mapping
2525
from .runtime import Runtime
2626

27-
DEFAULT_YAML = os.path.join(
28-
os.path.dirname(os.path.abspath(__file__)), 'agent.yaml')
29-
3027

3128
class LLMAgent(Agent):
3229
"""
@@ -51,7 +48,7 @@ class LLMAgent(Agent):
5148
DEFAULT_SYSTEM = 'You are a helpful assistant.'
5249

5350
def __init__(self,
54-
config_dir_or_id: Optional[str] = DEFAULT_YAML,
51+
config_dir_or_id: Optional[str] = None,
5552
config: Optional[DictConfig] = None,
5653
env: Optional[Dict[str, str]] = None,
5754
**kwargs):
@@ -311,8 +308,9 @@ def _log_output(content: str, tag: str):
311308
for _line in line.split('\\n'):
312309
logger.info(f'[{tag}] {_line}')
313310

314-
@async_retry(max_attempts=2)
315-
async def _step(self, messages: List[Message], tag: str) -> List[Message]:
311+
@async_retry(max_attempts=2, delay=1.0)
312+
async def _step(self, messages: List[Message],
313+
tag: str) -> List[Message]: # type: ignore
316314
"""
317315
Execute a single step in the agent's interaction loop.
318316
@@ -348,12 +346,18 @@ async def _step(self, messages: List[Message], tag: str) -> List[Message]:
348346
self.config.generation_config, 'stream', False):
349347
self._log_output('[assistant]:', tag=tag)
350348
_content = ''
349+
is_first = True
351350
for _response_message in self._handle_stream_message(
352351
messages, tools=tools):
352+
if is_first:
353+
messages.append(_response_message)
354+
is_first = False
353355
new_content = _response_message.content[len(_content):]
354356
sys.stdout.write(new_content)
355357
sys.stdout.flush()
356358
_content = _response_message.content
359+
messages[-1] = _response_message
360+
yield messages
357361
sys.stdout.write('\n')
358362
else:
359363
_response_message = self.llm.generate(messages, tools=tools)
@@ -384,7 +388,7 @@ async def _step(self, messages: List[Message], tag: str) -> List[Message]:
384388
f'[usage] prompt_tokens: {_response_message.prompt_tokens}, '
385389
f'completion_tokens: {_response_message.completion_tokens}',
386390
tag=tag)
387-
return messages
391+
yield messages
388392

389393
def _prepare_llm(self):
390394
"""Initialize the LLM model from the configuration."""
@@ -443,13 +447,8 @@ def _save_history(self, messages: List[Message], **kwargs):
443447
config=config,
444448
messages=messages)
445449

446-
async def run(self, messages: Union[List[Message], str],
447-
**kwargs) -> List[Message]:
448-
"""
449-
Main method to execute the agent.
450-
451-
Runs the agent loop, which includes generating responses,
452-
calling tools, and managing memory and planning.
450+
async def _run(self, messages: Union[List[Message], str], **kwargs):
451+
"""Run the agent, mainly contains a llm calling and tool calling loop.
453452
454453
Args:
455454
messages (Union[List[Message], str]): Input data for the agent. Can be a raw string prompt,
@@ -486,7 +485,9 @@ async def run(self, messages: Union[List[Message], str],
486485
self._log_output('[' + message.role + ']:', tag=self.tag)
487486
self._log_output(message.content, tag=self.tag)
488487
while not self.runtime.should_stop:
489-
messages = await self._step(messages, self.tag)
488+
yield_step = self._step(messages, self.tag)
489+
async for messages in yield_step:
490+
yield messages
490491
self.runtime.round += 1
491492
# +1 means the next round the assistant may give a conclusion
492493
if self.runtime.round >= self.max_chat_round + 1:
@@ -498,15 +499,35 @@ async def run(self, messages: Union[List[Message], str],
498499
f'Task {messages[1].content} failed, max round({self.max_chat_round}) exceeded.'
499500
))
500501
self.runtime.should_stop = True
502+
yield messages
501503
# save history
502504
self._save_history(messages, **kwargs)
503505

504506
await self._loop_callback('on_task_end', messages)
505507
await self._cleanup_tools()
506-
return messages
507508
except Exception as e:
508509
if hasattr(self.config, 'help'):
509510
logger.error(
510511
f'[{self.tag}] Runtime error, please follow the instructions:\n\n {self.config.help}'
511512
)
512513
raise e
514+
515+
async def run(self, messages: Union[List[Message], str],
516+
**kwargs) -> List[Message]:
517+
stream = kwargs.get('stream', False)
518+
if stream:
519+
OmegaConf.update(
520+
self.config, 'generation_config.stream', True, merge=True)
521+
522+
if stream:
523+
524+
async def stream_generator():
525+
async for chunk in self._run(messages=messages, **kwargs):
526+
yield chunk
527+
528+
return stream_generator()
529+
else:
530+
res = None
531+
async for chunk in self._run(messages=messages, **kwargs):
532+
res = chunk
533+
return res

ms_agent/llm/openai_llm.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -72,7 +72,7 @@ def format_tools(self,
7272
tools = None
7373
return tools
7474

75-
@retry(max_attempts=12, delay=1.0)
75+
@retry(max_attempts=3, delay=1.0)
7676
def generate(self,
7777
messages: List[Message],
7878
tools: Optional[List[Tool]] = None,

ms_agent/tools/mcp_client.py

Lines changed: 7 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -61,7 +61,13 @@ async def call_tool(self, server_name: str, tool_name: str,
6161
texts = []
6262
if response.isError:
6363
sep = '\n\n'
64-
return f'execute error: {sep.join(response.content)}'
64+
if all(isinstance(item, str) for item in response.content):
65+
return f'execute error: {sep.join(response.content)}'
66+
else:
67+
item_list = []
68+
for item in response.content:
69+
item_list.append(item.text)
70+
return f'execute error: {sep.join(item_list)}'
6571
for content in response.content:
6672
if content.type == 'text':
6773
texts.append(content.text)

ms_agent/utils/llm_utils.py

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,5 @@
11
# Copyright (c) Alibaba, Inc. and its affiliates.
2+
import asyncio
23
import functools
34
import time
45
from typing import Callable, Tuple, Type, TypeVar, Union
@@ -63,15 +64,17 @@ async def wrapper(*args, **kwargs) -> T:
6364

6465
for attempt in range(1, max_attempts + 1):
6566
try:
66-
return await func(*args, **kwargs)
67+
async for item in func(*args, **kwargs):
68+
yield item
69+
return
6770
except exceptions as e:
6871
last_exception = e
6972
if attempt < max_attempts:
7073
logger.warning(
7174
f'Attempt {attempt}/{max_attempts} fails: {func.__name__}. '
7275
f'Exception message: {e}. Will retry in {current_delay:.2f} seconds.'
7376
)
74-
time.sleep(current_delay)
77+
await asyncio.sleep(current_delay)
7578
current_delay *= backoff_factor
7679
else:
7780
logger.error(

0 commit comments

Comments (0)