38 changes: 38 additions & 0 deletions docs/docs/agent.md
@@ -115,3 +115,41 @@ planner_agent = Agent(
runner = Runner(agent=planner_agent, short_term_memory=ShortTermMemory())
response = await runner.run(messages=prompt, session_id=session_id)
```

## Building an Agent from a config file

You can build an Agent runtime instance from an Agent config file, for example:

```yaml
root_agent:
  type: Agent # Agent | SequentialAgent | LoopAgent | ParallelAgent
  name: test
  description: A test agent
  instruction: A test instruction
  long_term_memory:
    backend: local
  knowledgebase:
    backend: opensearch
  sub_agents:
    - ${sub_agent_1}

sub_agent_1:
  type: Agent
  name: agent1
```

Here, each agent's `type` field specifies the Agent class to instantiate; supported values are `Agent`, `SequentialAgent`, `ParallelAgent`, `LoopAgent`, and `RemoteVeAgent`. A reference such as `${sub_agent_1}` under `sub_agents` is resolved (via OmegaConf interpolation) to the top-level entry of the same name.

You can instantiate this Agent with the following code:

```python
from veadk.agent_builder import AgentBuilder

agent = AgentBuilder().build(path="./agent.yaml")
```

The `build` function accepts three parameters (a short usage sketch follows the list):

- `path`: path to the config file
- `root_agent_identifier`: the key of the root Agent in the config file, defaults to `root_agent`
- `tools`: a list of tools mounted on the root agent (tool lists for sub-agents are not yet supported)
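
As a minimal sketch of the `tools` parameter (the `get_weather` function below is hypothetical, and it assumes a plain Python function is an acceptable `ToolUnion` tool):

```python
from veadk.agent_builder import AgentBuilder

# Hypothetical tool used only for illustration; any object accepted by the
# ADK `ToolUnion` type (for example, a plain Python function) can be passed.
def get_weather(city: str) -> str:
    """Return a canned weather report for the given city."""
    return f"The weather in {city} is sunny."

# Build the agent defined under the `root_agent` key and mount the tool on it.
agent = AgentBuilder().build(
    path="./agent.yaml",
    root_agent_identifier="root_agent",
    tools=[get_weather],
)
```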
11 changes: 6 additions & 5 deletions pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "veadk-python"
version = "0.2.5"
version = "0.2.6"
description = "Volcengine agent development kit, integrations with Volcengine cloud services."
readme = "README.md"
requires-python = ">=3.10"
@@ -23,10 +23,11 @@ dependencies = [
"wrapt>=1.17.2", # For patching built-in functions
"openai<1.100", # For fix https://github.com/BerriAI/litellm/issues/13710
"volcengine-python-sdk==4.0.3", # For Volcengine API
"agent-pilot-sdk>=0.0.9", # Prompt optimization by Volcengine AgentPilot/PromptPilot toolkits
"fastmcp>=2.11.3", # For running MCP
"cookiecutter>=2.6.0", # For cloud deploy
"opensearch-py==2.8.0" # For OpenSearch database
"agent-pilot-sdk>=0.0.9", # Prompt optimization by Volcengine AgentPilot/PromptPilot toolkits
"fastmcp>=2.11.3", # For running MCP
"cookiecutter>=2.6.0", # For cloud deploy # For OpenSearch database
"opensearch-py==2.8.0",
"omegaconf>=2.3.0", # For agent builder
]

[project.scripts]
85 changes: 85 additions & 0 deletions veadk/agent_builder.py
@@ -0,0 +1,85 @@
# Copyright (c) 2025 Beijing Volcano Engine Technology Co., Ltd. and/or its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from google.adk.agents import BaseAgent
from google.adk.agents.llm_agent import ToolUnion
from omegaconf import OmegaConf

from veadk.a2a.remote_ve_agent import RemoteVeAgent
from veadk.agent import Agent
from veadk.agents.loop_agent import LoopAgent
from veadk.agents.parallel_agent import ParallelAgent
from veadk.agents.sequential_agent import SequentialAgent
from veadk.utils.logger import get_logger

logger = get_logger(__name__)

AGENT_TYPES = {
    "Agent": Agent,
    "SequentialAgent": SequentialAgent,
    "ParallelAgent": ParallelAgent,
    "LoopAgent": LoopAgent,
    "RemoteVeAgent": RemoteVeAgent,
}


class AgentBuilder:
    def __init__(self) -> None:
        pass

    def _build(self, agent_config: dict) -> BaseAgent:
        logger.info(f"Building agent with config: {agent_config}")

        # Recursively build sub-agents first, then drop the key so it is not
        # passed to the constructor twice.
        sub_agents = []
        if agent_config.get("sub_agents", None):
            for sub_agent_config in agent_config["sub_agents"]:
                agent = self._build(sub_agent_config)
                sub_agents.append(agent)
            agent_config.pop("sub_agents")

        agent_cls = AGENT_TYPES[agent_config["type"]]
        agent = agent_cls(**agent_config, sub_agents=sub_agents)

        logger.debug("Build agent done.")

        return agent

    def _read_config(self, path: str) -> dict:
        """Read a config file (from `path`) into an in-memory dict."""
        assert path.endswith(".yaml"), "Agent config file must be a `.yaml` file."

        config = OmegaConf.load(path)
        config_dict = OmegaConf.to_container(config, resolve=True)

        assert isinstance(config_dict, dict), (
            "Parsed config must be a `dict`. Please check your config file format."
        )

        return config_dict

    def build(
        self,
        path: str,
        root_agent_identifier: str = "root_agent",
        tools: list[ToolUnion] | None = None,
    ) -> BaseAgent:
        config = self._read_config(path)

        agent_config = config[root_agent_identifier]
        agent = self._build(agent_config)

        if tools and isinstance(agent, Agent):
            agent.tools = tools

        return agent
@@ -36,7 +36,6 @@
from veadk.tracing.telemetry.opentelemetry_tracer import OpentelemetryTracer
from veadk.types import AgentRunConfig
from veadk.utils.logger import get_logger
from volcengine.base.Request import Request
from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator
from opentelemetry import context

@@ -165,7 +164,7 @@ async def combined_lifespan(app: FastAPI):
)

@app.middleware("http")
async def otel_context_middleware(request: Request, call_next):
async def otel_context_middleware(request, call_next):
carrier = {
"traceparent": request.headers.get("Traceparent"),
"tracestate": request.headers.get("Tracestate"),
38 changes: 20 additions & 18 deletions veadk/knowledgebase/knowledgebase.py
@@ -12,7 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import BinaryIO, Literal, TextIO
from typing import Any, BinaryIO, Literal, TextIO

from pydantic import BaseModel

from veadk.database.database_adapter import get_knowledgebase_database_adapter
from veadk.database.database_factory import DatabaseFactory
@@ -25,23 +27,23 @@ def build_knowledgebase_index(app_name: str):
return f"veadk_kb_{app_name}"


class KnowledgeBase:
def __init__(
self,
backend: Literal["local", "opensearch", "viking", "redis", "mysql"] = "local",
top_k: int = 10,
db_config=None,
):
logger.info(f"Initializing knowledgebase: backend={backend} top_k={top_k}")
class KnowledgeBase(BaseModel):
backend: Literal["local", "opensearch", "viking", "redis", "mysql"] = "local"
top_k: int = 10
db_config: Any | None = None

self.backend = backend
self.top_k = top_k
def model_post_init(self, __context: Any) -> None:
logger.info(
f"Initializing knowledgebase: backend={self.backend} top_k={self.top_k}"
)

self.db_client = DatabaseFactory.create(backend=backend, config=db_config)
self.adapter = get_knowledgebase_database_adapter(self.db_client)
self._db_client = DatabaseFactory.create(
backend=self.backend, config=self.db_config
)
self._adapter = get_knowledgebase_database_adapter(self._db_client)

logger.info(
f"Initialized knowledgebase: db_client={self.db_client.__class__.__name__} adapter={self.adapter}"
f"Initialized knowledgebase: db_client={self._db_client.__class__.__name__} adapter={self._adapter}"
)

def add(
@@ -67,7 +69,7 @@ def add(

logger.info(f"Adding documents to knowledgebase: index={index}")

self.adapter.add(data=data, index=index)
self._adapter.add(data=data, index=index)

def search(self, query: str, app_name: str, top_k: int | None = None) -> list[str]:
top_k = self.top_k if top_k is None else top_k
@@ -76,15 +78,15 @@ def search(self, query: str, app_name: str, top_k: int | None = None) -> list[str]:
f"Searching knowledgebase: app_name={app_name} query={query} top_k={top_k}"
)
index = build_knowledgebase_index(app_name)
result = self.adapter.query(query=query, index=index, top_k=top_k)
result = self._adapter.query(query=query, index=index, top_k=top_k)
if len(result) == 0:
logger.warning(f"No documents found in knowledgebase. Query: {query}")
return result

def delete_doc(self, app_name: str, id: str) -> bool:
index = build_knowledgebase_index(app_name)
return self.adapter.delete_doc(index=index, id=id)
return self._adapter.delete_doc(index=index, id=id)

def list_docs(self, app_name: str, offset: int = 0, limit: int = 100) -> list[dict]:
index = build_knowledgebase_index(app_name)
return self.adapter.list_docs(index=index, offset=offset, limit=limit)
return self._adapter.list_docs(index=index, offset=offset, limit=limit)
41 changes: 20 additions & 21 deletions veadk/memory/long_term_memory.py
@@ -13,8 +13,9 @@
# limitations under the License.

# adapted from Google ADK memory service adk-python/src/google/adk/memory/vertex_ai_memory_bank_service.py at 0a9e67dbca67789247e882d16b139dbdc76a329a · google/adk-python

import json
from typing import Literal
from typing import Any, Literal

from google.adk.events.event import Event
from google.adk.memory.base_memory_service import (
@@ -24,6 +25,7 @@
from google.adk.memory.memory_entry import MemoryEntry
from google.adk.sessions import Session
from google.genai import types
from pydantic import BaseModel
from typing_extensions import override

from veadk.database import DatabaseFactory
@@ -37,33 +39,30 @@ def build_long_term_memory_index(app_name: str, user_id: str):
return f"{app_name}_{user_id}"


class LongTermMemory(BaseMemoryService):
def __init__(
self,
backend: Literal[
"local", "opensearch", "redis", "mysql", "viking", "viking_mem"
] = "opensearch",
top_k: int = 5,
):
if backend == "viking":
class LongTermMemory(BaseMemoryService, BaseModel):
backend: Literal[
"local", "opensearch", "redis", "mysql", "viking", "viking_mem"
] = "opensearch"
top_k: int = 5

def model_post_init(self, __context: Any) -> None:
if self.backend == "viking":
logger.warning(
"`viking` backend is deprecated, switching to `viking_mem` backend."
)
backend = "viking_mem"
self.top_k = top_k
self.backend = backend
self.backend = "viking_mem"

logger.info(
f"Initializing long term memory: backend={self.backend} top_k={self.top_k}"
)

self.db_client = DatabaseFactory.create(
backend=backend,
self._db_client = DatabaseFactory.create(
backend=self.backend,
)
self.adapter = get_long_term_memory_database_adapter(self.db_client)
self._adapter = get_long_term_memory_database_adapter(self._db_client)

logger.info(
f"Initialized long term memory: db_client={self.db_client.__class__.__name__} adapter={self.adapter}"
f"Initialized long term memory: db_client={self._db_client.__class__.__name__} adapter={self._adapter}"
)

def _filter_and_convert_events(self, events: list[Event]) -> list[str]:
@@ -101,9 +100,9 @@ async def add_session_to_memory(

# check if viking memory database, should give a user id: if/else
if self.backend == "viking_mem":
self.adapter.add(data=event_strings, index=index, user_id=session.user_id)
self._adapter.add(data=event_strings, index=index, user_id=session.user_id)
else:
self.adapter.add(data=event_strings, index=index)
self._adapter.add(data=event_strings, index=index)

logger.info(
f"Added {len(event_strings)} events to long term memory: index={index}"
@@ -119,11 +118,11 @@ async def search_memory(self, *, app_name: str, user_id: str, query: str):

# user id if viking memory db
if self.backend == "viking_mem":
memory_chunks = self.adapter.query(
memory_chunks = self._adapter.query(
query=query, index=index, top_k=self.top_k, user_id=user_id
)
else:
memory_chunks = self.adapter.query(
memory_chunks = self._adapter.query(
query=query, index=index, top_k=self.top_k
)

2 changes: 1 addition & 1 deletion veadk/version.py
@@ -12,4 +12,4 @@
# See the License for the specific language governing permissions and
# limitations under the License.

VERSION = "0.2.5"
VERSION = "0.2.6"