Merge pull request #763 from TransformerOptimus/json_schema_changes
moving json structure to json schema structure in agent
authored Jul 14, 2023
2 parents 8b5af24 + 389c672 commit e07029f
Showing 5 changed files with 112 additions and 37 deletions.
19 changes: 17 additions & 2 deletions superagi/agent/output_parser.py
@@ -2,6 +2,7 @@
from abc import ABC, abstractmethod
from typing import Dict, NamedTuple, List
import re
import ast
import json5
from superagi.helper.json_cleaner import JsonCleaner
from superagi.lib.logger import logger
@@ -22,7 +23,21 @@ class BaseOutputParser(ABC):
    def parse(self, text: str) -> AgentGPTAction:
        """Return AgentGPTAction"""


class AgentSchemaOutputParser(BaseOutputParser):
    def parse(self, response: str) -> AgentGPTAction:
        if response.startswith("```") and response.endswith("```"):
            response = "```".join(response.split("```")[1:-1])
        # OpenAI returns `str(content_dict)`, literal_eval reverses this
        try:
            logger.debug("AgentSchemaOutputParser: ", response)
            response_obj = ast.literal_eval(response)
            return AgentGPTAction(
                name=response_obj['tool']['name'],
                args=response_obj['tool']['args'],
            )
        except BaseException as e:
            logger.info(f"AgentSchemaOutputParser: Error parsing JSON response {e}")
            return {}

class AgentOutputParser(BaseOutputParser):
    def parse(self, text: str) -> AgentGPTAction:
@@ -95,4 +110,4 @@ def parse_tasks(self, text: str) -> AgentTasks:
# If the command is null or incomplete, return an erroneous tool
return AgentTasks(
error=f"Incomplete tool args: {parsed}",
)
)
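
For illustration only (not part of the commit): the new parser leans on ast.literal_eval because the model often returns str(dict) with single quotes, which json.loads would reject. A minimal sketch, with a made-up tool name and argument:

import ast

# A reply shaped like str(content_dict), as the comment in the parser describes
response = "{'thoughts': {'reasoning': 'read the file first'}, 'tool': {'name': 'Read File', 'args': {'file_name': 'report.txt'}}}"
response_obj = ast.literal_eval(response)  # parses the dict literal; json.loads would fail on the single quotes
print(response_obj['tool']['name'])        # Read File
print(response_obj['tool']['args'])        # {'file_name': 'report.txt'}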
38 changes: 30 additions & 8 deletions superagi/agent/prompts/analyse_task.txt
@@ -15,12 +15,34 @@ Ensure next action tool is picked from the below tool list.
TOOLS:
{tools}

RESPONSE FORMAT:
{
    "thoughts": {
        "reasoning": "short reasoning"
    },
    "tool": {"name": "tool name", "args": {"arg name": "arg value(escape in case of string)"}}
}

Your answer must be something that JSON.parse() can read, and nothing else.

Respond with only valid JSON conforming to the following schema:
{
    "$schema": "http://json-schema.org/draft-07/schema#",
    "type": "object",
    "properties": {
        "thoughts": {
            "type": "object",
            "properties": {
                "reasoning": {
                    "type": "string",
                    "description": "short reasoning"
                }
            },
            "required": ["reasoning"]
        },
        "tool": {
            "type": "object",
            "properties": {
                "name": {
                    "type": "string",
                    "description": "tool name"
                },
                "args": {
                    "type": "object",
                    "description": "tool arguments"
                }
            },
            "required": ["name", "args"]
        }
    }
}
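
For reference, a reply that conforms to this schema (tool name and arguments are illustrative, not taken from the commit) would look like:

{
    "thoughts": {
        "reasoning": "The task needs the file contents, so read the file first."
    },
    "tool": {
        "name": "Read File",
        "args": {"file_name": "report.txt"}
    }
}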
2 changes: 1 addition & 1 deletion superagi/agent/prompts/initialize_tasks.txt
@@ -5,7 +5,7 @@ GOALS:

{task_instructions}

Construct a sequence of actions, not exceeding 4 steps, to achieve this goal.
Construct a sequence of actions, not exceeding 3 steps, to achieve this goal.

Submit your response as a formatted ARRAY of strings, suitable for utilization with JSON.parse().
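
For example (task wording is illustrative, not taken from the commit), a conforming reply within the new 3-step limit would be:

["Research the topic", "Draft an outline", "Write the final summary"]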

57 changes: 51 additions & 6 deletions superagi/agent/prompts/superagi.txt
@@ -20,9 +20,54 @@ PERFORMANCE EVALUATION:
4. Reflect on past decisions and strategies to refine your approach.
5. Every tool has a cost, so be smart and efficient.

I should only respond in JSON format as described below.
Response Format:
{response_format}


Ensure the response can be parsed by Python json.loads.
Respond with only valid JSON conforming to the following schema:
{
    "$schema": "http://json-schema.org/draft-07/schema#",
    "type": "object",
    "properties": {
        "thoughts": {
            "type": "object",
            "properties": {
                "text": {
                    "type": "string",
                    "description": "thought"
                },
                "reasoning": {
                    "type": "string",
                    "description": "short reasoning"
                },
                "plan": {
                    "type": "string",
                    "description": "- short bulleted\n- list that conveys\n- long-term plan"
                },
                "criticism": {
                    "type": "string",
                    "description": "constructive self-criticism"
                },
                "speak": {
                    "type": "string",
                    "description": "thoughts summary to say to user"
                }
            },
            "required": ["text", "reasoning", "plan", "criticism", "speak"],
            "additionalProperties": false
        },
        "tool": {
            "type": "object",
            "properties": {
                "name": {
                    "type": "string",
                    "description": "tool name"
                },
                "args": {
                    "type": "object",
                    "description": "tool arguments"
                }
            },
            "required": ["name", "args"],
            "additionalProperties": false
        }
    },
    "required": ["thoughts", "tool"],
    "additionalProperties": false
}
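
For reference, a reply that validates against this schema (all values are illustrative, not taken from the commit) would look like:

{
    "thoughts": {
        "text": "I need the report contents before I can summarise it.",
        "reasoning": "Reading the file is the cheapest way to get the required data.",
        "plan": "- read report.txt\n- extract key points\n- write the summary",
        "criticism": "Avoid re-reading files that are already in memory.",
        "speak": "I will read the report and then summarise it."
    },
    "tool": {
        "name": "Read File",
        "args": {"file_name": "report.txt"}
    }
}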
33 changes: 13 additions & 20 deletions superagi/agent/super_agi.py
@@ -4,37 +4,32 @@
from __future__ import annotations

import time
from typing import Any, Dict
from typing import Any
from typing import Tuple
import json
import numpy as np
from halo import Halo
from pydantic import ValidationError
from pydantic.types import List
from sqlalchemy import desc, asc
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy import asc
from sqlalchemy.orm import sessionmaker

from superagi.agent.agent_prompt_builder import AgentPromptBuilder
from superagi.agent.output_parser import BaseOutputParser, AgentOutputParser
from superagi.agent.output_parser import BaseOutputParser, AgentOutputParser, AgentSchemaOutputParser
from superagi.agent.task_queue import TaskQueue
from superagi.apm.event_handler import EventHandler
from superagi.helper.token_counter import TokenCounter
from superagi.lib.logger import logger
from superagi.llms.base_llm import BaseLlm
from superagi.models.agent_config import AgentConfiguration
from superagi.models.agent import Agent
from superagi.models.agent_execution import AgentExecution
# from superagi.models.types.agent_with_config import AgentWithConfig
from superagi.models.agent_execution_feed import AgentExecutionFeed
from superagi.models.agent_execution_permission import AgentExecutionPermission
from superagi.models.agent_workflow_step import AgentWorkflowStep
from superagi.models.db import connect_db
from superagi.tools.base_tool import BaseTool
from superagi.types.common import BaseMessage, HumanMessage, AIMessage, SystemMessage
from superagi.types.common import BaseMessage
from superagi.vector_store.base import VectorStore
from superagi.models.agent import Agent
from superagi.models.resource import Resource
from superagi.config.config import get_config
from superagi.apm.event_handler import EventHandler
import os
from superagi.lib.logger import logger

FINISH = "finish"
WRITE_FILE = "Write File"
@@ -59,7 +54,7 @@ def __init__(self,
tools: List[BaseTool],
agent_config: Any,
agent_execution_config: Any,
output_parser: BaseOutputParser = AgentOutputParser(),
output_parser: BaseOutputParser = AgentSchemaOutputParser(),
):
self.ai_name = ai_name
self.ai_role = ai_role
@@ -70,8 +65,6 @@ def __init__(self,
self.tools = tools
self.agent_config = agent_config
self.agent_execution_config = agent_execution_config
# Init Log
# print("\033[92m\033[1m" + "\nWelcome to SuperAGI - The future of AGI" + "\033[0m\033[0m")

@classmethod
def from_llm_and_tools(
@@ -144,9 +137,8 @@ def execute(self, workflow_step: AgentWorkflowStep):
# agent_id=self.agent_config["agent_id"], feed=template_step.prompt,
# role="user")

logger.info(messages)
logger.debug("Prompt messages:", messages)
if len(agent_feeds) <= 0:
logger.info(prompt)
for message in messages:
agent_execution_feed = AgentExecutionFeed(agent_execution_id=self.agent_config["agent_execution_id"],
agent_id=self.agent_config["agent_id"],
@@ -160,7 +152,7 @@ def execute(self, workflow_step: AgentWorkflowStep):
current_calls = current_calls + 1
total_tokens = current_tokens + TokenCounter.count_message_tokens(response, self.llm.get_model())
self.update_agent_execution_tokens(current_calls, total_tokens)
logger.info("Response:", response)
logger.debug("Response:", response)
if 'content' not in response or response['content'] is None:
raise RuntimeError(f"Failed to get response from llm")
assistant_reply = response['content']
@@ -175,7 +167,8 @@ def execute(self, workflow_step: AgentWorkflowStep):
tool_response = self.handle_tool_response(assistant_reply)

agent_execution_feed = AgentExecutionFeed(agent_execution_id=self.agent_config["agent_execution_id"],
agent_id=self.agent_config["agent_id"], feed=assistant_reply,
agent_id=self.agent_config["agent_id"],
feed=assistant_reply,
role="assistant")
session.add(agent_execution_feed)
tool_response_feed = AgentExecutionFeed(agent_execution_id=self.agent_config["agent_execution_id"],
