Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Tool-Memory LTM #1007

Merged
merged 14 commits into from
Aug 10, 2023
Next Next commit
add ltm
  • Loading branch information
AdityaSharma13064 committed Aug 3, 2023
commit 016124feb34fc308e16ef3f2551bcaea775ba2ca
Binary file added Delivery_Status_Notification__Failure_/icon.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Binary file not shown.
20 changes: 17 additions & 3 deletions superagi/agent/super_agi.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,13 +6,13 @@
import time
from typing import Any
from typing import Tuple

import json
import numpy as np
from pydantic import ValidationError
from pydantic.types import List
from sqlalchemy import asc
from sqlalchemy.orm import sessionmaker

from langchain.text_splitter import TokenTextSplitter
from superagi.agent.agent_prompt_builder import AgentPromptBuilder
from superagi.agent.output_parser import BaseOutputParser, AgentSchemaOutputParser
from superagi.agent.task_queue import TaskQueue
Expand Down Expand Up @@ -205,9 +205,23 @@ def execute(self, workflow_step: AgentWorkflowStep):
if len(current_tasks) > 0 and final_response["result"] == "COMPLETE":
final_response["result"] = "PENDING"
session.commit()

logger.info("Iteration completed moving to next iteration!")
session.close()

data = json.loads(assistant_reply)
task_description = data['thoughts']['text']
final_tool_response = final_response["result"]
prompt = task_description+final_tool_response
text_splitter = TokenTextSplitter(chunk_size=512, chunk_overlap=10)
chunk_response = text_splitter.split_text(prompt)

print("Here is the task description and tool response: ",chunk_response,"END")
metadata = {"agent_execution_id":self.agent_config["agent_execution_id"]}
metadatas = []
for _ in chunk_response:
metadatas.append(metadata)

self.memory.add_texts(chunk_response,metadatas)
return final_response

def handle_tool_response(self, session, assistant_reply):
Expand Down
7 changes: 4 additions & 3 deletions superagi/jobs/agent_executor.py
Original file line number Diff line number Diff line change
Expand Up @@ -237,7 +237,8 @@ def execute_next_action(self, agent_execution_id):
tools = self.set_default_params_tools(tools, parsed_config, parsed_execution_config, agent_execution.agent_id,
model_api_key=model_api_key,
resource_description=resource_summary,
session=session)
session=session,
memory=memory)

spawned_agent = SuperAgi(ai_name=parsed_config["name"], ai_role=parsed_config["description"],
llm=get_model(model=parsed_config["model"], api_key=model_api_key), tools=tools,
Expand Down Expand Up @@ -295,7 +296,7 @@ def execute_next_action(self, agent_execution_id):
engine.dispose()

def set_default_params_tools(self, tools, parsed_config, parsed_execution_config, agent_id, model_api_key,
session, resource_description=None):
session, resource_description=None,memory=None):
"""
Set the default parameters for the tools.

Expand Down Expand Up @@ -331,7 +332,7 @@ def set_default_params_tools(self, tools, parsed_config, parsed_execution_config
"agent_execution_id"])
if hasattr(tool, 'tool_response_manager'):
tool.tool_response_manager = ToolResponseQueryManager(session=session, agent_execution_id=parsed_config[
"agent_execution_id"])
"agent_execution_id"],memory=memory)

if tool.name == "QueryResource" and resource_description:
tool.description = tool.description.replace("{summary}", resource_description)
Expand Down
3 changes: 3 additions & 0 deletions superagi/tools/thinking/prompts/thinking.txt
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,9 @@ and the following task, `{task_description}`.
Below is the last tool response:
`{last_tool_response}`

Below is the relevant tool response:
`{relevant_tool_response}`

Perform the task by understanding the problem, extracting variables, and being smart
and efficient. Provide a descriptive response, make decisions yourself when
confronted with choices and provide reasoning for ideas / decisions.
6 changes: 6 additions & 0 deletions superagi/tools/thinking/tools.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,7 @@ class ThinkingTool(BaseTool):
)
args_schema: Type[ThinkingSchema] = ThinkingSchema
goals: List[str] = []
agent_execution_id:int=None
permission_required: bool = False
tool_response_manager: Optional[ToolResponseQueryManager] = None

Expand All @@ -56,6 +57,11 @@ def _execute(self, task_description: str):
prompt = prompt.replace("{task_description}", task_description)
last_tool_response = self.tool_response_manager.get_last_response()
prompt = prompt.replace("{last_tool_response}", last_tool_response)
metadata = {"agent_execution_id":self.agent_execution_id}
relevant_tool_response = self.tool_response_manager.get_relevant_response(query=task_description,metadata=metadata)
print("Here is the relevant tool response: ",relevant_tool_response,"END")
prompt = prompt.replace("{relevant_tool_response}",relevant_tool_response)
print("Final tool prompt: ",prompt,"END")
messages = [{"role": "system", "content": prompt}]
result = self.llm.chat_completion(messages, max_tokens=self.max_token_limit)
return result["content"]
Expand Down
12 changes: 11 additions & 1 deletion superagi/tools/tool_response_query_manager.py
Original file line number Diff line number Diff line change
@@ -1,12 +1,22 @@
from sqlalchemy.orm import Session

from superagi.models.agent_execution_feed import AgentExecutionFeed
from superagi.vector_store.base import VectorStore


class ToolResponseQueryManager:
    """Retrieves prior tool responses for one agent execution.

    Two retrieval paths are supported: the most recent response recorded in
    the DB feed (via AgentExecutionFeed), and semantically relevant past
    responses stored in the vector-store memory.
    """

    def __init__(self, session: Session, agent_execution_id: int, memory: VectorStore):
        # session: DB session used for feed lookups.
        # agent_execution_id: scopes every query to a single agent execution.
        # memory: vector store holding previously indexed tool responses.
        self.session = session
        self.agent_execution_id = agent_execution_id
        self.memory = memory

    def get_last_response(self, tool_name: str = None):
        """Return the most recent tool response for this execution,
        optionally filtered to a specific tool name."""
        return AgentExecutionFeed.get_last_tool_response(self.session, self.agent_execution_id, tool_name)

    def get_relevant_response(self, query: str, metadata: dict, top_k: int = 5):
        """Return past tool responses semantically similar to *query*,
        concatenated into a single string.

        metadata is forwarded to the vector store to restrict matches
        (e.g. to this agent execution).

        NOTE(review): top_k is accepted but never forwarded to
        get_matching_text — confirm whether the store supports a result
        limit and pass it through if so.
        """
        documents = self.memory.get_matching_text(query, metadata=metadata)
        # str.join avoids quadratic string concatenation over the matches.
        return "".join(document.text_content for document in documents["documents"])
12 changes: 12 additions & 0 deletions superagi/vector_store/embedding/openai.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,18 @@ class OpenAiEmbedding:
def __init__(self, api_key, model="text-embedding-ada-002"):
self.model = model
self.api_key = api_key

async def get_embedding_async(self, text: str):
    """Asynchronously fetch the embedding vector for *text*.

    Returns the embedding (a list of floats) on success, or a dict
    {"error": <Exception>} on failure — a best-effort contract that
    mirrors the synchronous get_embedding in this class rather than
    raising to the caller.
    """
    try:
        openai.api_key = self.api_key
        # Bug fix: openai.Embedding.create is synchronous and not
        # awaitable — awaiting it raised TypeError, which the except
        # below swallowed, so this method always returned an error dict.
        # The async variant in the openai<1.0 client is Embedding.acreate.
        response = await openai.Embedding.acreate(
            input=[text],
            engine=self.model
        )
        return response['data'][0]['embedding']
    except Exception as exception:
        # Deliberate best-effort: callers receive an error payload
        # instead of an exception.
        return {"error": exception}

def get_embedding(self, text):
try:
Expand Down
6 changes: 4 additions & 2 deletions tools.json
Original file line number Diff line number Diff line change
@@ -1,4 +1,6 @@
{
"tools": {
}
"tools": {
"DuckDuckGo": "https://github.com/TransformerOptimus/SuperAGI-Tools/tree/main/DuckDuckGo",
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

revert

"notion": "https://github.com/TransformerOptimus/SuperAGI-Tools/tree/main/notion"
}
}