Skip to content

Commit

Permalink
Delete the tests from react.py; we keep react_note in the developer notes.
Browse files Browse the repository at this point in the history
  • Loading branch information
liyin2015 committed Jul 11, 2024
1 parent a4a70bc commit 64618ab
Showing 1 changed file with 0 additions and 108 deletions.
108 changes: 0 additions & 108 deletions lightrag/lightrag/components/agent/react.py
Original file line number Diff line number Diff line change
Expand Up @@ -306,111 +306,3 @@ def call(
def _extra_repr(self) -> str:
    """Extra fields appended to this component's repr string."""
    return (
        f"max_steps={self.max_steps}, "
        f"add_llm_as_fallback={self.add_llm_as_fallback}, "
    )


if __name__ == "__main__":
from lightrag.components.model_client import GroqAPIClient
from lightrag.core.types import ModelClientType
from lightrag.utils import setup_env

# get_logger()

setup_env()

def multiply(a: int, b: int) -> int:
    """Return the product of two integers."""
    product = a * b
    return product

def add(a: int, b: int) -> int:
    """Return the sum of two integers."""
    total = a + b
    return total

def divide(a: float, b: float) -> float:
    """Return a divided by b as a float.

    Raises ZeroDivisionError when b is zero.
    """
    numerator = float(a)
    return numerator / b

def search(query: str) -> str:
    """Mock web search: ignores the query and returns a canned sentence."""
    canned_answer = "python programming is a great way to learn programming"
    return canned_answer

# Wrap the local math helpers as agent-callable tools; the web-search mock
# is intentionally left disabled.
tools = [
FunctionTool(fn=multiply),
FunctionTool(fn=add),
FunctionTool(fn=divide),
# FunctionTool.from_defaults(fn=search),
]
# Groq model settings; temperature 0.0 for deterministic tool calls.
llm_model_kwargs = {
"model": "llama3-70b-8192", # llama3 is not good with string formatting, llama3 8b is also bad at following instruction, 70b is better but still not as good as gpt-3.5-turbo
# mistral also not good: mixtral-8x7b-32768, but with better prompt, it can still work
"temperature": 0.0,
}

# Alternative OpenAI settings (defined but not used below).
gpt_3_5_turbo_model_kwargs = {
"model": "gpt-3.5-turbo",
}

# Few-shot examples for the agent prompt; currently empty (sample kept
# commented out for reference). Defined but not passed to ReActAgent below.
examples = [
# r"""
# User: What is 9 - 3?
# You: {
# "thought": "I need to subtract 3 from 9, but there is no subtraction tool, so I ask llm_tool to answer the query.",
# "action": "llm_tool('What is 9 - 3?')"
# }
# """
]
# agent = ReActAgent(
# # examples=examples,
# tools=tools,
# max_steps=5,
# model_client=GroqAPIClient,
# model_kwargs=llm_model_kwargs,
# )
# print(agent)
# Test queries: one multi-step arithmetic question, one open-ended
# creative question (no tool applies).
queries = [
# "What is 2 times 3?",
# "What is 3 plus 4?",
"What is the capital of France? and what is 465 times 321 then add 95297 and then divide by 13.2?",
# "Li adapted her pet Apple in 2017 when Apple was only 2 months old, now we are at year 2024, how old is Li's pet Apple?",
"Give me 5 words rhyming with cool, and make a 4-sentence poem using them",
]
# Bare string literal (no-op at runtime): records earlier latency results.
"""
Results: mixtral-8x7b-32768, 0.9s per query
llama3-70b-8192, 1.8s per query
gpt-3.5-turbo, 2.2s per query
"""
import time

# Plain generator used as a no-agent baseline for comparison.
generator = Generator(
model_client=GroqAPIClient(),
model_kwargs=llm_model_kwargs,
)
# for i in range(3):
agent = ReActAgent(
tools=tools,
max_steps=5,
model_client=ModelClientType.GROQ(),
model_kwargs=llm_model_kwargs,
)
# agent.llm_planner.print_prompt()
# print(agent)

# vs not using agent
# print(agent.tools)

# Time each agent call and compare its answer to the baseline generator.
# NOTE: only the agent call is timed; the baseline generator call is not.
average_time = 0
for query in queries:
t0 = time.time()
answer = agent(query)
average_time += time.time() - t0
# NOTE(review): answer / answer_no_agent types depend on lightrag's
# return types (likely GeneratorOutput) — confirm before parsing.
answer_no_agent = generator(prompt_kwargs={"input_str": query})
print(f"Answer with agent: {answer}")
print(f"Answer without agent: {answer_no_agent}")
# Mean agent latency in seconds across all queries.
print(f"Average time: {average_time / len(queries)}")

0 comments on commit 64618ab

Please sign in to comment.