forked from SylphAI-Inc/AdalFlow
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathreact_note.py
161 lines (127 loc) · 4.33 KB
/
react_note.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
from adalflow.components.agent import ReActAgent
from adalflow.core import Generator, ModelClientType, ModelClient
from adalflow.utils import setup_env

# Load provider credentials (e.g. OPENAI_API_KEY / GROQ_API_KEY) from the
# local environment/.env so the model clients below can authenticate.
# NOTE(review): exact lookup behavior is defined in adalflow.utils — confirm.
setup_env()

# Define tools
def multiply(a: int, b: int) -> int:
    """Return the product of *a* and *b*."""
    product = a * b
    return product
def add(a: int, b: int) -> int:
    """Return the sum of *a* and *b*."""
    total = a + b
    return total
def divide(a: float, b: float) -> float:
    """Return *a* divided by *b* as a float.

    Propagates ZeroDivisionError when ``b`` is zero, like the plain operator.
    """
    numerator = float(a)
    return numerator / b
# Per-backend model settings. temperature=0.0 keeps the planner's output
# deterministic so agent-vs-plain-LLM runs are comparable.
llama3_model_kwargs = {
    "model": "llama3-8b-8192",  # llama3 70b works better than 8b here.
    "temperature": 0.0,
}

gpt_model_kwargs = {
    "model": "gpt-3.5-turbo",
    "temperature": 0.0,
}
def test_react_agent(model_client: ModelClient, model_kwargs: dict):
    """Run the ReAct agent and a tool-less Generator side by side.

    For each demo query, prints the agent's answer and the plain LLM's answer
    so the effect of tool use can be eyeballed.
    """
    tools = [multiply, add, divide]
    queries = [
        "What is the capital of France? and what is 465 times 321 then add 95297 and then divide by 13.2?",
        "Give me 5 words rhyming with cool, and make a 4-sentence poem using them",
    ]
    # A generator without tools serves as the comparison baseline.
    generator = Generator(
        model_client=model_client,
        model_kwargs=model_kwargs,
    )
    react = ReActAgent(
        max_steps=6,
        add_llm_as_fallback=True,
        tools=tools,
        model_client=model_client,
        model_kwargs=model_kwargs,
    )
    for query in queries:
        print(f"Query: {query}")
        with_tools = react.call(query)
        without_tools = generator.call(prompt_kwargs={"input_str": query})
        print(f"Agent response: {with_tools}")
        print(f"LLM response: {without_tools}")
        print("")


"""
To have an agent.
input, prompt, template, step_history -> generator
-> stepoutput -> step_history -> generator -> stepoutput -> step_history
-> generator -> stepoutput -> step_history -> generator -> stepoutput -> step_history
"""
def test_react_agent_train(model_client: ModelClient, model_kwargs: dict):
    """Build a ReAct agent in training mode and visualize one traced query.

    Uses ``forward`` (the trainable path) instead of ``call`` so the response
    carries a computation graph, then draws that graph. Only the first query
    is processed — graphing every query would be slow and redundant.
    """
    tools = [multiply, add, divide]
    queries = [
        "What is the capital of France? and what is 465 times 321 then add 95297 and then divide by 13.2?",
        "Give me 5 words rhyming with cool, and make a 4-sentence poem using them",
    ]
    react = ReActAgent(
        max_steps=6,
        add_llm_as_fallback=True,
        tools=tools,
        model_client=model_client,
        model_kwargs=model_kwargs,
    )
    # Switch to training mode so forward() produces a traceable Parameter graph.
    react.train()
    for query in queries:
        print(f"Query: {query}")
        agent_response = react.forward(query)
        agent_response.draw_graph()
        agent_response.draw_output_subgraph()
        break  # trace/visualize the first query only
    print("")
def test_react_agent_use_examples(model_client: ModelClient, model_kwargs: dict):
    """Demonstrate steering the agent's output format with a few-shot example."""
    from adalflow.core.types import FunctionExpression

    tools = [multiply, add, divide]
    queries = [
        "What is the capital of France? and what is 465 times 321 then add 95297 and then divide by 13.2?",
        "Give me 5 words rhyming with cool, and make a 4-sentence poem using them",
    ]
    # One worked FunctionExpression shows the planner the expected output format.
    example_using_multiply = FunctionExpression.from_function(
        func=multiply,
        thought="Now, let's multiply two numbers.",
        a=3,
        b=4,
    )
    react = ReActAgent(
        max_steps=6,
        add_llm_as_fallback=True,
        tools=tools,
        model_client=model_client,
        model_kwargs=model_kwargs,
        examples=[example_using_multiply],
    )
    print(react)
    # Inspect the rendered prompt to see how the example appears to the planner.
    react.planner.print_prompt()
    for query in queries:
        print(f"Query: {query}")
        answer = react.call(query)
        print(f"Agent response: {answer}")
        print("")
if __name__ == "__main__":
    # Entry point: exactly one demo is active; uncomment others to switch
    # backends (GROQ/OpenAI) or modes (call vs. train/trace).
    # get_logger(level="DEBUG")
    # test_react_agent(ModelClientType.GROQ(), llama3_model_kwargs)
    test_react_agent_train(ModelClientType.OPENAI(), gpt_model_kwargs)
    # test_react_agent(ModelClientType.OPENAI(), gpt_model_kwargs)
    # print("Done")
    # test_react_agent_use_examples(ModelClientType.GROQ(), llama3_model_kwargs)