Skip to content

Commit 1a5326f

Browse files
authored
feat: fix #272 add memory feature (#420)
1 parent a08cc9c commit 1a5326f

39 files changed

+7693
-1025
lines changed

.changeset/easy-taxis-stop.md

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,6 @@
1+
---
2+
'@openai/agents-openai': minor
3+
'@openai/agents-core': minor
4+
---
5+
6+
feat: fix #272 add memory feature

examples/mcp/hosted-mcp-human-in-the-loop.ts

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -16,8 +16,10 @@ async function confirm(item: RunToolApprovalItem): Promise<boolean> {
1616
async function main(verbose: boolean, stream: boolean): Promise<void> {
1717
// 'always' | 'never' | { never, always }
1818
const requireApproval = {
19-
never: { toolNames: ['search_codex_code', 'fetch_codex_documentation'] },
20-
always: { toolNames: ['fetch_generic_url_content'] },
19+
never: { toolNames: ['search_codex_code'] },
20+
always: {
21+
toolNames: ['fetch_generic_url_content', 'fetch_codex_documentation'],
22+
},
2123
};
2224
const agent = new Agent({
2325
name: 'MCP Assistant',

examples/mcp/hosted-mcp-on-approval.ts

Lines changed: 2 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -16,12 +16,8 @@ async function promptApproval(item: RunToolApprovalItem): Promise<boolean> {
1616
async function main(verbose: boolean, stream: boolean): Promise<void> {
1717
// 'always' | 'never' | { never, always }
1818
const requireApproval = {
19-
never: {
20-
toolNames: ['fetch_codex_documentation', 'fetch_generic_url_content'],
21-
},
22-
always: {
23-
toolNames: ['search_codex_code'],
24-
},
19+
never: { toolNames: ['fetch_generic_url_content'] },
20+
always: { toolNames: ['fetch_codex_documentation', 'search_codex_code'] },
2521
};
2622
const agent = new Agent({
2723
name: 'MCP Assistant',

examples/memory/.gitignore

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,2 @@
1+
tmp/
2+
*.db

examples/memory/file-hitl.ts

Lines changed: 126 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,126 @@
1+
import readline from 'node:readline/promises';
2+
import { stdin as input, stdout as output } from 'node:process';
3+
import {
4+
Agent,
5+
RunResult,
6+
RunToolApprovalItem,
7+
run,
8+
withTrace,
9+
} from '@openai/agents';
10+
11+
import type { Interface as ReadlineInterface } from 'node:readline/promises';
12+
import { FileSession } from './sessions';
13+
import { createLookupCustomerProfileTool, fetchImageData } from './tools';
14+
15+
// Fake CRM records keyed by customer id — presumably returned by the
// lookup tool; see ./tools for the factory implementation.
const customerDirectory: Record<string, string> = {
  '101':
    'Customer Kaz S. (tier gold) can be reached at +1-415-555-AAAA. Notes: Prefers SMS follow ups and values concise summaries.',
  '104':
    'Customer Yu S. (tier platinum) can be reached at +1-415-555-BBBB. Notes: Recently reported sync issues. Flagged for a proactive onboarding call.',
  '205':
    'Customer Ken S. (tier standard) can be reached at +1-415-555-CCCC. Notes: Interested in automation tutorials sent last week.',
};

// Lookup tool configured with a transient-error message — NOTE(review):
// the factory in ./tools presumably uses it to simulate a failed first call.
const lookupCustomerProfile = createLookupCustomerProfileTool({
  directory: customerDirectory,
  transientErrorMessage:
    'Simulated CRM outage for the first lookup. Please retry the tool call.',
});
// Force the human-in-the-loop approval flow on every lookup call.
lookupCustomerProfile.needsApproval = async () => true;

// System prompt: require both tools each turn, retry once after a
// transient failure, keep replies short.
const instructions =
  'You assist support agents. For every user turn you must call lookup_customer_profile and fetch_image_data before responding so replies include stored notes and the sample image. If a tool reports a transient failure, request approval and retry the same call once before responding. Keep responses under three sentences.';
33+
34+
function formatToolArguments(interruption: RunToolApprovalItem): string {
35+
const args = interruption.rawItem.arguments;
36+
if (!args) {
37+
return '';
38+
}
39+
if (typeof args === 'string') {
40+
return args;
41+
}
42+
try {
43+
return JSON.stringify(args);
44+
} catch {
45+
return String(args);
46+
}
47+
}
48+
49+
async function promptYesNo(
50+
rl: ReadlineInterface,
51+
question: string,
52+
): Promise<boolean> {
53+
const answer = await rl.question(`${question} (y/n): `);
54+
const normalized = answer.trim().toLowerCase();
55+
return normalized === 'y' || normalized === 'yes';
56+
}
57+
58+
async function resolveInterruptions<TContext, TAgent extends Agent<any, any>>(
59+
rl: ReadlineInterface,
60+
agent: TAgent,
61+
initialResult: RunResult<TContext, TAgent>,
62+
session: FileSession,
63+
): Promise<RunResult<TContext, TAgent>> {
64+
let result = initialResult;
65+
while (result.interruptions?.length) {
66+
for (const interruption of result.interruptions) {
67+
const args = formatToolArguments(interruption);
68+
const approved = await promptYesNo(
69+
rl,
70+
`Agent ${interruption.agent.name} wants to call ${interruption.rawItem.name} with ${args || 'no arguments'}`,
71+
);
72+
if (approved) {
73+
result.state.approve(interruption);
74+
console.log('Approved tool call.');
75+
} else {
76+
result.state.reject(interruption);
77+
console.log('Rejected tool call.');
78+
}
79+
}
80+
81+
result = await run(agent, result.state, { session });
82+
}
83+
84+
return result;
85+
}
86+
87+
async function main() {
88+
await withTrace('memory:file-hitl:main', async () => {
89+
const agent = new Agent({
90+
name: 'File HITL assistant',
91+
instructions,
92+
modelSettings: { toolChoice: 'required' },
93+
tools: [lookupCustomerProfile, fetchImageData],
94+
});
95+
96+
const session = new FileSession({ dir: './tmp' });
97+
const sessionId = await session.getSessionId();
98+
const rl = readline.createInterface({ input, output });
99+
100+
console.log(`Session id: ${sessionId}`);
101+
console.log(
102+
'Enter a message to chat with the agent. Submit an empty line to exit.',
103+
);
104+
105+
while (true) {
106+
const userMessage = await rl.question('You: ');
107+
if (!userMessage.trim()) {
108+
break;
109+
}
110+
111+
let result = await run(agent, userMessage, { session });
112+
result = await resolveInterruptions(rl, agent, result, session);
113+
114+
const reply = result.finalOutput ?? '[No final output produced]';
115+
console.log(`Assistant: ${reply}`);
116+
console.log();
117+
}
118+
119+
rl.close();
120+
});
121+
}
122+
123+
main().catch((error) => {
124+
console.error(error);
125+
process.exit(1);
126+
});

examples/memory/file.ts

Lines changed: 105 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,105 @@
1+
import { Agent, run, withTrace } from '@openai/agents';
2+
import { FileSession } from './sessions';
3+
import { createLookupCustomerProfileTool, fetchImageData } from './tools';
4+
5+
// Minimal in-memory CRM fixture keyed by customer id.
const directory: Record<string, string> = {
  '1': 'Customer 1 (tier gold). Notes: Prefers concise replies.',
  '2': 'Customer 2 (tier standard). Notes: Interested in tutorials.',
};

// System prompt: requires both example tools to run on every turn.
const instructions =
  'You are a helpful assistant. For every user turn you must call lookup_customer_profile and fetch_image_data before responding.';

// Lookup tool built from the directory; the factory (./tools) also takes a
// transient-error message — presumably used to simulate a failed lookup.
const lookupCustomerProfile = createLookupCustomerProfileTool({
  directory,
  transientErrorMessage:
    'Simulated transient CRM outage. Please retry the tool call.',
});
18+
19+
async function main() {
20+
await withTrace('memory:file:main', async () => {
21+
const agent = new Agent({
22+
name: 'Assistant',
23+
instructions,
24+
modelSettings: { toolChoice: 'required' },
25+
tools: [lookupCustomerProfile, fetchImageData],
26+
});
27+
28+
const session = new FileSession({ dir: './tmp/' });
29+
let result = await run(
30+
agent,
31+
'What is the largest country in South America?',
32+
{ session },
33+
);
34+
console.log(result.finalOutput); // e.g., Brazil
35+
36+
result = await run(agent, 'What is the capital of that country?', {
37+
session,
38+
});
39+
console.log(result.finalOutput); // e.g., Brasilia
40+
});
41+
}
42+
43+
async function mainStream() {
44+
await withTrace('memory:file:mainStream', async () => {
45+
const agent = new Agent({
46+
name: 'Assistant',
47+
instructions,
48+
modelSettings: { toolChoice: 'required' },
49+
tools: [lookupCustomerProfile, fetchImageData],
50+
});
51+
52+
const session = new FileSession({ dir: './tmp/' });
53+
let result = await run(
54+
agent,
55+
'What is the largest country in South America?',
56+
{
57+
stream: true,
58+
session,
59+
},
60+
);
61+
62+
for await (const event of result) {
63+
if (
64+
event.type === 'raw_model_stream_event' &&
65+
event.data.type === 'output_text_delta'
66+
)
67+
process.stdout.write(event.data.delta);
68+
}
69+
console.log();
70+
71+
result = await run(agent, 'What is the capital of that country?', {
72+
stream: true,
73+
session,
74+
});
75+
76+
// toTextStream() automatically returns a readable stream of strings intended to be displayed
77+
// to the user
78+
for await (const event of result.toTextStream()) {
79+
process.stdout.write(event);
80+
}
81+
console.log();
82+
83+
// Additional tool invocations happen earlier in the turn.
84+
});
85+
}
86+
87+
async function promptAndRun() {
88+
const readline = await import('node:readline/promises');
89+
const rl = readline.createInterface({
90+
input: process.stdin,
91+
output: process.stdout,
92+
});
93+
const isStream = await rl.question('Run in stream mode? (y/n): ');
94+
rl.close();
95+
if (isStream.trim().toLowerCase() === 'y') {
96+
await mainStream();
97+
} else {
98+
await main();
99+
}
100+
}
101+
102+
promptAndRun().catch((error) => {
103+
console.error(error);
104+
process.exit(1);
105+
});

examples/memory/memory-hitl.ts

Lines changed: 126 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,126 @@
1+
import readline from 'node:readline/promises';
2+
import { stdin as input, stdout as output } from 'node:process';
3+
import {
4+
Agent,
5+
MemorySession,
6+
RunResult,
7+
RunToolApprovalItem,
8+
run,
9+
withTrace,
10+
} from '@openai/agents';
11+
12+
import type { Interface as ReadlineInterface } from 'node:readline/promises';
13+
import { createLookupCustomerProfileTool, fetchImageData } from './tools';
14+
15+
// Fake CRM records keyed by customer id — presumably returned by the
// lookup tool; see ./tools for the factory implementation.
const customerDirectory: Record<string, string> = {
  '101':
    'Customer Kaz S. (tier gold) can be reached at +1-415-555-AAAA. Notes: Prefers SMS follow ups and values concise summaries.',
  '104':
    'Customer Yu S. (tier platinum) can be reached at +1-415-555-BBBB. Notes: Recently reported sync issues. Flagged for a proactive onboarding call.',
  '205':
    'Customer Ken S. (tier standard) can be reached at +1-415-555-CCCC. Notes: Interested in automation tutorials sent last week.',
};

// Lookup tool configured with a transient-error message — NOTE(review):
// the factory in ./tools presumably uses it to simulate a failed first call.
const lookupCustomerProfile = createLookupCustomerProfileTool({
  directory: customerDirectory,
  transientErrorMessage:
    'Simulated CRM outage for the first lookup. Please retry the tool call.',
});
// Force the human-in-the-loop approval flow on every lookup call.
lookupCustomerProfile.needsApproval = async () => true;

// System prompt: require both tools each turn, retry once after a
// transient failure, keep replies short.
const instructions =
  'You assist support agents. For every user turn you must call lookup_customer_profile and fetch_image_data before responding so replies include stored notes and the sample image. If a tool reports a transient failure, request approval and retry the same call once before responding. Keep responses under three sentences.';
33+
34+
function formatToolArguments(interruption: RunToolApprovalItem): string {
35+
const args = interruption.rawItem.arguments;
36+
if (!args) {
37+
return '';
38+
}
39+
if (typeof args === 'string') {
40+
return args;
41+
}
42+
try {
43+
return JSON.stringify(args);
44+
} catch {
45+
return String(args);
46+
}
47+
}
48+
49+
async function promptYesNo(
50+
rl: ReadlineInterface,
51+
question: string,
52+
): Promise<boolean> {
53+
const answer = await rl.question(`${question} (y/n): `);
54+
const normalized = answer.trim().toLowerCase();
55+
return normalized === 'y' || normalized === 'yes';
56+
}
57+
58+
async function resolveInterruptions<TContext, TAgent extends Agent<any, any>>(
59+
rl: ReadlineInterface,
60+
agent: TAgent,
61+
initialResult: RunResult<TContext, TAgent>,
62+
session: MemorySession,
63+
): Promise<RunResult<TContext, TAgent>> {
64+
let result = initialResult;
65+
while (result.interruptions?.length) {
66+
for (const interruption of result.interruptions) {
67+
const args = formatToolArguments(interruption);
68+
const approved = await promptYesNo(
69+
rl,
70+
`Agent ${interruption.agent.name} wants to call ${interruption.rawItem.name} with ${args || 'no arguments'}`,
71+
);
72+
if (approved) {
73+
result.state.approve(interruption);
74+
console.log('Approved tool call.');
75+
} else {
76+
result.state.reject(interruption);
77+
console.log('Rejected tool call.');
78+
}
79+
}
80+
81+
result = await run(agent, result.state, { session });
82+
}
83+
84+
return result;
85+
}
86+
87+
async function main() {
88+
await withTrace('memory:memory-hitl:main', async () => {
89+
const agent = new Agent({
90+
name: 'Memory HITL assistant',
91+
instructions,
92+
modelSettings: { toolChoice: 'required' },
93+
tools: [lookupCustomerProfile, fetchImageData],
94+
});
95+
96+
const session = new MemorySession();
97+
const sessionId = await session.getSessionId();
98+
const rl = readline.createInterface({ input, output });
99+
100+
console.log(`Session id: ${sessionId}`);
101+
console.log(
102+
'Enter a message to chat with the agent. Submit an empty line to exit.',
103+
);
104+
105+
while (true) {
106+
const userMessage = await rl.question('You: ');
107+
if (!userMessage.trim()) {
108+
break;
109+
}
110+
111+
let result = await run(agent, userMessage, { session });
112+
result = await resolveInterruptions(rl, agent, result, session);
113+
114+
const reply = result.finalOutput ?? '[No final output produced]';
115+
console.log(`Assistant: ${reply}`);
116+
console.log();
117+
}
118+
119+
rl.close();
120+
});
121+
}
122+
123+
main().catch((error) => {
124+
console.error(error);
125+
process.exit(1);
126+
});

0 commit comments

Comments
 (0)