-
Notifications
You must be signed in to change notification settings - Fork 0
/
llm-endpoint.js
46 lines (41 loc) · 1.63 KB
/
llm-endpoint.js
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
// llm-endpoints.js
export function LLMEndpoint() {
  // Namespace holder; callEndpoint is attached below as a static-style method.
}

/**
 * Send a single user message to the OpenAI Chat Completions API and return
 * the assistant's reply text.
 *
 * Reads the API key from the OPEN_AI environment variable — never
 * hard-code secrets in source.
 *
 * @param {string} userInput - The user's message content.
 * @returns {Promise<string|null>} The assistant reply text, or null on any
 *   HTTP error, network failure, or empty `choices` array.
 */
LLMEndpoint.callEndpoint = async function (userInput) {
  const URI = "https://api.openai.com/v1/chat/completions";
  const OPENAI_API_KEY = process.env.OPEN_AI;
  const requestPayload = {
    model: "gpt-3.5-turbo", // Adjust the model according to your needs.
    messages: [{ role: "user", content: userInput }],
    // Add any additional parameters per OpenAI's API documentation.
  };
  try {
    const response = await fetch(URI, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'Authorization': `Bearer ${OPENAI_API_KEY}`,
      },
      body: JSON.stringify(requestPayload),
    });
    // `response.ok` accepts the whole 2xx range; the original
    // `status === 200` would wrongly treat e.g. 201 as an error.
    if (!response.ok) {
      console.error('Error:', response.status);
      return null;
    }
    const result = await response.json();
    // Take the last choice's content; optional chaining guards against an
    // empty/missing `choices` array (original indexed unconditionally and
    // could throw a TypeError).
    return result.choices?.at(-1)?.message?.content ?? null;
  } catch (error) {
    console.error('Error:', error);
    // Original fell through and returned undefined here; return null so
    // every failure path has the same contract.
    return null;
  }
};