import { DebugBuilder } from "../../modules/debugger.mjs";
|
|
const log = new DebugBuilder("server", "discordBot.modules.gptHandler");
|
|
import dotenv from "dotenv";
|
|
dotenv.config();
|
|
|
|
import { OpenAI } from "openai";
|
|
import { EventEmitter } from "events";
|
|
|
|
// BUG FIX: the OpenAI constructor takes an options object, not a bare key
// string. The original `new OpenAI(process.env.OPENAI_API_KEY)` only worked
// because the SDK independently falls back to the OPENAI_API_KEY env var
// when `apiKey` is absent from the options.
const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });
|
// Create the Assistant used by gptHandler, configured from the environment.
// NOTE(review): this registers a brand-new assistant with OpenAI on every
// process start — assistants accumulate in the account; consider reusing a
// stored assistant id instead. TODO confirm this is intentional.
const assistant = await openai.beta.assistants.create({
  name: "Emmelia",
  instructions: process.env.DRB_SERVER_INITIAL_PROMPT,
  model: "gpt-4o",
});
|
|
|
|
/**
 * Routes Assistants streaming events and answers tool calls.
 *
 * Listens for "thread.run.requires_action" events, resolves the requested
 * tool calls, and streams the tool outputs back to the run, re-emitting
 * every resulting event so the conversation keeps flowing.
 */
class EventHandler extends EventEmitter {
  /**
   * @param {object} client - OpenAI SDK client used to submit tool outputs.
   */
  constructor(client) {
    super();
    this.client = client;
  }

  /**
   * Dispatches a single streaming event.
   * Only "thread.run.requires_action" events are acted on — these carry the
   * tool_calls the model wants executed.
   * @param {object} event - Streaming event from the Assistants API.
   */
  async onEvent(event) {
    try {
      console.log(event);
      // Retrieve events that are denoted with 'requires_action'
      // since these will have our tool_calls
      if (event.event === "thread.run.requires_action") {
        await this.handleRequiresAction(
          event.data,
          event.data.id,
          event.data.thread_id,
        );
      }
    } catch (error) {
      console.error("Error handling event:", error);
    }
  }

  /**
   * Resolves each requested tool call and submits the outputs.
   * @param {object} data - Run payload containing required_action.
   * @param {string} runId - Id of the run awaiting tool outputs.
   * @param {string} threadId - Id of the thread the run belongs to.
   */
  async handleRequiresAction(data, runId, threadId) {
    try {
      const toolOutputs = data.required_action.submit_tool_outputs.tool_calls
        .map((toolCall) => {
          // Call the function
          switch (toolCall.function.name) {
            case "getCurrentTemperature":
              return {
                tool_call_id: toolCall.id,
                output: "57",
              };
            default:
              // BUG FIX: the original switch had no default, so unknown tool
              // names produced `undefined` entries in tool_outputs, which the
              // API would reject. Mark them null and filter below.
              return null;
          }
        })
        .filter((output) => output !== null);
      // Submit all the tool outputs at the same time
      await this.submitToolOutputs(toolOutputs, runId, threadId);
    } catch (error) {
      console.error("Error processing required action:", error);
    }
  }

  /**
   * Streams tool outputs back to the run and re-emits every resulting event.
   * @param {Array<object>} toolOutputs - { tool_call_id, output } entries.
   * @param {string} runId
   * @param {string} threadId
   */
  async submitToolOutputs(toolOutputs, runId, threadId) {
    try {
      // Use the submitToolOutputsStream helper
      const stream = this.client.beta.threads.runs.submitToolOutputsStream(
        threadId,
        runId,
        { tool_outputs: toolOutputs },
      );
      for await (const event of stream) {
        this.emit("event", event);
      }
    } catch (error) {
      console.error("Error submitting tool outputs:", error);
    }
  }
}
|
|
|
|
// Single shared handler instance: the OpenAI client doubles as the `client`
// used to submit tool outputs. Every "event" emission is routed back into
// onEvent (bound so `this` stays the handler instance), which may in turn
// emit more events — this loop drives multi-step tool-call runs.
const eventHandler = new EventHandler(openai);
eventHandler.on("event", eventHandler.onEvent.bind(eventHandler));
|
|
|
|
/**
 * Runs a one-shot Assistants conversation and returns the model's reply.
 *
 * @param {Array<object>} additionalMessages - Message payloads (role/content)
 *   appended to a fresh thread before the run starts.
 * @returns {Promise<string|false>} The assistant's reply text, or false when
 *   no response could be produced.
 */
export const gptHandler = async (additionalMessages) => {
  const thread = await openai.beta.threads.create();

  // Add the additional messages to the conversation.
  // Awaited sequentially on purpose: message order matters to the model.
  for (const msgObj of additionalMessages) {
    await openai.beta.threads.messages.create(thread.id, msgObj);
  }

  log.DEBUG("AI Conversation:", thread);

  // Run the thread to get a response
  try {
    // BUG FIX: the original passed `eventHandler` as a third argument, where
    // the SDK expects per-request options — it was silently ignored. Events
    // are forwarded to the handler via the loop below instead.
    const stream = await openai.beta.threads.runs.stream(thread.id, {
      assistant_id: assistant.id,
    });

    for await (const event of stream) {
      eventHandler.emit("event", event);
    }

    // messages.list returns newest-first, so data[0] should be the
    // assistant's reply. NOTE(review): presumed ordering — confirm against
    // the Assistants API docs. Optional chaining makes a missing/empty
    // message fall through to the `return false` guard instead of throwing.
    const messages = await openai.beta.threads.messages.list(thread.id);
    const response = messages.data[0]?.content?.[0]?.text?.value;

    log.DEBUG("AI Response:", response);

    if (!response) {
      return false;
    }

    return response;
  } catch (error) {
    console.error("Error generating response:", error);
    return false;
  }
};
|