From 77a3c5c6fc224280ec992b28e2e4ab44e885e013 Mon Sep 17 00:00:00 2001
From: Logan Cusano
Date: Sun, 26 Feb 2023 14:28:57 -0500
Subject: [PATCH] Implemented ChatGPT functionality

---
 commands/chat.js                 | 20 +++++++++--
 controllers/chatGptController.js | 57 +++++++++++++-------------------
 events/interactionCreate.js      |  5 +--
 3 files changed, 44 insertions(+), 38 deletions(-)

diff --git a/commands/chat.js b/commands/chat.js
index 4319e3e..956cafb 100644
--- a/commands/chat.js
+++ b/commands/chat.js
@@ -31,10 +31,26 @@ module.exports = {
     defaultTokenUsage: 100,
     deferInitialReply: true,
     async execute(interaction) {
+        const promptText = interaction.options.getString('prompt');
+        const temperature = interaction.options.getNumber('temperature') ?? undefined;
+        const maxTokens = interaction.options.getNumber('tokens') ?? undefined;
+        const discordAccountId = interaction.member.id;
         try {
-            submitPromptTransaction(interaction, async (err, result) => {
+            submitPromptTransaction(promptText, temperature, maxTokens, discordAccountId, async (err, result) => {
                 if (err) throw err;
-                await interaction.editReply({ content: `${interaction.member.user} ${result.promptResult}`, ephemeral: false });
+
+                const gptEmbed = new EmbedBuilder()
+                    .setColor(0x0099FF)
+                    .setTitle(`New GPT response`)
+                    .setDescription(`${interaction.member.user} sent: '${promptText}'`)
+                    .addFields(
+                        { name: 'Generated Text', value: result.promptResult },
+                    )
+                    .addFields({ name: 'Tokens Used', value: `${result.totalTokens}`, inline: true })
+                    .setTimestamp()
+                    .setFooter({ text: 'Brought to you by Emmelia.' });
+
+                await interaction.editReply({ embeds: [gptEmbed], ephemeral: false });
             });
 
             // Needs reply code to reply to the generation
diff --git a/controllers/chatGptController.js b/controllers/chatGptController.js
index 3c23039..0a2c3aa 100644
--- a/controllers/chatGptController.js
+++ b/controllers/chatGptController.js
@@ -5,41 +5,35 @@ const { createTransaction } = require("./transactionController");
 const { Configuration, OpenAIApi } = require('openai');
 
 const configuration = new Configuration({
-    organization: process.env.OPENAI_ORG,
-    apiKey: process.env.OPENAI_KEY
+    organization: process.env.OPENAI_ORG,
+    apiKey: process.env.OPENAI_KEY
 });
 const openai = new OpenAIApi(configuration);
 
 async function getGeneration(_prompt, callback, { _model = "text-davinci-003", _temperature = 0, _max_tokens = 100}) {
+    // If the temperature is set to null
+    _temperature = _temperature ?? 0;
+    // If the tokens are set to null
+    _max_tokens = _max_tokens ?? 100;
+
+    // TODO - Get the tokens in the message and subtract that from the max tokens to be sent to the AI
     log.DEBUG("Getting chat with these properties: ", _prompt, _model, _temperature, _max_tokens)
-    try{
-        /*
+    try{
         const response = await openai.createCompletion({
             model: _model,
             prompt: _prompt,
             temperature: _temperature,
             max_tokens: _max_tokens
-        });
-        */
-
-        var response = {
-            "id": "ABD123",
-            "usage": {
-                "total_tokens": _max_tokens
-            },
-            "data": {
-                "choices": [
-                    {
-                        "text": "ASKLDJHASLDJALSKDJAKLSDJLASKDJALSKD"
-                    }
-                ]
-            }
-        };
-        return callback(undefined, response);
+        });
+        if(!response?.data) return callback(new Error("Error in response data: ", response));
+        return callback(undefined, response.data);
     } catch (err){
-        return callback(err, undefined);
+        log.ERROR(err);
+        log.ERROR("Error when handing model request");
+        //return callback(err, undefined);
     }
     //var responseData = response.data.choices[0].text;
 }
@@ -51,30 +45,25 @@ async function getGeneration(_prompt, callback, { _model = "text-davinci-003", _
  * @param {*} param1 Default parameters can be modified
  * @returns
  */
-exports.submitPromptTransaction = async (interaction, callback) => {
-    var params = {};
-    var promptText = interaction.options.getString('prompt');
-    var temperature = interaction.options.getNumber('temperature');
-    var maxTokens = interaction.options.getNumber('tokens');
+exports.submitPromptTransaction = async (prompt, temperature, max_tokens, discord_account_id, callback) => {
+
-    if (temperature) params._temperature = temperature;
-    if (maxTokens) params._max_tokens = maxTokens;
-
-    getGeneration(promptText, (err, gptResult) => {
+    getGeneration(prompt, (err, gptResult) => {
         if (err) callback(err, undefined);
         // TODO - Use the pricing table to calculate discord tokens
+        log.DEBUG("GPT Response", gptResult);
         const discordTokensUsed = gptResult.usage.total_tokens;
         if (gptResult){
-            createTransaction(gptResult.id, interaction.member.id, discordTokensUsed, gptResult.usage.total_tokens, 1, async (err, transactionResult) => {
+            createTransaction(gptResult.id, discord_account_id, discordTokensUsed, gptResult.usage.total_tokens, 1, async (err, transactionResult) => {
                 if (err) callback(err, undefined);
                 if (transactionResult){
                     log.DEBUG("Transaction Created: ", transactionResult);
-                    callback(undefined, ({ promptResult: gptResult.data.choices[0].text, totalTokens: discordTokensUsed}));
+                    callback(undefined, ({ promptResult: gptResult.choices[0].text, totalTokens: discordTokensUsed}));
                 }
             });
         }
-    }, { _temperature: temperature, _max_tokens: maxTokens });
+    }, { _temperature: temperature, _max_tokens: max_tokens });
 }
\ No newline at end of file
diff --git a/events/interactionCreate.js b/events/interactionCreate.js
index b1ebb02..8e3981e 100644
--- a/events/interactionCreate.js
+++ b/events/interactionCreate.js
@@ -24,12 +24,13 @@ module.exports = {
         try {
             if (command.deferInitialReply) {
                 try {
-                    if (!interaction.options.getBool('public')) await interaction.deferReply({ ephemeral: true });
+                    if (interaction.options.getBool('public') && interaction.options.getBool('public') == false) await interaction.deferReply({ ephemeral: true });
+                    else await interaction.deferReply({ ephemeral: false });
                 } catch (err) {
                     if (err instanceof TypeError) {
                         // The public option doesn't exist in this command
-                        await interaction.deferReply({ ephemeral: true });
+                        await interaction.deferReply({ ephemeral: false });
                     } else {
                         throw err;
                     }
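
A minimal usage sketch of the reworked controller entry point follows; it is not part of the patch itself. It assumes the controller is required from controllers/chatGptController.js and that OPENAI_ORG and OPENAI_KEY are set; the prompt text, temperature, token limit, and Discord account id are placeholder values, not values taken from the diff. The callback receives the same { promptResult, totalTokens } object that commands/chat.js feeds into its embed.

    // Usage sketch with assumed placeholder values; calling this would hit the
    // OpenAI API and record a transaction via createTransaction.
    const { submitPromptTransaction } = require('./controllers/chatGptController');

    submitPromptTransaction(
        'Write a haiku about Discord bots',   // prompt (placeholder)
        0.7,                                  // temperature (placeholder)
        150,                                  // max_tokens (placeholder)
        '123456789012345678',                 // discord_account_id (placeholder)
        (err, result) => {
            if (err) return console.error(err);
            // promptResult is the generated text, totalTokens the usage count
            console.log(result.promptResult, `(${result.totalTokens} tokens)`);
        }
    );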