diff --git a/commands/chat.js b/commands/chat.js
index 4edb234..f4d0017 100644
--- a/commands/chat.js
+++ b/commands/chat.js
@@ -1,14 +1,15 @@
-const { submitPromptTransaction } = require("../controllers/chatGptController");
+const { submitTextPromptTransaction } = require("../controllers/openAiController");
 const { SlashCommandBuilder } = require('discord.js');
 const { DebugBuilder } = require("../utilities/debugBuilder");
 const log = new DebugBuilder("server", "chat");
 const { EmmeliaEmbedBuilder } = require('../libUtils');
 
+const COST_OF_COMMAND = 100;
 module.exports = {
     data: new SlashCommandBuilder()
         .setName('chat')
-        .setDescription('Send a text prompt to ChatGPT')
+        .setDescription(`Send a text prompt to ChatGPT; by default this command costs *${COST_OF_COMMAND}* tokens`)
         .addStringOption(option =>
             option.setName('prompt')
                 .setDescription('The prompt to be sent to ChatGPT')
                 .setRequired(true))
@@ -36,7 +37,7 @@ module.exports = {
         const maxTokens = interaction.options.getNumber('tokens') ?? undefined;
         const discordAccountId = interaction.member.id;
         try {
-            submitPromptTransaction(promptText, temperature, maxTokens, discordAccountId, async (err, result) => {
+            submitTextPromptTransaction(promptText, temperature, maxTokens, discordAccountId, async (err, result) => {
                 if (err) throw err;
 
                 const gptEmbed = new EmmeliaEmbedBuilder()
diff --git a/commands/imagine.js b/commands/imagine.js
new file mode 100644
index 0000000..976728f
--- /dev/null
+++ b/commands/imagine.js
@@ -0,0 +1,72 @@
+const { submitImagePromptTransaction } = require("../controllers/openAiController");
+const { SlashCommandBuilder } = require('discord.js');
+const { DebugBuilder } = require("../utilities/debugBuilder");
+const log = new DebugBuilder("server", "imagine");
+const { EmmeliaEmbedBuilder } = require('../libUtils');
+
+const COST_OF_COMMAND = 100;
+
+module.exports = {
+    data: new SlashCommandBuilder()
+        .setName('imagine')
+        .setDescription(`Submit an image generation prompt to DALL-E; by default this command costs *${COST_OF_COMMAND}* tokens`)
+        .addStringOption(option =>
+            option.setName('prompt')
+                .setDescription('The prompt to be sent to DALL-E')
+                .setRequired(true))
+        .addBooleanOption(option =>
+            option.setName('public')
+                .setDescription("Set this to false if you would like the message to only be visible to you. *defaults to public*")
+                .setRequired(false))
+        .addNumberOption(option =>
+            option.setName('images')
+                .setDescription('The number of images you wish to generate [1 - 10] *(defaults to 1)*')
+                .setRequired(false))
+        .addStringOption(option =>
+            option.setName('size')
+                .setDescription('The size of the images to be generated *defaults to 256px*')
+                .addChoices(
+                    { name: '1024px', value: '1024x1024' },
+                    { name: '512px', value: '512x512' },
+                    { name: '256px', value: '256x256' },
+                )
+                .setRequired(false)),
+    example: "imagine [the sinking of the titanic on acid] [4] [", // Need to figure out the tokens
+    isPrivileged: false,
+    requiresTokens: true,
+    defaultTokenUsage: COST_OF_COMMAND,
+    deferInitialReply: true,
+    async execute(interaction) {
+        const promptText = interaction.options.getString('prompt');
+        const images = interaction.options.getNumber('images') ?? undefined;
+        const size = interaction.options.getString('size') ?? undefined;
+        const discordAccountId = interaction.member.id;
+        try {
+            submitImagePromptTransaction(promptText, discordAccountId, images, size, async (err, imageResults) => {
+                if (err) throw err;
+
+                log.DEBUG("Image Results: ", imageResults)
+
+                const dalleEmbed = new EmmeliaEmbedBuilder()
+                    .setColor(0x0099FF)
+                    .setTitle(`New Image Result`)
+                    .setDescription(`${interaction.member.user} sent the prompt: '${promptText}'`)
+                    .addFields({ name: 'Tokens Used', value: `${this.defaultTokenUsage}`, inline: true })
+                    .setTimestamp()
+
+                const imagesInResult = imageResults.results.data.length;
+
+                log.DEBUG("Images in the result: ", imagesInResult);
+
+                if (imagesInResult === 1) dalleEmbed.setImage(imageResults.results.data[0].url);
+
+                await interaction.editReply({ embeds: [dalleEmbed], ephemeral: false });
+            });
+
+            // Needs reply code to reply to the generation
+        } catch (err) {
+            log.ERROR(err)
+            //await interaction.reply(err.toString());
+        }
+    }
+};
\ No newline at end of file
diff --git a/controllers/chatGptController.js b/controllers/chatGptController.js
deleted file mode 100644
index f9b6c07..0000000
--- a/controllers/chatGptController.js
+++ /dev/null
@@ -1,73 +0,0 @@
-const { DebugBuilder } = require("../utilities/debugBuilder");
-const log = new DebugBuilder("server", "chatGptController");
-
-const { createTransaction } = require("./transactionController");
-
-const { encode } = require("gpt-3-encoder")
-const { Configuration, OpenAIApi } = require('openai');
-const configuration = new Configuration({
-    organization: process.env.OPENAI_ORG,
-    apiKey: process.env.OPENAI_KEY
-});
-
-const openai = new OpenAIApi(configuration);
-
-async function getGeneration(_prompt, callback, { _model = "text-davinci-003", _temperature = 0, _max_tokens = 100}) {
-    // If the temperature is set to null
-    _temperature = _temperature ?? 0;
-    // If the tokens are set to null
-    _max_tokens = _max_tokens ?? 100;
-
-    const encodedPrompt = encode(_prompt);
-    const promptTokens = encodedPrompt.length;
-    log.DEBUG("Tokens in prompt: ", promptTokens);
-    _max_tokens = _max_tokens - promptTokens;
-    log.DEBUG("Updated max tokens: ", _max_tokens);
-
-    log.DEBUG("Getting chat with these properties: ", _prompt, _model, _temperature, _max_tokens)
-    try{
-        const response = await openai.createCompletion({
-            model: _model,
-            prompt: _prompt,
-            temperature: _temperature,
-            max_tokens: _max_tokens
-        });
-        if(!response?.data) return callback(new Error("Error in response data: ", response));
-        return callback(undefined, response.data);
-    } catch (err){
-        log.ERROR(err);
-        log.ERROR("Error when handing model request");
-        //return callback(err, undefined);
-    }
-    //var responseData = response.data.choices[0].text;
-}
-
-/**
- * Use ChatGPT to generate a response
- *
- * @param {*} _prompt The use submitted text prompt
- * @param {*} param1 Default parameters can be modified
- * @returns
- */
-exports.submitPromptTransaction = async (prompt, temperature, max_tokens, discord_account_id, callback) => {
-
-
-    getGeneration(prompt, (err, gptResult) => {
-        if (err) callback(err, undefined);
-
-        // TODO - Use the pricing table to calculate discord tokens
-        log.DEBUG("GPT Response", gptResult);
-        const discordTokensUsed = gptResult.usage.total_tokens;
-
-        if (gptResult){
-            createTransaction(gptResult.id, discord_account_id, discordTokensUsed, gptResult.usage.total_tokens, 1, async (err, transactionResult) => {
-                if (err) callback(err, undefined);
-
-                if (transactionResult){
-                    log.DEBUG("Transaction Created: ", transactionResult);
-                    callback(undefined, ({ promptResult: gptResult.choices[0].text, totalTokens: discordTokensUsed}));
-                }
-            });
-        }
-    }, { _temperature: temperature, _max_tokens: max_tokens });
- }
\ No newline at end of file
diff --git a/controllers/openAiController.js b/controllers/openAiController.js
new file mode 100644
index 0000000..0cf6a64
--- /dev/null
+++ b/controllers/openAiController.js
@@ -0,0 +1,144 @@
+const { DebugBuilder } = require("../utilities/debugBuilder");
+const log = new DebugBuilder("server", "openAiController");
+const crypto = require('crypto');
+
+const { createTransaction } = require("./transactionController");
+
+const { encode } = require("gpt-3-encoder");
+const { Configuration, OpenAIApi } = require('openai');
+const configuration = new Configuration({
+    organization: process.env.OPENAI_ORG,
+    apiKey: process.env.OPENAI_KEY
+});
+
+const openai = new OpenAIApi(configuration);
+
+async function getImageGeneration(_prompt, { _images_to_generate = 1, _image_size = "256x256" }, callback){
+    const validImageSizes = ["256x256", "512x512", "1024x1024"];
+
+    if (!_prompt) return callback(new Error("No prompt given before generating image"), undefined);
+    if (!validImageSizes.includes(_image_size)) return callback(new Error(`Image size given is not valid, valid sizes: ${validImageSizes.join(", ")}`), undefined);
+    if (!_images_to_generate || _images_to_generate < 1 || _images_to_generate > 10) return callback(new Error("Invalid image count given, must be between 1 and 10"), undefined);
+
+    // Calculate token usage?
+
+    log.DEBUG("Getting image generation with these properties: ", _prompt, _images_to_generate, _image_size)
+    try{
+        const response = await openai.createImage({
+            prompt: _prompt,
+            n: _images_to_generate,
+            size: _image_size
+        })
+
+
+        if(!response?.data) return callback(new Error("Error in response data: ", response));
+        return callback(undefined, response.data);
+    } catch (err){
+        log.ERROR(err);
+        log.ERROR("Error when handling image model request");
+        return callback(err, undefined);
+    }
+}
+
+/**
+ * Get the response from GPT with the specified parameters
+ *
+ * @param {*} _prompt The text prompt to send to the model
+ * @param {*} callback The callback to call with errors or results
+ * @param {*} param2 Any parameters the user has changed for this request
+ * @returns
+ */
+async function getTextGeneration(_prompt, callback, { _model = "text-davinci-003", _temperature = 0, _max_tokens = 100}) {
+    // If the temperature is set to null
+    _temperature = _temperature ?? 0;
+    // If the tokens are set to null
+    _max_tokens = _max_tokens ?? 100;
+
+    const encodedPrompt = encode(_prompt);
+    const promptTokens = encodedPrompt.length;
+    log.DEBUG("Tokens in prompt: ", promptTokens);
+    if (promptTokens >= _max_tokens) return callback(new Error(`Prompt tokens (${promptTokens}) exceed the set max tokens (${_max_tokens})`));
+
+    _max_tokens = _max_tokens - promptTokens;
+    log.DEBUG("Updated max tokens: ", _max_tokens);
+
+    log.DEBUG("Getting chat with these properties: ", _prompt, _model, _temperature, _max_tokens)
+    try{
+        const response = await openai.createCompletion({
+            model: _model,
+            prompt: _prompt,
+            temperature: _temperature,
+            max_tokens: _max_tokens
+        });
+        if(!response?.data) return callback(new Error("Error in response data: ", response));
+        return callback(undefined, response.data);
+    } catch (err){
+        log.ERROR("Error when handling text model request: ", err);
+        return callback(err, undefined);
+    }
+    //var responseData = response.data.choices[0].text;
+}
+
+/**
+ * Use ChatGPT to generate a response
+ *
+ * @param {*} prompt The user-submitted text prompt
+ * @param {*} param1 Default parameters can be modified
+ * @returns
+ */
+exports.submitTextPromptTransaction = async (prompt, temperature, max_tokens, discord_account_id, callback) => {
+
+    getTextGeneration(prompt, (err, gptResult) => {
+        if (err) return callback(err, undefined);
+
+        // TODO - Use the pricing table to calculate discord tokens
+        log.DEBUG("GPT Response", gptResult);
+        const discordTokensUsed = gptResult.usage.total_tokens;
+
+        if (gptResult){
+            createTransaction(gptResult.id, discord_account_id, discordTokensUsed, gptResult.usage.total_tokens, 1, async (err, transactionResult) => {
+                if (err) return callback(err, undefined);
+
+                if (transactionResult){
+                    log.DEBUG("Transaction Created: ", transactionResult);
+                    callback(undefined, ({ promptResult: gptResult.choices[0].text, totalTokens: discordTokensUsed}));
+                }
+            });
+        }
+    }, { _temperature: temperature, _max_tokens: max_tokens });
+}
+
+/**
+ * Wrapper to generate an image from a prompt and params and store this information in a transaction
+ *
+ * @param {*} prompt The prompt of the image
+ * @param {*} images_to_generate The number of images to generate
+ * @param {*} image_size The size of the image ["256x256" | "512x512" | "1024x1024"]
+ * @param {*} callback The callback to call with errors or results
+ */
+exports.submitImagePromptTransaction = async (prompt, discord_account_id, images_to_generate, image_size, callback) => {
+
+    getImageGeneration(prompt, {
+        _image_size: image_size,
+        _images_to_generate: images_to_generate
+    }, (err, dalleResult) => {
+        if (err) return callback(err, undefined);
+
+        // TODO - Use the pricing table to calculate discord tokens
+        log.DEBUG("DALL-E Result", dalleResult);
+        const discordTokensUsed = 100;
+        const providerTokensUsed = 100;
+        const dalleResultHash = crypto.createHash('sha1').update(JSON.stringify({ discord_account_id, prompt, images_to_generate, image_size })).digest('hex');
+
+        if (dalleResult){
+            createTransaction(dalleResultHash, discord_account_id, discordTokensUsed, providerTokensUsed, 2, async (err, transactionResult) => {
+                if (err) return callback(err, undefined);
+
+                if (transactionResult){
+                    log.DEBUG("Transaction Created: ", transactionResult);
+                    callback(undefined, ({ results: dalleResult, totalTokens: discordTokensUsed}));
+                }
+            });
+        }
+    });
+}
\ No newline at end of file
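
For reference, submitImagePromptTransaction hands its callback an object of the form { results, totalTokens }, where results is the OpenAI createImage response body and results.data is the array of generated images, each carrying a url (commands/imagine.js reads results.data[0].url the same way). Below is a minimal consumer sketch under those assumptions; the collectImageUrls helper and the literal prompt and account values are invented for illustration and are not part of the diff.

    const { submitImagePromptTransaction } = require("./controllers/openAiController");

    // Hypothetical helper: pull every generated image URL out of the
    // { results, totalTokens } object the controller passes to its callback.
    function collectImageUrls(imageResults) {
        return imageResults.results.data.map(image => image.url);
    }

    // Example call, mirroring how commands/imagine.js invokes the controller:
    // (prompt, discord_account_id, images_to_generate, image_size, callback)
    submitImagePromptTransaction("a lighthouse at dawn", "123456789012345678", 2, "512x512", (err, imageResults) => {
        if (err) return console.error(err);
        console.log(`Spent ${imageResults.totalTokens} tokens`);
        collectImageUrls(imageResults).forEach(url => console.log(url));
    });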