const { DebugBuilder } = require("../utilities/debugBuilder");
const log = new DebugBuilder("server", "openAiController");
const crypto = require('crypto');
const { createTransaction } = require("./transactionController");
const { authorizeTokenUsage } = require("../middleware/balanceAuthorization");
const { encode } = require("gpt-3-encoder");
const { Configuration, OpenAIApi } = require('openai');

const configuration = new Configuration({
    organization: process.env.OPENAI_ORG,
    apiKey: process.env.OPENAI_KEY
});
const openai = new OpenAIApi(configuration);

/**
 * Generate one or more images from a prompt using the OpenAI image API
 *
 * @param {*} _prompt The text prompt describing the image
 * @param {*} param1 Optional overrides for the image count and image size
 * @param {*} callback The callback to call with errors or results
 * @returns
 */
async function getImageGeneration(_prompt, { _images_to_generate = 1, _image_size = "256x256" }, callback) {
    const validImageSizes = ["256x256", "512x512", "1024x1024"];

    if (!_prompt) return callback(new Error("No prompt given before generating image"), undefined);
    if (!validImageSizes.includes(_image_size)) return callback(new Error(`Image size given is not valid, valid sizes: ${validImageSizes.join(", ")}`), undefined);
    if (!_images_to_generate || _images_to_generate > 10) return callback(new Error("Invalid image count given"), undefined);

    // Calculate token usage?
    log.DEBUG("Getting image generation with these properties: ", _prompt, _images_to_generate, _image_size);

    try {
        const response = await openai.createImage({
            prompt: _prompt,
            n: _images_to_generate,
            size: _image_size
        });

        if (!response?.data) return callback(new Error(`Error in response data: ${JSON.stringify(response)}`), undefined);

        return callback(undefined, response.data);
    } catch (err) {
        log.ERROR(err);
        log.ERROR("Error when handling image model request");
        return callback(err, undefined);
    }
}

/**
 * Get the response from GPT with the specified parameters
 *
 * @param {*} _prompt The text prompt to send to the model
 * @param {*} callback The callback to call with errors or results
 * @param {*} param2 Optional overrides for the model, temperature and max tokens
 * @returns
 */
async function getTextGeneration(_prompt, callback, { _model = "text-davinci-003", _temperature = 0, _max_tokens = 100 }) {
    // Destructuring defaults do not apply when the caller passes an explicit null, so coerce here
    _temperature = _temperature ?? 0;
    _max_tokens = _max_tokens ?? 100;

    const encodedPrompt = encode(_prompt);
    const promptTokens = encodedPrompt.length;
    log.DEBUG("Tokens in prompt: ", promptTokens);

    if (promptTokens >= _max_tokens) return callback(new Error(`Tokens of request (${promptTokens}) are greater than the set max tokens (${_max_tokens})`), undefined);

    // Reserve the prompt's tokens so the completion stays within the requested budget
    _max_tokens = _max_tokens - promptTokens;
    log.DEBUG("Updated max tokens: ", _max_tokens);
    log.DEBUG("Getting chat with these properties: ", _prompt, _model, _temperature, _max_tokens);

    try {
        const response = await openai.createCompletion({
            model: _model,
            prompt: _prompt,
            temperature: _temperature,
            max_tokens: _max_tokens
        });

        if (!response?.data) return callback(new Error(`Error in response data: ${JSON.stringify(response)}`), undefined);

        return callback(undefined, response.data);
    } catch (err) {
        log.ERROR("Error when handling text model request: ", err);
        return callback(err, undefined);
    }
}
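// A minimal usage sketch for getTextGeneration, kept as a comment so nothing runs on module
// load. The prompt text and overrides below are hypothetical; only the callback-plus-options
// calling convention and the response fields (choices, usage) come from this module.
//
//   getTextGeneration("Write a haiku about Discord bots", (err, data) => {
//       if (err) return log.ERROR(err);
//       log.DEBUG(data.choices[0].text, data.usage.total_tokens);
//   }, { _temperature: 0.7, _max_tokens: 150 });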
/**
 * Wrapper to generate a text completion from a prompt and store this information in a transaction
 *
 * @param {*} prompt The user-submitted text prompt
 * @param {*} temperature The sampling temperature to use for this request
 * @param {*} max_tokens The maximum number of tokens this request may consume
 * @param {*} discord_account_id The Discord account the transaction is billed to
 * @param {*} interaction The Discord interaction that triggered this request
 * @param {*} command The command that triggered this request
 * @param {*} callback The callback to call with errors or results
 * @returns
 */
exports.submitTextPromptTransaction = async (prompt, temperature, max_tokens, discord_account_id, interaction, command, callback) => {
    getTextGeneration(prompt, (err, gptResult) => {
        if (err) return callback(err, undefined);

        // TODO - Use the pricing table to calculate discord tokens
        log.DEBUG("GPT Response", gptResult);
        const discordTokensUsed = gptResult.usage.total_tokens;

        if (gptResult) {
            createTransaction(gptResult.id, discord_account_id, discordTokensUsed, gptResult.usage.total_tokens, 1, async (err, transactionResult) => {
                if (err) return callback(err, undefined);

                if (transactionResult) {
                    log.DEBUG("Transaction Created: ", transactionResult);
                    callback(undefined, ({ promptResult: gptResult.choices[0].text, totalTokens: discordTokensUsed }));
                }
            });
        }
    }, { _temperature: temperature, _max_tokens: max_tokens });
}
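// A sketch of how submitTextPromptTransaction might be called from a Discord command handler
// after requiring this controller. The interaction object, command name, and reply call are
// hypothetical stand-ins for whatever the bot layer actually passes in; only the argument order
// and the { promptResult, totalTokens } result shape come from this module.
//
//   submitTextPromptTransaction(promptText, 0.5, 200, interaction.user.id, interaction, "chat",
//       (err, result) => {
//           if (err) return interaction.reply("Something went wrong.");
//           interaction.reply(`${result.promptResult}\n(${result.totalTokens} tokens used)`);
//       });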
/**
 * Wrapper to generate an image from a prompt and params and store this information in a transaction
 *
 * @param {*} prompt The prompt of the image
 * @param {*} discord_account_id The Discord account the transaction is billed to
 * @param {*} images_to_generate The number of images to generate
 * @param {*} image_size The size of the image ["256x256" | "512x512" | "1024x1024"]
 * @param {*} interaction The Discord interaction that triggered this request
 * @param {*} command The command that triggered this request
 * @param {*} callback The callback to call with errors or results
 */
exports.submitImagePromptTransaction = async (prompt, discord_account_id, images_to_generate, image_size, interaction, command, callback) => {
    let pricePerImage = 800;
    log.DEBUG(image_size);

    switch (image_size) {
        case "1024x1024":
            log.DEBUG("1024 selected");
            pricePerImage = 1000;
            break;
        case "512x512":
            log.DEBUG("512 selected");
            pricePerImage = 900;
            break;
        case "256x256":
            log.DEBUG("256 selected");
            pricePerImage = 800;
            break;
        default:
            log.DEBUG("256px defaulted");
            pricePerImage = 800;
            break;
    }

    if (!images_to_generate) images_to_generate = 1;
    if (!image_size) image_size = "256x256";

    const totalTokensToBeUsed = pricePerImage * images_to_generate;
    log.DEBUG("Total tokens to be used", totalTokensToBeUsed, pricePerImage, images_to_generate);

    authorizeTokenUsage(interaction, command, totalTokensToBeUsed, (isAuthorized) => {
        if (!isAuthorized) return callback(new Error("Token usage was not authorized for this account"), undefined);

        getImageGeneration(prompt, { _image_size: image_size, _images_to_generate: images_to_generate }, (err, dalleResult) => {
            if (err) return callback(err, undefined);

            // TODO - Use the pricing table to calculate discord tokens
            log.DEBUG("DALL-E Result", dalleResult);

            // The image endpoint returns no stable id, so hash the request details to key the transaction
            const dalleResultHash = crypto.createHash('sha1').update(JSON.stringify({ discord_account_id, prompt, images_to_generate, image_size })).digest('hex');

            if (dalleResult) {
                createTransaction(dalleResultHash, discord_account_id, totalTokensToBeUsed, totalTokensToBeUsed, 2, async (err, transactionResult) => {
                    if (err) return callback(err, undefined);

                    if (transactionResult) {
                        log.DEBUG("Transaction Created: ", transactionResult);
                        callback(undefined, ({ results: dalleResult, totalTokens: totalTokensToBeUsed }));
                    }
                });
            }
        });
    });
}
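// A minimal calling sketch for submitImagePromptTransaction, kept as a comment. With the pricing
// above, 3 images at "512x512" are authorized for 900 * 3 = 2700 tokens before the image request
// is made. The interaction object, command name, and logging calls are hypothetical placeholders;
// only the argument order and the { results, totalTokens } result shape come from this module.
//
//   submitImagePromptTransaction("a watercolor fox", interaction.user.id, 3, "512x512",
//       interaction, "imagine", (err, result) => {
//           if (err) return log.ERROR(err);
//           log.DEBUG(result.results, result.totalTokens); // totalTokens === 2700
//       });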