From 294d8897f77dfa3c00156c3703c712cb16ed1c10 Mon Sep 17 00:00:00 2001
From: Logan Cusano <logan@example.invalid>
Date: Sun, 26 Feb 2023 22:09:16 -0500
Subject: [PATCH] Add pricing command

---
 commands/pricing.js | 39 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 39 insertions(+)
 create mode 100644 commands/pricing.js

diff --git a/commands/pricing.js b/commands/pricing.js
new file mode 100644
index 0000000..afc0601
--- /dev/null
+++ b/commands/pricing.js
@@ -0,0 +1,39 @@
+const { SlashCommandBuilder } = require('discord.js');
+const { DebugBuilder } = require("../utilities/debugBuilder");
+const log = new DebugBuilder("server", "pricing");
+
+const { EmmeliaEmbedBuilder } = require("../libUtils");
+
+// Slash command /pricing: replies with an embed describing token pricing.
+module.exports = {
+    data: new SlashCommandBuilder()
+        .setName('pricing')
+        .setDescription('Replies with the pricing for tokens'),
+    example: "pricing",
+    isPrivileged: false,
+    requiresTokens: false,
+    defaultTokenUsage: 0,
+    deferInitialReply: false,
+    async execute(interaction) {
+        try {
+            // Static pricing info — viewing it costs no tokens (defaultTokenUsage: 0).
+            const pricingEmbed = new EmmeliaEmbedBuilder()
+                .setColor(0x0099FF)
+                .setTitle(`Emmelia's Pricing`)
+                .addFields(
+                    { name: 'Tokens', value: `Tokens are a shared currency that is used between all AI models. Each model charges tokens differently however, so do keep this in mind. $1 = 45,000 tokens` },
+                    { name: 'Text (ChatGPT)', value: `Tokens are used in the prompt and in the response of a generation. The max tokens will not be breached by the combined prompt and response. Keep this in mind when using text generations. Each syllable is one token. This section is 50 tokens.` },
+                    { name: 'Images (DALL-E)', value: `Tokens are used for each generation, variation, and upscale. The image size also affects the amount of tokens used: 256px = 800 tokens, 512px = 900 tokens, 1024px = 1000 tokens` }
+                )
+                .setTimestamp();
+
+            await interaction.reply({ embeds: [pricingEmbed] });
+        } catch (err) {
+            log.ERROR(err);
+            // Acknowledge the interaction so Discord doesn't show "The application did not respond".
+            if (!interaction.replied) {
+                await interaction.reply({ content: 'Sorry, something went wrong while fetching the pricing.', ephemeral: true });
+            }
+        }
+    }
+};