Implemented ChatGPT functionality
@@ -31,10 +31,26 @@ module.exports = {
     defaultTokenUsage: 100,
     deferInitialReply: true,
     async execute(interaction) {
+        const promptText = interaction.options.getString('prompt');
+        const temperature = interaction.options.getNumber('temperature') ?? undefined;
+        const maxTokens = interaction.options.getNumber('tokens') ?? undefined;
+        const discordAccountId = interaction.member.id;
         try {
-            submitPromptTransaction(interaction, async (err, result) => {
+            submitPromptTransaction(promptText, temperature, maxTokens, discordAccountId, async (err, result) => {
                 if (err) throw err;
-                await interaction.editReply({ content: `${interaction.member.user} ${result.promptResult}`, ephemeral: false });
+
+                const gptEmbed = new EmbedBuilder()
+                    .setColor(0x0099FF)
+                    .setTitle(`New GPT response`)
+                    .setDescription(`${interaction.member.user} sent: '${promptText}'`)
+                    .addFields(
+                        { name: 'Generated Text', value: result.promptResult },
+                    )
+                    .addFields({ name: 'Tokens Used', value: `${result.totalTokens}`, inline: true })
+                    .setTimestamp()
+                    .setFooter({ text: 'Brought to you by Emmelia.' });
+
+                await interaction.editReply({ embeds: [gptEmbed], ephemeral: false });
             });
 
             // Needs reply code to reply to the generation
@@ -5,41 +5,35 @@ const { createTransaction } = require("./transactionController");
 
 const { Configuration, OpenAIApi } = require('openai');
 const configuration = new Configuration({
     organization: process.env.OPENAI_ORG,
     apiKey: process.env.OPENAI_KEY
 });
 
 const openai = new OpenAIApi(configuration);
 
 
 async function getGeneration(_prompt, callback, { _model = "text-davinci-003", _temperature = 0, _max_tokens = 100}) {
+    // If the temperature is set to null
+    _temperature = _temperature ?? 0;
+    // If the tokens are set to null
+    _max_tokens = _max_tokens ?? 100;
+
+    // TODO - Get the tokens in the message and subtract that from the max tokens to be sent to the AI
 
     log.DEBUG("Getting chat with these properties: ", _prompt, _model, _temperature, _max_tokens)
     try{
-        /*
         const response = await openai.createCompletion({
             model: _model,
             prompt: _prompt,
             temperature: _temperature,
             max_tokens: _max_tokens
         });
-        */
+        if(!response?.data) return callback(new Error("Error in response data: ", response));
+        return callback(undefined, response.data);
-        var response = {
-            "id": "ABD123",
-            "usage": {
-                "total_tokens": _max_tokens
-            },
-            "data": {
-                "choices": [
-                    {
-                        "text": "ASKLDJHASLDJALSKDJAKLSDJLASKDJALSKD"
-                    }
-                ]
-            }
-        };
-        return callback(undefined, response);
     } catch (err){
-        return callback(err, undefined);
+        log.ERROR(err);
+        log.ERROR("Error when handing model request");
+        //return callback(err, undefined);
     }
     //var responseData = response.data.choices[0].text;
 }
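In the next hunk, submitPromptTransaction reads gptResult.choices[0].text instead of the previous gptResult.data.choices[0].text, which lines up with getGeneration now passing response.data to its callback. A rough sketch of the shape that callback presumably receives (field names taken from the fields the diff reads; values are placeholders only):

// Illustrative only: approximate shape of the object handed to the getGeneration callback,
// based on the properties the code below accesses (id, usage.total_tokens, choices[0].text).
const exampleGptResult = {
    id: 'cmpl-xxxxxxxx',                    // completion id (placeholder value)
    usage: { total_tokens: 42 },            // read as gptResult.usage.total_tokens
    choices: [{ text: 'generated text' }]   // read as gptResult.choices[0].text
};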
@@ -51,30 +45,25 @@ async function getGeneration(_prompt, callback, { _model = "text-davinci-003", _
  * @param {*} param1 Default parameters can be modified
  * @returns
  */
-exports.submitPromptTransaction = async (interaction, callback) => {
+exports.submitPromptTransaction = async (prompt, temperature, max_tokens, discord_account_id, callback) => {
-    var params = {};
-    var promptText = interaction.options.getString('prompt');
-    var temperature = interaction.options.getNumber('temperature');
-    var maxTokens = interaction.options.getNumber('tokens');
 
-    if (temperature) params._temperature = temperature;
+    getGeneration(prompt, (err, gptResult) => {
-    if (maxTokens) params._max_tokens = maxTokens;
 
-    getGeneration(promptText, (err, gptResult) => {
         if (err) callback(err, undefined);
 
         // TODO - Use the pricing table to calculate discord tokens
+        log.DEBUG("GPT Response", gptResult);
         const discordTokensUsed = gptResult.usage.total_tokens;
 
         if (gptResult){
-            createTransaction(gptResult.id, interaction.member.id, discordTokensUsed, gptResult.usage.total_tokens, 1, async (err, transactionResult) => {
+            createTransaction(gptResult.id, discord_account_id, discordTokensUsed, gptResult.usage.total_tokens, 1, async (err, transactionResult) => {
                 if (err) callback(err, undefined);
 
                 if (transactionResult){
                     log.DEBUG("Transaction Created: ", transactionResult);
-                    callback(undefined, ({ promptResult: gptResult.data.choices[0].text, totalTokens: discordTokensUsed}));
+                    callback(undefined, ({ promptResult: gptResult.choices[0].text, totalTokens: discordTokensUsed}));
                 }
             });
         }
-    }, { _temperature: temperature, _max_tokens: maxTokens });
+    }, { _temperature: temperature, _max_tokens: max_tokens });
 }
@@ -24,12 +24,13 @@ module.exports = {
         try {
             if (command.deferInitialReply) {
                 try {
-                    if (!interaction.options.getBool('public')) await interaction.deferReply({ ephemeral: true });
+                    if (interaction.options.getBool('public') && interaction.options.getBool('public') == false) await interaction.deferReply({ ephemeral: true });
+                    else await interaction.deferReply({ ephemeral: false });
                 }
                 catch (err) {
                     if (err instanceof TypeError) {
                         // The public option doesn't exist in this command
-                        await interaction.deferReply({ ephemeral: true });
+                        await interaction.deferReply({ ephemeral: false });
                     } else {
                         throw err;
                     }
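The execute handler in the first hunk reads the 'prompt', 'temperature', and 'tokens' options, and the interaction handler above checks a 'public' option, but the option registration itself sits outside the diff. A minimal sketch of how the command module might declare them with discord.js's SlashCommandBuilder follows; only the option names and the defaultTokenUsage/deferInitialReply fields come from the diff, while the command name and descriptions are assumptions.

const { SlashCommandBuilder } = require('discord.js');

module.exports = {
    // Hypothetical registration; names other than the option names are placeholders.
    data: new SlashCommandBuilder()
        .setName('chatgpt') // assumed command name
        .setDescription('Send a prompt to the GPT model') // assumed description
        .addStringOption(opt => opt.setName('prompt').setDescription('Text to send to the model').setRequired(true))
        .addNumberOption(opt => opt.setName('temperature').setDescription('Sampling temperature (optional)'))
        .addNumberOption(opt => opt.setName('tokens').setDescription('Maximum tokens to generate (optional)'))
        .addBooleanOption(opt => opt.setName('public').setDescription('Post the response publicly')),
    defaultTokenUsage: 100,
    deferInitialReply: true,
    // async execute(interaction) { ... } (as shown in the first hunk above)
};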