Added new module to tokenize prompts
- Used to subtract prompt tokens from max tokens
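For context, a minimal sketch of the approach this commit takes, assuming the gpt-3-encoder package; the helper name and the clamp to zero are illustrative additions, not part of the commit itself:

```js
const { encode } = require("gpt-3-encoder");

// encode() produces the same BPE token ids the GPT-3 models use, so its
// length is the number of tokens the prompt itself will consume.
function remainingCompletionBudget(prompt, maxTokens = 100) {
    const promptTokens = encode(prompt).length;
    // Clamping at zero is an extra guard, not something the commit does.
    return Math.max(0, maxTokens - promptTokens);
}

// Example: a short prompt leaves most of the budget for the completion.
console.log(remainingCompletionBudget("Hello world", 100));
```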
@@ -3,6 +3,7 @@ const log = new DebugBuilder("server", "chatGptController");
const { createTransaction } = require("./transactionController");
const { encode } = require("gpt-3-encoder")
const { Configuration, OpenAIApi } = require('openai');
const configuration = new Configuration({
organization: process.env.OPENAI_ORG,
@@ -11,14 +12,15 @@ const configuration = new Configuration({
const openai = new OpenAIApi(configuration);
async function getGeneration(_prompt, callback, { _model = "text-davinci-003", _temperature = 0, _max_tokens = 100}) {
// If the temperature is set to null
_temperature = _temperature ?? 0;
// If the tokens are set to null
_max_tokens = _max_tokens ?? 100;
// TODO - Get the tokens in the message and subtract that from the max tokens to be sent to the AI
const encodedPrompt = encode(_prompt);
const promptTokens = encodedPrompt.length;
_max_tokens = _max_tokens - promptTokens;
log.DEBUG("Getting chat with these properties: ", _prompt, _model, _temperature, _max_tokens)
try{
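The hunk cuts off at the try block, so the actual request is not shown above. As a hedged illustration only, with the openai v3 Node SDK the adjusted budget would typically be passed as max_tokens on the completion call; everything below is assumed, not taken from this commit:

```js
// Illustrative continuation, not part of the diff above.
const response = await openai.createCompletion({
    model: _model,
    prompt: _prompt,
    temperature: _temperature,
    max_tokens: _max_tokens, // budget left after subtracting the prompt's tokens
});
const text = response.data.choices[0].text;
```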