Added new module to tokenize prompts
- Used to subtract prompt tokens from max tokens
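For context, gpt-3-encoder's encode() turns a string into an array of BPE token ids, so its length is the prompt's token count. A rough sketch of the idea behind this change, with an illustrative prompt and budget rather than anything taken from this repo:

// Rough sketch of the technique this commit introduces; prompt and budget are illustrative.
const { encode } = require("gpt-3-encoder");

const prompt = "Summarise the following text: ...";
const tokenBudget = 100;                      // mirrors the default _max_tokens used below
const promptTokens = encode(prompt).length;   // encode() returns an array of BPE token ids
const remainingForCompletion = tokenBudget - promptTokens;

console.log(`${promptTokens} prompt tokens, ${remainingForCompletion} left for the completion`);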
@@ -3,6 +3,7 @@ const log = new DebugBuilder("server", "chatGptController");
 const { createTransaction } = require("./transactionController");
 
+const { encode } = require("gpt-3-encoder")
 const { Configuration, OpenAIApi } = require('openai');
 const configuration = new Configuration({
     organization: process.env.OPENAI_ORG,
@@ -11,14 +12,15 @@ const configuration = new Configuration({
 const openai = new OpenAIApi(configuration);
 
 
 async function getGeneration(_prompt, callback, { _model = "text-davinci-003", _temperature = 0, _max_tokens = 100}) {
     // If the temperature is set to null
     _temperature = _temperature ?? 0;
     // If the tokens are set to null
     _max_tokens = _max_tokens ?? 100;
 
     // TODO - Get the tokens in the message and subtract that from the max tokens to be sent to the AI
+    const encodedPrompt = encode(_prompt);
+    const promptTokens = encodedPrompt.length;
+    _max_tokens = _max_tokens - promptTokens;
 
     log.DEBUG("Getting chat with these properties: ", _prompt, _model, _temperature, _max_tokens)
     try{
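The try block is truncated in this diff, so the request that consumes the adjusted _max_tokens is not shown. Assuming the usual openai v3 createCompletion call (consistent with the Configuration/OpenAIApi setup above), the flow would look roughly like the sketch below; the function name, the apiKey field, the Math.max guard, and the callback invocation are assumptions of the sketch, not part of the commit. The hunk that follows adds the matching dependency to package.json.

// Sketch only: an approximation of what the truncated try block presumably does with the adjusted budget.
const { encode } = require("gpt-3-encoder");
const { Configuration, OpenAIApi } = require("openai");

const openai = new OpenAIApi(new Configuration({
    organization: process.env.OPENAI_ORG,
    apiKey: process.env.OPENAI_API_KEY, // assumed; the real configuration block is not fully shown
}));

async function getGenerationSketch(prompt, callback, { model = "text-davinci-003", temperature = 0, maxTokens = 100 } = {}) {
    // Reserve room for the prompt so prompt + completion stay inside the caller's budget.
    const promptTokens = encode(prompt).length;
    const completionBudget = Math.max(maxTokens - promptTokens, 1); // the guard is an addition of this sketch

    const response = await openai.createCompletion({
        model,
        prompt,
        temperature,
        max_tokens: completionBudget,
    });
    callback(response.data.choices[0].text); // how the real callback is invoked is an assumption
}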
@@ -24,7 +24,8 @@
     "ejs": "~2.6.1",
     "http-errors": "~1.6.3",
     "morgan": "~1.9.1",
-    "node-html-markdown": "~1.3.0"
+    "node-html-markdown": "~1.3.0",
+    "gpt-3-encoder": "~1.1.4"
   },
   "scripts": {
     "test": "echo \"Error: no test specified\" && exit 1",
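If the dependency is added to package.json by hand as above, it still needs an npm install before the new require will resolve. A quick way to confirm the module loads and round-trips text, relying only on the encode/decode pair gpt-3-encoder exports:

// Quick check that the newly added dependency resolves; decode(encode(s)) should return s.
const { encode, decode } = require("gpt-3-encoder");

const tokens = encode("hello world");
console.log(tokens.length);   // number of BPE tokens in the string
console.log(decode(tokens));  // "hello world"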