From 0a6a9e40c13452dfa1c6a1b8ef97e4b2c2710375 Mon Sep 17 00:00:00 2001
From: Sean Hatfield <seanhatfield5@gmail.com>
Date: Fri, 10 May 2024 14:49:02 -0700
Subject: [PATCH] [FIX] Add max tokens field to generic OpenAI LLM connector
 (#1345)

* Add max tokens field to generic OpenAI LLM connector

* Add max_tokens property to generic OpenAI agent provider
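
The cap is read from a new GENERIC_OPEN_AI_MAX_TOKENS environment
variable and applied to both the blocking and streaming chat paths,
falling back to 1024 tokens when unset. A minimal .env entry (the
value shown is illustrative, not a recommendation):

    GENERIC_OPEN_AI_MAX_TOKENS=1024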
---
 .../LLMSelection/GenericOpenAiOptions/index.jsx   | 15 +++++++++++++++
 server/models/systemSettings.js                   |  1 +
 server/utils/AiProviders/genericOpenAi/index.js   |  3 +++
 .../agents/aibitat/providers/genericOpenAi.js     |  2 ++
 server/utils/helpers/updateENV.js                 |  4 ++++
 5 files changed, 25 insertions(+)
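
Reviewer note: the nonZero check wired up in the updateENV.js hunk is an
existing validator in that file and is not modified by this patch. Its
shape is roughly the following sketch (name per the hunk; the body is
assumed from how the helper is used, so treat it as illustrative):

  // Returns an error string for non-numeric or non-positive input,
  // or null when the value is acceptable.
  function nonZero(input = "") {
    if (isNaN(Number(input))) return "Value must be a number";
    return Number(input) <= 0 ? "Value must be greater than zero" : null;
  }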

diff --git a/frontend/src/components/LLMSelection/GenericOpenAiOptions/index.jsx b/frontend/src/components/LLMSelection/GenericOpenAiOptions/index.jsx
index 8f5a00b66..ac143e94a 100644
--- a/frontend/src/components/LLMSelection/GenericOpenAiOptions/index.jsx
+++ b/frontend/src/components/LLMSelection/GenericOpenAiOptions/index.jsx
@@ -61,6 +61,21 @@ export default function GenericOpenAiOptions({ settings }) {
           autoComplete="off"
         />
       </div>
+      <div className="flex flex-col w-60">
+        <label className="text-white text-sm font-semibold block mb-4">
+          Max Tokens
+        </label>
+        <input
+          type="number"
+          name="GenericOpenAiMaxTokens"
+          className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:border-white block w-full p-2.5"
+          placeholder="Max tokens per request (e.g. 1024)"
+          min={1}
+          defaultValue={settings?.GenericOpenAiMaxTokens || 1024}
+          required={true}
+          autoComplete="off"
+        />
+      </div>
     </div>
   );
 }
diff --git a/server/models/systemSettings.js b/server/models/systemSettings.js
index 9ac41db0c..21d7af217 100644
--- a/server/models/systemSettings.js
+++ b/server/models/systemSettings.js
@@ -373,6 +373,7 @@ const SystemSettings = {
       GenericOpenAiModelPref: process.env.GENERIC_OPEN_AI_MODEL_PREF,
       GenericOpenAiTokenLimit: process.env.GENERIC_OPEN_AI_MODEL_TOKEN_LIMIT,
       GenericOpenAiKey: !!process.env.GENERIC_OPEN_AI_API_KEY,
+      GenericOpenAiMaxTokens: process.env.GENERIC_OPEN_AI_MAX_TOKENS,
 
       // Cohere API Keys
       CohereApiKey: !!process.env.COHERE_API_KEY,
diff --git a/server/utils/AiProviders/genericOpenAi/index.js b/server/utils/AiProviders/genericOpenAi/index.js
index 8c171b679..686d4c677 100644
--- a/server/utils/AiProviders/genericOpenAi/index.js
+++ b/server/utils/AiProviders/genericOpenAi/index.js
@@ -18,6 +18,7 @@ class GenericOpenAiLLM {
     });
     this.model =
       modelPreference ?? process.env.GENERIC_OPEN_AI_MODEL_PREF ?? null;
+    this.maxTokens = Number(process.env.GENERIC_OPEN_AI_MAX_TOKENS ?? 1024);
     if (!this.model)
       throw new Error("GenericOpenAI must have a valid model set.");
     this.limits = {
@@ -94,6 +95,7 @@ class GenericOpenAiLLM {
         model: this.model,
         messages,
         temperature,
+        max_tokens: this.maxTokens,
       })
       .catch((e) => {
         throw new Error(e.response.data.error.message);
@@ -110,6 +112,7 @@ class GenericOpenAiLLM {
       stream: true,
       messages,
       temperature,
+      max_tokens: this.maxTokens,
     });
     return streamRequest;
   }
diff --git a/server/utils/agents/aibitat/providers/genericOpenAi.js b/server/utils/agents/aibitat/providers/genericOpenAi.js
index 3521bc7d0..e41476d2a 100644
--- a/server/utils/agents/aibitat/providers/genericOpenAi.js
+++ b/server/utils/agents/aibitat/providers/genericOpenAi.js
@@ -24,6 +24,7 @@ class GenericOpenAiProvider extends InheritMultiple([Provider, UnTooled]) {
     this._client = client;
     this.model = model;
     this.verbose = true;
+    this.maxTokens = Number(process.env.GENERIC_OPEN_AI_MAX_TOKENS ?? 1024);
   }
 
   get client() {
@@ -36,6 +37,7 @@ class GenericOpenAiProvider extends InheritMultiple([Provider, UnTooled]) {
         model: this.model,
         temperature: 0,
         messages,
+        max_tokens: this.maxTokens,
       })
       .then((result) => {
         if (!result.hasOwnProperty("choices"))
diff --git a/server/utils/helpers/updateENV.js b/server/utils/helpers/updateENV.js
index 11be3db80..39223c334 100644
--- a/server/utils/helpers/updateENV.js
+++ b/server/utils/helpers/updateENV.js
@@ -173,6 +173,10 @@ const KEY_MAPPING = {
     envKey: "GENERIC_OPEN_AI_API_KEY",
     checks: [],
   },
+  GenericOpenAiMaxTokens: {
+    envKey: "GENERIC_OPEN_AI_MAX_TOKENS",
+    checks: [nonZero],
+  },
 
   EmbeddingEngine: {
     envKey: "EMBEDDING_ENGINE",