From a4d5b2588f4daec158a147effd68ba2a5b0dca1d Mon Sep 17 00:00:00 2001
From: timothycarambat <rambat1010@gmail.com>
Date: Mon, 3 Feb 2025 14:19:21 -0800
Subject: [PATCH] o3 model patch

---
 .../AgentConfig/AgentModelSelection/index.jsx     |  2 ++
 server/utils/AiProviders/modelMap.js              |  3 +++
 server/utils/AiProviders/openAi/index.js          | 15 ++++++++-------
 server/utils/helpers/customModels.js              |  2 +-
 4 files changed, 14 insertions(+), 8 deletions(-)

diff --git a/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentModelSelection/index.jsx b/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentModelSelection/index.jsx
index 713ceb4ac..c84014ef1 100644
--- a/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentModelSelection/index.jsx
+++ b/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentModelSelection/index.jsx
@@ -24,6 +24,8 @@ function supportedModel(provider, model = "") {
         "o1-preview-2024-09-12",
         "o1-mini",
         "o1-mini-2024-09-12",
+        "o3-mini",
+        "o3-mini-2025-01-31",
       ].includes(model) === false
     );
   }
diff --git a/server/utils/AiProviders/modelMap.js b/server/utils/AiProviders/modelMap.js
index 153285d4c..3fdabc1cc 100644
--- a/server/utils/AiProviders/modelMap.js
+++ b/server/utils/AiProviders/modelMap.js
@@ -66,10 +66,13 @@ const MODEL_MAP = {
     "o1-preview-2024-09-12": 128_000,
     "o1-mini": 128_000,
     "o1-mini-2024-09-12": 128_000,
+    "o3-mini": 200_000,
+    "o3-mini-2025-01-31": 200_000,
   },
   deepseek: {
     "deepseek-chat": 128_000,
     "deepseek-coder": 128_000,
+    "deepseek-reasoner": 128_000,
   },
   xai: {
     "grok-beta": 131_072,
diff --git a/server/utils/AiProviders/openAi/index.js b/server/utils/AiProviders/openAi/index.js
index 71a6a0edf..587dad81f 100644
--- a/server/utils/AiProviders/openAi/index.js
+++ b/server/utils/AiProviders/openAi/index.js
@@ -31,8 +31,8 @@ class OpenAiLLM {
    * Check if the model is an o1 model.
    * @returns {boolean}
    */
-  get isO1Model() {
-    return this.model.startsWith("o1");
+  get isOTypeModel() {
+    return this.model.startsWith("o");
   }
 
   #appendContext(contextTexts = []) {
@@ -48,7 +48,8 @@ class OpenAiLLM {
   }
 
   streamingEnabled() {
-    if (this.isO1Model) return false;
+    // o3-mini is the only o-type model that supports streaming — NOTE(review): exact-equality check below does not match the dated snapshot "o3-mini-2025-01-31"; confirm intended
+    if (this.isOTypeModel && this.model !== "o3-mini") return false;
     return "streamGetChatCompletion" in this;
   }
 
@@ -68,7 +69,7 @@ class OpenAiLLM {
   async isValidChatCompletionModel(modelName = "") {
     const isPreset =
       modelName.toLowerCase().includes("gpt") ||
-      modelName.toLowerCase().includes("o1");
+      modelName.toLowerCase().startsWith("o");
     if (isPreset) return true;
 
     const model = await this.openai.models
@@ -117,7 +118,7 @@ class OpenAiLLM {
     // in order to combat this, we can use the "user" role as a replacement for now
     // https://community.openai.com/t/o1-models-do-not-support-system-role-in-chat-completion/953880
     const prompt = {
-      role: this.isO1Model ? "user" : "system",
+      role: this.isOTypeModel ? "user" : "system",
       content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
     };
     return [
@@ -141,7 +142,7 @@ class OpenAiLLM {
         .create({
           model: this.model,
           messages,
-          temperature: this.isO1Model ? 1 : temperature, // o1 models only accept temperature 1
+          temperature: this.isOTypeModel ? 1 : temperature, // o-type models (o1, o3, ...) only accept temperature 1
         })
         .catch((e) => {
           throw new Error(e.message);
@@ -177,7 +178,7 @@ class OpenAiLLM {
         model: this.model,
         stream: true,
         messages,
-        temperature: this.isO1Model ? 1 : temperature, // o1 models only accept temperature 1
+        temperature: this.isOTypeModel ? 1 : temperature, // o-type models (o1, o3, ...) only accept temperature 1
       }),
       messages
       // runPromptTokenCalculation: true - We manually count the tokens because OpenAI does not provide them in the stream
diff --git a/server/utils/helpers/customModels.js b/server/utils/helpers/customModels.js
index b3829eb4c..3f211d431 100644
--- a/server/utils/helpers/customModels.js
+++ b/server/utils/helpers/customModels.js
@@ -145,7 +145,7 @@ async function openAiModels(apiKey = null) {
     .filter(
       (model) =>
         (model.id.includes("gpt") && !model.id.startsWith("ft:")) ||
-        model.id.includes("o1")
+        model.id.startsWith("o") // o1, o1-mini, o3, etc — NOTE(review): also matches other "o"-prefixed ids (e.g. omni-moderation-*); verify filter is not too broad
     )
     .filter(
       (model) =>