o3 model patch

commit a4d5b2588f
parent 121fbea284
Author: timothycarambat
Date: 2025-02-03 14:19:21 -08:00
4 changed files with 14 additions and 8 deletions
Changed files:
  frontend/src/pages/WorkspaceSettings/AgentConfig/AgentModelSelection
  server/utils/AiProviders
  server/utils/helpers

frontend/src/pages/WorkspaceSettings/AgentConfig/AgentModelSelection

@@ -24,6 +24,8 @@ function supportedModel(provider, model = "") {
       "o1-preview-2024-09-12",
       "o1-mini",
       "o1-mini-2024-09-12",
+      "o3-mini",
+      "o3-mini-2025-01-31",
     ].includes(model) === false
   );
 }
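
For context, supportedModel() appears to act as a deny-list: it returns false for OpenAI models that should not be offered in the agent model picker, and this hunk adds both o3-mini ids to that list. A minimal sketch of the surrounding function; the provider check and the elided earlier entries are assumptions, only the ids shown in the diff are copied:

    // Hedged sketch of the guard this hunk extends (shape assumed).
    function supportedModel(provider, model = "") {
      if (provider !== "openai") return true; // assumption: only OpenAI ids are filtered
      return (
        [
          // ...earlier o1 entries elided...
          "o1-preview-2024-09-12",
          "o1-mini",
          "o1-mini-2024-09-12",
          "o3-mini",
          "o3-mini-2025-01-31",
        ].includes(model) === false
      );
    }

    supportedModel("openai", "o3-mini"); // false -- hidden from the agent picker
    supportedModel("openai", "gpt-4o");  // true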

server/utils/AiProviders (MODEL_MAP)

@@ -66,10 +66,13 @@ const MODEL_MAP = {
     "o1-preview-2024-09-12": 128_000,
     "o1-mini": 128_000,
     "o1-mini-2024-09-12": 128_000,
+    "o3-mini": 200_000,
+    "o3-mini-2025-01-31": 200_000,
   },
   deepseek: {
     "deepseek-chat": 128_000,
     "deepseek-coder": 128_000,
+    "deepseek-reasoner": 128_000,
   },
   xai: {
     "grok-beta": 131_072,

server/utils/AiProviders (OpenAiLLM)

@@ -31,8 +31,8 @@ class OpenAiLLM {
-   * Check if the model is an o1 model.
+   * Check if the model is an o-type (reasoning) model.
    * @returns {boolean}
    */
-  get isO1Model() {
-    return this.model.startsWith("o1");
+  get isOTypeModel() {
+    return this.model.startsWith("o");
   }
 
   #appendContext(contextTexts = []) {
@@ -48,7 +48,8 @@
   }
 
   streamingEnabled() {
-    if (this.isO1Model) return false;
+    // o3-mini is the only o-type model that supports streaming
+    if (this.isOTypeModel && this.model !== "o3-mini") return false;
     return "streamGetChatCompletion" in this;
   }
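
Taken together, isOTypeModel matches any model id that begins with "o", and the streaming gate then re-allows only the bare "o3-mini" id. A standalone restatement of that logic (the real method also requires a streamGetChatCompletion implementation, omitted here):

    // Restatement of the gate as pure functions (shape taken from the diff).
    const isOTypeModel = (model) => model.startsWith("o");
    const streamingEnabled = (model) =>
      !(isOTypeModel(model) && model !== "o3-mini");

    streamingEnabled("gpt-4o");             // true
    streamingEnabled("o3-mini");            // true
    streamingEnabled("o1-mini");            // false
    streamingEnabled("o3-mini-2025-01-31"); // false -- the strict comparison also
                                            // excludes the dated o3-mini snapshot
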
@@ -68,7 +69,7 @@
   async isValidChatCompletionModel(modelName = "") {
     const isPreset =
       modelName.toLowerCase().includes("gpt") ||
-      modelName.toLowerCase().includes("o1");
+      modelName.toLowerCase().startsWith("o");
     if (isPreset) return true;
 
     const model = await this.openai.models
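
The widened preset test now accepts any model name that, once lowercased, starts with "o". Illustrative calls:

    const isPreset = (name) =>
      name.toLowerCase().includes("gpt") ||
      name.toLowerCase().startsWith("o");

    isPreset("o3-mini");                // true
    isPreset("O1-preview");             // true (lowercased first)
    isPreset("gpt-4o");                 // true
    isPreset("text-embedding-3-small"); // false -- falls through to the API lookup
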
@@ -117,7 +118,7 @@
     // in order to combat this, we can use the "user" role as a replacement for now
     // https://community.openai.com/t/o1-models-do-not-support-system-role-in-chat-completion/953880
     const prompt = {
-      role: this.isO1Model ? "user" : "system",
+      role: this.isOTypeModel ? "user" : "system",
       content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
     };
     return [
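
The role swap exists because o-series reasoning models reject the "system" role in chat completions (see the linked thread), so the system prompt is delivered as a "user" turn instead. A minimal sketch of the effect; buildPrompt is a hypothetical helper, not a method from the file:

    // Hypothetical helper illustrating the role fallback.
    function buildPrompt(systemPrompt, isOTypeModel) {
      return {
        role: isOTypeModel ? "user" : "system",
        content: systemPrompt,
      };
    }

    buildPrompt("You are a helpful assistant.", true);
    // => { role: "user", content: "You are a helpful assistant." }
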
@@ -141,7 +142,7 @@
       .create({
         model: this.model,
         messages,
-        temperature: this.isO1Model ? 1 : temperature, // o1 models only accept temperature 1
+        temperature: this.isOTypeModel ? 1 : temperature, // o-type models only accept temperature 1
       })
       .catch((e) => {
         throw new Error(e.message);
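
Both chat paths pin the temperature because o-type models reject any sampling temperature other than the default of 1 (the streaming path below applies the same pin). Restated:

    const effectiveTemperature = (isOTypeModel, requested) =>
      isOTypeModel ? 1 : requested;

    effectiveTemperature(true, 0.2);  // 1 -- user setting is overridden
    effectiveTemperature(false, 0.2); // 0.2
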
@@ -177,7 +178,7 @@
         model: this.model,
         stream: true,
         messages,
-        temperature: this.isO1Model ? 1 : temperature, // o1 models only accept temperature 1
+        temperature: this.isOTypeModel ? 1 : temperature, // o-type models only accept temperature 1
       }),
       messages
       // runPromptTokenCalculation: true - We manually count the tokens because OpenAI does not provide them in the stream

server/utils/helpers (openAiModels)

@@ -145,7 +145,7 @@ async function openAiModels(apiKey = null) {
     .filter(
       (model) =>
         (model.id.includes("gpt") && !model.id.startsWith("ft:")) ||
-        model.id.includes("o1")
+        model.id.startsWith("o") // o1, o1-mini, o3, etc
     )
     .filter(
       (model) =>
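
Note that startsWith("o") is broader than includes("o1"): per the in-diff comment it is meant to cover o1, o1-mini, o3 and later o-series ids, but it will also admit any unrelated id that happens to begin with "o". Running the new predicate over a few sample ids (the ids are illustrative, not a real /v1/models response):

    const ids = [
      "gpt-4o",
      "ft:gpt-4o-mini:acme::abc123", // fine-tune, excluded by startsWith("ft:")
      "o1-mini",
      "o3-mini",
      "omni-moderation-latest", // also starts with "o"
      "whisper-1",
    ];

    ids.filter(
      (id) =>
        (id.includes("gpt") && !id.startsWith("ft:")) ||
        id.startsWith("o")
    );
    // => ["gpt-4o", "o1-mini", "o3-mini", "omni-moderation-latest"]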