mirror of
https://github.com/Mintplex-Labs/anything-llm.git
synced 2025-03-13 21:52:22 +00:00
OpenAI o1 model support (#2427)
* support openai o1 models
* Prevent o1 use for agents; add `isO1Model` getter
---------
Co-authored-by: timothycarambat <rambat1010@gmail.com>
This commit is contained in:
parent
6674e5aab8
commit
fa528e0cf3
4 changed files with 30 additions and 7 deletions
frontend/src/pages/WorkspaceSettings/AgentConfig/AgentModelSelection
server/utils
|
@ -6,12 +6,19 @@ import { useTranslation } from "react-i18next";
|
|||
import { Link, useParams } from "react-router-dom";
|
||||
|
||||
// These models do NOT support function calling
// and therefore are not supported for agents.
// NOTE: the diff residue had both the pre- and post-commit deny-lists
// concatenated (a syntax error); this is the resolved post-commit list,
// which adds the o1 family (o1 models cannot do tool/function calling).
function supportedModel(provider, model = "") {
  // Only OpenAI models are restricted; every other provider passes.
  if (provider !== "openai") return true;
  // Deny-list check: a model is "supported" iff it is NOT listed here.
  return (
    [
      "gpt-3.5-turbo-0301",
      "gpt-4-turbo-2024-04-09",
      "gpt-4-turbo",
      "o1-preview",
      "o1-preview-2024-09-12",
      "o1-mini",
      "o1-mini-2024-09-12",
    ].includes(model) === false
  );
}
|
||||
|
||||
|
|
|
@ -52,6 +52,10 @@ const MODEL_MAP = {
|
|||
"gpt-4-turbo-preview": 128_000,
|
||||
"gpt-4": 8_192,
|
||||
"gpt-4-32k": 32_000,
|
||||
"o1-preview": 128_000,
|
||||
"o1-preview-2024-09-12": 128_000,
|
||||
"o1-mini": 128_000,
|
||||
"o1-mini-2024-09-12": 128_000,
|
||||
},
|
||||
deepseek: {
|
||||
"deepseek-chat": 128_000,
|
||||
|
|
|
@ -23,6 +23,14 @@ class OpenAiLLM {
|
|||
this.defaultTemp = 0.7;
|
||||
}
|
||||
|
||||
/**
|
||||
* Check if the model is an o1 model.
|
||||
* @returns {boolean}
|
||||
*/
|
||||
get isO1Model() {
|
||||
return this.model.startsWith("o1");
|
||||
}
|
||||
|
||||
#appendContext(contextTexts = []) {
|
||||
if (!contextTexts || !contextTexts.length) return "";
|
||||
return (
|
||||
|
@ -36,6 +44,7 @@ class OpenAiLLM {
|
|||
}
|
||||
|
||||
streamingEnabled() {
|
||||
if (this.isO1Model) return false;
|
||||
return "streamGetChatCompletion" in this;
|
||||
}
|
||||
|
||||
|
@ -98,8 +107,11 @@ class OpenAiLLM {
|
|||
userPrompt = "",
|
||||
attachments = [], // This is the specific attachment for only this prompt
|
||||
}) {
|
||||
// o1 Models do not support the "system" role
|
||||
// in order to combat this, we can use the "user" role as a replacement for now
|
||||
// https://community.openai.com/t/o1-models-do-not-support-system-role-in-chat-completion/953880
|
||||
const prompt = {
|
||||
role: "system",
|
||||
role: this.isO1Model ? "user" : "system",
|
||||
content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
|
||||
};
|
||||
return [
|
||||
|
@ -122,7 +134,7 @@ class OpenAiLLM {
|
|||
.create({
|
||||
model: this.model,
|
||||
messages,
|
||||
temperature,
|
||||
temperature: this.isO1Model ? 1 : temperature, // o1 models only accept temperature 1
|
||||
})
|
||||
.catch((e) => {
|
||||
throw new Error(e.message);
|
||||
|
@ -143,7 +155,7 @@ class OpenAiLLM {
|
|||
model: this.model,
|
||||
stream: true,
|
||||
messages,
|
||||
temperature,
|
||||
temperature: this.isO1Model ? 1 : temperature, // o1 models only accept temperature 1
|
||||
});
|
||||
return streamRequest;
|
||||
}
|
||||
|
|
|
@ -128,7 +128,7 @@ async function openAiModels(apiKey = null) {
|
|||
});
|
||||
|
||||
const gpts = allModels
|
||||
.filter((model) => model.id.startsWith("gpt"))
|
||||
.filter((model) => model.id.startsWith("gpt") || model.id.startsWith("o1"))
|
||||
.filter(
|
||||
(model) => !model.id.includes("vision") && !model.id.includes("instruct")
|
||||
)
|
||||
|
|
Loading…
Add table
Reference in a new issue