Enable dynamic GPT model dropdown

* Enable dynamic GPT model dropdown
This commit is contained in: [branch/tag list lost in extraction]
Timothy Carambat 2024-04-16 14:54:39 -07:00 committed by GitHub
parent e193bb8f59
commit 661563408a
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
4 changed files with 117 additions and 76 deletions
frontend/src
components/LLMSelection/OpenAiOptions
hooks
server/utils
AiProviders/openAi
helpers

View file

@ -32,22 +32,26 @@ export default function OpenAiOptions({ settings }) {
}
function OpenAIModelSelection({ apiKey, settings }) {
const [customModels, setCustomModels] = useState([]);
const [groupedModels, setGroupedModels] = useState({});
const [loading, setLoading] = useState(true);
useEffect(() => {
async function findCustomModels() {
if (!apiKey) {
setCustomModels([]);
setLoading(false);
return;
}
setLoading(true);
const { models } = await System.customModels(
"openai",
typeof apiKey === "boolean" ? null : apiKey
);
setCustomModels(models || []);
if (models?.length > 0) {
const modelsByOrganization = models.reduce((acc, model) => {
acc[model.organization] = acc[model.organization] || [];
acc[model.organization].push(model);
return acc;
}, {});
setGroupedModels(modelsByOrganization);
}
setLoading(false);
}
findCustomModels();
@ -82,41 +86,21 @@ function OpenAIModelSelection({ apiKey, settings }) {
required={true}
className="bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
>
<optgroup label="General LLM models">
{[
"gpt-3.5-turbo",
"gpt-3.5-turbo-1106",
"gpt-4",
"gpt-4-turbo-preview",
"gpt-4-1106-preview",
"gpt-4-32k",
].map((model) => {
return (
<option
key={model}
value={model}
selected={settings?.OpenAiModelPref === model}
>
{model}
</option>
);
})}
</optgroup>
{customModels.length > 0 && (
<optgroup label="Your fine-tuned models">
{customModels.map((model) => {
return (
{Object.keys(groupedModels)
.sort()
.map((organization) => (
<optgroup key={organization} label={organization}>
{groupedModels[organization].map((model) => (
<option
key={model.id}
value={model.id}
selected={settings?.OpenAiModelPref === model.id}
>
{model.id}
{model.name}
</option>
);
})}
</optgroup>
)}
))}
</optgroup>
))}
</select>
</div>
);

View file

@ -4,14 +4,7 @@ import { useEffect, useState } from "react";
// Providers which cannot use this feature for workspace<>model selection
export const DISABLED_PROVIDERS = ["azure", "lmstudio", "native"];
const PROVIDER_DEFAULT_MODELS = {
openai: [
"gpt-3.5-turbo",
"gpt-3.5-turbo-1106",
"gpt-4",
"gpt-4-turbo-preview",
"gpt-4-1106-preview",
"gpt-4-32k",
],
openai: [],
gemini: ["gemini-pro"],
anthropic: [
"claude-instant-1.2",
@ -41,6 +34,7 @@ function groupModels(models) {
}, {});
}
const groupedProviders = ["togetherai", "openai"];
export default function useGetProviderModels(provider = null) {
const [defaultModels, setDefaultModels] = useState([]);
const [customModels, setCustomModels] = useState([]);
@ -50,9 +44,12 @@ export default function useGetProviderModels(provider = null) {
async function fetchProviderModels() {
if (!provider) return;
const { models = [] } = await System.customModels(provider);
if (PROVIDER_DEFAULT_MODELS.hasOwnProperty(provider))
if (
PROVIDER_DEFAULT_MODELS.hasOwnProperty(provider) &&
!groupedProviders.includes(provider)
)
setDefaultModels(PROVIDER_DEFAULT_MODELS[provider]);
provider === "togetherai"
groupedProviders.includes(provider)
? setCustomModels(groupModels(models))
: setCustomModels(models);
setLoading(false);

View file

@ -46,32 +46,28 @@ class OpenAiLLM {
promptWindowLimit() {
switch (this.model) {
case "gpt-3.5-turbo":
return 4096;
case "gpt-3.5-turbo-1106":
return 16385;
case "gpt-4":
return 8192;
return 16_385;
case "gpt-4-turbo":
case "gpt-4-1106-preview":
return 128000;
case "gpt-4-turbo-preview":
return 128000;
return 128_000;
case "gpt-4":
return 8_192;
case "gpt-4-32k":
return 32000;
return 32_000;
default:
return 4096; // assume a fine-tune 3.5
return 4_096; // assume a fine-tune 3.5?
}
}
// Short circuit if name has 'gpt' since we now fetch models from OpenAI API
// via the user API key, so the model must be relevant and real.
// and if somehow it is not, chat will fail but that is caught.
// we don't want to hit the OpenAI api every chat because it will get spammed
// and introduce latency for no reason.
async isValidChatCompletionModel(modelName = "") {
const validModels = [
"gpt-4",
"gpt-3.5-turbo",
"gpt-3.5-turbo-1106",
"gpt-4-1106-preview",
"gpt-4-turbo-preview",
"gpt-4-32k",
];
const isPreset = validModels.some((model) => modelName === model);
const isPreset = modelName.toLowerCase().includes("gpt");
if (isPreset) return true;
const model = await this.openai

View file

@ -47,21 +47,85 @@ async function openAiModels(apiKey = null) {
apiKey: apiKey || process.env.OPEN_AI_KEY,
});
const openai = new OpenAIApi(config);
const models = (
await openai
.listModels()
.then((res) => res.data.data)
.catch((e) => {
console.error(`OpenAI:listModels`, e.message);
return [];
})
).filter(
(model) => !model.owned_by.includes("openai") && model.owned_by !== "system"
);
const allModels = await openai
.listModels()
.then((res) => res.data.data)
.catch((e) => {
console.error(`OpenAI:listModels`, e.message);
return [
{
name: "gpt-3.5-turbo",
id: "gpt-3.5-turbo",
object: "model",
created: 1677610602,
owned_by: "openai",
organization: "OpenAi",
},
{
name: "gpt-4",
id: "gpt-4",
object: "model",
created: 1687882411,
owned_by: "openai",
organization: "OpenAi",
},
{
name: "gpt-4-turbo",
id: "gpt-4-turbo",
object: "model",
created: 1712361441,
owned_by: "system",
organization: "OpenAi",
},
{
name: "gpt-4-32k",
id: "gpt-4-32k",
object: "model",
created: 1687979321,
owned_by: "openai",
organization: "OpenAi",
},
{
name: "gpt-3.5-turbo-16k",
id: "gpt-3.5-turbo-16k",
object: "model",
created: 1683758102,
owned_by: "openai-internal",
organization: "OpenAi",
},
];
});
const gpts = allModels
.filter((model) => model.id.startsWith("gpt"))
.filter(
(model) => !model.id.includes("vision") && !model.id.includes("instruct")
)
.map((model) => {
return {
...model,
name: model.id,
organization: "OpenAi",
};
});
const customModels = allModels
.filter(
(model) =>
!model.owned_by.includes("openai") && model.owned_by !== "system"
)
.map((model) => {
return {
...model,
name: model.id,
organization: "Your Fine-Tunes",
};
});
// Api Key was successful so lets save it for future uses
if (models.length > 0 && !!apiKey) process.env.OPEN_AI_KEY = apiKey;
return { models, error: null };
if ((gpts.length > 0 || customModels.length > 0) && !!apiKey)
process.env.OPEN_AI_KEY = apiKey;
return { models: [...gpts, ...customModels], error: null };
}
async function localAIModels(basePath = null, apiKey = null) {