Integrate Apipie support directly ()

resolves 
resolves 
Note: Streaming not supported
Timothy Carambat 2024-10-15 12:36:06 -07:00 committed by GitHub
parent 1a0ddfcd20
commit bce7988683
20 changed files with 668 additions and 16 deletions
.vscode
docker
frontend/src
  components/LLMSelection/ApiPieOptions
  media/llmprovider
  pages
    GeneralSettings/LLMPreference
    OnboardingFlow/Steps
      DataHandling
      LLMPreference
    WorkspaceSettings/AgentConfig/AgentLLMSelection
server

.vscode/settings.json

@@ -5,6 +5,7 @@
"AIbitat",
"allm",
"anythingllm",
"Apipie",
"Astra",
"Chartable",
"cleancss",
@@ -18,6 +19,7 @@
"elevenlabs",
"Embeddable",
"epub",
"fireworksai",
"GROQ",
"hljs",
"huggingface",
@@ -40,14 +42,13 @@
"pagerender",
"Qdrant",
"royalblue",
"searxng",
"SearchApi",
"searxng",
"Serper",
"Serply",
"streamable",
"textgenwebui",
"togetherai",
"fireworksai",
"Unembed",
"vectordbs",
"Weaviate",

docker/.env.example

@@ -105,6 +105,10 @@ GID='1000'
# FIREWORKS_AI_LLM_API_KEY='my-fireworks-ai-key'
# FIREWORKS_AI_LLM_MODEL_PREF='accounts/fireworks/models/llama-v3p1-8b-instruct'
# LLM_PROVIDER='apipie'
# APIPIE_LLM_API_KEY='sk-123abc'
# APIPIE_LLM_MODEL_PREF='openrouter/llama-3.1-8b-instruct'
###########################################
######## Embedding API SELECTION ##########
###########################################
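For reference, the APIPIE_LLM_* variables above feed an OpenAI-compatible client pointed at https://apipie.ai/v1, exactly as the new provider class further down does. A minimal standalone sketch (the model name mirrors the example value above; any model from APIpie's catalog works):

// Sketch: how the APIPIE_LLM_* settings map onto an OpenAI-compatible request.
const { OpenAI } = require("openai");

async function smokeTest() {
  const client = new OpenAI({
    baseURL: "https://apipie.ai/v1", // same endpoint the ApiPieLLM class uses
    apiKey: process.env.APIPIE_LLM_API_KEY,
  });
  const result = await client.chat.completions.create({
    model: process.env.APIPIE_LLM_MODEL_PREF ?? "openrouter/llama-3.1-8b-instruct",
    messages: [{ role: "user", content: "Hello!" }],
  });
  console.log(result.choices[0].message.content);
}

smokeTest();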

frontend/src/components/LLMSelection/ApiPieOptions/index.jsx

@@ -0,0 +1,101 @@
import System from "@/models/system";
import { useState, useEffect } from "react";
export default function ApiPieLLMOptions({ settings }) {
return (
<div className="flex flex-col gap-y-4 mt-1.5">
<div className="flex gap-[36px]">
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-3">
APIpie API Key
</label>
<input
type="password"
name="ApipieLLMApiKey"
className="bg-zinc-900 text-white placeholder:text-white/20 text-sm rounded-lg focus:outline-primary-button active:outline-primary-button outline-none block w-full p-2.5"
placeholder="APIpie API Key"
defaultValue={settings?.ApipieLLMApiKey ? "*".repeat(20) : ""}
required={true}
autoComplete="off"
spellCheck={false}
/>
</div>
{!settings?.credentialsOnly && (
<APIPieModelSelection settings={settings} />
)}
</div>
</div>
);
}
function APIPieModelSelection({ settings }) {
const [groupedModels, setGroupedModels] = useState({});
const [loading, setLoading] = useState(true);
useEffect(() => {
async function findCustomModels() {
setLoading(true);
const { models } = await System.customModels("apipie");
if (models?.length > 0) {
const modelsByOrganization = models.reduce((acc, model) => {
acc[model.organization] = acc[model.organization] || [];
acc[model.organization].push(model);
return acc;
}, {});
setGroupedModels(modelsByOrganization);
}
setLoading(false);
}
findCustomModels();
}, []);
if (loading || Object.keys(groupedModels).length === 0) {
return (
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-3">
Chat Model Selection
</label>
<select
name="ApipieLLMModelPref"
disabled={true}
className="bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
>
<option disabled={true} selected={true}>
-- loading available models --
</option>
</select>
</div>
);
}
return (
<div className="flex flex-col w-60">
<label className="text-white text-sm font-semibold block mb-3">
Chat Model Selection
</label>
<select
name="ApipieLLMModelPref"
required={true}
className="bg-zinc-900 border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
>
{Object.keys(groupedModels)
.sort()
.map((organization) => (
<optgroup key={organization} label={organization}>
{groupedModels[organization].map((model) => (
<option
key={model.id}
value={model.id}
selected={settings?.ApipieLLMModelPref === model.id}
>
{model.name}
</option>
))}
</optgroup>
))}
</select>
</div>
);
}

frontend/src/media/llmprovider/apipie.png

Binary file not shown (new image, 14 KiB).

frontend/src/pages/GeneralSettings/LLMPreference/index.jsx

@@ -26,6 +26,7 @@ import CohereLogo from "@/media/llmprovider/cohere.png";
import LiteLLMLogo from "@/media/llmprovider/litellm.png";
import AWSBedrockLogo from "@/media/llmprovider/bedrock.png";
import DeepSeekLogo from "@/media/llmprovider/deepseek.png";
import APIPieLogo from "@/media/llmprovider/apipie.png";
import PreLoader from "@/components/Preloader";
import OpenAiOptions from "@/components/LLMSelection/OpenAiOptions";
@@ -50,6 +51,7 @@ import TextGenWebUIOptions from "@/components/LLMSelection/TextGenWebUIOptions";
import LiteLLMOptions from "@/components/LLMSelection/LiteLLMOptions";
import AWSBedrockLLMOptions from "@/components/LLMSelection/AwsBedrockLLMOptions";
import DeepSeekOptions from "@/components/LLMSelection/DeepSeekOptions";
import ApiPieLLMOptions from "@/components/LLMSelection/ApiPieOptions";
import LLMItem from "@/components/LLMSelection/LLMItem";
import { CaretUpDown, MagnifyingGlass, X } from "@phosphor-icons/react";
@@ -221,6 +223,27 @@ export const AVAILABLE_LLM_PROVIDERS = [
description: "Run DeepSeek's powerful LLMs.",
requiredConfig: ["DeepSeekApiKey"],
},
{
name: "AWS Bedrock",
value: "bedrock",
logo: AWSBedrockLogo,
options: (settings) => <AWSBedrockLLMOptions settings={settings} />,
description: "Run powerful foundation models privately with AWS Bedrock.",
requiredConfig: [
"AwsBedrockLLMAccessKeyId",
"AwsBedrockLLMAccessKey",
"AwsBedrockLLMRegion",
"AwsBedrockLLMModel",
],
},
{
name: "APIpie",
value: "apipie",
logo: APIPieLogo,
options: (settings) => <ApiPieLLMOptions settings={settings} />,
description: "A unified API of AI services from leading providers",
requiredConfig: ["ApipieLLMApiKey", "ApipieLLMModelPref"],
},
{
name: "Generic OpenAI",
value: "generic-openai",
@@ -235,19 +258,6 @@ export const AVAILABLE_LLM_PROVIDERS = [
"GenericOpenAiKey",
],
},
{
name: "AWS Bedrock",
value: "bedrock",
logo: AWSBedrockLogo,
options: (settings) => <AWSBedrockLLMOptions settings={settings} />,
description: "Run powerful foundation models privately with AWS Bedrock.",
requiredConfig: [
"AwsBedrockLLMAccessKeyId",
"AwsBedrockLLMAccessKey",
"AwsBedrockLLMRegion",
"AwsBedrockLLMModel",
],
},
{
name: "Native",
value: "native",

frontend/src/pages/OnboardingFlow/Steps/DataHandling/index.jsx

@@ -21,6 +21,7 @@ import TextGenWebUILogo from "@/media/llmprovider/text-generation-webui.png";
import LiteLLMLogo from "@/media/llmprovider/litellm.png";
import AWSBedrockLogo from "@/media/llmprovider/bedrock.png";
import DeepSeekLogo from "@/media/llmprovider/deepseek.png";
import APIPieLogo from "@/media/llmprovider/apipie.png";
import CohereLogo from "@/media/llmprovider/cohere.png";
import ZillizLogo from "@/media/vectordbs/zilliz.png";
@@ -202,6 +203,13 @@ export const LLM_SELECTION_PRIVACY = {
description: ["Your model and chat contents are visible to DeepSeek"],
logo: DeepSeekLogo,
},
apipie: {
name: "APIpie.AI",
description: [
"Your model and chat contents are visible to APIpie in accordance with their terms of service.",
],
logo: APIPieLogo,
},
};
export const VECTOR_DB_PRIVACY = {

frontend/src/pages/OnboardingFlow/Steps/LLMPreference/index.jsx

@@ -21,6 +21,7 @@ import TextGenWebUILogo from "@/media/llmprovider/text-generation-webui.png";
import LiteLLMLogo from "@/media/llmprovider/litellm.png";
import AWSBedrockLogo from "@/media/llmprovider/bedrock.png";
import DeepSeekLogo from "@/media/llmprovider/deepseek.png";
import APIPieLogo from "@/media/llmprovider/apipie.png";
import CohereLogo from "@/media/llmprovider/cohere.png";
import OpenAiOptions from "@/components/LLMSelection/OpenAiOptions";
@@ -45,6 +46,7 @@ import TextGenWebUIOptions from "@/components/LLMSelection/TextGenWebUIOptions";
import LiteLLMOptions from "@/components/LLMSelection/LiteLLMOptions";
import AWSBedrockLLMOptions from "@/components/LLMSelection/AwsBedrockLLMOptions";
import DeepSeekOptions from "@/components/LLMSelection/DeepSeekOptions";
import ApiPieLLMOptions from "@/components/LLMSelection/ApiPieOptions";
import LLMItem from "@/components/LLMSelection/LLMItem";
import System from "@/models/system";
@@ -195,6 +197,13 @@ const LLMS = [
options: (settings) => <DeepSeekOptions settings={settings} />,
description: "Run DeepSeek's powerful LLMs.",
},
{
name: "APIpie",
value: "apipie",
logo: APIPieLogo,
options: (settings) => <ApiPieLLMOptions settings={settings} />,
description: "A unified API of AI services from leading providers",
},
{
name: "Generic OpenAI",
value: "generic-openai",

frontend/src/pages/WorkspaceSettings/AgentConfig/AgentLLMSelection/index.jsx

@@ -24,6 +24,7 @@ const ENABLED_PROVIDERS = [
"bedrock",
"fireworksai",
"deepseek",
"apipie",
// TODO: More agent support.
// "cohere", // Has tool calling and will need to build explicit support
// "huggingface" // Can be done but already has issues with no-chat templated. Needs to be tested.

server/.env.example

@@ -95,6 +95,10 @@ SIG_SALT='salt' # Please generate random string at least 32 chars long.
# COHERE_API_KEY=
# COHERE_MODEL_PREF='command-r'
# LLM_PROVIDER='apipie'
# APIPIE_LLM_API_KEY='sk-123abc'
# APIPIE_LLM_MODEL_PREF='openrouter/llama-3.1-8b-instruct'
###########################################
######## Embedding API SELECTION ##########
###########################################

server/models/systemSettings.js

@@ -512,6 +512,10 @@ const SystemSettings = {
// DeepSeek API Keys
DeepSeekApiKey: !!process.env.DEEPSEEK_API_KEY,
DeepSeekModelPref: process.env.DEEPSEEK_MODEL_PREF,
// APIPie LLM API Keys
ApipieLLMApiKey: !!process.env.APIPIE_LLM_API_KEY,
ApipieLLMModelPref: process.env.APIPIE_LLM_MODEL_PREF,
};
},

server/storage/models/.gitignore

@@ -1,4 +1,5 @@
Xenova
downloaded/*
!downloaded/.placeholder
openrouter
apipie

server/utils/AiProviders/apipie/index.js

@@ -0,0 +1,336 @@
const { NativeEmbedder } = require("../../EmbeddingEngines/native");
const {
handleDefaultStreamResponseV2,
} = require("../../helpers/chat/responses");
const { v4: uuidv4 } = require("uuid");
const {
writeResponseChunk,
clientAbortedHandler,
} = require("../../helpers/chat/responses");
const fs = require("fs");
const path = require("path");
const { safeJsonParse } = require("../../http");
const cacheFolder = path.resolve(
process.env.STORAGE_DIR
? path.resolve(process.env.STORAGE_DIR, "models", "apipie")
: path.resolve(__dirname, `../../../storage/models/apipie`)
);
class ApiPieLLM {
constructor(embedder = null, modelPreference = null) {
if (!process.env.APIPIE_LLM_API_KEY)
throw new Error("No ApiPie LLM API key was set.");
const { OpenAI: OpenAIApi } = require("openai");
this.basePath = "https://apipie.ai/v1";
this.openai = new OpenAIApi({
baseURL: this.basePath,
apiKey: process.env.APIPIE_LLM_API_KEY ?? null,
});
this.model =
modelPreference ||
process.env.APIPIE_LLM_MODEL_PREF ||
"openrouter/mistral-7b-instruct";
this.limits = {
history: this.promptWindowLimit() * 0.15,
system: this.promptWindowLimit() * 0.15,
user: this.promptWindowLimit() * 0.7,
};
this.embedder = embedder ?? new NativeEmbedder();
this.defaultTemp = 0.7;
if (!fs.existsSync(cacheFolder))
fs.mkdirSync(cacheFolder, { recursive: true });
this.cacheModelPath = path.resolve(cacheFolder, "models.json");
this.cacheAtPath = path.resolve(cacheFolder, ".cached_at");
}
log(text, ...args) {
console.log(`\x1b[36m[${this.constructor.name}]\x1b[0m ${text}`, ...args);
}
// This checks if the .cached_at file has a timestamp that is more than one week (in ms)
// from the current date. If it is, then we will refetch the API so that all the models are up
// to date.
#cacheIsStale() {
const MAX_STALE = 6.048e8; // 1 Week in MS
if (!fs.existsSync(this.cacheAtPath)) return true;
const now = Number(new Date());
const timestampMs = Number(fs.readFileSync(this.cacheAtPath));
return now - timestampMs > MAX_STALE;
}
// This function fetches the models from the ApiPie API and caches them locally.
// We do this because the ApiPie API has a lot of models, and we need the proper token context
// window for each model. That value is a constructor property, so we can only get it if this
// cache exists. We used to have this as a chore, but since there is an API to get the info,
// that made little sense. This might slow down the first request, but it keeps every model's
// context window up to date.
async #syncModels() {
if (fs.existsSync(this.cacheModelPath) && !this.#cacheIsStale())
return false;
this.log("Model cache is not present or stale. Fetching from ApiPie API.");
await fetchApiPieModels();
return;
}
#appendContext(contextTexts = []) {
if (!contextTexts || !contextTexts.length) return "";
return (
"\nContext:\n" +
contextTexts
.map((text, i) => {
return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
})
.join("")
);
}
models() {
if (!fs.existsSync(this.cacheModelPath)) return {};
return safeJsonParse(
fs.readFileSync(this.cacheModelPath, { encoding: "utf-8" }),
{}
);
}
streamingEnabled() {
return "streamGetChatCompletion" in this;
}
static promptWindowLimit(modelName) {
const cacheModelPath = path.resolve(cacheFolder, "models.json");
const availableModels = fs.existsSync(cacheModelPath)
? safeJsonParse(
fs.readFileSync(cacheModelPath, { encoding: "utf-8" }),
{}
)
: {};
return availableModels[modelName]?.maxLength || 4096;
}
promptWindowLimit() {
const availableModels = this.models();
return availableModels[this.model]?.maxLength || 4096;
}
async isValidChatCompletionModel(model = "") {
await this.#syncModels();
const availableModels = this.models();
return availableModels.hasOwnProperty(model);
}
/**
* Generates appropriate content array for a message + attachments.
* @param {{userPrompt:string, attachments: import("../../helpers").Attachment[]}}
* @returns {string|object[]}
*/
#generateContent({ userPrompt, attachments = [] }) {
if (!attachments.length) {
return userPrompt;
}
const content = [{ type: "text", text: userPrompt }];
for (let attachment of attachments) {
content.push({
type: "image_url",
image_url: {
url: attachment.contentString,
detail: "auto",
},
});
}
return content.flat();
}
constructPrompt({
systemPrompt = "",
contextTexts = [],
chatHistory = [],
userPrompt = "",
attachments = [],
}) {
const prompt = {
role: "system",
content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
};
return [
prompt,
...chatHistory,
{
role: "user",
content: this.#generateContent({ userPrompt, attachments }),
},
];
}
async getChatCompletion(messages = null, { temperature = 0.7 }) {
if (!(await this.isValidChatCompletionModel(this.model)))
throw new Error(
`ApiPie chat: ${this.model} is not valid for chat completion!`
);
const result = await this.openai.chat.completions
.create({
model: this.model,
messages,
temperature,
})
.catch((e) => {
throw new Error(e.message);
});
if (!result.hasOwnProperty("choices") || result.choices.length === 0)
return null;
return result.choices[0].message.content;
}
// APIPie says it supports streaming, but it does not work across all models and providers.
// Notably, it is not working for OpenRouter models at all.
// async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
// if (!(await this.isValidChatCompletionModel(this.model)))
// throw new Error(
// `ApiPie chat: ${this.model} is not valid for chat completion!`
// );
// const streamRequest = await this.openai.chat.completions.create({
// model: this.model,
// stream: true,
// messages,
// temperature,
// });
// return streamRequest;
// }
handleStream(response, stream, responseProps) {
const { uuid = uuidv4(), sources = [] } = responseProps;
return new Promise(async (resolve) => {
let fullText = "";
// Establish listener to early-abort a streaming response
// in case things go sideways or the user does not like the response.
// We preserve the generated text but continue as if chat was completed
// to preserve previously generated content.
const handleAbort = () => clientAbortedHandler(resolve, fullText);
response.on("close", handleAbort);
try {
for await (const chunk of stream) {
const message = chunk?.choices?.[0];
const token = message?.delta?.content;
if (token) {
fullText += token;
writeResponseChunk(response, {
uuid,
sources: [],
type: "textResponseChunk",
textResponse: token,
close: false,
error: false,
});
}
if (message === undefined || message.finish_reason !== null) {
writeResponseChunk(response, {
uuid,
sources,
type: "textResponseChunk",
textResponse: "",
close: true,
error: false,
});
response.removeListener("close", handleAbort);
resolve(fullText);
}
}
} catch (e) {
writeResponseChunk(response, {
uuid,
sources,
type: "abort",
textResponse: null,
close: true,
error: e.message,
});
response.removeListener("close", handleAbort);
resolve(fullText);
}
});
}
// handleStream(response, stream, responseProps) {
// return handleDefaultStreamResponseV2(response, stream, responseProps);
// }
// Simple wrapper for dynamic embedder & normalize interface for all LLM implementations
async embedTextInput(textInput) {
return await this.embedder.embedTextInput(textInput);
}
async embedChunks(textChunks = []) {
return await this.embedder.embedChunks(textChunks);
}
async compressMessages(promptArgs = {}, rawHistory = []) {
const { messageArrayCompressor } = require("../../helpers/chat");
const messageArray = this.constructPrompt(promptArgs);
return await messageArrayCompressor(this, messageArray, rawHistory);
}
}
async function fetchApiPieModels(providedApiKey = null) {
const apiKey = providedApiKey || process.env.APIPIE_LLM_API_KEY || null;
return await fetch(`https://apipie.ai/v1/models`, {
method: "GET",
headers: {
"Content-Type": "application/json",
...(apiKey ? { Authorization: `Bearer ${apiKey}` } : {}),
},
})
.then((res) => res.json())
.then(({ data = [] }) => {
const models = {};
data.forEach((model) => {
models[`${model.provider}/${model.model}`] = {
id: `${model.provider}/${model.model}`,
name: `${model.provider}/${model.model}`,
organization: model.provider,
maxLength: model.max_tokens,
};
});
// Cache all response information
if (!fs.existsSync(cacheFolder))
fs.mkdirSync(cacheFolder, { recursive: true });
fs.writeFileSync(
path.resolve(cacheFolder, "models.json"),
JSON.stringify(models),
{
encoding: "utf-8",
}
);
fs.writeFileSync(
path.resolve(cacheFolder, ".cached_at"),
String(Number(new Date())),
{
encoding: "utf-8",
}
);
return models;
})
.catch((e) => {
console.error(e);
return {};
});
}
module.exports = {
ApiPieLLM,
fetchApiPieModels,
};
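A hedged usage sketch of the class above; the require path is an assumption about where the caller sits relative to this file, and APIPIE_LLM_API_KEY must be set:

// Sketch: one-off completion through ApiPieLLM, outside the normal chat pipeline.
const { ApiPieLLM } = require("./index.js"); // i.e. server/utils/AiProviders/apipie
const llm = new ApiPieLLM(null, "openrouter/llama-3.1-8b-instruct");
const messages = llm.constructPrompt({
  systemPrompt: "You are a concise assistant.",
  contextTexts: [],
  chatHistory: [],
  userPrompt: "Say hello in five words.",
});
// getChatCompletion syncs the model cache and validates the model id first.
llm
  .getChatCompletion(messages, { temperature: llm.defaultTemp })
  .then((text) => console.log(text));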

server/utils/agents/aibitat/index.js

@@ -785,6 +785,8 @@ ${this.getHistory({ to: route.to })
return new Providers.FireworksAIProvider({ model: config.model });
case "deepseek":
return new Providers.DeepSeekProvider({ model: config.model });
case "apipie":
return new Providers.ApiPieProvider({ model: config.model });
default:
throw new Error(

server/utils/agents/aibitat/providers/ai-provider.js

@@ -182,6 +182,14 @@ class Provider {
apiKey: process.env.DEEPSEEK_API_KEY ?? null,
...config,
});
case "apipie":
return new ChatOpenAI({
configuration: {
baseURL: "https://apipie.ai/v1",
},
apiKey: process.env.APIPIE_LLM_API_KEY ?? null,
...config,
});
default:
throw new Error(`Unsupported provider ${provider} for this task.`);
}

server/utils/agents/aibitat/providers/apipie.js

@@ -0,0 +1,116 @@
const OpenAI = require("openai");
const Provider = require("./ai-provider.js");
const InheritMultiple = require("./helpers/classes.js");
const UnTooled = require("./helpers/untooled.js");
/**
 * The agent provider for the APIpie service.
*/
class ApiPieProvider extends InheritMultiple([Provider, UnTooled]) {
model;
constructor(config = {}) {
const { model = "openrouter/llama-3.1-8b-instruct" } = config;
super();
const client = new OpenAI({
baseURL: "https://apipie.ai/v1",
apiKey: process.env.APIPIE_LLM_API_KEY,
maxRetries: 3,
});
this._client = client;
this.model = model;
this.verbose = true;
}
get client() {
return this._client;
}
async #handleFunctionCallChat({ messages = [] }) {
return await this.client.chat.completions
.create({
model: this.model,
temperature: 0,
messages,
})
.then((result) => {
if (!result.hasOwnProperty("choices"))
throw new Error("ApiPie chat: No results!");
if (result.choices.length === 0)
throw new Error("ApiPie chat: No results length!");
return result.choices[0].message.content;
})
.catch((_) => {
return null;
});
}
/**
* Create a completion based on the received messages.
*
* @param messages A list of messages to send to the API.
* @param functions
* @returns The completion.
*/
async complete(messages, functions = null) {
try {
let completion;
if (functions?.length > 0) {
const { toolCall, text } = await this.functionCall(
messages,
functions,
this.#handleFunctionCallChat.bind(this)
);
if (toolCall !== null) {
this.providerLog(`Valid tool call found - running ${toolCall.name}.`);
this.deduplicator.trackRun(toolCall.name, toolCall.arguments);
return {
result: null,
functionCall: {
name: toolCall.name,
arguments: toolCall.arguments,
},
cost: 0,
};
}
completion = { content: text };
}
if (!completion?.content) {
this.providerLog(
"Will assume chat completion without tool call inputs."
);
const response = await this.client.chat.completions.create({
model: this.model,
messages: this.cleanMsgs(messages),
});
completion = response.choices[0].message;
}
// The UnTooled class inherited Deduplicator is mostly useful to prevent the agent
// from calling the exact same function over and over in a loop within a single chat exchange
// _but_ we should enable it to call previously used tools in a new chat interaction.
this.deduplicator.reset("runs");
return {
result: completion.content,
cost: 0,
};
} catch (error) {
throw error;
}
}
/**
* Get the cost of the completion.
*
* @param _usage The completion to get the cost for.
* @returns The cost of the completion.
*/
getCost(_usage) {
return 0;
}
}
module.exports = ApiPieProvider;
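To exercise the provider directly (a sketch, assuming APIPIE_LLM_API_KEY is exported), pass an empty functions array so the tool-call branch is skipped and the call falls through to a plain chat completion:

// Sketch: plain completion through the agent provider, no tools involved.
const ApiPieProvider = require("./apipie.js");
const provider = new ApiPieProvider({ model: "openrouter/llama-3.1-8b-instruct" });
provider
  .complete([{ role: "user", content: "ping" }], [])
  .then(({ result }) => console.log(result));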

server/utils/agents/aibitat/providers/index.js

@@ -15,6 +15,7 @@ const TextWebGenUiProvider = require("./textgenwebui.js");
const AWSBedrockProvider = require("./bedrock.js");
const FireworksAIProvider = require("./fireworksai.js");
const DeepSeekProvider = require("./deepseek.js");
const ApiPieProvider = require("./apipie.js");
module.exports = {
OpenAIProvider,
@@ -34,4 +35,5 @@ module.exports = {
TextWebGenUiProvider,
AWSBedrockProvider,
FireworksAIProvider,
ApiPieProvider,
};

server/utils/agents/index.js

@@ -166,6 +166,10 @@ class AgentHandler {
if (!process.env.DEEPSEEK_API_KEY)
throw new Error("DeepSeek API Key must be provided to use agents.");
break;
case "apipie":
if (!process.env.APIPIE_LLM_API_KEY)
throw new Error("ApiPie API Key must be provided to use agents.");
break;
default:
throw new Error(
@@ -212,6 +216,8 @@
return null;
case "deepseek":
return "deepseek-chat";
case "apipie":
return null;
default:
return "unknown";
}

server/utils/helpers/customModels.js

@@ -1,4 +1,5 @@
const { fetchOpenRouterModels } = require("../AiProviders/openRouter");
const { fetchApiPieModels } = require("../AiProviders/apipie");
const { perplexityModels } = require("../AiProviders/perplexity");
const { togetherAiModels } = require("../AiProviders/togetherAi");
const { fireworksAiModels } = require("../AiProviders/fireworksAi");
@@ -19,6 +20,7 @@ const SUPPORT_CUSTOM_MODELS = [
"elevenlabs-tts",
"groq",
"deepseek",
"apipie",
];
async function getCustomModels(provider = "", apiKey = null, basePath = null) {
@@ -56,6 +58,8 @@ async function getCustomModels(provider = "", apiKey = null, basePath = null) {
return await getGroqAiModels(apiKey);
case "deepseek":
return await getDeepSeekModels(apiKey);
case "apipie":
return await getAPIPieModels(apiKey);
default:
return { models: [], error: "Invalid provider for custom models" };
}
@@ -355,6 +359,21 @@ async function getOpenRouterModels() {
return { models, error: null };
}
async function getAPIPieModels(apiKey = null) {
const knownModels = await fetchApiPieModels(apiKey);
if (Object.keys(knownModels).length === 0)
return { models: [], error: null };
const models = Object.values(knownModels).map((model) => {
return {
id: model.id,
organization: model.organization,
name: model.name,
};
});
return { models, error: null };
}
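On success the helper resolves to a flat list that the frontend's ApiPieOptions component groups by organization. Illustrative shape only, since the ids depend on APIpie's live catalog:

// Sketch: expected result shape from getAPIPieModels (values are examples).
getAPIPieModels(process.env.APIPIE_LLM_API_KEY).then(({ models, error }) => {
  // models => [
  //   {
  //     id: "openrouter/llama-3.1-8b-instruct",
  //     organization: "openrouter",
  //     name: "openrouter/llama-3.1-8b-instruct",
  //   },
  //   ...
  // ]
  console.log(models.length, error); // error is always null here
});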
async function getMistralModels(apiKey = null) {
const { OpenAI: OpenAIApi } = require("openai");
const openai = new OpenAIApi({

server/utils/helpers/index.js

@@ -162,6 +162,9 @@ function getLLMProvider({ provider = null, model = null } = {}) {
case "deepseek":
const { DeepSeekLLM } = require("../AiProviders/deepseek");
return new DeepSeekLLM(embedder, model);
case "apipie":
const { ApiPieLLM } = require("../AiProviders/apipie");
return new ApiPieLLM(embedder, model);
default:
throw new Error(
`ENV: No valid LLM_PROVIDER value found in environment! Using ${process.env.LLM_PROVIDER}`
@@ -285,6 +288,12 @@ function getLLMProviderClass({ provider = null } = {}) {
case "bedrock":
const { AWSBedrockLLM } = require("../AiProviders/bedrock");
return AWSBedrockLLM;
case "deepseek":
const { DeepSeekLLM } = require("../AiProviders/deepseek");
return DeepSeekLLM;
case "apipie":
const { ApiPieLLM } = require("../AiProviders/apipie");
return ApiPieLLM;
default:
return null;
}

server/utils/helpers/updateENV.js

@@ -515,6 +515,16 @@ const KEY_MAPPING = {
envKey: "DEEPSEEK_MODEL_PREF",
checks: [isNotEmpty],
},
// APIPie Options
ApipieLLMApiKey: {
envKey: "APIPIE_LLM_API_KEY",
checks: [isNotEmpty],
},
ApipieLLMModelPref: {
envKey: "APIPIE_LLM_MODEL_PREF",
checks: [isNotEmpty],
},
};
function isNotEmpty(input = "") {
@@ -617,6 +627,7 @@ function supportedLLM(input = "") {
"generic-openai",
"bedrock",
"deepseek",
"apipie",
].includes(input);
return validSelection ? null : `${input} is not a valid LLM provider.`;
}