Add new model provider PPIO ()

* feat: add new model provider PPIO

* fix: fix ppio model fetching

* fix: code lint

* reorder LLM
* update interface for streaming and chats to use valid keys
* linting

---------

Co-authored-by: timothycarambat <rambat1010@gmail.com>
cnJasonZ authored on 2025-02-28 02:53:00 +08:00; committed by GitHub
parent b07240deee
commit 2aeb4c2961
26 changed files with 585 additions and 3 deletions
.vscode
README.md
docker
frontend/src
  components/LLMSelection/PPIOLLMOptions
  hooks
  media/llmprovider
  pages
    GeneralSettings/LLMPreference
    OnboardingFlow/Steps
      DataHandling
      LLMPreference
    WorkspaceSettings/AgentConfig/AgentLLMSelection
  locales
server

@@ -41,6 +41,7 @@
"opendocument",
"openrouter",
"pagerender",
"ppio",
"Qdrant",
"royalblue",
"SearchApi",

@@ -99,6 +99,7 @@ AnythingLLM divides your documents into objects called `workspaces`. A Workspace
- [Apipie](https://apipie.ai/)
- [xAI](https://x.ai/)
- [Novita AI (chat models)](https://novita.ai/model-api/product/llm-api?utm_source=github_anything-llm&utm_medium=github_readme&utm_campaign=link)
- [PPIO](https://ppinfra.com?utm_source=github_anything-llm)
**Embedder models:**

@@ -126,6 +126,10 @@ GID='1000'
# DEEPSEEK_API_KEY='your-deepseek-api-key-here'
# DEEPSEEK_MODEL_PREF='deepseek-chat'
# LLM_PROVIDER='ppio'
# PPIO_API_KEY='your-ppio-api-key-here'
# PPIO_MODEL_PREF='deepseek/deepseek-v3/community'
###########################################
######## Embedding API SELECTION ##########
###########################################
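
To actually enable PPIO, uncomment those three lines and supply a real key. A minimal sketch of the resulting configuration:

LLM_PROVIDER='ppio'
PPIO_API_KEY='your-ppio-api-key-here'
PPIO_MODEL_PREF='deepseek/deepseek-v3/community'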

@@ -0,0 +1,100 @@
import System from "@/models/system";
import { useState, useEffect } from "react";
export default function PPIOLLMOptions({ settings }) {
return (
<div className="w-full flex flex-col gap-y-7">
<div className="w-full flex items-start gap-[36px] mt-1.5">
<div className="flex flex-col w-60">
<label className="text-theme-text-primary text-sm font-semibold block mb-3">
PPIO API Key
</label>
<input
type="password"
name="PPIOApiKey"
className="border-none bg-theme-settings-input-bg text-theme-text-primary placeholder:text-theme-settings-input-placeholder text-sm rounded-lg focus:outline-primary-button active:outline-primary-button outline-none block w-full p-2.5"
placeholder="PPIO API Key"
defaultValue={settings?.PPIOApiKey ? "*".repeat(20) : ""}
required={true}
autoComplete="off"
spellCheck={false}
/>
</div>
{!settings?.credentialsOnly && (
<PPIOModelSelection settings={settings} />
)}
</div>
</div>
);
}
function PPIOModelSelection({ settings }) {
const [groupedModels, setGroupedModels] = useState({});
const [loading, setLoading] = useState(true);
useEffect(() => {
async function fetchModels() {
setLoading(true);
const { models } = await System.customModels("ppio");
if (models?.length > 0) {
const modelsByOrganization = models.reduce((acc, model) => {
acc[model.organization] = acc[model.organization] || [];
acc[model.organization].push(model);
return acc;
}, {});
setGroupedModels(modelsByOrganization);
}
setLoading(false);
}
fetchModels();
}, []);
if (loading || Object.keys(groupedModels).length === 0) {
return (
<div className="flex flex-col w-60">
<label className="text-theme-text-primary text-sm font-semibold block mb-3">
Chat Model Selection
</label>
<select
name="PPIOModelPref"
required={true}
disabled={true}
className="bg-theme-settings-input-bg text-theme-text-primary text-sm rounded-lg focus:ring-primary-button focus:border-primary-button block w-full p-2.5"
>
<option disabled={true} selected={true}>
-- loading available models --
</option>
</select>
</div>
);
}
return (
<div className="flex flex-col">
<label className="text-theme-text-primary text-sm font-semibold block mb-3">
Chat Model Selection
</label>
<select
name="PPIOModelPref"
required={true}
className="border-none bg-theme-settings-input-bg text-theme-text-primary border-theme-border text-sm rounded-lg block w-full p-2.5"
>
{Object.keys(groupedModels)
.sort()
.map((organization) => (
<optgroup key={organization} label={organization}>
{groupedModels[organization].map((model) => (
<option
key={model.id}
value={model.id}
selected={settings?.PPIOModelPref === model.id}
>
{model.name}
</option>
))}
</optgroup>
))}
</select>
</div>
);
}
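
For context, a sketch of the payload shape PPIOModelSelection expects back from System.customModels("ppio"). The id/organization/name fields come from getPPIOModels() later in this diff; the display names here are hypothetical:

// Example response (illustrative values only):
const exampleResponse = {
  models: [
    { id: "deepseek/deepseek-v3/community", organization: "deepseek", name: "DeepSeek V3 (community)" },
    { id: "qwen/qwen2.5-32b-instruct", organization: "qwen", name: "Qwen2.5 32B Instruct" },
  ],
};
// The reducer above groups these by organization before rendering the <optgroup> list:
// { deepseek: [...], qwen: [...] }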

@@ -79,6 +79,7 @@ const groupedProviders = [
"openai",
"novita",
"openrouter",
"ppio",
];
export default function useGetProviderModels(provider = null) {
const [defaultModels, setDefaultModels] = useState([]);

Binary file not shown (new PPIO logo image, 3.5 KiB).

@@ -30,6 +30,7 @@ import DeepSeekLogo from "@/media/llmprovider/deepseek.png";
import APIPieLogo from "@/media/llmprovider/apipie.png";
import XAILogo from "@/media/llmprovider/xai.png";
import NvidiaNimLogo from "@/media/llmprovider/nvidia-nim.png";
import PPIOLogo from "@/media/llmprovider/ppio.png";
import PreLoader from "@/components/Preloader";
import OpenAiOptions from "@/components/LLMSelection/OpenAiOptions";
@@ -57,6 +58,7 @@ import DeepSeekOptions from "@/components/LLMSelection/DeepSeekOptions";
import ApiPieLLMOptions from "@/components/LLMSelection/ApiPieOptions";
import XAILLMOptions from "@/components/LLMSelection/XAiLLMOptions";
import NvidiaNimOptions from "@/components/LLMSelection/NvidiaNimOptions";
import PPIOLLMOptions from "@/components/LLMSelection/PPIOLLMOptions";
import LLMItem from "@/components/LLMSelection/LLMItem";
import { CaretUpDown, MagnifyingGlass, X } from "@phosphor-icons/react";
@@ -246,6 +248,15 @@ export const AVAILABLE_LLM_PROVIDERS = [
description: "Run DeepSeek's powerful LLMs.",
requiredConfig: ["DeepSeekApiKey"],
},
{
name: "PPIO",
value: "ppio",
logo: PPIOLogo,
options: (settings) => <PPIOLLMOptions settings={settings} />,
description:
"Run stable and cost-efficient open-source LLM APIs, such as DeepSeek, Llama, Qwen etc.",
requiredConfig: ["PPIOApiKey"],
},
{
name: "AWS Bedrock",
value: "bedrock",

@@ -25,7 +25,6 @@ import AWSBedrockLogo from "@/media/llmprovider/bedrock.png";
import DeepSeekLogo from "@/media/llmprovider/deepseek.png";
import APIPieLogo from "@/media/llmprovider/apipie.png";
import XAILogo from "@/media/llmprovider/xai.png";
import CohereLogo from "@/media/llmprovider/cohere.png";
import ZillizLogo from "@/media/vectordbs/zilliz.png";
import AstraDBLogo from "@/media/vectordbs/astraDB.png";
@@ -36,6 +35,7 @@ import WeaviateLogo from "@/media/vectordbs/weaviate.png";
import QDrantLogo from "@/media/vectordbs/qdrant.png";
import MilvusLogo from "@/media/vectordbs/milvus.png";
import VoyageAiLogo from "@/media/embeddingprovider/voyageai.png";
import PPIOLogo from "@/media/llmprovider/ppio.png";
import React, { useState, useEffect } from "react";
import paths from "@/utils/paths";
@@ -226,6 +226,14 @@ export const LLM_SELECTION_PRIVACY = {
],
logo: XAILogo,
},
ppio: {
name: "PPIO",
description: [
"Your chats will not be used for training",
"Your prompts and document text used in response creation are visible to PPIO",
],
logo: PPIOLogo,
},
};
export const VECTOR_DB_PRIVACY = {

@@ -25,6 +25,8 @@ import NovitaLogo from "@/media/llmprovider/novita.png";
import XAILogo from "@/media/llmprovider/xai.png";
import NvidiaNimLogo from "@/media/llmprovider/nvidia-nim.png";
import CohereLogo from "@/media/llmprovider/cohere.png";
import PPIOLogo from "@/media/llmprovider/ppio.png";
import OpenAiOptions from "@/components/LLMSelection/OpenAiOptions";
import GenericOpenAiOptions from "@/components/LLMSelection/GenericOpenAiOptions";
import AzureAiOptions from "@/components/LLMSelection/AzureAiOptions";
@@ -50,6 +52,7 @@ import ApiPieLLMOptions from "@/components/LLMSelection/ApiPieOptions";
import NovitaLLMOptions from "@/components/LLMSelection/NovitaLLMOptions";
import XAILLMOptions from "@/components/LLMSelection/XAiLLMOptions";
import NvidiaNimOptions from "@/components/LLMSelection/NvidiaNimOptions";
import PPIOLLMOptions from "@/components/LLMSelection/PPIOLLMOptions";
import LLMItem from "@/components/LLMSelection/LLMItem";
import System from "@/models/system";
@@ -213,6 +216,14 @@ const LLMS = [
options: (settings) => <DeepSeekOptions settings={settings} />,
description: "Run DeepSeek's powerful LLMs.",
},
{
name: "PPIO",
value: "ppio",
logo: PPIOLogo,
options: (settings) => <PPIOLLMOptions settings={settings} />,
description:
"Run stable and cost-efficient open-source LLM APIs, such as DeepSeek, Llama, Qwen etc.",
},
{
name: "APIpie",
value: "apipie",

@@ -25,6 +25,7 @@ const ENABLED_PROVIDERS = [
"bedrock",
"fireworksai",
"deepseek",
"ppio",
"litellm",
"apipie",
"xai",

@@ -103,6 +103,7 @@ AnythingLLM divides your documents into objects called `workspaces`
- [Apipie](https://apipie.ai/)
- [xAI](https://x.ai/)
- [Novita AI (chat models)](https://novita.ai/model-api/product/llm-api?utm_source=github_anything-llm&utm_medium=github_readme&utm_campaign=link)
- [PPIO](https://ppinfra.com?utm_source=github_anything-llm)
<div dir="rtl">

@@ -90,6 +90,7 @@ Some cool features of AnythingLLM
- [Groq](https://groq.com/)
- [Cohere](https://cohere.com/)
- [KoboldCPP](https://github.com/LostRuins/koboldcpp)
- [PPIO](https://ppinfra.com?utm_source=github_anything-llm)
**Embedder models:**

@@ -100,6 +100,7 @@ AnythingLLM divides your documents into objects called **"workspaces"**
- [Apipie](https://apipie.ai/)
- [xAI](https://x.ai/)
- [Novita AI (chat models)](https://novita.ai/model-api/product/llm-api?utm_source=github_anything-llm&utm_medium=github_readme&utm_campaign=link)
- [PPIO](https://ppinfra.com?utm_source=github_anything-llm)
**Embedder models:**

@@ -86,6 +86,7 @@ Some cool features of AnythingLLM
- [Groq](https://groq.com/)
- [Cohere](https://cohere.com/)
- [KoboldCPP](https://github.com/LostRuins/koboldcpp)
- [PPIO (chat models)](https://ppinfra.com?utm_source=github_anything-llm)
**Supported embedding models:**

@@ -116,6 +116,10 @@ SIG_SALT='salt' # Please generate random string at least 32 chars long.
# NVIDIA_NIM_LLM_BASE_PATH='http://127.0.0.1:8000'
# NVIDIA_NIM_LLM_MODEL_PREF='meta/llama-3.2-3b-instruct'
# LLM_PROVIDER='ppio'
# PPIO_API_KEY='your-ppio-api-key-here'
# PPIO_MODEL_PREF='deepseek/deepseek-v3/community'
###########################################
######## Embedding API SELECTION ##########
###########################################

@@ -560,6 +560,10 @@ const SystemSettings = {
NvidiaNimLLMBasePath: process.env.NVIDIA_NIM_LLM_BASE_PATH,
NvidiaNimLLMModelPref: process.env.NVIDIA_NIM_LLM_MODEL_PREF,
NvidiaNimLLMTokenLimit: process.env.NVIDIA_NIM_LLM_MODEL_TOKEN_LIMIT,
// PPIO API keys
PPIOApiKey: !!process.env.PPIO_API_KEY,
PPIOModelPref: process.env.PPIO_MODEL_PREF,
};
},

@@ -0,0 +1,266 @@
const { NativeEmbedder } = require("../../EmbeddingEngines/native");
const {
handleDefaultStreamResponseV2,
} = require("../../helpers/chat/responses");
const fs = require("fs");
const path = require("path");
const { safeJsonParse } = require("../../http");
const {
LLMPerformanceMonitor,
} = require("../../helpers/chat/LLMPerformanceMonitor");
const cacheFolder = path.resolve(
process.env.STORAGE_DIR
? path.resolve(process.env.STORAGE_DIR, "models", "ppio")
: path.resolve(__dirname, `../../../storage/models/ppio`)
);
class PPIOLLM {
constructor(embedder = null, modelPreference = null) {
if (!process.env.PPIO_API_KEY) throw new Error("No PPIO API key was set.");
const { OpenAI: OpenAIApi } = require("openai");
this.basePath = "https://api.ppinfra.com/v3/openai/";
this.openai = new OpenAIApi({
baseURL: this.basePath,
apiKey: process.env.PPIO_API_KEY ?? null,
defaultHeaders: {
"HTTP-Referer": "https://anythingllm.com",
"X-API-Source": "anythingllm",
},
});
this.model =
modelPreference ||
process.env.PPIO_MODEL_PREF ||
"qwen/qwen2.5-32b-instruct";
this.limits = {
history: this.promptWindowLimit() * 0.15,
system: this.promptWindowLimit() * 0.15,
user: this.promptWindowLimit() * 0.7,
};
this.embedder = embedder ?? new NativeEmbedder();
this.defaultTemp = 0.7;
if (!fs.existsSync(cacheFolder))
fs.mkdirSync(cacheFolder, { recursive: true });
this.cacheModelPath = path.resolve(cacheFolder, "models.json");
this.cacheAtPath = path.resolve(cacheFolder, ".cached_at");
this.log(`Loaded with model: ${this.model}`);
}
log(text, ...args) {
console.log(`\x1b[36m[${this.constructor.name}]\x1b[0m ${text}`, ...args);
}
async #syncModels() {
if (fs.existsSync(this.cacheModelPath) && !this.#cacheIsStale())
return false;
this.log("Model cache is missing or stale. Fetching from PPIO API.");
await fetchPPIOModels();
return;
}
#cacheIsStale() {
const MAX_STALE = 6.048e8; // 1 Week in MS
if (!fs.existsSync(this.cacheAtPath)) return true;
const now = Number(new Date());
const timestampMs = Number(fs.readFileSync(this.cacheAtPath));
return now - timestampMs > MAX_STALE;
}
#appendContext(contextTexts = []) {
if (!contextTexts || !contextTexts.length) return "";
return (
"\nContext:\n" +
contextTexts
.map((text, i) => {
return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
})
.join("")
);
}
models() {
if (!fs.existsSync(this.cacheModelPath)) return {};
return safeJsonParse(
fs.readFileSync(this.cacheModelPath, { encoding: "utf-8" }),
{}
);
}
streamingEnabled() {
return "streamGetChatCompletion" in this;
}
promptWindowLimit() {
const model = this.models()[this.model];
if (!model) return 4096; // Default to 4096 if we cannot find the model
return model?.maxLength || 4096;
}
async isValidChatCompletionModel(model = "") {
await this.#syncModels();
const availableModels = this.models();
return Object.prototype.hasOwnProperty.call(availableModels, model);
}
/**
* Generates appropriate content array for a message + attachments.
* @param {{userPrompt:string, attachments: import("../../helpers").Attachment[]}}
* @returns {string|object[]}
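* Note: currently unused; constructPrompt below omits attachments (marked "not supported").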
*/
#generateContent({ userPrompt, attachments = [] }) {
if (!attachments.length) {
return userPrompt;
}
const content = [{ type: "text", text: userPrompt }];
for (let attachment of attachments) {
content.push({
type: "image_url",
image_url: {
url: attachment.contentString,
detail: "auto",
},
});
}
return content.flat();
}
constructPrompt({
systemPrompt = "",
contextTexts = [],
chatHistory = [],
userPrompt = "",
// attachments = [], - not supported
}) {
const prompt = {
role: "system",
content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
};
return [prompt, ...chatHistory, { role: "user", content: userPrompt }];
}
async getChatCompletion(messages = null, { temperature = 0.7 }) {
if (!(await this.isValidChatCompletionModel(this.model)))
throw new Error(
`PPIO chat: ${this.model} is not valid for chat completion!`
);
const result = await LLMPerformanceMonitor.measureAsyncFunction(
this.openai.chat.completions
.create({
model: this.model,
messages,
temperature,
})
.catch((e) => {
throw new Error(e.message);
})
);
if (
!Object.prototype.hasOwnProperty.call(result.output, "choices") ||
result.output.choices.length === 0
)
return null;
return {
textResponse: result.output.choices[0].message.content,
metrics: {
prompt_tokens: result.output.usage.prompt_tokens || 0,
completion_tokens: result.output.usage.completion_tokens || 0,
total_tokens: result.output.usage.total_tokens || 0,
outputTps: result.output.usage.completion_tokens / result.duration,
duration: result.duration,
},
};
}
async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
if (!(await this.isValidChatCompletionModel(this.model)))
throw new Error(
`PPIO chat: ${this.model} is not valid for chat completion!`
);
const measuredStreamRequest = await LLMPerformanceMonitor.measureStream(
this.openai.chat.completions.create({
model: this.model,
stream: true,
messages,
temperature,
}),
messages
);
return measuredStreamRequest;
}
handleStream(response, stream, responseProps) {
return handleDefaultStreamResponseV2(response, stream, responseProps);
}
async embedTextInput(textInput) {
return await this.embedder.embedTextInput(textInput);
}
async embedChunks(textChunks = []) {
return await this.embedder.embedChunks(textChunks);
}
async compressMessages(promptArgs = {}, rawHistory = []) {
const { messageArrayCompressor } = require("../../helpers/chat");
const messageArray = this.constructPrompt(promptArgs);
return await messageArrayCompressor(this, messageArray, rawHistory);
}
}
async function fetchPPIOModels() {
return await fetch(`https://api.ppinfra.com/v3/openai/models`, {
method: "GET",
headers: {
"Content-Type": "application/json",
Authorization: `Bearer ${process.env.PPIO_API_KEY}`,
},
})
.then((res) => res.json())
.then(({ data = [] }) => {
const models = {};
data.forEach((model) => {
const organization = model.id?.split("/")?.[0] || "PPIO";
models[model.id] = {
id: model.id,
name: model.display_name || model.title || model.id,
organization,
maxLength: model.context_size || 4096,
};
});
if (!fs.existsSync(cacheFolder))
fs.mkdirSync(cacheFolder, { recursive: true });
fs.writeFileSync(
path.resolve(cacheFolder, "models.json"),
JSON.stringify(models),
{
encoding: "utf-8",
}
);
fs.writeFileSync(
path.resolve(cacheFolder, ".cached_at"),
String(Number(new Date())),
{
encoding: "utf-8",
}
);
return models;
})
.catch((e) => {
console.error(e);
return {};
});
}
module.exports = {
PPIOLLM,
fetchPPIOModels,
};
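
A minimal usage sketch for the class above, assuming PPIO_API_KEY is set and the chosen model exists in the cached model list (the require path mirrors how other files in this diff import it):

const { PPIOLLM } = require("../AiProviders/ppio");

async function demo() {
  // A null embedder falls back to NativeEmbedder (see the constructor)
  const llm = new PPIOLLM(null, "deepseek/deepseek-v3/community");
  const messages = llm.constructPrompt({
    systemPrompt: "You are a helpful assistant.",
    contextTexts: [],
    chatHistory: [],
    userPrompt: "Say hello.",
  });
  const res = await llm.getChatCompletion(messages, { temperature: llm.defaultTemp });
  console.log(res?.textResponse, res?.metrics);
}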

@@ -795,7 +795,8 @@ ${this.getHistory({ to: route.to })
return new Providers.XAIProvider({ model: config.model });
case "novita":
return new Providers.NovitaProvider({ model: config.model });
case "ppio":
return new Providers.PPIOProvider({ model: config.model });
default:
throw new Error(
`Unknown provider: ${config.provider}. Please use a valid provider.`

@@ -163,6 +163,14 @@ class Provider {
apiKey: process.env.NOVITA_LLM_API_KEY ?? null,
...config,
});
case "ppio":
return new ChatOpenAI({
configuration: {
baseURL: "https://api.ppinfra.com/v3/openai",
},
apiKey: process.env.PPIO_API_KEY ?? null,
...config,
});
// OSS Model Runners
// case "anythingllm_ollama":

@@ -20,6 +20,7 @@ const ApiPieProvider = require("./apipie.js");
const XAIProvider = require("./xai.js");
const NovitaProvider = require("./novita.js");
const NvidiaNimProvider = require("./nvidiaNim.js");
const PPIOProvider = require("./ppio.js");
module.exports = {
OpenAIProvider,
@@ -44,4 +45,5 @@ module.exports = {
XAIProvider,
NovitaProvider,
NvidiaNimProvider,
PPIOProvider,
};

@@ -0,0 +1,115 @@
const OpenAI = require("openai");
const Provider = require("./ai-provider.js");
const InheritMultiple = require("./helpers/classes.js");
const UnTooled = require("./helpers/untooled.js");
/**
* The agent provider for the PPIO AI provider.
*/
class PPIOProvider extends InheritMultiple([Provider, UnTooled]) {
model;
constructor(config = {}) {
const { model = "qwen/qwen2.5-32b-instruct" } = config;
super();
const client = new OpenAI({
baseURL: "https://api.ppinfra.com/v3/openai",
apiKey: process.env.PPIO_API_KEY,
maxRetries: 3,
defaultHeaders: {
"HTTP-Referer": "https://anythingllm.com",
"X-API-Source": "anythingllm",
},
});
this._client = client;
this.model = model;
this.verbose = true;
}
get client() {
return this._client;
}
async #handleFunctionCallChat({ messages = [] }) {
return await this.client.chat.completions
.create({
model: this.model,
temperature: 0,
messages,
})
.then((result) => {
if (!Object.prototype.hasOwnProperty.call(result, "choices"))
throw new Error("PPIO chat: No results!");
if (result.choices.length === 0)
throw new Error("PPIO chat: No results length!");
return result.choices[0].message.content;
})
.catch((_) => {
return null;
});
}
/**
* Create a completion based on the received messages.
*
* @param messages A list of messages to send to the API.
* @param functions A list of tool definitions available to the model (may be empty).
* @returns The completion.
*/
async complete(messages, functions = []) {
let completion;
if (functions.length > 0) {
const { toolCall, text } = await this.functionCall(
messages,
functions,
this.#handleFunctionCallChat.bind(this)
);
if (toolCall !== null) {
this.providerLog(`Valid tool call found - running ${toolCall.name}.`);
this.deduplicator.trackRun(toolCall.name, toolCall.arguments);
return {
result: null,
functionCall: {
name: toolCall.name,
arguments: toolCall.arguments,
},
cost: 0,
};
}
completion = { content: text };
}
if (!completion?.content) {
this.providerLog("Will assume chat completion without tool call inputs.");
const response = await this.client.chat.completions.create({
model: this.model,
messages: this.cleanMsgs(messages),
});
completion = response.choices[0].message;
}
// The UnTooled class inherited Deduplicator is mostly useful to prevent the agent
// from calling the exact same function over and over in a loop within a single chat exchange
// _but_ we should enable it to call previously used tools in a new chat interaction.
this.deduplicator.reset("runs");
return {
result: completion.content,
cost: 0,
};
}
/**
* Get the cost of the completion.
*
* @param _usage The completion to get the cost for.
* @returns The cost of the completion.
* Stubbed since PPIO has no cost basis.
*/
getCost() {
return 0;
}
}
module.exports = PPIOProvider;
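
A hedged sketch of driving the agent provider directly. With no tool definitions passed, complete() falls through to the plain chat-completion path:

const PPIOProvider = require("./ppio.js");

(async () => {
  const provider = new PPIOProvider({ model: "qwen/qwen2.5-32b-instruct" });
  const { result, cost } = await provider.complete(
    [{ role: "user", content: "What is 2 + 2?" }],
    [] // no functions registered, so no tool-call detection runs
  );
  console.log(result, cost); // cost is always 0 for PPIO (see getCost)
})();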

@@ -184,6 +184,10 @@ class AgentHandler {
"NVIDIA NIM base path must be provided to use agents."
);
break;
case "ppio":
if (!process.env.PPIO_API_KEY)
throw new Error("PPIO API Key must be provided to use agents.");
break;
default:
throw new Error(
@@ -249,6 +253,8 @@
return process.env.NOVITA_LLM_MODEL_PREF ?? "deepseek/deepseek-r1";
case "nvidia-nim":
return process.env.NVIDIA_NIM_LLM_MODEL_PREF ?? null;
case "ppio":
return process.env.PPIO_MODEL_PREF ?? "qwen/qwen2.5-32b-instruct";
default:
return null;
}

@@ -7,6 +7,7 @@ const { ElevenLabsTTS } = require("../TextToSpeech/elevenLabs");
const { fetchNovitaModels } = require("../AiProviders/novita");
const { parseLMStudioBasePath } = require("../AiProviders/lmStudio");
const { parseNvidiaNimBasePath } = require("../AiProviders/nvidiaNim");
const { fetchPPIOModels } = require("../AiProviders/ppio");
const { GeminiLLM } = require("../AiProviders/gemini");
const SUPPORT_CUSTOM_MODELS = [
@@ -29,6 +30,7 @@ const SUPPORT_CUSTOM_MODELS = [
"novita",
"xai",
"gemini",
"ppio",
];
async function getCustomModels(provider = "", apiKey = null, basePath = null) {
@@ -74,6 +76,8 @@
return await getNvidiaNimModels(basePath);
case "gemini":
return await getGeminiModels(apiKey);
case "ppio":
return await getPPIOModels(apiKey);
default:
return { models: [], error: "Invalid provider for custom models" };
}
@@ -571,6 +575,19 @@ async function getGeminiModels(_apiKey = null) {
return { models, error: null };
}
async function getPPIOModels(_apiKey = null) {
const ppioModels = await fetchPPIOModels();
if (Object.keys(ppioModels).length === 0) return { models: [], error: null };
const models = Object.values(ppioModels).map((model) => {
return {
id: model.id,
organization: model.organization,
name: model.name,
};
});
return { models, error: null };
}
module.exports = {
getCustomModels,
};
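
For reference, a sketch of pulling PPIO models through the shared entry point (the require path is illustrative):

const { getCustomModels } = require("../helpers/customModels");

(async () => {
  const { models, error } = await getCustomModels("ppio");
  if (!error) console.log(models); // [{ id, organization, name }, ...]
})();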

@@ -197,6 +197,9 @@ function getLLMProvider({ provider = null, model = null } = {}) {
case "nvidia-nim":
const { NvidiaNimLLM } = require("../AiProviders/nvidiaNim");
return new NvidiaNimLLM(embedder, model);
case "ppio":
const { PPIOLLM } = require("../AiProviders/ppio");
return new PPIOLLM(embedder, model);
default:
throw new Error(
`ENV: No valid LLM_PROVIDER value found in environment! Using ${process.env.LLM_PROVIDER}`
@@ -338,6 +341,9 @@ function getLLMProviderClass({ provider = null } = {}) {
case "nvidia-nim":
const { NvidiaNimLLM } = require("../AiProviders/nvidiaNim");
return NvidiaNimLLM;
case "ppio":
const { PPIOLLM } = require("../AiProviders/ppio");
return PPIOLLM;
default:
return null;
}
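
Both helpers now resolve "ppio". A sketch, assuming PPIO_API_KEY is set:

const llm = getLLMProvider({ provider: "ppio", model: "deepseek/deepseek-v3/community" });
const PPIOClass = getLLMProviderClass({ provider: "ppio" });
console.log(llm instanceof PPIOClass); // true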

@@ -615,6 +615,16 @@ const KEY_MAPPING = {
},
],
},
// PPIO Options
PPIOApiKey: {
envKey: "PPIO_API_KEY",
checks: [isNotEmpty],
},
PPIOModelPref: {
envKey: "PPIO_MODEL_PREF",
checks: [isNotEmpty],
},
};
function isNotEmpty(input = "") {
@@ -721,6 +731,7 @@ function supportedLLM(input = "") {
"apipie",
"xai",
"nvidia-nim",
"ppio",
].includes(input);
return validSelection ? null : `${input} is not a valid LLM provider.`;
}
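
Illustrative only: a sketch of how a KEY_MAPPING entry is typically consumed when a setting is saved (the actual update routine is not shown in this diff):

function applySetting(key, value) {
  const mapping = KEY_MAPPING[key]; // e.g. { envKey: "PPIO_API_KEY", checks: [isNotEmpty] }
  if (!mapping) return `${key} is not a supported setting.`;
  // validators return null on success or an error message on failure (see supportedLLM)
  const error = mapping.checks.map((check) => check(value)).find((msg) => !!msg);
  if (error) return error;
  process.env[mapping.envKey] = value;
  return null;
}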