Mirror of https://github.com/Mintplex-Labs/anything-llm.git (synced 2025-03-13 05:32:24 +00:00)
Update NVIDIA branding
This commit is contained in:
  parent 4bdd921c75
  commit 6192080635
10 changed files with 20 additions and 20 deletions
Changed paths:
  frontend/src/components/LLMSelection/NvidiaNimOptions
  frontend/src/media/llmprovider
  frontend/src/pages
  server/models
  server/utils

@@ -5,7 +5,7 @@ import { NVIDIA_NIM_COMMON_URLS } from "@/utils/constants";
 import { useState, useEffect } from "react";

 /**
- * This component is used to select a remote Nvidia NIM model endpoint
+ * This component is used to select a remote NVIDIA NIM model endpoint
  * This is the default component and way to connect to NVIDIA NIM
  * as the "managed" provider can only work in the Desktop context.
  */

@@ -26,7 +26,7 @@ export default function RemoteNvidiaNimOptions({ settings }) {
       <div className="flex flex-col w-60">
         <div className="flex justify-between items-center mb-2">
           <label className="text-white text-sm font-semibold">
-            Nvidia Nim Base URL
+            NVIDIA Nim Base URL
           </label>
           {loading ? (
             <PreLoader size="6" />

@@ -56,7 +56,7 @@ export default function RemoteNvidiaNimOptions({ settings }) {
             onBlur={basePath.onBlur}
           />
           <p className="text-xs leading-[18px] font-base text-white text-opacity-60 mt-2">
-            Enter the URL where Nvidia NIM is running.
+            Enter the URL where NVIDIA NIM is running.
           </p>
         </div>
         {!settings?.credentialsOnly && (

Binary file not shown (logo image updated). Before: 64 KiB, After: 25 KiB.

@@ -96,12 +96,12 @@ export const AVAILABLE_LLM_PROVIDERS = [
     requiredConfig: ["GeminiLLMApiKey"],
   },
   {
-    name: "Nvidia NIM",
+    name: "NVIDIA NIM",
     value: "nvidia-nim",
     logo: NvidiaNimLogo,
     options: (settings) => <NvidiaNimOptions settings={settings} />,
     description:
-      "Run full parameter LLMs directly on your GPU using Nvidia's inference microservice via Docker.",
+      "Run full parameter LLMs directly on your NVIDIA RTX GPU using NVIDIA NIM.",
     requiredConfig: ["NvidiaNimLLMBasePath"],
   },
   {

@@ -78,9 +78,9 @@ export const LLM_SELECTION_PRIVACY = {
     logo: GeminiLogo,
   },
   "nvidia-nim": {
-    name: "Nvidia NIM",
+    name: "NVIDIA NIM",
     description: [
-      "Your model and chats are only accessible on the machine running the Nvidia NIM service",
+      "Your model and chats are only accessible on the machine running the NVIDIA NIM",
     ],
     logo: NvidiaNimLogo,
   },

@@ -92,12 +92,12 @@ const LLMS = [
     description: "Google's largest and most capable AI model",
   },
   {
-    name: "Nvidia NIM",
+    name: "NVIDIA NIM",
     value: "nvidia-nim",
     logo: NvidiaNimLogo,
     options: (settings) => <NvidiaNimOptions settings={settings} />,
     description:
-      "Run full parameter LLMs directly on your GPU using Nvidia's inference microservice via Docker.",
+      "Run full parameter LLMs directly on your NVIDIA RTX GPU using NVIDIA NIM.",
   },
   {
     name: "HuggingFace",

@@ -554,7 +554,7 @@ const SystemSettings = {
     XAIApiKey: !!process.env.XAI_LLM_API_KEY,
     XAIModelPref: process.env.XAI_LLM_MODEL_PREF,

-    // Nvidia NIM Keys
+    // NVIDIA NIM Keys
     NvidiaNimLLMBasePath: process.env.NVIDIA_NIM_LLM_BASE_PATH,
     NvidiaNimLLMModelPref: process.env.NVIDIA_NIM_LLM_MODEL_PREF,
     NvidiaNimLLMTokenLimit: process.env.NVIDIA_NIM_LLM_MODEL_TOKEN_LIMIT,

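Note: the three variables touched by this hunk are the whole NVIDIA NIM surface in the server environment. A minimal sketch of how they could be read and sanity-checked at boot, assuming the same variable names; the `assertNvidiaNimEnv` helper is hypothetical and not part of this repo:

```js
// Hypothetical startup check built only from the env vars visible in the hunk above.
function assertNvidiaNimEnv() {
  const {
    NVIDIA_NIM_LLM_BASE_PATH, // e.g. "http://localhost:8000/v1" (example value, not from the repo)
    NVIDIA_NIM_LLM_MODEL_PREF, // model id served by the NIM container
    NVIDIA_NIM_LLM_MODEL_TOKEN_LIMIT, // context window; other hunks fall back to 4096
  } = process.env;

  if (!NVIDIA_NIM_LLM_BASE_PATH)
    throw new Error("No NVIDIA NIM API Base Path was set.");
  if (NVIDIA_NIM_LLM_MODEL_TOKEN_LIMIT && isNaN(Number(NVIDIA_NIM_LLM_MODEL_TOKEN_LIMIT)))
    throw new Error("NVIDIA NIM token context limit must be numeric.");

  return {
    basePath: NVIDIA_NIM_LLM_BASE_PATH,
    modelPref: NVIDIA_NIM_LLM_MODEL_PREF ?? null,
    tokenLimit: Number(NVIDIA_NIM_LLM_MODEL_TOKEN_LIMIT || 4096),
  };
}
```
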
@@ -10,7 +10,7 @@ const {
 class NvidiaNimLLM {
   constructor(embedder = null, modelPreference = null) {
     if (!process.env.NVIDIA_NIM_LLM_BASE_PATH)
-      throw new Error("No Nvidia NIM API Base Path was set.");
+      throw new Error("No NVIDIA NIM API Base Path was set.");

     const { OpenAI: OpenAIApi } = require("openai");
     this.nvidiaNim = new OpenAIApi({

@@ -85,7 +85,7 @@ class NvidiaNimLLM {
   static promptWindowLimit(_modelName) {
     const limit = process.env.NVIDIA_NIM_LLM_MODEL_TOKEN_LIMIT || 4096;
     if (!limit || isNaN(Number(limit)))
-      throw new Error("No Nvidia NIM token context limit was set.");
+      throw new Error("No NVIDIA NIM token context limit was set.");
     return Number(limit);
   }

@@ -94,7 +94,7 @@ class NvidiaNimLLM {
   promptWindowLimit() {
     const limit = process.env.NVIDIA_NIM_LLM_MODEL_TOKEN_LIMIT || 4096;
     if (!limit || isNaN(Number(limit)))
-      throw new Error("No Nvidia NIM token context limit was set.");
+      throw new Error("No NVIDIA NIM token context limit was set.");
     return Number(limit);
   }

@@ -154,7 +154,7 @@ class NvidiaNimLLM {
   async getChatCompletion(messages = null, { temperature = 0.7 }) {
     if (!this.model)
       throw new Error(
-        `Nvidia NIM chat: ${this.model} is not valid or defined model for chat completion!`
+        `NVIDIA NIM chat: ${this.model} is not valid or defined model for chat completion!`
       );

     const result = await LLMPerformanceMonitor.measureAsyncFunction(

@@ -190,7 +190,7 @@ class NvidiaNimLLM {
   async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
     if (!this.model)
       throw new Error(
-        `Nvidia NIM chat: ${this.model} is not valid or defined model for chat completion!`
+        `NVIDIA NIM chat: ${this.model} is not valid or defined model for chat completion!`
       );

     const measuredStreamRequest = await LLMPerformanceMonitor.measureStream(

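Note: the two hunks above only rewrite error strings, but the calls they guard ultimately hit the same OpenAI-compatible endpoint. A hedged sketch of a plain, non-streaming chat completion against NIM; the model id and URL are placeholders, and the real methods additionally wrap the call in LLMPerformanceMonitor and message compression:

```js
// Illustrative only: a bare chat completion against an NVIDIA NIM endpoint.
const { OpenAI: OpenAIApi } = require("openai");

async function exampleNimChat() {
  const client = new OpenAIApi({
    baseURL: process.env.NVIDIA_NIM_LLM_BASE_PATH, // must be set, per the guards in this diff
    apiKey: "not-needed", // assumption: local deployment without auth
  });

  const result = await client.chat.completions.create({
    model: process.env.NVIDIA_NIM_LLM_MODEL_PREF, // placeholder model id
    temperature: 0.7,
    messages: [{ role: "user", content: "Hello from AnythingLLM" }],
  });

  if (!result.hasOwnProperty("choices") || result.choices.length === 0)
    throw new Error("NVIDIA NIM chat: No results!");
  return result.choices[0].message.content;
}
```
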
@@ -37,9 +37,9 @@ class NvidiaNimProvider extends InheritMultiple([Provider, UnTooled]) {
       })
       .then((result) => {
         if (!result.hasOwnProperty("choices"))
-          throw new Error("Nvidia NIM chat: No results!");
+          throw new Error("NVIDIA NIM chat: No results!");
         if (result.choices.length === 0)
-          throw new Error("Nvidia NIM chat: No results length!");
+          throw new Error("NVIDIA NIM chat: No results length!");
         return result.choices[0].message.content;
       })
       .catch((_) => {

@@ -180,7 +180,7 @@ class AgentHandler {
       case "nvidia-nim":
         if (!process.env.NVIDIA_NIM_LLM_BASE_PATH)
           throw new Error(
-            "Nvidia NIM base path must be provided to use agents."
+            "NVIDIA NIM base path must be provided to use agents."
           );
         break;

@@ -550,8 +550,8 @@ async function getNvidiaNimModels(basePath = null) {

     return { models, error: null };
   } catch (e) {
-    console.error(`Nvidia NIM:getNvidiaNimModels`, e.message);
-    return { models: [], error: "Could not fetch Nvidia NIM Models" };
+    console.error(`NVIDIA NIM:getNvidiaNimModels`, e.message);
+    return { models: [], error: "Could not fetch NVIDIA NIM Models" };
   }
 }

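Note: since NIM speaks the OpenAI API, `getNvidiaNimModels` most likely reads the standard models route. A minimal sketch under that assumption; the error handling mirrors the hunk above, but the real helper may shape the response differently:

```js
// Sketch: list models from an OpenAI-compatible NVIDIA NIM endpoint.
const { OpenAI: OpenAIApi } = require("openai");

async function listNimModels(basePath = process.env.NVIDIA_NIM_LLM_BASE_PATH) {
  try {
    const client = new OpenAIApi({ baseURL: basePath, apiKey: "not-needed" }); // auth-less local NIM assumed
    const page = await client.models.list(); // GET {basePath}/models
    const models = page.data.map((m) => ({ id: m.id, name: m.id }));
    return { models, error: null };
  } catch (e) {
    console.error(`NVIDIA NIM:getNvidiaNimModels`, e.message);
    return { models: [], error: "Could not fetch NVIDIA NIM Models" };
  }
}
```
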