anything-llm/server/utils/AiProviders/nvidiaNim/index.js
Timothy Carambat dd7c4675d3
LLM performance metric tracking ()
* WIP performance metric tracking

* fix: patch UI trying to .toFixed() null metric
Anthropic tracking migration
cleanup logs

* Apipie implementation, not tested

* Cleanup Anthropic notes, Add support for AzureOpenAI tracking

* bedrock token metric tracking

* Cohere support

* feat: improve default stream handler to track for providers who are actually OpenAI compliant in usage reporting
add deepseek support

* feat: Add FireworksAI tracking reporting
fix: improve handler when usage:null is reported (why?)

* Add token reporting for GenericOpenAI

* token reporting for koboldcpp + lmstudio

* lint

* support Groq token tracking

* HF token tracking

* token tracking for togetherai

* LiteLLM token tracking

* linting + Mistral token tracking support

* XAI token metric reporting

* native provider runner

* LocalAI token tracking

* Novita token tracking

* OpenRouter token tracking

* Apipie stream metrics

* textgenwebui token tracking

* perplexity token reporting

* ollama token reporting

* lint

* put back comment

* Rip out LC ollama wrapper and use official library

* patch images with new ollama lib

* improve ollama offline message

* fix image handling in ollama llm provider

* lint

* NVIDIA NIM token tracking

* update openai compatibility responses

* UI/UX show/hide metrics on click for user preference

* update bedrock client

---------

Co-authored-by: shatfield4 <seanhatfield5@gmail.com>
2024-12-16 14:31:17 -08:00

const { NativeEmbedder } = require("../../EmbeddingEngines/native");
const {
  LLMPerformanceMonitor,
} = require("../../helpers/chat/LLMPerformanceMonitor");
const {
  handleDefaultStreamResponseV2,
} = require("../../helpers/chat/responses");

class NvidiaNimLLM {
  constructor(embedder = null, modelPreference = null) {
    if (!process.env.NVIDIA_NIM_LLM_BASE_PATH)
      throw new Error("No Nvidia NIM API Base Path was set.");

    const { OpenAI: OpenAIApi } = require("openai");
    this.nvidiaNim = new OpenAIApi({
      baseURL: parseNvidiaNimBasePath(process.env.NVIDIA_NIM_LLM_BASE_PATH),
      apiKey: null,
    });

    this.model = modelPreference || process.env.NVIDIA_NIM_LLM_MODEL_PREF;
    this.limits = {
      history: this.promptWindowLimit() * 0.15,
      system: this.promptWindowLimit() * 0.15,
      user: this.promptWindowLimit() * 0.7,
    };

    this.embedder = embedder ?? new NativeEmbedder();
    this.defaultTemp = 0.7;
    this.#log(
      `Loaded with model: ${this.model} with context window: ${this.promptWindowLimit()}`
    );
  }
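
  // Budget note (illustrative): with the default 4096-token window the split above
  // yields history: 4096 * 0.15 = 614.4, system: 4096 * 0.15 = 614.4, and
  // user: 4096 * 0.70 = 2867.2 tokens. These are soft budgets consumed by the
  // message compressor (see compressMessages below), not limits enforced by NIM.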
  #log(text, ...args) {
    console.log(`\x1b[36m[${this.constructor.name}]\x1b[0m ${text}`, ...args);
  }

  #appendContext(contextTexts = []) {
    if (!contextTexts || !contextTexts.length) return "";
    return (
      "\nContext:\n" +
      contextTexts
        .map((text, i) => {
          return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
        })
        .join("")
    );
  }
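
  // Illustrative output (hypothetical snippets): #appendContext(["doc A", "doc B"])
  // returns:
  //   "\nContext:\n[CONTEXT 0]:\ndoc A\n[END CONTEXT 0]\n\n[CONTEXT 1]:\ndoc B\n[END CONTEXT 1]\n\n"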
  /**
   * Set the model token limit `NVIDIA_NIM_LLM_MODEL_TOKEN_LIMIT` for the given model ID
   * @param {string} modelId
   * @param {string|null} basePath
   * @returns {Promise<void>}
   */
  static async setModelTokenLimit(modelId, basePath = null) {
    if (!modelId) return;
    const { OpenAI: OpenAIApi } = require("openai");
    const openai = new OpenAIApi({
      baseURL: parseNvidiaNimBasePath(
        basePath || process.env.NVIDIA_NIM_LLM_BASE_PATH
      ),
      apiKey: null,
    });

    const models = await openai.models
      .list()
      .then((results) => results.data)
      .catch(() => {
        return [];
      });

    if (!models.length) return;
    const modelInfo = models.find((model) => model.id === modelId);
    if (!modelInfo) return;
    process.env.NVIDIA_NIM_LLM_MODEL_TOKEN_LIMIT = Number(
      modelInfo.max_model_len || 4096
    );
  }
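
  // Illustrative usage (hypothetical model id and base path):
  //   await NvidiaNimLLM.setModelTokenLimit(
  //     "meta/llama-3.1-8b-instruct",
  //     "http://localhost:8000/v1"
  //   );
  // When the id is present in GET /v1/models, its `max_model_len` is written to
  // process.env.NVIDIA_NIM_LLM_MODEL_TOKEN_LIMIT; otherwise the env var is untouched.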
  streamingEnabled() {
    return "streamGetChatCompletion" in this;
  }

  static promptWindowLimit(_modelName) {
    const limit = process.env.NVIDIA_NIM_LLM_MODEL_TOKEN_LIMIT || 4096;
    if (!limit || isNaN(Number(limit)))
      throw new Error("No Nvidia NIM token context limit was set.");
    return Number(limit);
  }

  // Ensure the user set a value for the token limit
  // and if undefined, assume a 4096-token window.
  promptWindowLimit() {
    const limit = process.env.NVIDIA_NIM_LLM_MODEL_TOKEN_LIMIT || 4096;
    if (!limit || isNaN(Number(limit)))
      throw new Error("No Nvidia NIM token context limit was set.");
    return Number(limit);
  }

  async isValidChatCompletionModel(_ = "") {
    return true;
  }
  /**
   * Generates appropriate content array for a message + attachments.
   * @param {{userPrompt:string, attachments: import("../../helpers").Attachment[]}}
   * @returns {string|object[]}
   */
  #generateContent({ userPrompt, attachments = [] }) {
    if (!attachments.length) {
      return userPrompt;
    }

    const content = [{ type: "text", text: userPrompt }];
    for (let attachment of attachments) {
      content.push({
        type: "image_url",
        image_url: {
          url: attachment.contentString,
          detail: "auto",
        },
      });
    }
    return content.flat();
  }
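
  // Illustrative return value for one image attachment (hypothetical values):
  //   #generateContent({
  //     userPrompt: "What is in this image?",
  //     attachments: [{ contentString: "data:image/png;base64,..." }],
  //   })
  //   => [
  //        { type: "text", text: "What is in this image?" },
  //        { type: "image_url", image_url: { url: "data:image/png;base64,...", detail: "auto" } },
  //      ]
  // With no attachments, the plain userPrompt string is returned instead.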
  /**
   * Construct the user prompt for this model.
   * @param {{attachments: import("../../helpers").Attachment[]}} param0
   * @returns {object[]}
   */
  constructPrompt({
    systemPrompt = "",
    contextTexts = [],
    chatHistory = [],
    userPrompt = "",
    attachments = [],
  }) {
    const prompt = {
      role: "system",
      content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
    };
    return [
      prompt,
      ...chatHistory,
      {
        role: "user",
        content: this.#generateContent({ userPrompt, attachments }),
      },
    ];
  }
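
  // Illustrative result (hypothetical inputs) - always an OpenAI-style message array:
  //   [
  //     { role: "system", content: "You are helpful.\nContext:\n[CONTEXT 0]:..." },
  //     ...chatHistory,
  //     { role: "user", content: "What is in this image?" },
  //   ]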
  async getChatCompletion(messages = null, { temperature = 0.7 }) {
    if (!this.model)
      throw new Error(
        `Nvidia NIM chat: ${this.model} is not a valid or defined model for chat completion!`
      );

    const result = await LLMPerformanceMonitor.measureAsyncFunction(
      this.nvidiaNim.chat.completions
        .create({
          model: this.model,
          messages,
          temperature,
        })
        .catch((e) => {
          throw new Error(e.message);
        })
    );

    if (
      !result.output.hasOwnProperty("choices") ||
      result.output.choices.length === 0
    )
      return null;

    // Defensive: some OpenAI-compatible backends can report `usage: null`
    // (see commit notes), so degrade metrics to zero instead of throwing.
    return {
      textResponse: result.output.choices[0].message.content,
      metrics: {
        prompt_tokens: result.output.usage?.prompt_tokens || 0,
        completion_tokens: result.output.usage?.completion_tokens || 0,
        total_tokens: result.output.usage?.total_tokens || 0,
        outputTps:
          (result.output.usage?.completion_tokens || 0) / result.duration,
        duration: result.duration,
      },
    };
  }
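
  // Illustrative return shape (values hypothetical; duration comes from
  // LLMPerformanceMonitor and outputTps = completion_tokens / duration):
  //   {
  //     textResponse: "Hello!",
  //     metrics: { prompt_tokens: 25, completion_tokens: 5, total_tokens: 30,
  //                outputTps: 12.5, duration: 0.4 },
  //   }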
  async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
    if (!this.model)
      throw new Error(
        `Nvidia NIM chat: ${this.model} is not a valid or defined model for chat completion!`
      );

    const measuredStreamRequest = await LLMPerformanceMonitor.measureStream(
      this.nvidiaNim.chat.completions.create({
        model: this.model,
        stream: true,
        messages,
        temperature,
      }),
      messages
    );
    return measuredStreamRequest;
  }

  handleStream(response, stream, responseProps) {
    return handleDefaultStreamResponseV2(response, stream, responseProps);
  }
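
  // Flow note: streamGetChatCompletion() hands back the request wrapped by
  // LLMPerformanceMonitor.measureStream(); handleStream() then drains it via the
  // shared handleDefaultStreamResponseV2 helper, which forwards chunks to the
  // client and finalizes the usage/timing metrics for the request.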
  // Simple wrapper for dynamic embedder & normalize interface for all LLM implementations
  async embedTextInput(textInput) {
    return await this.embedder.embedTextInput(textInput);
  }
  async embedChunks(textChunks = []) {
    return await this.embedder.embedChunks(textChunks);
  }

  async compressMessages(promptArgs = {}, rawHistory = []) {
    const { messageArrayCompressor } = require("../../helpers/chat");
    const messageArray = this.constructPrompt(promptArgs);
    return await messageArrayCompressor(this, messageArray, rawHistory);
  }
}
/**
 * Parse the base path for the Nvidia NIM container API. The base path must end in /v1
 * without a trailing slash, but users often paste URLs in other shapes, so we normalize
 * whatever was provided into the correct format.
 * @param {string} basePath
 * @returns {string}
 */
function parseNvidiaNimBasePath(providedBasePath = "") {
  try {
    const baseURL = new URL(providedBasePath);
    const basePath = `${baseURL.origin}/v1`;
    return basePath;
  } catch (e) {
    return providedBasePath;
  }
}
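
// Illustrative normalizations (these follow directly from the URL-origin logic above):
//   parseNvidiaNimBasePath("http://localhost:8000")        => "http://localhost:8000/v1"
//   parseNvidiaNimBasePath("http://localhost:8000/v1/")    => "http://localhost:8000/v1"
//   parseNvidiaNimBasePath("http://localhost:8000/api/v1") => "http://localhost:8000/v1"
//   parseNvidiaNimBasePath("not-a-url")                    => "not-a-url" (new URL() throws)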
module.exports = {
  NvidiaNimLLM,
  parseNvidiaNimBasePath,
};