const { NativeEmbedder } = require("../../EmbeddingEngines/native");
const { v4: uuidv4 } = require("uuid");
const {
  writeResponseChunk,
  clientAbortedHandler,
  formatChatHistory,
} = require("../../helpers/chat/responses");
const fs = require("fs");
const path = require("path");
const { safeJsonParse } = require("../../http");
const {
  LLMPerformanceMonitor,
} = require("../../helpers/chat/LLMPerformanceMonitor");

const cacheFolder = path.resolve(
  process.env.STORAGE_DIR
    ? path.resolve(process.env.STORAGE_DIR, "models", "openrouter")
    : path.resolve(__dirname, `../../../storage/models/openrouter`)
);

class OpenRouterLLM {
  constructor(embedder = null, modelPreference = null) {
    if (!process.env.OPENROUTER_API_KEY)
      throw new Error("No OpenRouter API key was set.");

    const { OpenAI: OpenAIApi } = require("openai");
    this.basePath = "https://openrouter.ai/api/v1";
    this.openai = new OpenAIApi({
      baseURL: this.basePath,
      apiKey: process.env.OPENROUTER_API_KEY ?? null,
      defaultHeaders: {
        "HTTP-Referer": "https://anythingllm.com",
        "X-Title": "AnythingLLM",
      },
    });
    this.model =
      modelPreference || process.env.OPENROUTER_MODEL_PREF || "openrouter/auto";
    this.limits = {
      history: this.promptWindowLimit() * 0.15,
      system: this.promptWindowLimit() * 0.15,
      user: this.promptWindowLimit() * 0.7,
    };

    this.embedder = embedder ?? new NativeEmbedder();
    this.defaultTemp = 0.7;
    this.timeout = this.#parseTimeout();

    if (!fs.existsSync(cacheFolder))
      fs.mkdirSync(cacheFolder, { recursive: true });
    this.cacheModelPath = path.resolve(cacheFolder, "models.json");
    this.cacheAtPath = path.resolve(cacheFolder, ".cached_at");
    this.log("Initialized with model:", this.model);
  }
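  // Configuration note: the constructor above reads its settings from environment
  // variables already referenced in this file - OPENROUTER_API_KEY (required),
  // OPENROUTER_MODEL_PREF (falls back to "openrouter/auto"), OPENROUTER_TIMEOUT_MS
  // (see #parseTimeout below), and STORAGE_DIR for the model cache location.
  // Example .env values (illustrative only, not taken from this file):
  //   OPENROUTER_API_KEY=...
  //   OPENROUTER_MODEL_PREF=openrouter/auto
  //   OPENROUTER_TIMEOUT_MS=500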

  log(text, ...args) {
    console.log(`\x1b[36m[${this.constructor.name}]\x1b[0m ${text}`, ...args);
  }

  /**
   * OpenRouter has various models that never return `finish_reason` and thus leave the stream open,
   * which causes issues in subsequent messages. This timeout value forces us to close the stream after
   * x milliseconds of inactivity. It is configurable via the OPENROUTER_TIMEOUT_MS environment variable.
   * @returns {number} The timeout value in milliseconds (default: 500, minimum: 500)
   */
  #parseTimeout() {
    this.log(
      `OpenRouter timeout is set to ${process.env.OPENROUTER_TIMEOUT_MS ?? 500}ms`
    );
    if (isNaN(Number(process.env.OPENROUTER_TIMEOUT_MS))) return 500;
    const setValue = Number(process.env.OPENROUTER_TIMEOUT_MS);
    if (setValue < 500) return 500;
    return setValue;
  }
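  // Illustrative #parseTimeout results, derived from the clamping logic above
  // (values are examples, not taken from a real deployment):
  //   OPENROUTER_TIMEOUT_MS unset or non-numeric -> 500
  //   OPENROUTER_TIMEOUT_MS=100                  -> 500 (clamped to the minimum)
  //   OPENROUTER_TIMEOUT_MS=2500                 -> 2500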

  // This checks if the .cached_at file has a timestamp that is more than one week (in milliseconds)
  // older than the current date. If it is, we refetch from the API so that all the models are up
  // to date.
  #cacheIsStale() {
    const MAX_STALE = 6.048e8; // 1 week in ms (7 * 24 * 60 * 60 * 1000)
    if (!fs.existsSync(this.cacheAtPath)) return true;
    const now = Number(new Date());
    const timestampMs = Number(fs.readFileSync(this.cacheAtPath));
    return now - timestampMs > MAX_STALE;
  }

  // The OpenRouter model API has a lot of models, so we cache the list locally in the storage directory.
  // If the cached JSON file is stale or does not exist, we fetch it from the API and store it.
  // This might slow down the first request, but we need the proper token context window
  // for each model, and since this is a constructor property we can really only get it if this cache exists.
  // We used to maintain this list as a chore, but given there is an API to get the info, that makes little sense.
  async #syncModels() {
    if (fs.existsSync(this.cacheModelPath) && !this.#cacheIsStale())
      return false;

    this.log(
      "Model cache is not present or stale. Fetching from OpenRouter API."
    );
    await fetchOpenRouterModels();
    return;
  }

  #appendContext(contextTexts = []) {
    if (!contextTexts || !contextTexts.length) return "";
    return (
      "\nContext:\n" +
      contextTexts
        .map((text, i) => {
          return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
        })
        .join("")
    );
  }
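  // Illustrative output of #appendContext for two context snippets (snippet text is example only):
  //
  //   Context:
  //   [CONTEXT 0]:
  //   First retrieved snippet...
  //   [END CONTEXT 0]
  //
  //   [CONTEXT 1]:
  //   Second retrieved snippet...
  //   [END CONTEXT 1]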

  models() {
    if (!fs.existsSync(this.cacheModelPath)) return {};
    return safeJsonParse(
      fs.readFileSync(this.cacheModelPath, { encoding: "utf-8" }),
      {}
    );
  }

  streamingEnabled() {
    return "streamGetChatCompletion" in this;
  }

  static promptWindowLimit(modelName) {
    const cacheModelPath = path.resolve(cacheFolder, "models.json");
    const availableModels = fs.existsSync(cacheModelPath)
      ? safeJsonParse(
          fs.readFileSync(cacheModelPath, { encoding: "utf-8" }),
          {}
        )
      : {};
    return availableModels[modelName]?.maxLength || 4096;
  }

  promptWindowLimit() {
    const availableModels = this.models();
    return availableModels[this.model]?.maxLength || 4096;
  }

  async isValidChatCompletionModel(model = "") {
    await this.#syncModels();
    const availableModels = this.models();
    return availableModels.hasOwnProperty(model);
  }

  /**
   * Generates appropriate content array for a message + attachments.
   * @param {{userPrompt:string, attachments: import("../../helpers").Attachment[]}}
   * @returns {string|object[]}
   */
  #generateContent({ userPrompt, attachments = [] }) {
    if (!attachments.length) {
      return userPrompt;
    }

    const content = [{ type: "text", text: userPrompt }];
    for (let attachment of attachments) {
      content.push({
        type: "image_url",
        image_url: {
          url: attachment.contentString,
          detail: "auto",
        },
      });
    }
    return content.flat();
  }
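  // Illustrative return value of #generateContent when one image attachment is present
  // (the prompt text and data URI are placeholder values):
  //   [
  //     { type: "text", text: "Describe this image" },
  //     { type: "image_url", image_url: { url: "data:image/png;base64,...", detail: "auto" } },
  //   ]
  // With no attachments it returns the plain userPrompt string.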

  /**
   * Parses and prepends reasoning from the response and returns the full text response.
   * @param {Object} response
   * @returns {string}
   */
  #parseReasoningFromResponse({ message }) {
    let textResponse = message?.content;
    if (!!message?.reasoning && message.reasoning.trim().length > 0)
      textResponse = `<think>${message.reasoning}</think>${textResponse}`;
    return textResponse;
  }
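  // Illustrative transformation by #parseReasoningFromResponse (example strings only):
  //   { content: "The answer is 4.", reasoning: "2 + 2 = 4" }
  //     -> "<think>2 + 2 = 4</think>The answer is 4."
  // When no reasoning is present, the content string is returned unchanged.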

  constructPrompt({
    systemPrompt = "",
    contextTexts = [],
    chatHistory = [],
    userPrompt = "",
    attachments = [],
  }) {
    const prompt = {
      role: "system",
      content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
    };
    return [
      prompt,
      ...formatChatHistory(chatHistory, this.#generateContent),
      {
        role: "user",
        content: this.#generateContent({ userPrompt, attachments }),
      },
    ];
  }
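  // Illustrative shape of the array constructPrompt returns (message text is placeholder):
  //   [
  //     { role: "system", content: "You are a helpful assistant.\nContext:\n[CONTEXT 0]:..." },
  //     ...prior chat history formatted by formatChatHistory,
  //     { role: "user", content: "Latest user question" },
  //   ]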

  async getChatCompletion(messages = null, { temperature = 0.7 }) {
    if (!(await this.isValidChatCompletionModel(this.model)))
      throw new Error(
        `OpenRouter chat: ${this.model} is not valid for chat completion!`
      );

    const result = await LLMPerformanceMonitor.measureAsyncFunction(
      this.openai.chat.completions
        .create({
          model: this.model,
          messages,
          temperature,
          // This is an OpenRouter specific option that allows us to get the reasoning text
          // before the token text.
          include_reasoning: true,
        })
        .catch((e) => {
          throw new Error(e.message);
        })
    );

    if (
      !result?.output?.hasOwnProperty("choices") ||
      result?.output?.choices?.length === 0
    )
      throw new Error(
        `Invalid response body returned from OpenRouter: ${result.output?.error?.message || "Unknown error"} ${result.output?.error?.code || "Unknown code"}`
      );

    return {
      textResponse: this.#parseReasoningFromResponse(result.output.choices[0]),
      metrics: {
        prompt_tokens: result.output.usage.prompt_tokens || 0,
        completion_tokens: result.output.usage.completion_tokens || 0,
        total_tokens: result.output.usage.total_tokens || 0,
        outputTps: result.output.usage.completion_tokens / result.duration,
        duration: result.duration,
      },
    };
  }
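  // Illustrative shape of the object getChatCompletion resolves to when the model
  // returned reasoning (all numbers and strings are example values):
  //   {
  //     textResponse: "<think>...reasoning...</think>The final answer...",
  //     metrics: { prompt_tokens: 512, completion_tokens: 128, total_tokens: 640, outputTps: 40, duration: 3.2 },
  //   }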

  async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
    if (!(await this.isValidChatCompletionModel(this.model)))
      throw new Error(
        `OpenRouter chat: ${this.model} is not valid for chat completion!`
      );

    const measuredStreamRequest = await LLMPerformanceMonitor.measureStream(
      this.openai.chat.completions.create({
        model: this.model,
        stream: true,
        messages,
        temperature,
        // This is an OpenRouter specific option that allows us to get the reasoning text
        // before the token text.
        include_reasoning: true,
      }),
      messages
      // We have to manually count the tokens.
      // OpenRouter has a ton of providers and they all can return slightly differently:
      // some return chunk.usage on STOP, some do it after stop - it's inconsistent.
      // So it is possible reported metrics are inaccurate since we cannot reliably
      // catch the metrics before resolving the stream - so we just pretend this functionality
      // is not available.
    );

    return measuredStreamRequest;
  }

  /**
   * Handles the default stream response for a chat.
   * @param {import("express").Response} response
   * @param {import('../../helpers/chat/LLMPerformanceMonitor').MonitoredStream} stream
   * @param {Object} responseProps
   * @returns {Promise<string>}
   */
  handleStream(response, stream, responseProps) {
    const timeoutThresholdMs = this.timeout;
    const { uuid = uuidv4(), sources = [] } = responseProps;

    return new Promise(async (resolve) => {
      let fullText = "";
      let reasoningText = "";
      let lastChunkTime = null; // null when first token is still not received.

      // Establish listener to early-abort a streaming response
      // in case things go sideways or the user does not like the response.
      // We preserve the generated text but continue as if chat was completed
      // to preserve previously generated content.
      const handleAbort = () => {
        stream?.endMeasurement({
          completion_tokens: LLMPerformanceMonitor.countTokens(fullText),
        });
        clientAbortedHandler(resolve, fullText);
      };
      response.on("close", handleAbort);

      // NOTICE: Not all OpenRouter models will return a stop reason,
      // which keeps the connection open so the model never finalizes the stream
      // like the traditional OpenAI response schema does. So, in case the response stream
      // never reaches a formal close state, we maintain an interval timer: if we go >= timeoutThresholdMs
      // with no new chunks, we kill the stream and assume it to be complete. OpenRouter is quite fast,
      // so this threshold should permit most responses, but we can adjust `timeoutThresholdMs` if
      // we find it is too aggressive.
      const timeoutCheck = setInterval(() => {
        if (lastChunkTime === null) return;

        const now = Number(new Date());
        const diffMs = now - lastChunkTime;
        if (diffMs >= timeoutThresholdMs) {
          console.log(
            `OpenRouter stream did not self-close and has been stale for >${timeoutThresholdMs}ms. Closing response stream.`
          );
          writeResponseChunk(response, {
            uuid,
            sources,
            type: "textResponseChunk",
            textResponse: "",
            close: true,
            error: false,
          });
          clearInterval(timeoutCheck);
          response.removeListener("close", handleAbort);
          stream?.endMeasurement({
            completion_tokens: LLMPerformanceMonitor.countTokens(fullText),
          });
          resolve(fullText);
        }
      }, 500);

      try {
        for await (const chunk of stream) {
          const message = chunk?.choices?.[0];
          const token = message?.delta?.content;
          const reasoningToken = message?.delta?.reasoning;
          lastChunkTime = Number(new Date());

          // Reasoning models will always return the reasoning text before the token text.
          // It can be null or ''.
          if (reasoningToken) {
            // If the accumulated reasoning text is still empty (''), we need to initialize it
            // and send the first chunk of reasoning text.
            if (reasoningText.length === 0) {
              writeResponseChunk(response, {
                uuid,
                sources: [],
                type: "textResponseChunk",
                textResponse: `<think>${reasoningToken}`,
                close: false,
                error: false,
              });
              reasoningText += `<think>${reasoningToken}`;
              continue;
            } else {
              // If the reasoning text is not empty, we append the new reasoning token
              // to the existing reasoning text.
              writeResponseChunk(response, {
                uuid,
                sources: [],
                type: "textResponseChunk",
                textResponse: reasoningToken,
                close: false,
                error: false,
              });
              reasoningText += reasoningToken;
            }
          }

          // If the reasoning text is not empty, but the reasoning token is empty
          // and the token text is not empty, we need to close the reasoning text and begin sending the token text.
          if (!!reasoningText && !reasoningToken && token) {
            writeResponseChunk(response, {
              uuid,
              sources: [],
              type: "textResponseChunk",
              textResponse: `</think>`,
              close: false,
              error: false,
            });
            fullText += `${reasoningText}</think>`;
            reasoningText = "";
          }

          if (token) {
            fullText += token;
            writeResponseChunk(response, {
              uuid,
              sources: [],
              type: "textResponseChunk",
              textResponse: token,
              close: false,
              error: false,
            });
          }

          if (message.finish_reason !== null) {
            writeResponseChunk(response, {
              uuid,
              sources,
              type: "textResponseChunk",
              textResponse: "",
              close: true,
              error: false,
            });
            response.removeListener("close", handleAbort);
            clearInterval(timeoutCheck);
            stream?.endMeasurement({
              completion_tokens: LLMPerformanceMonitor.countTokens(fullText),
            });
            resolve(fullText);
          }
        }
      } catch (e) {
        writeResponseChunk(response, {
          uuid,
          sources,
          type: "abort",
          textResponse: null,
          close: true,
          error: e.message,
        });
        response.removeListener("close", handleAbort);
        clearInterval(timeoutCheck);
        stream?.endMeasurement({
          completion_tokens: LLMPerformanceMonitor.countTokens(fullText),
        });
        resolve(fullText);
      }
    });
  }
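  // Illustrative stream chunk shapes as read by handleStream above (field values are examples;
  // actual payloads vary by the upstream provider OpenRouter routes to):
  //   { choices: [{ delta: { reasoning: "Thinking about the question..." }, finish_reason: null }] }
  //   { choices: [{ delta: { content: "Here is the answer" }, finish_reason: null }] }
  //   { choices: [{ delta: {}, finish_reason: "stop" }] }   // closes the response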

  // Simple wrapper for dynamic embedder & normalize interface for all LLM implementations
  async embedTextInput(textInput) {
    return await this.embedder.embedTextInput(textInput);
  }
  async embedChunks(textChunks = []) {
    return await this.embedder.embedChunks(textChunks);
  }

  async compressMessages(promptArgs = {}, rawHistory = []) {
    const { messageArrayCompressor } = require("../../helpers/chat");
    const messageArray = this.constructPrompt(promptArgs);
    return await messageArrayCompressor(this, messageArray, rawHistory);
  }
}

async function fetchOpenRouterModels() {
  return await fetch(`https://openrouter.ai/api/v1/models`, {
    method: "GET",
    headers: {
      "Content-Type": "application/json",
    },
  })
    .then((res) => res.json())
    .then(({ data = [] }) => {
      const models = {};
      data.forEach((model) => {
        models[model.id] = {
          id: model.id,
          name: model.name,
          organization:
            model.id.split("/")[0].charAt(0).toUpperCase() +
            model.id.split("/")[0].slice(1),
          maxLength: model.context_length,
        };
      });

      // Cache all response information
      if (!fs.existsSync(cacheFolder))
        fs.mkdirSync(cacheFolder, { recursive: true });
      fs.writeFileSync(
        path.resolve(cacheFolder, "models.json"),
        JSON.stringify(models),
        {
          encoding: "utf-8",
        }
      );
      fs.writeFileSync(
        path.resolve(cacheFolder, ".cached_at"),
        String(Number(new Date())),
        {
          encoding: "utf-8",
        }
      );

      return models;
    })
    .catch((e) => {
      console.error(e);
      return {};
    });
}
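// Illustrative shape of one cached models.json entry built above (id, name, and
// maxLength are example values, not real API output):
//   "openai/gpt-4o": {
//     id: "openai/gpt-4o",
//     name: "GPT-4o",
//     organization: "Openai",
//     maxLength: 128000,
//   }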

module.exports = {
  OpenRouterLLM,
  fetchOpenRouterModels,
};
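// Minimal usage sketch (the require path, prompt text, and call site are illustrative
// assumptions; it only presumes OPENROUTER_API_KEY is set and runs inside an async function):
//
//   const { OpenRouterLLM } = require("./utils/AiProviders/openRouter"); // path depends on your project layout
//   const llm = new OpenRouterLLM(null, "openrouter/auto");
//   const messages = llm.constructPrompt({
//     systemPrompt: "You are a helpful assistant.",
//     contextTexts: ["Some retrieved context"],
//     chatHistory: [],
//     userPrompt: "Summarize the context.",
//   });
//   const { textResponse, metrics } = await llm.getChatCompletion(messages, {
//     temperature: llm.defaultTemp,
//   });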