Mirror of https://github.com/Mintplex-Labs/anything-llm.git (synced 2025-04-17 18:18:11 +00:00)
add support for mistral api (#610)
* add support for mistral api
* update docs to show support for Mistral
* add default temp to all providers, suggest different results per provider

Co-authored-by: timothycarambat <rambat1010@gmail.com>
parent 90df37582b
commit c2c8fe9756
25 changed files with 412 additions and 22 deletions
@@ -71,6 +71,7 @@ Some cool features of AnythingLLM
 - [LM Studio (all models)](https://lmstudio.ai)
 - [LocalAi (all models)](https://localai.io/)
 - [Together AI (chat models)](https://www.together.ai/)
+- [Mistral](https://mistral.ai/)

 **Supported Embedding models:**
@@ -44,6 +44,10 @@ GID='1000'
 # TOGETHER_AI_API_KEY='my-together-ai-key'
 # TOGETHER_AI_MODEL_PREF='mistralai/Mixtral-8x7B-Instruct-v0.1'

+# LLM_PROVIDER='mistral'
+# MISTRAL_API_KEY='example-mistral-ai-api-key'
+# MISTRAL_MODEL_PREF='mistral-tiny'
+
 ###########################################
 ######## Embedding API SElECTION ##########
 ###########################################
frontend/src/components/LLMSelection/MistralOptions/index.jsx (new file, 103 lines)
@@ -0,0 +1,103 @@
import { useState, useEffect } from "react";
import System from "@/models/system";

export default function MistralOptions({ settings }) {
  const [inputValue, setInputValue] = useState(settings?.MistralApiKey);
  const [mistralKey, setMistralKey] = useState(settings?.MistralApiKey);

  return (
    <div className="flex gap-x-4">
      <div className="flex flex-col w-60">
        <label className="text-white text-sm font-semibold block mb-4">
          Mistral API Key
        </label>
        <input
          type="password"
          name="MistralApiKey"
          className="bg-zinc-900 text-white placeholder-white placeholder-opacity-60 text-sm rounded-lg focus:border-white block w-full p-2.5"
          placeholder="Mistral API Key"
          defaultValue={settings?.MistralApiKey ? "*".repeat(20) : ""}
          required={true}
          autoComplete="off"
          spellCheck={false}
          onChange={(e) => setInputValue(e.target.value)}
          onBlur={() => setMistralKey(inputValue)}
        />
      </div>
      <MistralModelSelection settings={settings} apiKey={mistralKey} />
    </div>
  );
}

function MistralModelSelection({ apiKey, settings }) {
  const [customModels, setCustomModels] = useState([]);
  const [loading, setLoading] = useState(true);

  useEffect(() => {
    async function findCustomModels() {
      if (!apiKey) {
        setCustomModels([]);
        setLoading(false);
        return;
      }
      setLoading(true);
      const { models } = await System.customModels(
        "mistral",
        typeof apiKey === "boolean" ? null : apiKey
      );
      setCustomModels(models || []);
      setLoading(false);
    }
    findCustomModels();
  }, [apiKey]);

  if (loading || customModels.length == 0) {
    return (
      <div className="flex flex-col w-60">
        <label className="text-white text-sm font-semibold block mb-4">
          Chat Model Selection
        </label>
        <select
          name="MistralModelPref"
          disabled={true}
          className="bg-zinc-900 border border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
        >
          <option disabled={true} selected={true}>
            {!!apiKey
              ? "-- loading available models --"
              : "-- waiting for API key --"}
          </option>
        </select>
      </div>
    );
  }

  return (
    <div className="flex flex-col w-60">
      <label className="text-white text-sm font-semibold block mb-4">
        Chat Model Selection
      </label>
      <select
        name="MistralModelPref"
        required={true}
        className="bg-zinc-900 border border-gray-500 text-white text-sm rounded-lg block w-full p-2.5"
      >
        {customModels.length > 0 && (
          <optgroup label="Available Mistral Models">
            {customModels.map((model) => {
              return (
                <option
                  key={model.id}
                  value={model.id}
                  selected={settings?.MistralModelPref === model.id}
                >
                  {model.id}
                </option>
              );
            })}
          </optgroup>
        )}
      </select>
    </div>
  );
}
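For context on the dropdown above: `System.customModels("mistral", apiKey)` calls the server-side `getCustomModels` helper (added later in this diff) and resolves to an object whose `models` array drives the option list. A rough sketch of the shape the component relies on; the model ids below are illustrative, not pulled from the API:

// Hypothetical response shape consumed by MistralModelSelection.
// Only the `id` field of each entry is read when rendering options.
const exampleResponse = {
  models: [{ id: "mistral-tiny" }, { id: "mistral-small" }, { id: "mistral-medium" }],
  error: null,
};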
@@ -27,11 +27,21 @@ function castToType(key, value) {
   return definitions[key].cast(value);
 }

+function recommendedSettings(provider = null) {
+  switch (provider) {
+    case "mistral":
+      return { temp: 0 };
+    default:
+      return { temp: 0.7 };
+  }
+}
+
 export default function WorkspaceSettings({ active, workspace, settings }) {
   const { slug } = useParams();
   const formEl = useRef(null);
   const [saving, setSaving] = useState(false);
   const [hasChanges, setHasChanges] = useState(false);
+  const defaults = recommendedSettings(settings?.LLMProvider);

   const handleUpdate = async (e) => {
     setSaving(true);
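To illustrate the helper added above, a direct reading of its switch statement as a quick sketch:

// Mistral is the only provider with a non-default recommendation so far.
recommendedSettings("mistral"); // => { temp: 0 }
recommendedSettings("openai");  // => { temp: 0.7 }
recommendedSettings();          // => { temp: 0.7 }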
@@ -143,20 +153,20 @@ export default function WorkspaceSettings({ active, workspace, settings }) {
                 This setting controls how "random" or dynamic your chat
                 responses will be.
                 <br />
-                The higher the number (2.0 maximum) the more random and
+                The higher the number (1.0 maximum) the more random and
                 incoherent.
                 <br />
-                <i>Recommended: 0.7</i>
+                <i>Recommended: {defaults.temp}</i>
               </p>
             </div>
             <input
               name="openAiTemp"
               type="number"
               min={0.0}
-              max={2.0}
+              max={1.0}
               step={0.1}
               onWheel={(e) => e.target.blur()}
-              defaultValue={workspace?.openAiTemp ?? 0.7}
+              defaultValue={workspace?.openAiTemp ?? defaults.temp}
               className="bg-zinc-900 text-white text-sm rounded-lg focus:ring-blue-500 focus:border-blue-500 block w-full p-2.5"
               placeholder="0.7"
               required={true}
frontend/src/media/llmprovider/mistral.jpeg (new binary file, 4.4 KiB, not shown)
@@ -12,6 +12,7 @@ import OllamaLogo from "@/media/llmprovider/ollama.png";
 import LMStudioLogo from "@/media/llmprovider/lmstudio.png";
 import LocalAiLogo from "@/media/llmprovider/localai.png";
 import TogetherAILogo from "@/media/llmprovider/togetherai.png";
+import MistralLogo from "@/media/llmprovider/mistral.jpeg";
 import PreLoader from "@/components/Preloader";
 import OpenAiOptions from "@/components/LLMSelection/OpenAiOptions";
 import AzureAiOptions from "@/components/LLMSelection/AzureAiOptions";
@@ -21,9 +22,10 @@ import LocalAiOptions from "@/components/LLMSelection/LocalAiOptions";
 import NativeLLMOptions from "@/components/LLMSelection/NativeLLMOptions";
 import GeminiLLMOptions from "@/components/LLMSelection/GeminiLLMOptions";
 import OllamaLLMOptions from "@/components/LLMSelection/OllamaLLMOptions";
+import TogetherAiOptions from "@/components/LLMSelection/TogetherAiOptions";
+import MistralOptions from "@/components/LLMSelection/MistralOptions";
 import LLMItem from "@/components/LLMSelection/LLMItem";
 import { MagnifyingGlass } from "@phosphor-icons/react";
-import TogetherAiOptions from "@/components/LLMSelection/TogetherAiOptions";

 export default function GeneralLLMPreference() {
   const [saving, setSaving] = useState(false);
@@ -134,6 +136,13 @@ export default function GeneralLLMPreference() {
       options: <TogetherAiOptions settings={settings} />,
       description: "Run open source models from Together AI.",
     },
+    {
+      name: "Mistral",
+      value: "mistral",
+      logo: MistralLogo,
+      options: <MistralOptions settings={settings} />,
+      description: "Run open source models from Mistral AI.",
+    },
     {
       name: "Native",
       value: "native",
@@ -9,6 +9,7 @@ import OllamaLogo from "@/media/llmprovider/ollama.png";
 import TogetherAILogo from "@/media/llmprovider/togetherai.png";
 import LMStudioLogo from "@/media/llmprovider/lmstudio.png";
 import LocalAiLogo from "@/media/llmprovider/localai.png";
+import MistralLogo from "@/media/llmprovider/mistral.jpeg";
 import ChromaLogo from "@/media/vectordbs/chroma.png";
 import PineconeLogo from "@/media/vectordbs/pinecone.png";
 import LanceDbLogo from "@/media/vectordbs/lancedb.png";
@@ -91,6 +92,13 @@ const LLM_SELECTION_PRIVACY = {
     ],
     logo: TogetherAILogo,
   },
+  mistral: {
+    name: "Mistral",
+    description: [
+      "Your prompts and document text used in response creation are visible to Mistral",
+    ],
+    logo: MistralLogo,
+  },
 };

 const VECTOR_DB_PRIVACY = {
@@ -9,6 +9,7 @@ import LMStudioLogo from "@/media/llmprovider/lmstudio.png";
 import LocalAiLogo from "@/media/llmprovider/localai.png";
 import TogetherAILogo from "@/media/llmprovider/togetherai.png";
 import AnythingLLMIcon from "@/media/logo/anything-llm-icon.png";
+import MistralLogo from "@/media/llmprovider/mistral.jpeg";
 import OpenAiOptions from "@/components/LLMSelection/OpenAiOptions";
 import AzureAiOptions from "@/components/LLMSelection/AzureAiOptions";
 import AnthropicAiOptions from "@/components/LLMSelection/AnthropicAiOptions";
@@ -17,6 +18,7 @@ import LocalAiOptions from "@/components/LLMSelection/LocalAiOptions";
 import NativeLLMOptions from "@/components/LLMSelection/NativeLLMOptions";
 import GeminiLLMOptions from "@/components/LLMSelection/GeminiLLMOptions";
 import OllamaLLMOptions from "@/components/LLMSelection/OllamaLLMOptions";
+import MistralOptions from "@/components/LLMSelection/MistralOptions";
 import LLMItem from "@/components/LLMSelection/LLMItem";
 import System from "@/models/system";
 import paths from "@/utils/paths";
@@ -109,6 +111,13 @@ export default function LLMPreference({
       options: <TogetherAiOptions settings={settings} />,
       description: "Run open source models from Together AI.",
     },
+    {
+      name: "Mistral",
+      value: "mistral",
+      logo: MistralLogo,
+      options: <MistralOptions settings={settings} />,
+      description: "Run open source models from Mistral AI.",
+    },
     {
       name: "Native",
       value: "native",
@@ -41,6 +41,10 @@ JWT_SECRET="my-random-string-for-seeding" # Please generate random string at lea
 # TOGETHER_AI_API_KEY='my-together-ai-key'
 # TOGETHER_AI_MODEL_PREF='mistralai/Mixtral-8x7B-Instruct-v0.1'

+# LLM_PROVIDER='mistral'
+# MISTRAL_API_KEY='example-mistral-ai-api-key'
+# MISTRAL_MODEL_PREF='mistral-tiny'
+
 ###########################################
 ######## Embedding API SElECTION ##########
 ###########################################
@@ -159,6 +159,18 @@ const SystemSettings = {
           AzureOpenAiEmbeddingModelPref: process.env.EMBEDDING_MODEL_PREF,
         }
       : {}),
+    ...(llmProvider === "mistral"
+      ? {
+          MistralApiKey: !!process.env.MISTRAL_API_KEY,
+          MistralModelPref: process.env.MISTRAL_MODEL_PREF,
+
+          // For embedding credentials when mistral is selected.
+          OpenAiKey: !!process.env.OPEN_AI_KEY,
+          AzureOpenAiEndpoint: process.env.AZURE_OPENAI_ENDPOINT,
+          AzureOpenAiKey: !!process.env.AZURE_OPENAI_KEY,
+          AzureOpenAiEmbeddingModelPref: process.env.EMBEDDING_MODEL_PREF,
+        }
+      : {}),
     ...(llmProvider === "native"
       ? {
           NativeLLMModelPref: process.env.NATIVE_LLM_MODEL_PREF,
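Note that the block above exposes the API key only as a presence flag (`!!process.env.MISTRAL_API_KEY`) while the model preference is passed through verbatim, and the OpenAI/Azure embedding credentials ride along because Mistral provides no embedder here. A hedged sketch of the mistral slice of the settings payload when `LLM_PROVIDER='mistral'`; all values are illustrative and depend on the instance environment:

// Illustrative only -- shape follows the spread above, values are made up.
const mistralSettingsSlice = {
  MistralApiKey: true,              // boolean flag, never the raw key
  MistralModelPref: "mistral-tiny",
  OpenAiKey: false,                 // embedding fallback credentials
  AzureOpenAiEndpoint: undefined,
  AzureOpenAiKey: false,
  AzureOpenAiEmbeddingModelPref: undefined,
};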
@@ -26,6 +26,7 @@ class AnthropicLLM {
       );
     this.embedder = embedder;
     this.answerKey = v4().split("-")[0];
+    this.defaultTemp = 0.7;
   }

   streamingEnabled() {
@@ -25,6 +25,7 @@ class AzureOpenAiLLM {
         "No embedding provider defined for AzureOpenAiLLM - falling back to AzureOpenAiEmbedder for embedding!"
       );
     this.embedder = !embedder ? new AzureOpenAiEmbedder() : embedder;
+    this.defaultTemp = 0.7;
   }

   #appendContext(contextTexts = []) {
@@ -93,7 +94,7 @@ class AzureOpenAiLLM {
     );
     const textResponse = await this.openai
       .getChatCompletions(this.model, messages, {
-        temperature: Number(workspace?.openAiTemp ?? 0.7),
+        temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
         n: 1,
       })
       .then((res) => {
@@ -130,7 +131,7 @@ class AzureOpenAiLLM {
       this.model,
       messages,
       {
-        temperature: Number(workspace?.openAiTemp ?? 0.7),
+        temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
         n: 1,
       }
     );
@@ -22,6 +22,7 @@ class GeminiLLM {
         "INVALID GEMINI LLM SETUP. No embedding engine has been set. Go to instance settings and set up an embedding interface to use Gemini as your LLM."
       );
     this.embedder = embedder;
+    this.defaultTemp = 0.7; // not used for Gemini
   }

   #appendContext(contextTexts = []) {
@@ -25,6 +25,7 @@ class LMStudioLLM {
         "INVALID LM STUDIO SETUP. No embedding engine has been set. Go to instance settings and set up an embedding interface to use LMStudio as your LLM."
       );
     this.embedder = embedder;
+    this.defaultTemp = 0.7;
   }

   #appendContext(contextTexts = []) {
@@ -85,7 +86,7 @@ class LMStudioLLM {
     const textResponse = await this.lmstudio
       .createChatCompletion({
         model: this.model,
-        temperature: Number(workspace?.openAiTemp ?? 0.7),
+        temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
         n: 1,
         messages: await this.compressMessages(
           {
@@ -122,7 +123,7 @@ class LMStudioLLM {
     const streamRequest = await this.lmstudio.createChatCompletion(
       {
         model: this.model,
-        temperature: Number(workspace?.openAiTemp ?? 0.7),
+        temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
         n: 1,
         stream: true,
         messages: await this.compressMessages(
@@ -27,6 +27,7 @@ class LocalAiLLM {
         "INVALID LOCAL AI SETUP. No embedding engine has been set. Go to instance settings and set up an embedding interface to use LocalAI as your LLM."
       );
     this.embedder = embedder;
+    this.defaultTemp = 0.7;
   }

   #appendContext(contextTexts = []) {
@@ -85,7 +86,7 @@ class LocalAiLLM {
     const textResponse = await this.openai
       .createChatCompletion({
         model: this.model,
-        temperature: Number(workspace?.openAiTemp ?? 0.7),
+        temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
         n: 1,
         messages: await this.compressMessages(
           {
@@ -123,7 +124,7 @@ class LocalAiLLM {
       {
         model: this.model,
         stream: true,
-        temperature: Number(workspace?.openAiTemp ?? 0.7),
+        temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
         n: 1,
         messages: await this.compressMessages(
           {
server/utils/AiProviders/mistral/index.js (new file, 184 lines)
@@ -0,0 +1,184 @@
const { chatPrompt } = require("../../chats");

class MistralLLM {
  constructor(embedder = null, modelPreference = null) {
    const { Configuration, OpenAIApi } = require("openai");
    if (!process.env.MISTRAL_API_KEY)
      throw new Error("No Mistral API key was set.");

    const config = new Configuration({
      basePath: "https://api.mistral.ai/v1",
      apiKey: process.env.MISTRAL_API_KEY,
    });
    this.openai = new OpenAIApi(config);
    this.model =
      modelPreference || process.env.MISTRAL_MODEL_PREF || "mistral-tiny";
    this.limits = {
      history: this.promptWindowLimit() * 0.15,
      system: this.promptWindowLimit() * 0.15,
      user: this.promptWindowLimit() * 0.7,
    };

    if (!embedder)
      console.warn(
        "No embedding provider defined for MistralLLM - falling back to OpenAiEmbedder for embedding!"
      );
    this.embedder = embedder;
    this.defaultTemp = 0.0;
  }

  #appendContext(contextTexts = []) {
    if (!contextTexts || !contextTexts.length) return "";
    return (
      "\nContext:\n" +
      contextTexts
        .map((text, i) => {
          return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
        })
        .join("")
    );
  }

  streamingEnabled() {
    return "streamChat" in this && "streamGetChatCompletion" in this;
  }

  promptWindowLimit() {
    return 32000;
  }

  async isValidChatCompletionModel(modelName = "") {
    return true;
  }

  constructPrompt({
    systemPrompt = "",
    contextTexts = [],
    chatHistory = [],
    userPrompt = "",
  }) {
    const prompt = {
      role: "system",
      content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
    };
    return [prompt, ...chatHistory, { role: "user", content: userPrompt }];
  }

  async isSafe(_ = "") {
    return { safe: true, reasons: [] };
  }

  async sendChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
    if (!(await this.isValidChatCompletionModel(this.model)))
      throw new Error(
        `Mistral chat: ${this.model} is not valid for chat completion!`
      );

    const textResponse = await this.openai
      .createChatCompletion({
        model: this.model,
        temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
        messages: await this.compressMessages(
          {
            systemPrompt: chatPrompt(workspace),
            userPrompt: prompt,
            chatHistory,
          },
          rawHistory
        ),
      })
      .then((json) => {
        const res = json.data;
        if (!res.hasOwnProperty("choices"))
          throw new Error("Mistral chat: No results!");
        if (res.choices.length === 0)
          throw new Error("Mistral chat: No results length!");
        return res.choices[0].message.content;
      })
      .catch((error) => {
        throw new Error(
          `Mistral::createChatCompletion failed with: ${error.message}`
        );
      });

    return textResponse;
  }

  async streamChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
    if (!(await this.isValidChatCompletionModel(this.model)))
      throw new Error(
        `Mistral chat: ${this.model} is not valid for chat completion!`
      );

    const streamRequest = await this.openai.createChatCompletion(
      {
        model: this.model,
        stream: true,
        temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
        messages: await this.compressMessages(
          {
            systemPrompt: chatPrompt(workspace),
            userPrompt: prompt,
            chatHistory,
          },
          rawHistory
        ),
      },
      { responseType: "stream" }
    );

    return streamRequest;
  }

  async getChatCompletion(messages = null, { temperature = 0.7 }) {
    if (!(await this.isValidChatCompletionModel(this.model)))
      throw new Error(
        `Mistral chat: ${this.model} is not valid for chat completion!`
      );

    const { data } = await this.openai.createChatCompletion({
      model: this.model,
      messages,
      temperature,
    });

    if (!data.hasOwnProperty("choices")) return null;
    return data.choices[0].message.content;
  }

  async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
    if (!(await this.isValidChatCompletionModel(this.model)))
      throw new Error(
        `Mistral chat: ${this.model} is not valid for chat completion!`
      );

    const streamRequest = await this.openai.createChatCompletion(
      {
        model: this.model,
        stream: true,
        messages,
        temperature,
      },
      { responseType: "stream" }
    );
    return streamRequest;
  }

  // Simple wrapper for dynamic embedder & normalize interface for all LLM implementations
  async embedTextInput(textInput) {
    return await this.embedder.embedTextInput(textInput);
  }
  async embedChunks(textChunks = []) {
    return await this.embedder.embedChunks(textChunks);
  }

  async compressMessages(promptArgs = {}, rawHistory = []) {
    const { messageArrayCompressor } = require("../../helpers/chat");
    const messageArray = this.constructPrompt(promptArgs);
    return await messageArrayCompressor(this, messageArray, rawHistory);
  }
}

module.exports = {
  MistralLLM,
};
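A minimal sketch of exercising the new provider directly, assuming `MISTRAL_API_KEY` is set and an embedder instance is supplied the way `getLLMProvider` does elsewhere in this diff; the require path is relative to the repo root and the message contents are placeholders:

const { MistralLLM } = require("./server/utils/AiProviders/mistral");

async function demo(embedder) {
  // embedder: whatever embedding engine the instance is configured with.
  const llm = new MistralLLM(embedder, "mistral-tiny");
  const reply = await llm.getChatCompletion(
    [
      { role: "system", content: "You are a helpful assistant." },
      { role: "user", content: "Say hello." },
    ],
    { temperature: llm.defaultTemp } // 0.0 per the constructor above
  );
  console.log(reply);
}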
@@ -29,6 +29,7 @@ class NativeLLM {

     // Make directory when it does not exist in existing installations
     if (!fs.existsSync(this.cacheDir)) fs.mkdirSync(this.cacheDir);
+    this.defaultTemp = 0.7;
   }

   async #initializeLlamaModel(temperature = 0.7) {
@@ -132,7 +133,7 @@ class NativeLLM {
     );

     const model = await this.#llamaClient({
-      temperature: Number(workspace?.openAiTemp ?? 0.7),
+      temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
     });
     const response = await model.call(messages);
     return response.content;
@@ -145,7 +146,7 @@ class NativeLLM {

   async streamChat(chatHistory = [], prompt, workspace = {}, rawHistory = []) {
     const model = await this.#llamaClient({
-      temperature: Number(workspace?.openAiTemp ?? 0.7),
+      temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
     });
     const messages = await this.compressMessages(
       {
@@ -20,6 +20,7 @@ class OllamaAILLM {
         "INVALID OLLAMA SETUP. No embedding engine has been set. Go to instance settings and set up an embedding interface to use Ollama as your LLM."
       );
     this.embedder = embedder;
+    this.defaultTemp = 0.7;
   }

   #ollamaClient({ temperature = 0.07 }) {
@@ -113,7 +114,7 @@ class OllamaAILLM {
     );

     const model = this.#ollamaClient({
-      temperature: Number(workspace?.openAiTemp ?? 0.7),
+      temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
     });
     const textResponse = await model
       .pipe(new StringOutputParser())
@@ -136,7 +137,7 @@ class OllamaAILLM {
     );

     const model = this.#ollamaClient({
-      temperature: Number(workspace?.openAiTemp ?? 0.7),
+      temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
     });
     const stream = await model
       .pipe(new StringOutputParser())
@@ -23,6 +23,7 @@ class OpenAiLLM {
         "No embedding provider defined for OpenAiLLM - falling back to OpenAiEmbedder for embedding!"
       );
     this.embedder = !embedder ? new OpenAiEmbedder() : embedder;
+    this.defaultTemp = 0.7;
   }

   #appendContext(contextTexts = []) {
@@ -127,7 +128,7 @@ class OpenAiLLM {
     const textResponse = await this.openai
       .createChatCompletion({
         model: this.model,
-        temperature: Number(workspace?.openAiTemp ?? 0.7),
+        temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
         n: 1,
         messages: await this.compressMessages(
           {
@@ -165,7 +166,7 @@ class OpenAiLLM {
       {
         model: this.model,
         stream: true,
-        temperature: Number(workspace?.openAiTemp ?? 0.7),
+        temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
         n: 1,
         messages: await this.compressMessages(
           {
@@ -28,6 +28,7 @@ class TogetherAiLLM {
         "INVALID TOGETHER AI SETUP. No embedding engine has been set. Go to instance settings and set up an embedding interface to use Together AI as your LLM."
       );
     this.embedder = embedder;
+    this.defaultTemp = 0.7;
   }

   #appendContext(contextTexts = []) {
@@ -89,7 +90,7 @@ class TogetherAiLLM {
     const textResponse = await this.openai
       .createChatCompletion({
         model: this.model,
-        temperature: Number(workspace?.openAiTemp ?? 0.7),
+        temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
         n: 1,
         messages: await this.compressMessages(
           {
@@ -127,7 +128,7 @@ class TogetherAiLLM {
       {
         model: this.model,
         stream: true,
-        temperature: Number(workspace?.openAiTemp ?? 0.7),
+        temperature: Number(workspace?.openAiTemp ?? this.defaultTemp),
         n: 1,
         messages: await this.compressMessages(
           {
@@ -171,7 +171,7 @@ async function chatWithWorkspace(

   // Send the text completion.
   const textResponse = await LLMConnector.getChatCompletion(messages, {
-    temperature: workspace?.openAiTemp ?? 0.7,
+    temperature: workspace?.openAiTemp ?? LLMConnector.defaultTemp,
   });

   if (!textResponse) {
@@ -141,7 +141,7 @@ async function streamChatWithWorkspace(
       `\x1b[31m[STREAMING DISABLED]\x1b[0m Streaming is not available for ${LLMConnector.constructor.name}. Will use regular chat method.`
     );
     completeText = await LLMConnector.getChatCompletion(messages, {
-      temperature: workspace?.openAiTemp ?? 0.7,
+      temperature: workspace?.openAiTemp ?? LLMConnector.defaultTemp,
     });
     writeResponseChunk(response, {
       uuid,
@@ -153,7 +153,7 @@ async function streamChatWithWorkspace(
     });
   } else {
     const stream = await LLMConnector.streamGetChatCompletion(messages, {
-      temperature: workspace?.openAiTemp ?? 0.7,
+      temperature: workspace?.openAiTemp ?? LLMConnector.defaultTemp,
     });
     completeText = await handleStreamResponses(response, stream, {
       uuid,
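Both chat paths above change in the same way: the hard-coded `0.7` fallback gives way to the connector's own `defaultTemp`. The resulting precedence, as a one-line sketch:

// Workspace-level temperature wins; otherwise use the provider default
// (0.7 for most providers, 0 for Mistral per this commit).
const temperature = workspace?.openAiTemp ?? LLMConnector.defaultTemp;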
@@ -5,6 +5,7 @@ const SUPPORT_CUSTOM_MODELS = [
   "ollama",
   "native-llm",
   "togetherai",
+  "mistral",
 ];

 async function getCustomModels(provider = "", apiKey = null, basePath = null) {
@@ -20,6 +21,8 @@ async function getCustomModels(provider = "", apiKey = null, basePath = null) {
       return await ollamaAIModels(basePath);
     case "togetherai":
       return await getTogetherAiModels();
+    case "mistral":
+      return await getMistralModels(apiKey);
     case "native-llm":
       return nativeLLMModels();
     default:
@@ -117,6 +120,26 @@ async function getTogetherAiModels() {
   return { models, error: null };
 }

+async function getMistralModels(apiKey = null) {
+  const { Configuration, OpenAIApi } = require("openai");
+  const config = new Configuration({
+    apiKey: apiKey || process.env.MISTRAL_API_KEY,
+    basePath: "https://api.mistral.ai/v1",
+  });
+  const openai = new OpenAIApi(config);
+  const models = await openai
+    .listModels()
+    .then((res) => res.data.data.filter((model) => !model.id.includes("embed")))
+    .catch((e) => {
+      console.error(`Mistral:listModels`, e.message);
+      return [];
+    });
+
+  // Api Key was successful so lets save it for future uses
+  if (models.length > 0 && !!apiKey) process.env.MISTRAL_API_KEY = apiKey;
+  return { models, error: null };
+}
+
 function nativeLLMModels() {
   const fs = require("fs");
   const path = require("path");
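A sketch of calling the new branch through `getCustomModels`: on failure the helper logs the error and returns an empty list, and a key that successfully lists models is persisted to `process.env.MISTRAL_API_KEY` as a side effect. The model ids shown are illustrative:

const { models } = await getCustomModels("mistral", "example-mistral-ai-api-key");
// models -> entries from Mistral's /v1/models with embedding models filtered out,
// e.g. [{ id: "mistral-tiny", ... }, { id: "mistral-small", ... }]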
@@ -52,6 +52,9 @@ function getLLMProvider(modelPreference = null) {
     case "togetherai":
       const { TogetherAiLLM } = require("../AiProviders/togetherAi");
       return new TogetherAiLLM(embedder, modelPreference);
+    case "mistral":
+      const { MistralLLM } = require("../AiProviders/mistral");
+      return new MistralLLM(embedder, modelPreference);
     case "native":
       const { NativeLLM } = require("../AiProviders/native");
       return new NativeLLM(embedder, modelPreference);
@@ -76,6 +79,7 @@ function getEmbeddingEngineSelection() {
       return new LocalAiEmbedder();
     case "native":
       const { NativeEmbedder } = require("../EmbeddingEngines/native");
+      console.log("\x1b[34m[INFO]\x1b[0m Using Native Embedder");
       return new NativeEmbedder();
     default:
       return null;
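A sketch of how the factory above gets used once the server `.env` from earlier in this diff is in place; provider selection is keyed off the `LLM_PROVIDER='mistral'` setting, and the explicit model argument below is illustrative:

// With no argument the model falls back to MISTRAL_MODEL_PREF, else "mistral-tiny".
const connector = getLLMProvider();                // -> MistralLLM instance
const override = getLLMProvider("mistral-medium"); // -> modelPreference overrides the env default
console.log(connector.defaultTemp);                // 0 for Mistral, 0.7 for the other providers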
@@ -95,6 +95,15 @@ const KEY_MAPPING = {
     checks: [nonZero],
   },

+  MistralApiKey: {
+    envKey: "MISTRAL_API_KEY",
+    checks: [isNotEmpty],
+  },
+  MistralModelPref: {
+    envKey: "MISTRAL_MODEL_PREF",
+    checks: [isNotEmpty],
+  },
+
   // Native LLM Settings
   NativeLLMModelPref: {
     envKey: "NATIVE_LLM_MODEL_PREF",
@@ -268,6 +277,7 @@ function supportedLLM(input = "") {
     "ollama",
     "native",
     "togetherai",
+    "mistral",
   ].includes(input);
   return validSelection ? null : `${input} is not a valid LLM provider.`;
 }